Example #1
/* ----------------------------------------------------------------
 *		ExecEndBitmapIndexScan
 * ----------------------------------------------------------------
 */
void
ExecEndBitmapIndexScan(BitmapIndexScanState *node)
{
	Relation	indexRelationDesc;
	IndexScanDesc indexScanDesc;
	IndexScanDesc odIndexScanDesc;

	/*
	 * extract information from the node
	 */
	indexRelationDesc = node->biss_RelationDesc;
	indexScanDesc = node->biss_ScanDesc;
	odIndexScanDesc = node->odbiss_ScanDesc;

	/*
	 * Free the exprcontext ... now dead code, see ExecFreeExprContext
	 */
#ifdef NOT_USED
	if (node->biss_RuntimeContext)
		FreeExprContext(node->biss_RuntimeContext);
#endif

	/*
	 * close the index relation
	 */
	index_endscan(indexScanDesc);
	if (odIndexScanDesc != NULL)
	{
		index_endscan(odIndexScanDesc);
		node->odbiss_ScanDesc = NULL;
	}
	index_close(indexRelationDesc);
}
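For orientation: the function above is only the teardown half of an index scan. Below is a minimal sketch of the whole open/scan/close lifecycle, assuming the older backend API used by the snippets on this page (five-argument index_beginscan and a HeapTuple-returning index_getnext, both changed in later PostgreSQL releases); exact header paths vary across the server versions represented here.

/*
 * Sketch only: the index-scan lifecycle whose teardown the example
 * above performs.  Pre-9.1 API assumed; not taken from any listed
 * project.
 */
#include "postgres.h"
#include "access/genam.h"
#include "access/heapam.h"

static void
scan_index_sketch(Oid heapOid, Oid indexOid, Snapshot snapshot)
{
	Relation	heapRel = heap_open(heapOid, AccessShareLock);
	Relation	indexRel = index_open(indexOid, AccessShareLock);
	IndexScanDesc scan;
	HeapTuple	tuple;

	/* no scan keys: walk the entire index */
	scan = index_beginscan(heapRel, indexRel, snapshot, 0, NULL);

	while ((tuple = index_getnext(scan, ForwardScanDirection)) != NULL)
	{
		/* process each tuple here */
	}

	index_endscan(scan);

	/* NoLock: the AccessShareLock is held until end of transaction */
	index_close(indexRel, NoLock);
	heap_close(heapRel, NoLock);
}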
Example #2
/*
 * _bitmap_close_lov_heapandindex() -- close the heap and the index.
 */
void
_bitmap_close_lov_heapandindex(Relation lovHeap, Relation lovIndex,
							   LOCKMODE lockMode)
{
	heap_close(lovHeap, lockMode);
	index_close(lovIndex, lockMode);
}
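A close like the one above pairs with a matching open taken under the same lock mode. The sketch below shows a hypothetical open-side counterpart; the function and parameter names are illustrative, not the actual GPDB bitmap-index API.

#include "postgres.h"
#include "access/genam.h"
#include "access/heapam.h"

/*
 * Hypothetical counterpart to _bitmap_close_lov_heapandindex(): open
 * the LOV heap and its index under one lock mode so that open and
 * close calls pair symmetrically.  Illustrative names only.
 */
static void
open_lov_heapandindex_sketch(Oid lovHeapOid, Oid lovIndexOid,
							 Relation *lovHeapP, Relation *lovIndexP,
							 LOCKMODE lockMode)
{
	/* lock the heap before its index, per PostgreSQL convention */
	*lovHeapP = heap_open(lovHeapOid, lockMode);
	*lovIndexP = index_open(lovIndexOid, lockMode);
}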
Example #3
/*
 * Release resources for one part (this includes closing the index and
 * the relation).
 */
static inline void
CleanupOnePartition(IndexScanState *indexState)
{
	Assert(NULL != indexState);

	/* Reset index state and release locks. */
	ExecClearTuple(indexState->ss.ps.ps_ResultTupleSlot);
	ExecClearTuple(indexState->ss.ss_ScanTupleSlot);

	if ((indexState->ss.scan_state & SCAN_SCAN) != 0)
	{
		Assert(indexState->iss_ScanDesc != NULL);
		Assert(indexState->iss_RelationDesc != NULL);
		Assert(indexState->ss.ss_currentRelation != NULL);

		index_endscan(indexState->iss_ScanDesc);
		indexState->iss_ScanDesc = NULL;

		index_close(indexState->iss_RelationDesc, NoLock);
		indexState->iss_RelationDesc = NULL;

		ExecCloseScanRelation(indexState->ss.ss_currentRelation);
		indexState->ss.ss_currentRelation = NULL;
	}

	indexState->ss.scan_state = SCAN_INIT;
}
Example #4
File: lo_inv_api.c Project: colinet/sqlix
/*
 * Clean up at main transaction end
 */
void
close_lo_relation(bool isCommit)
{
	if (lo_heap_r || lo_index_r) {
		/*
		 * Only bother to close if committing; else abort cleanup will handle
		 * it
		 */
		if (isCommit) {
			struct resource* currentOwner;

			currentOwner = current_resource;
			PG_TRY();
			{
				current_resource = top_xact_resource;

				if (lo_index_r)
					index_close(lo_index_r, NO_LOCK);

				if (lo_heap_r)
					heap_close(lo_heap_r, NO_LOCK);
			}
			PG_CATCH();
			{
				/* Ensure current_resource is restored on error */
				current_resource = currentOwner;
				PG_RE_THROW();
			}
			PG_END_TRY();
			current_resource = currentOwner;
		}
		lo_heap_r = NULL;
		lo_index_r = NULL;
	}
}
Example #5
void
AppendOnlyBlockDirectory_End_forSearch(
	AppendOnlyBlockDirectory *blockDirectory)
{
	int groupNo;
	
	if (blockDirectory->blkdirRel == NULL ||
		blockDirectory->blkdirIdx == NULL)
		return;

	for (groupNo = 0; groupNo < blockDirectory->numColumnGroups; groupNo++)
	{
		if (blockDirectory->minipages[groupNo].minipage != NULL)
			pfree(blockDirectory->minipages[groupNo].minipage);
	}

	ereportif(Debug_appendonly_print_blockdirectory, LOG,
				(errmsg("Append-only block directory end for search: "
						"(totalSegfiles, numColumnGroups, isAOCol)="
						"(%d, %d, %d)",
						blockDirectory->totalSegfiles,
						blockDirectory->numColumnGroups,
						blockDirectory->isAOCol)));

	pfree(blockDirectory->values);
	pfree(blockDirectory->nulls);
	pfree(blockDirectory->minipages);
	pfree(blockDirectory->scanKeys);
	pfree(blockDirectory->strategyNumbers);
	
	index_close(blockDirectory->blkdirIdx, AccessShareLock);
	heap_close(blockDirectory->blkdirRel, AccessShareLock);

	MemoryContextDelete(blockDirectory->memoryContext);
}
Example #6
/* ----------------------------------------------------------------
 *		ExecEndBitmapIndexScan
 * ----------------------------------------------------------------
 */
void
ExecEndBitmapIndexScan(BitmapIndexScanState *node)
{
	Relation	indexRelationDesc;
	IndexScanDesc indexScanDesc;

	/*
	 * extract information from the node
	 */
	indexRelationDesc = node->biss_RelationDesc;
	indexScanDesc = node->biss_ScanDesc;

	/*
	 * Free the exprcontext ... now dead code, see ExecFreeExprContext
	 */
#ifdef NOT_USED
	if (node->biss_RuntimeContext)
		FreeExprContext(node->biss_RuntimeContext, true);
#endif

	/*
	 * close the index relation (no-op if we didn't open it)
	 */
	if (indexScanDesc)
		index_endscan(indexScanDesc);
	if (indexRelationDesc)
		index_close(indexRelationDesc, NoLock);
}
Example #7
File: genam.c Project: sunyangkobe/cscd43
/*
 * systable_beginscan --- set up for heap-or-index scan
 *
 *	rel: catalog to scan, already opened and suitably locked
 *	indexRelname: name of index to conditionally use
 *	indexOK: if false, forces a heap scan (see notes below)
 *	snapshot: time qual to use (usually should be SnapshotNow)
 *	nkeys, key: scan keys
 *
 * The attribute numbers in the scan key should be set for the heap case.
 * If we choose to index, we reset them to 1..n to reference the index
 * columns.  Note this means there must be one scankey qualification per
 * index column!  This is checked by the Asserts in the normal, index-using
 * case, but won't be checked if the heapscan path is taken.
 *
 * The routine checks the normal cases for whether an indexscan is safe,
 * but caller can make additional checks and pass indexOK=false if needed.
 * In the standard case, indexOK can simply be constant TRUE.
 */
SysScanDesc
systable_beginscan(Relation heapRelation,
				   const char *indexRelname,
				   bool indexOK,
				   Snapshot snapshot,
				   int nkeys, ScanKey key)
{
	SysScanDesc sysscan;
	Relation	irel;

	if (indexOK && !IsIgnoringSystemIndexes())
	{
		/* We assume it's a system index, so index_openr is OK */
		irel = index_openr(indexRelname);

		if (ReindexIsProcessingIndex(RelationGetRelid(irel)))
		{
			/* oops, can't use index that's being rebuilt */
			index_close(irel);
			irel = NULL;
		}
	}
	else
		irel = NULL;

	sysscan = (SysScanDesc) palloc(sizeof(SysScanDescData));

	sysscan->heap_rel = heapRelation;
	sysscan->irel = irel;

	if (irel)
	{
		int			i;

		/*
		 * Change attribute numbers to be index column numbers.
		 *
		 * This code could be generalized to search for the index key numbers
		 * to substitute, but for now there's no need.
		 */
		for (i = 0; i < nkeys; i++)
		{
			Assert(key[i].sk_attno == irel->rd_index->indkey[i]);
			key[i].sk_attno = i + 1;
		}

		sysscan->iscan = index_beginscan(heapRelation, irel, snapshot,
										 nkeys, key);
		sysscan->scan = NULL;
	}
	else
	{
		sysscan->scan = heap_beginscan(heapRelation, snapshot, nkeys, key);
		sysscan->iscan = NULL;
	}

	return sysscan;
}
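A sketch of a typical caller follows, in the same 7.4-era style as the snippet above: fetch a pg_class row by OID. The header paths and the ClassOidIndex/RelationRelationName/SnapshotNow spellings are that era's conventions and should be treated as illustrative.

/*
 * Sketch: look up a pg_class tuple by OID with the heap-or-index API
 * above.  Era-specific constants are assumptions, not taken from the
 * listed project.
 */
#include "postgres.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/nbtree.h"
#include "catalog/indexing.h"
#include "catalog/pg_class.h"
#include "utils/fmgroids.h"

static HeapTuple
fetch_class_tuple_sketch(Oid reloid)
{
	Relation	rel = heap_openr(RelationRelationName, AccessShareLock);
	ScanKeyData key;
	SysScanDesc scan;
	HeapTuple	tuple;

	/* attno is set for the heap case; systable_beginscan remaps it */
	ScanKeyInit(&key,
				ObjectIdAttributeNumber,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(reloid));

	scan = systable_beginscan(rel, ClassOidIndex, true,
							  SnapshotNow, 1, &key);

	tuple = systable_getnext(scan);
	if (HeapTupleIsValid(tuple))
		tuple = heap_copytuple(tuple);	/* copy out before ending the scan */

	systable_endscan(scan);
	/* systable_endscan leaves the heap open; the caller closes it */
	heap_close(rel, AccessShareLock);

	return tuple;
}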
Example #8
/*
 * InsertInitialParquetSegnoEntry
 *
 * Adds an entry into the pg_paqseg_* table for this Parquet
 * relation. We use frozen_heap_insert so the tuple is
 * frozen on insert.
 *
 * Also insert a new entry to gp_fastsequence for this segment file.
 */
void InsertInitialParquetSegnoEntry(AppendOnlyEntry *aoEntry, int segno) {
	Relation pg_parquetseg_rel;
	Relation pg_parquetseg_idx;
	TupleDesc pg_parquetseg_dsc;
	HeapTuple pg_parquetseg_tuple = NULL;
	int natts = 0;
	bool *nulls;
	Datum *values;
	ItemPointerData tid;

	Assert(aoEntry != NULL);

	InsertFastSequenceEntry(aoEntry->segrelid, (int64) segno, 0,
			&tid);

	if (segno == 0)
	{
		return;
	}

	pg_parquetseg_rel = heap_open(aoEntry->segrelid, RowExclusiveLock);

	pg_parquetseg_dsc = RelationGetDescr(pg_parquetseg_rel);
	natts = pg_parquetseg_dsc->natts;
	nulls = palloc(sizeof(bool) * natts);
	values = palloc0(sizeof(Datum) * natts);
	MemSet(nulls, 0, sizeof(bool) * natts);

	if (Gp_role != GP_ROLE_EXECUTE)
		pg_parquetseg_idx = index_open(aoEntry->segidxid,
				RowExclusiveLock);
	else
		pg_parquetseg_idx = NULL;

	values[Anum_pg_parquetseg_segno - 1] = Int32GetDatum(segno);
	values[Anum_pg_parquetseg_tupcount - 1] = Float8GetDatum(0);
	values[Anum_pg_parquetseg_eof - 1] = Float8GetDatum(0);
	values[Anum_pg_parquetseg_eofuncompressed - 1] = Float8GetDatum(0);

	/*
	 * form the tuple and insert it
	 */
	pg_parquetseg_tuple = heap_form_tuple(pg_parquetseg_dsc, values, nulls);
	if (!HeapTupleIsValid(pg_parquetseg_tuple))
		elog(ERROR, "failed to build Parquet file segment tuple");

	frozen_heap_insert(pg_parquetseg_rel, pg_parquetseg_tuple);

	if (Gp_role != GP_ROLE_EXECUTE)
		CatalogUpdateIndexes(pg_parquetseg_rel, pg_parquetseg_tuple);

	heap_freetuple(pg_parquetseg_tuple);

	if (Gp_role != GP_ROLE_EXECUTE)
		index_close(pg_parquetseg_idx, RowExclusiveLock);
	heap_close(pg_parquetseg_rel, RowExclusiveLock);
}
Example #9
/*
 * Generate a WHERE clause for UPDATE or DELETE.
 */
static void
print_where_clause(StringInfo s,
				   Relation relation,
				   HeapTuple oldtuple,
				   HeapTuple newtuple)
{
	TupleDesc		tupdesc = RelationGetDescr(relation);
	int				natt;
	bool			first_column = true;

	Assert(relation->rd_rel->relreplident == REPLICA_IDENTITY_DEFAULT ||
		   relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL ||
		   relation->rd_rel->relreplident == REPLICA_IDENTITY_INDEX);

	/* Build the WHERE clause */
	appendStringInfoString(s, " WHERE ");

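	/* Called for its side effect: ensure relation->rd_replidindex is set */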
	RelationGetIndexList(relation);
	/* Generate WHERE clause using new values of REPLICA IDENTITY */
	if (OidIsValid(relation->rd_replidindex))
	{
		Relation    indexRel;
		int			key;

		/* Use all the values associated with the index */
		indexRel = index_open(relation->rd_replidindex, AccessShareLock);
		for (key = 0; key < indexRel->rd_index->indnatts; key++)
		{
			int	relattr = indexRel->rd_index->indkey.values[key];

			/*
			 * For a relation having REPLICA IDENTITY set at DEFAULT
			 * or INDEX, if one of the columns used for tuple selectivity
			 * is changed, the old tuple data is not NULL and needs to
			 * be used for tuple selectivity. If no such columns are
			 * updated, old tuple data is NULL.
			 */
			print_where_clause_item(s, relation,
									oldtuple ? oldtuple : newtuple,
									relattr, &first_column);
		}
		index_close(indexRel, NoLock);
		return;
	}

	/* We absolutely need some values for tuple selectivity now */
	Assert(oldtuple != NULL &&
		   relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL);

	/*
	 * Fall back to the default case: use the old values and print the
	 * WHERE clause using all the columns. This is the code path for FULL.
	 */
	for (natt = 0; natt < tupdesc->natts; natt++)
		print_where_clause_item(s, relation, oldtuple,
								natt + 1, &first_column);
}
Example #10
File: catalog.c Project: phan-pivotal/gpdb
/*
 * GetNewOid
 *		Generate a new OID that is unique within the given relation.
 *
 * Caller must have a suitable lock on the relation.
 *
 * Uniqueness is promised only if the relation has a unique index on OID.
 * This is true for all system catalogs that have OIDs, but might not be
 * true for user tables.  Note that we are effectively assuming that the
 * table has a relatively small number of entries (much less than 2^32)
 * and there aren't very long runs of consecutive existing OIDs.  Again,
 * this is reasonable for system catalogs but less so for user tables.
 *
 * Since the OID is not immediately inserted into the table, there is a
 * race condition here; but a problem could occur only if someone else
 * managed to cycle through 2^32 OIDs and generate the same OID before we
 * finish inserting our row.  This seems unlikely to be a problem.	Note
 * that if we had to *commit* the row to end the race condition, the risk
 * would be rather higher; therefore we use SnapshotDirty in the test,
 * so that we will see uncommitted rows.
 */
Oid
GetNewOid(Relation relation)
{
	Oid			newOid;
	Oid			oidIndex;
	Relation	indexrel;

	/* If relation doesn't have OIDs at all, caller is confused */
	Assert(relation->rd_rel->relhasoids);

	/* In bootstrap mode, we don't have any indexes to use */
	if (IsBootstrapProcessingMode())
		return GetNewObjectId();

	/* The relcache will cache the identity of the OID index for us */
	oidIndex = RelationGetOidIndex(relation);

	/* If no OID index, just hand back the next OID counter value */
	if (!OidIsValid(oidIndex))
	{
		/*
		 * System catalogs that have OIDs should *always* have a unique OID
		 * index; we should only take this path for user tables. Give a
		 * warning if it looks like somebody forgot an index.
		 */
		if (IsSystemRelation(relation))
			elog(WARNING, "generating possibly-non-unique OID for \"%s\"",
				 RelationGetRelationName(relation));

		return GetNewObjectId();
	}

	/* Otherwise, use the index to find a nonconflicting OID */
	indexrel = index_open(oidIndex, AccessShareLock);
	do {
		newOid = GetNewOidWithIndex(relation, indexrel);
	} while(!IsOidAcceptable(newOid));
	index_close(indexrel, AccessShareLock);

	/*
	 * Most catalog objects need to have the same OID in the master and all
	 * segments. When creating a new object, the master should allocate the
	 * OID and tell the segments to use the same, so segments should have no
	 * need to ever allocate OIDs on their own. Therefore, give a WARNING if
	 * GetNewOid() is called in a segment. (There are a few exceptions, see
	 * RelationNeedsSynchronizedOIDs()).
	 */
	if (Gp_role == GP_ROLE_EXECUTE && RelationNeedsSynchronizedOIDs(relation))
		elog(PANIC, "allocated OID %u for relation \"%s\" in segment",
			 newOid, RelationGetRelationName(relation));

	return newOid;
}
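A minimal caller sketch, assuming the pre-PostgreSQL-12 WITH OIDS world these snippets come from (HeapTupleSetOid lives in access/htup.h in 8.x and access/htup_details.h in 9.3+):

#include "postgres.h"
#include "access/heapam.h"
#include "catalog/catalog.h"

/*
 * Sketch: allocate a nonconflicting OID via GetNewOid and stamp it on
 * a tuple before insertion.  The caller must already hold a suitable
 * lock on rel, per the contract above.
 */
static Oid
insert_with_new_oid_sketch(Relation rel, HeapTuple tup)
{
	Oid			newOid = GetNewOid(rel);

	HeapTupleSetOid(tup, newOid);
	(void) simple_heap_insert(rel, tup);	/* returns this same OID */
	return newOid;
}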
Example #11
File: catalog.c Project: qiuyesuifeng/gpdb
/*
 * GetNewOid
 *		Generate a new OID that is unique within the given relation.
 *
 * Caller must have a suitable lock on the relation.
 *
 * Uniqueness is promised only if the relation has a unique index on OID.
 * This is true for all system catalogs that have OIDs, but might not be
 * true for user tables.  Note that we are effectively assuming that the
 * table has a relatively small number of entries (much less than 2^32)
 * and there aren't very long runs of consecutive existing OIDs.  Again,
 * this is reasonable for system catalogs but less so for user tables.
 *
 * Since the OID is not immediately inserted into the table, there is a
 * race condition here; but a problem could occur only if someone else
 * managed to cycle through 2^32 OIDs and generate the same OID before we
 * finish inserting our row.  This seems unlikely to be a problem.	Note
 * that if we had to *commit* the row to end the race condition, the risk
 * would be rather higher; therefore we use SnapshotDirty in the test,
 * so that we will see uncommitted rows.
 */
Oid
GetNewOid(Relation relation)
{
	Oid			newOid;
	Oid			oidIndex;
	Relation	indexrel;

	/* If relation doesn't have OIDs at all, caller is confused */
	Assert(relation->rd_rel->relhasoids);

	/* In bootstrap mode, we don't have any indexes to use */
	if (IsBootstrapProcessingMode())
		return GetNewObjectId();

	/* The relcache will cache the identity of the OID index for us */
	oidIndex = RelationGetOidIndex(relation);

	/* If no OID index, just hand back the next OID counter value */
	if (!OidIsValid(oidIndex))
	{
		Oid result;
		/*
		 * System catalogs that have OIDs should *always* have a unique OID
		 * index; we should only take this path for user tables. Give a
		 * warning if it looks like somebody forgot an index.
		 */
		if (IsSystemRelation(relation))
			elog(WARNING, "generating possibly-non-unique OID for \"%s\"",
				 RelationGetRelationName(relation));

		result = GetNewObjectId();

		if (IsSystemNamespace(RelationGetNamespace(relation)))
		{
			if (Gp_role == GP_ROLE_EXECUTE)
				elog(DEBUG1, "Allocating Oid %u on relid %u %s in EXECUTE mode",
					 result, relation->rd_id, RelationGetRelationName(relation));
			if (Gp_role == GP_ROLE_DISPATCH)
				elog(DEBUG5, "Allocating Oid %u on relid %u %s in DISPATCH mode",
					 result, relation->rd_id, RelationGetRelationName(relation));
		}
		return result;
	}

	/* Otherwise, use the index to find a nonconflicting OID */
	indexrel = index_open(oidIndex, AccessShareLock);
	newOid = GetNewOidWithIndex(relation, indexrel);
	index_close(indexrel, AccessShareLock);

	return newOid;
}
Example #12
/* ----------
 * toast_delete_datum -
 *
 *	Delete a single external stored value.
 * ----------
 */
static void
toast_delete_datum(Relation rel, Datum value)
{
	struct varlena *attr = (struct varlena *) DatumGetPointer(value);
	struct varatt_external toast_pointer;
	Relation	toastrel;
	Relation	toastidx;
	ScanKeyData toastkey;
	SysScanDesc toastscan;
	HeapTuple	toasttup;

	if (!VARATT_IS_EXTERNAL(attr))
		return;

	/* Must copy to access aligned fields */
	VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr);

	/*
	 * Open the toast relation and its index
	 */
	toastrel = heap_open(toast_pointer.va_toastrelid, RowExclusiveLock);
	toastidx = index_open(toastrel->rd_rel->reltoastidxid, RowExclusiveLock);

	/*
	 * Setup a scan key to find chunks with matching va_valueid
	 */
	ScanKeyInit(&toastkey,
				(AttrNumber) 1,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(toast_pointer.va_valueid));

	/*
	 * Find all the chunks.  (We don't actually care whether we see them in
	 * sequence or not, but since we've already locked the index we might as
	 * well use systable_beginscan_ordered.)
	 */
	toastscan = systable_beginscan_ordered(toastrel, toastidx,
										   SnapshotToast, 1, &toastkey);
	while ((toasttup = systable_getnext_ordered(toastscan, ForwardScanDirection)) != NULL)
	{
		/*
		 * Have a chunk, delete it
		 */
		simple_heap_delete(toastrel, &toasttup->t_self);
	}

	/*
	 * End scan and close relations
	 */
	systable_endscan_ordered(toastscan);
	index_close(toastidx, RowExclusiveLock);
	heap_close(toastrel, RowExclusiveLock);
}
Example #13
void
AppendOnlyBlockDirectory_End_addCol(
	AppendOnlyBlockDirectory *blockDirectory)
{
	int groupNo;

	/* newly added columns have attribute number beginning with this */
	AttrNumber colno = blockDirectory->aoRel->rd_att->natts -
			blockDirectory->numColumnGroups;

	if (blockDirectory->blkdirRel == NULL ||
		blockDirectory->blkdirIdx == NULL)
		return;
	for (groupNo = 0; groupNo < blockDirectory->numColumnGroups; groupNo++)
	{
		MinipagePerColumnGroup *minipageInfo =
				&blockDirectory->minipages[groupNo];

		if (minipageInfo->numMinipageEntries > 0)
		{
			write_minipage(blockDirectory, groupNo + colno, minipageInfo);
			ereportif(Debug_appendonly_print_blockdirectory, LOG,
					  (errmsg("Append-only block directory end of insert write"
							  " minipage: (columnGroupNo, nEntries) = (%d, %u)",
							  groupNo, minipageInfo->numMinipageEntries)));
		}
		pfree(minipageInfo->minipage);
	}

	ereportif(Debug_appendonly_print_blockdirectory, LOG,
				(errmsg("Append-only block directory end for insert: "
						"(segno, numColumnGroups, isAOCol)="
						"(%d, %d, %d)",
						blockDirectory->currentSegmentFileNum,
						blockDirectory->numColumnGroups,
						blockDirectory->isAOCol)));

	pfree(blockDirectory->values);
	pfree(blockDirectory->nulls);
	pfree(blockDirectory->minipages);
	pfree(blockDirectory->scanKeys);
	pfree(blockDirectory->strategyNumbers);
	/*
	 * We already hold transaction-scope exclusive lock on the AOCS
	 * relation.  Let's defer release of locks on block directory as
	 * well until the end of alter-table transaction.
	 */
	index_close(blockDirectory->blkdirIdx, NoLock);
	heap_close(blockDirectory->blkdirRel, NoLock);

	MemoryContextDelete(blockDirectory->memoryContext);
}
Example #14
/*
 * systable_endscan --- close scan, release resources
 *
 * Note that it's still up to the caller to close the heap relation.
 */
void
systable_endscan(SysScanDesc sysscan)
{
	if (sysscan->irel)
	{
		index_endscan(sysscan->iscan);
		index_close(sysscan->irel, AccessShareLock);
	}
	else
		heap_endscan(sysscan->scan);

	pfree(sysscan);
}
Example #15
File: tuptoaster.c Project: nskyzh/gpdb
/* ----------
 * toast_delete_datum -
 *
 *	Delete a single external stored value.
 * ----------
 */
static void
toast_delete_datum(Relation rel __attribute__((unused)), Datum value)
{
	varattrib  *attr = (varattrib *) DatumGetPointer(value);
	Relation	toastrel;
	Relation	toastidx;
	ScanKeyData toastkey;
	IndexScanDesc toastscan;
	HeapTuple	toasttup;

	if (!VARATT_IS_EXTERNAL(attr))
		return;

	/*
	 * Open the toast relation and its index
	 */
	toastrel = heap_open(attr->va_external.va_toastrelid,
						 RowExclusiveLock);
	toastidx = index_open(toastrel->rd_rel->reltoastidxid, RowExclusiveLock);

	/*
	 * Setup a scan key to fetch from the index by va_valueid
	 */
	ScanKeyInit(&toastkey,
				(AttrNumber) 1,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(attr->va_external.va_valueid));

	/*
	 * Find the chunks by index.  (We don't actually care whether we see
	 * them in sequence or not.)
	 */
	toastscan = index_beginscan(toastrel, toastidx,
								SnapshotToast, 1, &toastkey);
	while ((toasttup = index_getnext(toastscan, ForwardScanDirection)) != NULL)
	{
		/*
		 * Have a chunk, delete it
		 */
		simple_heap_delete(toastrel, &toasttup->t_self);
	}

	/*
	 * End scan and close relations
	 */
	index_endscan(toastscan);
	index_close(toastidx, RowExclusiveLock);
	heap_close(toastrel, RowExclusiveLock);
}
Example #16
File: hash.c Project: sunyangkobe/cscd43
/*
 *	hashbuild() -- build a new hash index.
 *
 *		We use a global variable to record the fact that we're creating
 *		a new index.  This is used to avoid high-concurrency locking,
 *		since the index won't be visible until this transaction commits
 *		and since building is guaranteed to be single-threaded.
 */
Datum
hashbuild(PG_FUNCTION_ARGS)
{
	Relation	heap = (Relation) PG_GETARG_POINTER(0);
	Relation	index = (Relation) PG_GETARG_POINTER(1);
	IndexInfo  *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
	double		reltuples;
	HashBuildState buildstate;

	/*
	 * We expect to be called exactly once for any index relation. If
	 * that's not the case, big trouble's what we have.
	 */
	if (RelationGetNumberOfBlocks(index) != 0)
		elog(ERROR, "index \"%s\" already contains data",
			 RelationGetRelationName(index));

	/* initialize the hash index metadata page */
	_hash_metapinit(index);

	/* build the index */
	buildstate.indtuples = 0;

	/* do the heap scan */
	reltuples = IndexBuildHeapScan(heap, index, indexInfo,
								hashbuildCallback, (void *) &buildstate);

	/*
	 * Since we just counted the tuples in the heap, we update its stats
	 * in pg_class to guarantee that the planner takes advantage of the
	 * index we just created.  But, only update statistics during normal
	 * index definitions, not for indices on system catalogs created
	 * during bootstrap processing.  We must close the relations before
	 * updating statistics to guarantee that the relcache entries are
	 * flushed when we increment the command counter in UpdateStats(). But
	 * we do not release any locks on the relations; those will be held
	 * until end of transaction.
	 */
	if (IsNormalProcessingMode())
	{
		Oid			hrelid = RelationGetRelid(heap);
		Oid			irelid = RelationGetRelid(index);

		heap_close(heap, NoLock);
		index_close(index);
		UpdateStats(hrelid, reltuples);
		UpdateStats(irelid, buildstate.indtuples);
	}

	PG_RETURN_VOID();
}
Example #17
/*
 * Frees the data allocated by the visimap store
 * 
 * No function using the visibility map store should be called
 * after this function call.
 */ 
void
AppendOnlyVisimapStore_Finish(
		AppendOnlyVisimapStore *visiMapStore,
		LOCKMODE lockmode)
{
	if (visiMapStore->scanKeys)
	{
		pfree(visiMapStore->scanKeys);
		visiMapStore->scanKeys = NULL;
	}

	index_close(visiMapStore->visimapIndex, lockmode);
	heap_close(visiMapStore->visimapRelation, lockmode);
}
Example #18
/* ----------
 * toast_delete_datum -
 *
 *	Delete a single external stored value.
 * ----------
 */
static void
toast_delete_datum(Relation rel, Datum value)
{
	varattrib  *attr = (varattrib *) DatumGetPointer(value);
	Relation	toastrel;
	Relation	toastidx;
	ScanKeyData toastkey;
	IndexScanDesc toastscan;
	HeapTuple	toasttup;

	if (!VARATT_IS_EXTERNAL(attr))
		return;

	/*
	 * Open the toast relation and its index
	 */
	toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
						 RowExclusiveLock);
	toastidx = index_open(toastrel->rd_rel->reltoastidxid);

	/*
	 * Setup a scan key to fetch from the index by va_valueid (we don't
	 * particularly care whether we see them in sequence or not)
	 */
	ScanKeyEntryInitialize(&toastkey,
						   (bits16) 0,
						   (AttrNumber) 1,
						   (RegProcedure) F_OIDEQ,
			  ObjectIdGetDatum(attr->va_content.va_external.va_valueid));

	/*
	 * Find the chunks by index
	 */
	toastscan = index_beginscan(toastrel, toastidx, SnapshotToast,
								1, &toastkey);
	while ((toasttup = index_getnext(toastscan, ForwardScanDirection)) != NULL)
	{
		/*
		 * Have a chunk, delete it
		 */
		simple_heap_delete(toastrel, &toasttup->t_self);
	}

	/*
	 * End scan and close relations
	 */
	index_endscan(toastscan);
	index_close(toastidx);
	heap_close(toastrel, RowExclusiveLock);
}
Example #19
/* ----------------------------------------------------------------
 *		ExecEndIndexOnlyScan
 * ----------------------------------------------------------------
 */
void
ExecEndIndexOnlyScan(IndexOnlyScanState *node)
{
	Relation	indexRelationDesc;
	IndexScanDesc indexScanDesc;
	Relation	relation;

	/*
	 * extract information from the node
	 */
	indexRelationDesc = node->ioss_RelationDesc;
	indexScanDesc = node->ioss_ScanDesc;
	relation = node->ss.ss_currentRelation;

	/* Release VM buffer pin, if any. */
	if (node->ioss_VMBuffer != InvalidBuffer)
	{
		ReleaseBuffer(node->ioss_VMBuffer);
		node->ioss_VMBuffer = InvalidBuffer;
	}

	/*
	 * Free the exprcontext(s) ... now dead code, see ExecFreeExprContext
	 */
#ifdef NOT_USED
	ExecFreeExprContext(&node->ss.ps);
	if (node->ioss_RuntimeContext)
		FreeExprContext(node->ioss_RuntimeContext, true);
#endif

	/*
	 * clear out tuple table slots
	 */
	ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
	ExecClearTuple(node->ss.ss_ScanTupleSlot);

	/*
	 * close the index relation (no-op if we didn't open it)
	 */
	if (indexScanDesc)
		index_endscan(indexScanDesc);
	if (indexRelationDesc)
		index_close(indexRelationDesc, NoLock);

	/*
	 * close the heap relation.
	 */
	ExecCloseScanRelation(relation);
}
Example #20
/*
 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
 *
 * The only reason to call this routine is to ensure that the relcache
 * has created entries for all the catalogs and indexes referenced by
 * catcaches.  Therefore, open the index too.  An exception is the indexes
 * on pg_am, which we don't use (cf. IndexScanOK).
 */
void
InitCatCachePhase2(CatCache *cache)
{
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	if (cache->id != AMOID &&
		cache->id != AMNAME)
	{
		Relation	idesc;

		idesc = index_openr(cache->cc_indname);
		index_close(idesc);
	}
}
Example #21
/*
 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
 *
 * One reason to call this routine is to ensure that the relcache has
 * created entries for all the catalogs and indexes referenced by catcaches.
 * Therefore, provide an option to open the index as well as fixing the
 * cache itself.  An exception is the indexes on pg_am, which we don't use
 * (cf. IndexScanOK).
 */
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	if (touch_index &&
		cache->id != AMOID &&
		cache->id != AMNAME)
	{
		Relation	idesc;

		idesc = index_open(cache->cc_indexoid, AccessShareLock);
		index_close(idesc, AccessShareLock);
	}
}
Example #22
void
AppendOnlyBlockDirectory_End_forInsert(
	AppendOnlyBlockDirectory *blockDirectory)
{
	int groupNo;

	if (blockDirectory->blkdirRel == NULL ||
		blockDirectory->blkdirIdx == NULL)
		return;
	
	for (groupNo = 0; groupNo < blockDirectory->numColumnGroups; groupNo++)
	{
		MinipagePerColumnGroup *minipageInfo =
			&blockDirectory->minipages[groupNo];
		
		if (minipageInfo->numMinipageEntries > 0)
		{
			write_minipage(blockDirectory, groupNo);
			if (Debug_appendonly_print_blockdirectory)
				ereport(LOG,
						(errmsg("Append-only block directory end of insert write minipage: "
								"(columnGroupNo, nEntries) = (%d, %u)",
								groupNo, minipageInfo->numMinipageEntries)));
		}
		
		pfree(minipageInfo->minipage);
	}

	if (Debug_appendonly_print_blockdirectory)
		ereport(LOG,
				(errmsg("Append-only block directory end for insert: "
						"(segno, numColumnGroups)="
						"(%d, %d)",
						blockDirectory->currentSegmentFileNum,
						blockDirectory->numColumnGroups)));

	pfree(blockDirectory->values);
	pfree(blockDirectory->nulls);
	pfree(blockDirectory->minipages);
	pfree(blockDirectory->scanKeys);
	pfree(blockDirectory->strategyNumbers);
	
	index_close(blockDirectory->blkdirIdx, RowExclusiveLock);
	heap_close(blockDirectory->blkdirRel, RowExclusiveLock);
	
	MemoryContextDelete(blockDirectory->memoryContext);
}
Example #23
File: ginfast.c Project: 0x0FFF/postgres
/*
 * SQL-callable function to clean the insert pending list
 */
Datum
gin_clean_pending_list(PG_FUNCTION_ARGS)
{
	Oid			indexoid = PG_GETARG_OID(0);
	Relation	indexRel = index_open(indexoid, AccessShareLock);
	IndexBulkDeleteResult stats;
	GinState	ginstate;

	if (RecoveryInProgress())
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("recovery is in progress"),
		 errhint("GIN pending list cannot be cleaned up during recovery.")));

	/* Must be a GIN index */
	if (indexRel->rd_rel->relkind != RELKIND_INDEX ||
		indexRel->rd_rel->relam != GIN_AM_OID)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is not a GIN index",
						RelationGetRelationName(indexRel))));

	/*
	 * Reject attempts to read non-local temporary relations; we would be
	 * likely to get wrong data since we have no visibility into the owning
	 * session's local buffers.
	 */
	if (RELATION_IS_OTHER_TEMP(indexRel))
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
			   errmsg("cannot access temporary indexes of other sessions")));

	/* User must own the index (comparable to privileges needed for VACUUM) */
	if (!pg_class_ownercheck(indexoid, GetUserId()))
		aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
					   RelationGetRelationName(indexRel));

	memset(&stats, 0, sizeof(stats));
	initGinState(&ginstate, indexRel);
	ginInsertCleanup(&ginstate, true, true, &stats);

	index_close(indexRel, AccessShareLock);

	PG_RETURN_INT64((int64) stats.pages_deleted);
}
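Since PostgreSQL 9.6 this function is also callable from SQL, e.g. SELECT gin_clean_pending_list('some_gin_index'::regclass). Below is a hedged sketch of invoking it from backend C through the fmgr; the header carrying the prototype is an assumption (access/gin.h in 9.6, utils/fmgrprotos.h in later releases).

#include "postgres.h"
#include "fmgr.h"
#include "access/gin.h"

/*
 * Sketch: call gin_clean_pending_list via the fmgr, equivalent to the
 * SQL invocation above.  Header location varies by server version.
 */
static int64
clean_pending_list_sketch(Oid ginIndexOid)
{
	Datum		pages = DirectFunctionCall1(gin_clean_pending_list,
											ObjectIdGetDatum(ginIndexOid));

	/* number of pending-list pages deleted by the cleanup */
	return DatumGetInt64(pages);
}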
Example #24
File: nodeIndexscan.c Project: LJoNe/gpdb
/* ----------------------------------------------------------------
 *		ExecEndIndexScan
 * ----------------------------------------------------------------
 */
void
ExecEndIndexScan(IndexScanState *node)
{
	Relation	indexRelationDesc;
	IndexScanDesc indexScanDesc;
	Relation	relation;

	/*
	 * extract information from the node
	 */
	indexRelationDesc = node->iss_RelationDesc;
	indexScanDesc = node->iss_ScanDesc;
	relation = node->ss.ss_currentRelation;

	/*
	 * Free the exprcontext(s) ... now dead code, see ExecFreeExprContext
	 */
#ifdef NOT_USED
	ExecFreeExprContext(&node->ss.ps);
	if (node->iss_RuntimeContext)
		FreeExprContext(node->iss_RuntimeContext);
#endif

	/*
	 * clear out tuple table slots
	 */
	ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
	ExecClearTuple(node->ss.ss_ScanTupleSlot);

	/*
	 * close the index relation (no-op if we didn't open it)
	 */
	ExecEagerFreeIndexScan(node);
	if (indexRelationDesc)
		index_close(indexRelationDesc, NoLock);

	/*
	 * close the heap relation.
	 */
	ExecCloseScanRelation(relation);

	FreeRuntimeKeysContext(node);
	EndPlanStateGpmonPkt(&node->ss.ps);
}
Example #25
File: enum.c Project: Tao-Ma/postgres
/*
 * enum_endpoint: common code for enum_first/enum_last
 */
static Oid
enum_endpoint(Oid enumtypoid, ScanDirection direction)
{
	Relation	enum_rel;
	Relation	enum_idx;
	SysScanDesc enum_scan;
	HeapTuple	enum_tuple;
	ScanKeyData skey;
	Oid			minmax;

	/*
	 * Find the first/last enum member using pg_enum_typid_sortorder_index.
	 * Note we must not use the syscache.  See comments for RenumberEnumType
	 * in catalog/pg_enum.c for more info.
	 */
	ScanKeyInit(&skey,
				Anum_pg_enum_enumtypid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(enumtypoid));

	enum_rel = heap_open(EnumRelationId, AccessShareLock);
	enum_idx = index_open(EnumTypIdSortOrderIndexId, AccessShareLock);
	enum_scan = systable_beginscan_ordered(enum_rel, enum_idx, NULL,
										   1, &skey);

	enum_tuple = systable_getnext_ordered(enum_scan, direction);
	if (HeapTupleIsValid(enum_tuple))
	{
		/* check it's safe to use in SQL */
		check_safe_enum_use(enum_tuple);
		minmax = HeapTupleGetOid(enum_tuple);
	}
	else
	{
		/* should only happen with an empty enum */
		minmax = InvalidOid;
	}

	systable_endscan_ordered(enum_scan);
	index_close(enum_idx, AccessShareLock);
	heap_close(enum_rel, AccessShareLock);

	return minmax;
}
Example #26
/* ----------------------------------------------------------------
 *		ExecEndIndexScan
 * ----------------------------------------------------------------
 */
void
ExecEndIndexScan(index_ss *node)
{
	struct relation* indexRelationDesc;
	struct index_scan* indexScanDesc;
	struct relation* relation;

	/*
	 * extract information from the node
	 */
	indexRelationDesc = node->iss_RelationDesc;
	indexScanDesc = node->iss_ScanDesc;
	relation = node->ss.ss_currentRelation;

	/*
	 * Free the exprcontext(s) ... now dead code, see exec_free_expr_ctx
	 */
#ifdef NOT_USED
	exec_free_expr_ctx(&node->ss.ps);
	if (node->iss_RuntimeContext)
		free_expr_ctx(node->iss_RuntimeContext, true);
#endif

	/*
	 * clear out tuple table slots
	 */
	exec_clear_tuple(node->ss.ps.ps_ResultTupleSlot);
	exec_clear_tuple(node->ss.ss_ScanTupleSlot);

	/*
	 * close the index relation (no-op if we didn't open it)
	 */
	if (indexScanDesc)
		index_endscan(indexScanDesc);

	if (indexRelationDesc)
		index_close(indexRelationDesc, NO_LOCK);

	/*
	 * close the heap relation.
	 */
	ExecCloseScanRelation(relation);
}
Example #27
/*
 * AppendOnlyBlockDirectory_DeleteSegmentFile
 *
 * Deletes all block directory entries for given segment file of an
 * append-only relation.
 */ 
void
AppendOnlyBlockDirectory_DeleteSegmentFile(
		AppendOnlyEntry *aoEntry,
		Snapshot snapshot,
		int segno,
		int columnGroupNo)
{
	Assert(aoEntry);
	Assert(OidIsValid(aoEntry->blkdirrelid));
	Assert(OidIsValid(aoEntry->blkdiridxid));

	Relation blkdirRel = heap_open(aoEntry->blkdirrelid, RowExclusiveLock);
	Relation blkdirIdx = index_open(aoEntry->blkdiridxid, RowExclusiveLock);

	ScanKeyData scanKey;
	ScanKeyInit(&scanKey,
			1, /* segno */
			BTEqualStrategyNumber,
			F_INT4EQ,
			Int32GetDatum(segno));

	IndexScanDesc indexScan = index_beginscan(
			blkdirRel,
			blkdirIdx,
			snapshot,
			1,
			&scanKey);
	
	HeapTuple tuple = NULL;
	while ((tuple = index_getnext(indexScan, ForwardScanDirection)) != NULL)
	{
		simple_heap_delete(blkdirRel,
				&tuple->t_self);
	}
	index_endscan(indexScan);

	index_close(blkdirIdx, RowExclusiveLock);
	heap_close(blkdirRel, RowExclusiveLock);
}
Example #28
/*
 * enum_endpoint: common code for enum_first/enum_last
 */
static Oid
enum_endpoint(Oid enumtypoid, ScanDirection direction)
{
	Relation	enum_rel;
	Relation	enum_idx;
	SysScanDesc enum_scan;
	HeapTuple	enum_tuple;
	ScanKeyData skey;
	Oid			minmax;

	/*
	 * Find the first/last enum member using pg_enum_typid_sortorder_index.
	 * Note we must not use the syscache, and must use an MVCC snapshot here.
	 * See comments for RenumberEnumType in catalog/pg_enum.c for more info.
	 */
	ScanKeyInit(&skey,
				Anum_pg_enum_enumtypid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(enumtypoid));

	enum_rel = heap_open(EnumRelationId, AccessShareLock);
	enum_idx = index_open(EnumTypIdSortOrderIndexId, AccessShareLock);
	enum_scan = systable_beginscan_ordered(enum_rel, enum_idx,
										   GetTransactionSnapshot(),
										   1, &skey);

	enum_tuple = systable_getnext_ordered(enum_scan, direction);
	if (HeapTupleIsValid(enum_tuple))
		minmax = HeapTupleGetOid(enum_tuple);
	else
		minmax = InvalidOid;

	systable_endscan_ordered(enum_scan);
	index_close(enum_idx, AccessShareLock);
	heap_close(enum_rel, AccessShareLock);

	return minmax;
}
Example #29
/*
 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
 *
 * One reason to call this routine is to ensure that the relcache has
 * created entries for all the catalogs and indexes referenced by catcaches.
 * Therefore, provide an option to open the index as well as fixing the
 * cache itself.  An exception is the indexes on pg_am, which we don't use
 * (cf. IndexScanOK).
 */
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	if (touch_index &&
		cache->id != AMOID &&
		cache->id != AMNAME)
	{
		Relation	idesc;

		/*
		 * We must lock the underlying catalog before opening the index to
		 * avoid deadlock, since index_open could possibly result in reading
		 * this same catalog, and if anyone else is exclusive-locking this
		 * catalog and index they'll be doing it in that order.
		 */
		LockRelationOid(cache->cc_reloid, AccessShareLock);
		idesc = index_open(cache->cc_indexoid, AccessShareLock);
		index_close(idesc, AccessShareLock);
		UnlockRelationOid(cache->cc_reloid, AccessShareLock);
	}
}
Example #30
void
rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
{
    Relation              LocalNewHeap, LocalOldHeap, LocalOldIndex;
    IndexScanDesc         ScanDesc;
    RetrieveIndexResult   ScanResult;
    ItemPointer           HeapTid;
    HeapTuple             LocalHeapTuple;
    Buffer                LocalBuffer;
    Oid              	  OIDNewHeapInsert;

    /*
     * Open the relations I need. Scan through the OldHeap on the OldIndex and
     * insert each tuple into the NewHeap.
     */
    LocalNewHeap = (Relation) heap_open(OIDNewHeap);
    LocalOldHeap = (Relation) heap_open(OIDOldHeap);
    LocalOldIndex = (Relation) index_open(OIDOldIndex);

    ScanDesc = index_beginscan(LocalOldIndex, false, 0, (ScanKey) NULL);

    while ((ScanResult =
	    index_getnext(ScanDesc, ForwardScanDirection)) != NULL) {

	HeapTid = &ScanResult->heap_iptr;
	LocalHeapTuple = heap_fetch(LocalOldHeap, 0, HeapTid, &LocalBuffer);
	OIDNewHeapInsert =
	    heap_insert(LocalNewHeap, LocalHeapTuple);
	pfree(ScanResult);
	ReleaseBuffer(LocalBuffer);
    }

    index_close(LocalOldIndex);
    heap_close(LocalOldHeap);
    heap_close(LocalNewHeap);
}