Example #1
/*
 * btgetbitmap() -- gets all matching tuples, and adds them to a bitmap
 */
Datum
btgetbitmap(PG_FUNCTION_ARGS)
{
	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
	TIDBitmap  *tbm = (TIDBitmap *) PG_GETARG_POINTER(1);
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	int64		ntids = 0;
	ItemPointer heapTid;

	/*
	 * If we have any array keys, initialize them.
	 */
	if (so->numArrayKeys)
	{
		/* punt if we have any unsatisfiable array keys */
		if (so->numArrayKeys < 0)
			PG_RETURN_INT64(ntids);

		_bt_start_array_keys(scan, ForwardScanDirection);
	}

	/* This loop handles advancing to the next array elements, if any */
	do
	{
		/* Fetch the first page & tuple */
		if (_bt_first(scan, ForwardScanDirection))
		{
			/* Save tuple ID, and continue scanning */
			heapTid = &scan->xs_ctup.t_self;
			tbm_add_tuples(tbm, heapTid, 1, false);
			ntids++;

			for (;;)
			{
				/*
				 * Advance to next tuple within page.  This is the same as the
				 * easy case in _bt_next().
				 */
				if (++so->currPos.itemIndex > so->currPos.lastItem)
				{
					/* let _bt_next do the heavy lifting */
					if (!_bt_next(scan, ForwardScanDirection))
						break;
				}

				/* Save tuple ID, and continue scanning */
				heapTid = &so->currPos.items[so->currPos.itemIndex].heapTid;
				tbm_add_tuples(tbm, heapTid, 1, false);
				ntids++;
			}
		}
		/* Now see if we have more array keys to deal with */
	} while (so->numArrayKeys && _bt_advance_array_keys(scan, ForwardScanDirection));

	PG_RETURN_INT64(ntids);
}
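For context, a minimal sketch of how this amgetbitmap entry point is normally reached from the executor side: a TIDBitmap is built and handed to index_getbitmap(), which dispatches through the index AM interface to btgetbitmap() for a btree index. The helper name collect_matches is an illustrative assumption; index_getbitmap(), tbm_create() and the work_mem GUC are the wrappers used by the PostgreSQL versions that, like this example, still expose btgetbitmap() through fmgr.

#include "postgres.h"
#include "access/genam.h"
#include "nodes/tidbitmap.h"
#include "miscadmin.h"

/* Illustrative helper (not part of the source above): build a bitmap of all
 * TIDs matching an already-initialized, already-rescanned index scan. */
static TIDBitmap *
collect_matches(IndexScanDesc scan, int64 *ntids)
{
	/* size the in-memory TID bitmap by work_mem, as the executor does */
	TIDBitmap  *tbm = tbm_create(work_mem * 1024L);

	/* index_getbitmap() looks up the index AM's amgetbitmap routine,
	 * i.e. btgetbitmap() for a btree index, and calls it */
	*ntids = index_getbitmap(scan, tbm);

	return tbm;
}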
Example #2
/*
 *	btgettuple() -- Get the next tuple in the scan.
 */
Datum
btgettuple(PG_FUNCTION_ARGS)
{
	MIRROREDLOCK_BUFMGR_VERIFY_NO_LOCK_LEAK_DECLARE;

	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
	ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	bool		res;

	MIRROREDLOCK_BUFMGR_VERIFY_NO_LOCK_LEAK_ENTER;

	/*
	 * If we've already initialized this scan, we can just advance it in the
	 * appropriate direction.  If we haven't done so yet, we call a routine to
	 * get the first item in the scan.
	 */
	if (BTScanPosIsValid(so->currPos))
	{
		/*
		 * Check to see if we should kill the previously-fetched tuple.
		 */
		if (scan->kill_prior_tuple)
		{
			/*
			 * Yes, remember it for later.	(We'll deal with all such tuples
			 * at once right before leaving the index page.)  The test for
			 * numKilled overrun is not just paranoia: if the caller reverses
			 * direction in the indexscan then the same item might get entered
			 * multiple times.	It's not worth trying to optimize that, so we
			 * don't detect it, but instead just forget any excess entries.
			 */
			if (so->killedItems == NULL)
				so->killedItems = (int *)
					palloc(MaxIndexTuplesPerPage * sizeof(int));
			if (so->numKilled < MaxIndexTuplesPerPage)
				so->killedItems[so->numKilled++] = so->currPos.itemIndex;
		}

		/*
		 * Now continue the scan.
		 */
		res = _bt_next(scan, dir);
	}
	else
		res = _bt_first(scan, dir);

	MIRROREDLOCK_BUFMGR_VERIFY_NO_LOCK_LEAK_EXIT;

	PG_RETURN_BOOL(res);
}
Example #3
/*
 * btgetbitmap() -- gets all matching tuples, and adds them to a bitmap
 */
Datum
btgetbitmap(PG_FUNCTION_ARGS)
{
	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
	TIDBitmap  *tbm = (TIDBitmap *) PG_GETARG_POINTER(1);
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	int64		ntids = 0;
	ItemPointer heapTid;

	/* Fetch the first page & tuple. */
	if (!_bt_first(scan, ForwardScanDirection))
	{
		/* empty scan */
		PG_RETURN_INT64(0);
	}
	/* Save tuple ID, and continue scanning */
	heapTid = &scan->xs_ctup.t_self;
	tbm_add_tuples(tbm, heapTid, 1, false);
	ntids++;

	for (;;)
	{
		/*
		 * Advance to next tuple within page.  This is the same as the easy
		 * case in _bt_next().
		 */
		if (++so->currPos.itemIndex > so->currPos.lastItem)
		{
			/* let _bt_next do the heavy lifting */
			if (!_bt_next(scan, ForwardScanDirection))
				break;
		}

		/* Save tuple ID, and continue scanning */
		heapTid = &so->currPos.items[so->currPos.itemIndex].heapTid;
		tbm_add_tuples(tbm, heapTid, 1, false);
		ntids++;
	}

	PG_RETURN_INT64(ntids);
}
Example #4
/*
 *	btgettuple() -- Get the next tuple in the scan.
 */
bool
btgettuple(IndexScanDesc scan, ScanDirection dir)
{
	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	bool		res;

	/* btree indexes are never lossy */
	scan->xs_recheck = false;

	/*
	 * If we have any array keys, initialize them during first call for a
	 * scan.  We can't do this in btrescan because we don't know the scan
	 * direction at that time.
	 */
	if (so->numArrayKeys && !BTScanPosIsValid(so->currPos))
	{
		/* punt if we have any unsatisfiable array keys */
		if (so->numArrayKeys < 0)
			return false;

		_bt_start_array_keys(scan, dir);
	}

	/* This loop handles advancing to the next array elements, if any */
	do
	{
		/*
		 * If we've already initialized this scan, we can just advance it in
		 * the appropriate direction.  If we haven't done so yet, we call
		 * _bt_first() to get the first item in the scan.
		 */
		if (!BTScanPosIsValid(so->currPos))
			res = _bt_first(scan, dir);
		else
		{
			/*
			 * Check to see if we should kill the previously-fetched tuple.
			 */
			if (scan->kill_prior_tuple)
			{
				/*
				 * Yes, remember it for later. (We'll deal with all such
				 * tuples at once right before leaving the index page.)  The
				 * test for numKilled overrun is not just paranoia: if the
				 * caller reverses direction in the indexscan then the same
				 * item might get entered multiple times. It's not worth
				 * trying to optimize that, so we don't detect it, but instead
				 * just forget any excess entries.
				 */
				if (so->killedItems == NULL)
					so->killedItems = (int *)
						palloc(MaxIndexTuplesPerPage * sizeof(int));
				if (so->numKilled < MaxIndexTuplesPerPage)
					so->killedItems[so->numKilled++] = so->currPos.itemIndex;
			}

			/*
			 * Now continue the scan.
			 */
			res = _bt_next(scan, dir);
		}

		/* If we have a tuple, return it ... */
		if (res)
			break;
		/* ... otherwise see if we have more array keys to deal with */
	} while (so->numArrayKeys && _bt_advance_array_keys(scan, dir));

	return res;
}
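A minimal sketch of the caller side of this amgettuple path, using the generic index AM wrappers index_beginscan(), index_rescan() and index_getnext_tid(); for a btree index, index_getnext_tid() ends up calling btgettuple() as shown above. The helper name, the int4 equality key and the attribute number are illustrative assumptions, not taken from the source.

#include "postgres.h"
#include "access/genam.h"
#include "access/skey.h"
#include "access/stratnum.h"
#include "utils/fmgroids.h"

/* Illustrative helper: scan a btree index for rows whose first index column
 * equals "key", visiting each matching heap TID. */
static void
scan_equal_int4(Relation heapRel, Relation indexRel, Snapshot snapshot, int32 key)
{
	ScanKeyData skey;
	IndexScanDesc scan;
	ItemPointer tid;

	/* qual on index column 1: col = key */
	ScanKeyInit(&skey, 1, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(key));

	scan = index_beginscan(heapRel, indexRel, snapshot, 1, 0);
	index_rescan(scan, &skey, 1, NULL, 0);

	/* each call advances the scan via the AM's amgettuple, i.e. btgettuple() */
	while ((tid = index_getnext_tid(scan, ForwardScanDirection)) != NULL)
	{
		/* tid points at the matching heap tuple's TID
		 * (scan->xs_ctup.t_self in this era of the tree) */
	}

	index_endscan(scan);
}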
Example #5
/*
 * btgetmulti() -- construct a HashBitmap.
 */
Datum
btgetmulti(PG_FUNCTION_ARGS)
{
	MIRROREDLOCK_BUFMGR_VERIFY_NO_LOCK_LEAK_DECLARE;

	IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
	Node *n = (Node *)PG_GETARG_POINTER(1);
	HashBitmap	*hashBitmap;

	BTScanOpaque so = (BTScanOpaque) scan->opaque;
	bool		res = true;

	MIRROREDLOCK_BUFMGR_VERIFY_NO_LOCK_LEAK_ENTER;

	if (n == NULL || IsA(n, StreamBitmap))
	{
		/* XXX should we use less than work_mem for this? */
		hashBitmap = tbm_create(work_mem * 1024L);
	}
	else
	{
		hashBitmap = (HashBitmap *)n;
	}
	/* If we haven't started the scan yet, fetch the first page & tuple. */
	if (!BTScanPosIsValid(so->currPos))
	{
		res = _bt_first(scan, ForwardScanDirection);
		if (res)
		{
			/* Save tuple ID, and continue scanning */
			tbm_add_tuples(hashBitmap, &(scan->xs_ctup.t_self), 1);
		}
	}

	while (res)
	{
		/*
		 * Advance to next tuple within page.  This is the same as the
		 * easy case in _bt_next().
		 */
		if (++so->currPos.itemIndex > so->currPos.lastItem)
		{
			/* let _bt_next do the heavy lifting */
			res = _bt_next(scan, ForwardScanDirection);
			if (!res)
				break;
		}

		/* Save tuple ID, and continue scanning */
		tbm_add_tuples(hashBitmap,
					   &(so->currPos.items[so->currPos.itemIndex].heapTid),
					   1);
	}

	if (n && IsA(n, StreamBitmap))
	{
		stream_add_node((StreamBitmap *) n,
						tbm_create_stream_node(hashBitmap), BMS_OR);

		MIRROREDLOCK_BUFMGR_VERIFY_NO_LOCK_LEAK_EXIT;

		PG_RETURN_POINTER(n);
	}

	MIRROREDLOCK_BUFMGR_VERIFY_NO_LOCK_LEAK_EXIT;

	PG_RETURN_POINTER(hashBitmap);
}