Example #1
/*
 * bt_mk_scankey
 *		Build an insertion scan key that contains comparison data from itup
 *		as well as comparator routines appropriate to the key datatypes.
 *
 *		The result is intended for use with bt_compare().
 */
struct scankey *bt_mk_scankey(struct relation* rel, struct index_tuple* itup)
{
	struct scankey *skey;
	struct tuple *itupdesc;
	int natts;
	int16 *indoption;
	int i;

	itupdesc = REL_DESC(rel);
	natts = REL_GET_NR_ATTR(rel);
	indoption = rel->rd_indoption;

	skey = (struct scankey *)palloc(natts * sizeof(struct scankey));
	for (i = 0; i < natts; i++) {
		struct fmgr_info *procinfo;
		datum_t arg;
		bool null;
		int flags;

		/*
		 * We can use the cached (default) support procs since no cross-type
		 * comparison can be needed.
		 */
		procinfo = index_getprocinfo(rel, i + 1, BT_ORDER_PROC);
		arg = index_getattr(itup, i + 1, itupdesc, &null);
		flags = (null ? SK_ISNULL : 0) | (indoption[i] << SK_BT_INDEX_OPT_SHIFT);
		scankey_init_info(&skey[i],
			flags,
			(attr_nr_t)(i + 1),
			INVALID_STRAT,
			INVALID_OID,
			rel->rd_indcollation[i],
			procinfo,
			arg);
	}

	return skey;
}
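/*
 * Usage sketch (illustrative, not part of this excerpt): build an
 * insertion scan key for an index tuple and compare it against the
 * tuple at a given page offset.  bt_compare()'s argument list is
 * assumed here from the header comment above; only bt_mk_scankey()
 * itself appears in this excerpt.
 */
void example_compare(struct relation *rel, struct index_tuple *itup,
		     page_p page, item_id_t offnum)
{
	struct scankey *skey = bt_mk_scankey(rel, itup);
	int32 result = bt_compare(rel, REL_GET_NR_ATTR(rel), skey, page, offnum);

	if (result == 0) {
		/* itup's key columns equal those of the tuple at offnum */
	}
	pfree(skey);		/* bt_mk_scankey() palloc'd the array */
}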
Example #2
/* ----------------------------------------------------------------
 *		ExecInitForeignScan
 * ----------------------------------------------------------------
 */
foreign_ss *
ExecInitForeignScan(foreign_scan_sc *node, exec_state_n *estate, int eflags)
{
	foreign_ss* scanstate;
	struct relation* currentRelation;
	FdwRoutine *fdwroutine;

	/* check for unsupported flags */
	ASSERT(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

	/*
	 * create state structure
	 */
	scanstate = MK_N(ForeignScanState,foreign_ss);
	scanstate->ss.ps.plan = (plan_n *) node;
	scanstate->ss.ps.state = estate;

	/*
	 * Miscellaneous initialization
	 *
	 * create expression context for node
	 */
	ExecAssignExprContext(estate, &scanstate->ss.ps);
	scanstate->ss.ps.ps_TupFromTlist = false;

	/*
	 * initialize child expressions
	 */
	scanstate->ss.ps.targetlist = (struct list *) exec_init_expr(
						(expr_n *) node->scan.plan.targetlist,
						(plan_state_n *) scanstate);
	scanstate->ss.ps.qual = (struct list *) exec_init_expr(
						(expr_n *) node->scan.plan.qual,
						(plan_state_n *) scanstate);

	/*
	 * tuple table initialization
	 */
	exec_init_result_tupslot(estate, &scanstate->ss.ps);
	exec_init_scan_tupslot(estate, &scanstate->ss);

	/*
	 * open the base relation and acquire appropriate lock on it.
	 */
	currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid);
	scanstate->ss.ss_currentRelation = currentRelation;

	/*
	 * get the scan type from the relation descriptor.
	 */
	ExecAssignScanType(&scanstate->ss, REL_DESC(currentRelation));

	/*
	 * Initialize result tuple type and projection info.
	 */
	ExecAssignResultTypeFromTL(&scanstate->ss.ps);
	ExecAssignScanProjectionInfo(&scanstate->ss);

	/*
	 * Acquire function pointers from the FDW's handler, and init fdw_state.
	 */
	fdwroutine = GetFdwRoutineByRelId(REL_ID(currentRelation));
	scanstate->fdwroutine = fdwroutine;
	scanstate->fdw_state = NULL;

	/*
	 * Tell the FDW to initiate the scan.
	 */
	fdwroutine->BeginForeignScan(scanstate, eflags);

	return scanstate;
}
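/*
 * Sketch of a matching BeginForeignScan callback (illustrative: the
 * private state allocated here is a placeholder; only the hook's
 * signature follows the fdwroutine->BeginForeignScan() call above).
 */
static void
exampleBeginForeignScan(foreign_ss *node, int eflags)
{
	/* nothing to do when the plan is only being EXPLAINed */
	if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
		return;

	/* allocate per-scan state for later hooks to find in fdw_state */
	node->fdw_state = palloc(sizeof(int));	/* placeholder state */
}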
Example #3
/*
 * unique_key_recheck - trigger function to do a deferred uniqueness check.
 *
 * This now also does deferred exclusion-constraint checks, so the name is
 * somewhat historical.
 *
 * This is invoked as an AFTER ROW trigger for both INSERT and UPDATE,
 * for any rows recorded as potentially violating a deferrable unique
 * or exclusion constraint.
 *
 * This may be an end-of-statement check, a commit-time check, or a
 * check triggered by a SET CONSTRAINTS command.
 */
datum_t unique_key_recheck(PG_FUNC_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;
	const char *funcname = "unique_key_recheck";
	struct heap_tuple * new_row;
	struct item_ptr tmptid;
	struct relation * indexRel;
	index_info_n *indexInfo;
	exec_state_n *estate;
	expr_ctx_n *econtext;
	struct tupslot *slot;
	datum_t values[INDEX_MAX_KEYS];
	bool isnull[INDEX_MAX_KEYS];

	/*
	 * Make sure this is being called as an AFTER ROW trigger.  Note:
	 * translatable error strings are shared with ri_triggers.c, so resist the
	 * temptation to fold the function name into them.
	 */
	if (!CALLED_AS_TRIGGER(fcinfo))
		ereport(ERROR, (
		errcode(E_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
		errmsg("function \"%s\" was not called by trigger manager",
			funcname)));

	if (!TRIGGER_FIRED_AFTER(trigdata->tg_event)
		|| !TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
		ereport(ERROR, (
		errcode(E_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
		errmsg("function \"%s\" must be fired AFTER ROW",
			funcname)));

	/*
	 * Get the new data that was inserted/updated.
	 */
	if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
		new_row = trigdata->tg_trigtuple;
	else if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
		new_row = trigdata->tg_newtuple;
	else {
		ereport(ERROR, (
		errcode(E_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
		errmsg("function \"%s\" must be fired for INSERT or UPDATE",
			funcname)));
		new_row = NULL;	/* keep compiler quiet */
	}

	/*
	 * If the new_row is now dead (ie, inserted and then deleted within our
	 * transaction), we can skip the check.  However, we have to be careful,
	 * because this trigger gets queued only in response to index insertions;
	 * which means it does not get queued for HOT updates.  The row we are
	 * called for might now be dead, but have a live HOT child, in which case
	 * we still need to make the check.  Therefore we have to use
	 * heap_hot_search, not just HEAPTUP_VISIBILITY as is done in
	 * the comparable test in RI_FKey_check.
	 *
	 * This might look like just an optimization, because the index AM will
	 * make this identical test before throwing an error.  But it's actually
	 * needed for correctness, because the index AM will also throw an error
	 * if it doesn't find the index entry for the row.  If the row's dead then
	 * it's possible the index entry has also been marked dead, and even
	 * removed.
	 */
	tmptid = new_row->t_self;
	if (!heap_hot_search(&tmptid, trigdata->tg_relation, snap_self, NULL)) {
		/*
		 * All rows in the HOT chain are dead, so skip the check.
		 */
		return PTR_TO_D(NULL);
	}

	/*
	 * Open the index, acquiring a ROW_EXCL_LOCK, just as if we were going
	 * to update it.  (This protects against possible changes of the index
	 * schema, not against concurrent updates.)
	 */
	indexRel = index_open(trigdata->tg_trigger->tgconstrindid, ROW_EXCL_LOCK);
	indexInfo = build_index_info(indexRel);

	/*
	 * The heap tuple must be put into a slot for form_index_datum.
	 */
	slot = make_single_tupslot(REL_DESC(trigdata->tg_relation));
	exec_store_tuple(new_row, slot, INVALID_BUF, false);

	/*
	 * Typically the index won't have expressions, but if it does we need an
	 * exec_state_n to evaluate them.  We need it for exclusion constraints too,
	 * even if they are just on simple columns.
	 */
	if (indexInfo->ii_Expressions != NIL
		|| indexInfo->ii_ExclusionOps != NULL) {
		estate = create_exec_state();
		econtext = get_per_tup_expr_ctx(estate);
		econtext->ecxt_scantuple = slot;
	} else {
		estate = NULL;
	}

	/*
	 * Form the index values and isnull flags for the index entry that we need
	 * to check.
	 *
	 * Note: if the index uses functions that are not as immutable as they are
	 * supposed to be, this could produce an index tuple different from the
	 * original.  The index AM can catch such errors by verifying that it
	 * finds a matching index entry with the tuple's TID.  For exclusion
	 * constraints we check this in check_exclusion_constraint().
	 */
	form_index_datum(indexInfo, slot, estate, values, isnull);

	/*
	 * Now do the appropriate check.
	 */
	if (indexInfo->ii_ExclusionOps == NULL) {
		/*
		 * Note: this is not a real insert; it is a check that the index entry
		 * that has already been inserted is unique.
		 */
		index_insert(
			indexRel,
			values,
			isnull,
			&(new_row->t_self),
			trigdata->tg_relation,
			UNIQUE_CHECK_EXISTING);
	} else {
		/*
		 * For exclusion constraints we just do the normal check, but now it's
		 * okay to throw error.
		 */
		check_exclusion_constraint(
			trigdata->tg_relation,
			indexRel,
			indexInfo,
			&(new_row->t_self),
			values,
			isnull,
			estate,
			false,
			false);
	}

	/*
	 * If that worked, then this index entry is unique or non-excluded, and we
	 * are done.
	 */
	if (estate != NULL)
		free_exec_state(estate);

	exec_drop_single_tupslot(slot);
	index_close(indexRel, ROW_EXCL_LOCK);

	return PTR_TO_D(NULL);
}
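/*
 * Context for the UNIQUE_CHECK_EXISTING call above (the other flag
 * names are assumptions, following their PostgreSQL counterparts): an
 * ordinary non-deferred insert passes UNIQUE_CHECK_YES and errors out
 * immediately on conflict, while a deferrable constraint inserts with
 * UNIQUE_CHECK_PARTIAL and queues this trigger, which re-verifies the
 * already-inserted index entry at deferred-check time.
 */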
Example #4
void
inv_truncate(struct lobj *obj_desc, int len)
{
	int32 pageno = (int32) (len / LO_BLK_SIZE);
	int off;
	struct scankey skey[2];
	struct sys_scan* sd;
	struct heap_tuple* oldtuple;
	Form_pg_largeobject olddata;
	struct {
		bytea	hdr;
		char	data[LO_BLK_SIZE];	/* make struct big enough */
		int32	align_it;/* ensure struct is aligned well enough */
	} workbuf;
	char* workb = VLA_DATA(&workbuf.hdr);
	struct heap_tuple* newtup;
	datum_t	values[Natts_pg_largeobject];
	bool nulls[Natts_pg_largeobject];
	bool replace[Natts_pg_largeobject];
	CatalogIndexState indstate;

	ASSERT(PTR_VALID(obj_desc));

	/* enforce writability because snapshot is probably wrong otherwise */
	if ((obj_desc->flags & IFS_WRLOCK) == 0)
		ereport(ERROR, (
		errcode(E_OBJECT_NOT_IN_PREREQUISITE_STATE),
		errmsg("large object %u was not opened for writing",
			obj_desc->id)));

	/* check existence of the target largeobject */
	if (!large_obj_exists(obj_desc->id))
		ereport(ERROR, (
		errcode(E_UNDEFINED_OBJECT),
		errmsg("large object %u was already dropped", obj_desc->id)));

	open_lo_relation();
	indstate = cat_open_indexes(lo_heap_r);

	/*
	 * Set up to find all pages with desired loid and pageno >= target
	 */
	scankey_init(&skey[0], Anum_pg_largeobject_loid, BT_EQ_STRAT_NR, F_OIDEQ, OID_TO_D(obj_desc->id));
	scankey_init(&skey[1], Anum_pg_largeobject_pageno, BT_GE_STRAT_NR, F_INT4GE, INT32_TO_D(pageno));

	sd = systable_beginscan_ordered(lo_heap_r, lo_index_r, obj_desc->snapshot, 2, skey);

	/*
	 * If possible, get the page the truncation point is in. The truncation
	 * point may be beyond the end of the LO or in a hole.
	 */
	olddata = NULL;
	if ((oldtuple = systable_getnext_ordered(sd, FORWARD_SCANDIR)) != NULL) {
		if (HT_HAS_NULLS(oldtuple))		/* paranoia */
			elog(ERROR, "null field found in pg_largeobject");

		olddata = (Form_pg_largeobject) GET_STRUCT(oldtuple);
		ASSERT(olddata->pageno >= pageno);
	}

	/*
	 * If we found the page of the truncation point we need to truncate the
	 * data in it.  Otherwise if we're in a hole, we need to create a page to
	 * mark the end of data.
	 */
	if (olddata != NULL && olddata->pageno == pageno) {
		/* First, load old data into workbuf */
		bytea* datafield = &(olddata->data);	/* see note at top of file */
		bool pfreeit = false;
		int pagelen;

		if (VLA_EXTENDED(datafield)) {
			datafield = (bytea *) heap_tuple_untoast_attr((struct vla *) datafield);
			pfreeit = true;
		}

		pagelen = getbytealen(datafield);
		ASSERT(pagelen <= LO_BLK_SIZE);
		memcpy(workb, VLA_DATA(datafield), pagelen);
		if (pfreeit)
			pfree(datafield);

		/*
		 * Fill any hole
		 */
		off = len % LO_BLK_SIZE;
		if (off > pagelen)
			pg_memset(workb + pagelen, 0, off - pagelen);

		/* compute length of new page */
		VLA_SET_SZ_STND(&workbuf.hdr, off + VAR_HDR_SZ);

		/*
		 * Form and insert updated tuple
		 */
		memset(values, 0, sizeof(values));
		memset(nulls, false, sizeof(nulls));
		memset(replace, false, sizeof(replace));

		values[Anum_pg_largeobject_data - 1] = PTR_TO_D(&workbuf);
		replace[Anum_pg_largeobject_data - 1] = true;

		newtup = heap_modify_tuple(oldtuple, REL_DESC(lo_heap_r), values, nulls, replace);
		simple_heap_update(lo_heap_r, &newtup->t_self, newtup);
		cat_index_insert(indstate, newtup);
		heap_free_tuple(newtup);
	} else {
		/*
		 * If the first page we found was after the truncation point, we're in
		 * a hole that we'll fill, but we need to delete the later page
		 * because the loop below won't visit it again.
		 */
		if (olddata != NULL) {
			ASSERT(olddata->pageno > pageno);
			simple_heap_delete(lo_heap_r, &oldtuple->t_self);
		}

		/*
		 * Write a brand new page.
		 *
		 * Fill the hole up to the truncation point
		 */
		off = len % LO_BLK_SIZE;
		if (off > 0)
			pg_memset(workb, 0, off);

		/* compute length of new page */
		VLA_SET_SZ_STND(&workbuf.hdr, off + VAR_HDR_SZ);

		/*
		 * Form and insert new tuple
		 */
		memset(values, 0, sizeof(values));
		memset(nulls, false, sizeof(nulls));

		values[Anum_pg_largeobject_loid - 1] = OID_TO_D(obj_desc->id);
		values[Anum_pg_largeobject_pageno - 1] = INT32_TO_D(pageno);
		values[Anum_pg_largeobject_data - 1] = PTR_TO_D(&workbuf);

		newtup = heap_form_tuple(lo_heap_r->rd_att, values, nulls);
		simple_heap_insert(lo_heap_r, newtup);
		cat_index_insert(indstate, newtup);
		heap_free_tuple(newtup);
	}

	/*
	 * Delete any pages after the truncation point.  If the initial search
	 * didn't find a page, then of course there's nothing more to do.
	 */
	if (olddata != NULL) {
		while ((oldtuple = systable_getnext_ordered(sd, FORWARD_SCANDIR)) != NULL) {
			simple_heap_delete(lo_heap_r, &oldtuple->t_self);
		}
	}

	systable_endscan_ordered(sd);
	cat_close_indexes(indstate);

	/*
	 * Advance command counter so that tuple updates will be seen by later
	 * large-object operations in this transaction.
	 */
	cmd_count_incr();
}
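/*
 * Worked example of the arithmetic above, assuming LO_BLK_SIZE is 2048:
 * inv_truncate(obj, 5000) gives pageno = 5000 / 2048 = 2 and
 * off = 5000 % 2048 = 904.  Page 2 is rewritten (or created, if the
 * truncation point lies in a hole) with length 904, and the remaining
 * scan deletes every page with pageno > 2.
 */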
Example #5
int
inv_write(struct lobj *obj_desc, const char *buf, int nbytes)
{
	int nwritten = 0;
	int n;
	int off;
	int len;
	int32	pageno = (int32) (obj_desc->offset / LO_BLK_SIZE);
	struct scankey skey[2];
	struct sys_scan * sd;
	struct heap_tuple *	oldtuple;
	Form_pg_largeobject olddata;
	bool	neednextpage;
	bytea* datafield;
	bool pfreeit;
	struct {
		bytea	hdr;
		char	data[LO_BLK_SIZE];	/* make struct big enough */
		int32	align_it;	/* ensure struct is aligned well enough */
	} workbuf;
	char* workb = VLA_DATA(&workbuf.hdr);
	struct heap_tuple* newtup;
	datum_t	values[Natts_pg_largeobject];
	bool nulls[Natts_pg_largeobject];
	bool replace[Natts_pg_largeobject];
	CatalogIndexState indstate;

	ASSERT(PTR_VALID(obj_desc));
	ASSERT(buf != NULL);

	/* enforce writability because snapshot is probably wrong otherwise */
	if ((obj_desc->flags & IFS_WRLOCK) == 0)
		ereport(ERROR, (
		errcode(E_OBJECT_NOT_IN_PREREQUISITE_STATE),
		errmsg("large object %u was not opened for writing",
			obj_desc->id)));

	/* check existence of the target largeobject */
	if (!large_obj_exists(obj_desc->id))
		ereport(ERROR, (
		errcode(E_UNDEFINED_OBJECT),
		errmsg("large object %u was already dropped", obj_desc->id)));

	if (nbytes <= 0)
		return 0;

	open_lo_relation();
	indstate = cat_open_indexes(lo_heap_r);

	scankey_init(&skey[0], Anum_pg_largeobject_loid, BT_EQ_STRAT_NR, F_OIDEQ, OID_TO_D(obj_desc->id));
	scankey_init(&skey[1], Anum_pg_largeobject_pageno, BT_GE_STRAT_NR, F_INT4GE, INT32_TO_D(pageno));

	sd = systable_beginscan_ordered(lo_heap_r, lo_index_r, obj_desc->snapshot, 2, skey);
	oldtuple = NULL;
	olddata = NULL;
	neednextpage = true;

	while (nwritten < nbytes) {
		/*
		 * If possible, get next pre-existing page of the LO.  We expect the
		 * indexscan will deliver these in order --- but there may be holes.
		 */
		if (neednextpage) {
			if ((oldtuple = systable_getnext_ordered(sd, FORWARD_SCANDIR)) != NULL) {
				if (HT_HAS_NULLS(oldtuple))		/* paranoia */
					elog(ERROR, "null field found in pg_largeobject");

				olddata = (Form_pg_largeobject) GET_STRUCT(oldtuple);
				ASSERT(olddata->pageno >= pageno);
			}

			neednextpage = false;
		}

		/*
		 * If we have a pre-existing page, see if it is the page we want to
		 * write, or a later one.
		 */
		if (olddata != NULL && olddata->pageno == pageno) {
			/*
			 * Update an existing page with fresh data.
			 *
			 * First, load old data into workbuf
			 */
			datafield = &(olddata->data);		/* see note at top of file */
			pfreeit = false;
			if (VLA_EXTENDED(datafield)) {
				datafield = (bytea *)
					heap_tuple_untoast_attr((struct vla *) datafield);
				pfreeit = true;
			}

			len = getbytealen(datafield);
			ASSERT(len <= LO_BLK_SIZE);
			memcpy(workb, VLA_DATA(datafield), len);
			if (pfreeit)
				pfree(datafield);

			/*
			 * Fill any hole
			 */
			off = (int)(obj_desc->offset % LO_BLK_SIZE);
			if (off > len)
				pg_memset(workb + len, 0, off - len);

			/*
			 * Insert appropriate portion of new data
			 */
			n = LO_BLK_SIZE - off;
			n = (n <= (nbytes - nwritten))? n : (nbytes - nwritten);
			memcpy(workb + off, buf + nwritten, n);
			nwritten += n;
			obj_desc->offset += n;
			off += n;

			/* compute valid length of new page */
			len = (len >= off) ? len : off;
			VLA_SET_SZ_STND(&workbuf.hdr, len + VAR_HDR_SZ);

			/*
			 * Form and insert updated tuple
			 */
			memset(values, 0, sizeof(values));
			memset(nulls, false, sizeof(nulls));
			memset(replace, false, sizeof(replace));

			values[Anum_pg_largeobject_data - 1] =  PTR_TO_D(&workbuf);
			replace[Anum_pg_largeobject_data - 1] = true;

			newtup = heap_modify_tuple(oldtuple, REL_DESC(lo_heap_r), values, nulls, replace);
			simple_heap_update(lo_heap_r, &newtup->t_self, newtup);
			cat_index_insert(indstate, newtup);
			heap_free_tuple(newtup);

			/*
			 * We're done with this old page.
			 */
			oldtuple = NULL;
			olddata = NULL;
			neednextpage = true;
		} else {
			/*
			 * Write a brand new page.
			 *
			 * First, fill any hole
			 */
			off = (int)(obj_desc->offset % LO_BLK_SIZE);
			if (off > 0)
				pg_memset(workb, 0, off);

			/*
			 * Insert appropriate portion of new data
			 */
			n = LO_BLK_SIZE - off;
			n = (n <= (nbytes - nwritten))? n : (nbytes - nwritten);
			memcpy(workb + off, buf + nwritten, n);
			nwritten += n;
			obj_desc->offset += n;

			/* compute valid length of new page */
			len = off + n;
			VLA_SET_SZ_STND(&workbuf.hdr, len + VAR_HDR_SZ);

			/*
			 * Form and insert updated tuple
			 */
			memset(values, 0, sizeof(values));
			memset(nulls, false, sizeof(nulls));

			values[Anum_pg_largeobject_loid - 1] = OID_TO_D(obj_desc->id);
			values[Anum_pg_largeobject_pageno - 1] = INT32_TO_D(pageno);
			values[Anum_pg_largeobject_data - 1] = PTR_TO_D(&workbuf);

			newtup = heap_form_tuple(lo_heap_r->rd_att, values, nulls);
			simple_heap_insert(lo_heap_r, newtup);
			cat_index_insert(indstate, newtup);
			heap_free_tuple(newtup);
		}

		pageno++;
	}

	systable_endscan_ordered(sd);
	cat_close_indexes(indstate);

	/*
	 * Advance command counter so that my tuple updates will be seen by 
	 * later large-object operations in this transaction.
	 */
	cmd_count_incr();

	return nwritten;
}
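/*
 * Worked example, again assuming LO_BLK_SIZE is 2048: with
 * obj_desc->offset == 2000, inv_write(obj, buf, 100) first updates (or
 * creates) page 0 at off = 2000, copying n = 2048 - 2000 = 48 bytes,
 * then page 1 at off = 0 with the remaining 52 bytes, leaving
 * obj_desc->offset == 2100 and returning 100.
 */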
Example #6
/*
 * Test whether an indextuple satisfies all the scankey conditions.
 *
 * If so, copy its TID into scan->xs_ctup.t_self, and return TRUE.
 * If not, return FALSE (xs_ctup is not changed).
 *
 * If the tuple fails to pass the qual, we also determine whether there's
 * any need to continue the scan beyond this tuple, and set *continuescan
 * accordingly.  See comments for bt_preproc_keys(), above, about how
 * this is done.
 *
 * scan: index scan descriptor (containing a search-type scankey)
 * page: buffer page containing index tuple
 * offnum: offset number of index tuple (must be a valid item!)
 * dir: direction we are scanning in
 * continuescan: output parameter (will be set correctly in all cases)
 */
bool
bt_check_keys(
	struct index_scan* scan,
	page_p page,
	item_id_t offnum,
	enum scandir dir,
	bool* continuescan)
{
	struct item_id *iid;
	bool tuple_valid;
	struct index_tuple *tuple;
	struct tuple *tupdesc;
	struct bt_scan_opaque *so;
	int keysz;
	int ikey;
	struct scankey *key;

	iid = PAGE_ITEM_ID(page, offnum);
	*continuescan = true;	/* default assumption */

	/*
	 * If the scan specifies not to return killed tuples, then we treat a
	 * killed tuple as not passing the qual.  Most of the time, it's a win to
	 * not bother examining the tuple's index keys, but just return
	 * immediately with continuescan = true to proceed to the next tuple.
	 * However, if this is the last tuple on the page, we should check the
	 * index keys to prevent uselessly advancing to the next page.
	 */
	if (scan->ignore_killed_tuples && ITEMID_DEAD(iid)) {
		/* return immediately if there are more tuples on the page */
		if (SCANDIR_FORWARD(dir)) {
			if (offnum < PAGE_MAX_ITEM_ID(page))
				return false;
		} else {
			struct bt_page_opaque *opaque;

			opaque = (struct bt_page_opaque *) PAGE_SPECIAL_PTR(page);
			if (offnum > P_FIRSTDATAKEY(opaque))
				return false;
		}

		/*
		 * OK, we want to check the keys, but we'll return FALSE even if the
		 * tuple passes the key tests.
		 */
		tuple_valid = false;
	} else
		tuple_valid = true;

	tuple = (struct index_tuple*) PAGE_GET_ITEM(page, iid);
	tupdesc = REL_DESC(scan->indexRelation);
	so = (struct bt_scan_opaque*) scan->opaque;
	keysz = so->numberOfKeys;

	for (key = so->keyData, ikey = 0; ikey < keysz; key++, ikey++) {
		datum_t datum;
		bool isNull;
		datum_t test;

		/* row-comparison keys need special processing */
		if (key->sk_flags & SK_ROW_HEADER) {
			if (bt_check_rowcompare(key, tuple, tupdesc, dir, continuescan))
				continue;

			return false;
		}

		datum = index_getattr(tuple, key->sk_attno, tupdesc, &isNull);
		if (key->sk_flags & SK_ISNULL) {
			/* Handle IS NULL/NOT NULL tests */
			if (key->sk_flags & SK_SEARCHNULL) {
				if (isNull)
					continue;	/* tuple satisfies this qual */
			} else {
				ASSERT(key->sk_flags & SK_SEARCHNOTNULL);
				if (!isNull)
					continue;	/* tuple satisfies this qual */
			}

			/*
			 * Tuple fails this qual.  If it's a required qual for the current
			 * scan direction, then we can conclude no further tuples will
			 * pass, either.
			 */
			if ((key->sk_flags & SK_BT_REQFWD)
				&& SCANDIR_FORWARD(dir))
				*continuescan = false;
			else if ((key->sk_flags & SK_BT_REQBKWD)
				&& SCANDIR_BACKWARD(dir))
				*continuescan = false;

			/*
			 * In any case, this indextuple doesn't match the qual.
			 */
			return false;
		}

		if (isNull) {
			if (key->sk_flags & SK_BT_NULLS_FIRST) {
				/*
				 * Since NULLs are sorted before non-NULLs, we know we have
				 * reached the lower limit of the range of values for this
				 * index attr.  On a backward scan, we can stop if this qual
				 * is one of the "must match" subset.  On a forward scan,
				 * however, we should keep going.
				 */
				if ((key->sk_flags & SK_BT_REQBKWD)
					&& SCANDIR_BACKWARD(dir))
					*continuescan = false;
			} else {
				/*
				 * Since NULLs are sorted after non-NULLs, we know we have
				 * reached the upper limit of the range of values for this
				 * index attr.  On a forward scan, we can stop if this qual is
				 * one of the "must match" subset.  On a backward scan,
				 * however, we should keep going.
				 */
				if ((key->sk_flags & SK_BT_REQFWD)
					&& SCANDIR_FORWARD(dir))
					*continuescan = false;
			}

			/*
			 * In any case, this indextuple doesn't match the qual.
			 */
			return false;
		}

		test = fc_2coll(&key->sk_func, key->sk_collation, datum, key->sk_argument);
		if (!D_TO_BOOL(test)) {
			/*
			 * Tuple fails this qual.  If it's a required qual for the current
			 * scan direction, then we can conclude no further tuples will
			 * pass, either.
			 *
			 * Note: because we stop the scan as soon as any required equality
			 * qual fails, it is critical that equality quals be used for the
			 * initial positioning in _bt_first() when they are available. See
			 * comments in _bt_first().
			 */
			if ((key->sk_flags & SK_BT_REQFWD)
				&& SCANDIR_FORWARD(dir))
				*continuescan = false;
			else if ((key->sk_flags & SK_BT_REQBKWD)
				&& SCANDIR_BACKWARD(dir))
				*continuescan = false;

			/*
			 * In any case, this indextuple doesn't match the qual.
			 */
			return false;
		}
	}

	/* If we get here, the tuple passes all index quals. */
	if (tuple_valid)
		scan->xs_ctup.t_self = tuple->t_tid;

	return tuple_valid;
}
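/*
 * Illustrative caller, modeled on the btree read-page loop (the
 * surrounding variables and this helper are assumptions): walk the
 * items on a page until a required qual says no later tuple can match.
 */
static void
example_scan_page(struct index_scan *scan, page_p page,
		  struct bt_page_opaque *opaque, enum scandir dir)
{
	bool continuescan = true;
	item_id_t offnum;

	for (offnum = P_FIRSTDATAKEY(opaque);
	     offnum <= PAGE_MAX_ITEM_ID(page);
	     offnum++) {
		if (bt_check_keys(scan, page, offnum, dir, &continuescan)) {
			/* matched: the tuple's TID is now in scan->xs_ctup.t_self */
		}
		if (!continuescan)
			break;	/* no tuple beyond this one can pass */
	}
}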
Example #7
/* ----------------------------------------------------------------
 *		procedure_create
 *
 * Note: allParameterTypes, parameterModes, parameterNames, and proconfig
 * are either arrays of the proper types or NULL.  We declare them Datum,
 * not "ArrayType *", to avoid importing array.h into pg_proc_fn.h.
 * ----------------------------------------------------------------
 */
oid_t
procedure_create(
	const char *procedureName,
	oid_t procNamespace,
	bool replace,
	bool returnsSet,
	oid_t returnType,
	oid_t languageObjectId,
	oid_t languageValidator,
	const char *prosrc,
	const char *probin,
	bool isAgg,
	bool isWindowFunc,
	bool security_definer,
	bool isStrict,
	char volatility,
	oid_vector_s *parameterTypes,
	datum_t allParameterTypes,
	datum_t parameterModes,
	datum_t parameterNames,
	struct list *parameterDefaults,
	datum_t proconfig,
	float4 procost,
	float4 prorows)
{
	oid_t retval;
	int parameterCount;
	int allParamCount;
	oid_t* allParams;
	bool genericInParam = false;
	bool genericOutParam = false;
	bool internalInParam = false;
	bool internalOutParam = false;
	oid_t variadicType = INVALID_OID;
	oid_t proowner = get_uid();
	acl_s* proacl = NULL;
	struct relation* rel;
	struct heap_tuple* tup;
	struct heap_tuple* oldtup;
	bool nulls[Natts_pg_proc];
	datum_t	values[Natts_pg_proc];
	bool replaces[Natts_pg_proc];
	oid_t relid;
	struct name procname;
	struct tuple* tupDesc;
	bool is_update;
	struct objaddr myself;
	struct objaddr referenced;
	int i;

	/*
	 * sanity checks
	 */
	ASSERT(PTR_VALID(prosrc));

	parameterCount = parameterTypes->dim1;
	if (parameterCount < 0 || parameterCount > FUNC_MAX_ARGS) {
		ereport(ERROR, (
		errcode(E_TOO_MANY_ARGUMENTS),
		errmsg_plural("functions cannot have more than %d argument",
			"functions cannot have more than %d arguments",
			FUNC_MAX_ARGS,
			FUNC_MAX_ARGS)));
	}

	/* note: the above is correct, we do NOT count output arguments */
	if (allParameterTypes != PTR_TO_D(NULL)) {
		/*
		 * We expect the array to be a 1-D OID array; verify that. We don't
		 * need to use deconstruct_array() since the array data is just going
		 * to look like a C array of OID values.
		 */
		array_s *allParamArray;

		allParamArray = (array_s*) D_TO_PTR(allParameterTypes);
		allParamCount = ARR_DIMS(allParamArray)[0];
		if (ARR_NDIM(allParamArray) != 1
			|| allParamCount <= 0
			|| ARR_HASNULL(allParamArray)
			|| ARR_ELEMTYPE(allParamArray) != OIDOID)
			elog(ERROR, "allParameterTypes is not a 1-D oid_t array");

		allParams = (oid_t*) ARR_DATA_PTR(allParamArray);
		ASSERT(allParamCount >= parameterCount);
		/* we assume caller got the contents right */
	} else {
		allParamCount = parameterCount;
		allParams = parameterTypes->values;
	}

	/*
	 * Do not allow polymorphic return type unless at least one input argument
	 * is polymorphic.  Also, do not allow return type INTERNAL unless at
	 * least one input argument is INTERNAL.
	 */
	for (i = 0; i < parameterCount; i++) {
		switch (parameterTypes->values[i]) {
		case ANYARRAYOID:
		case ANYELEMENTOID:
		case ANYNONARRAYOID:
		case ANYENUMOID:
			genericInParam = true;
			break;

		case INTERNALOID:
			internalInParam = true;
			break;
		}
	}

	if (allParameterTypes != PTR_TO_D(NULL)) {
		for (i = 0; i < allParamCount; i++) {
			/*
			 * We don't bother to distinguish input and output params here, so
			 * if there is, say, just an input INTERNAL param then we will
			 * still set internalOutParam.  This is OK since we don't really
			 * care.
			 */
			switch (allParams[i]) {
			case ANYARRAYOID:
			case ANYELEMENTOID:
			case ANYNONARRAYOID:
			case ANYENUMOID:
				genericOutParam = true;
				break;

			case INTERNALOID:
				internalOutParam = true;
				break;
			}
		}
	}

	if ((is_polymorphic_type(returnType) || genericOutParam)
		&& !genericInParam) {
		ereport(ERROR, (
		errcode(E_INVALID_FUNCTION_DEFINITION),
		errmsg("cannot determine result data type"),
		errdetail("A function returning a polymorphic type must have"
			" at least one polymorphic argument.")));
	}

	if ((returnType == INTERNALOID || internalOutParam)
		&& !internalInParam) {
		ereport(ERROR, (
		errcode(E_INVALID_FUNCTION_DEFINITION),
		errmsg("unsafe use of pseudo-type \"internal\""),
		errdetail("A function returning \"internal\" must have at"
			" least one \"internal\" argument.")));
	}

	/*
	 * don't allow functions of complex types that have the same name as
	 * existing attributes of the type
	 */
	if (parameterCount == 1
		&& OID_VALID(parameterTypes->values[0])
		&& (relid = typeid_to_relid(parameterTypes->values[0])) != INVALID_OID
		&& get_attnum(relid, procedureName) != INVALID_ATTR_NR) {
		ereport(ERROR, (
		errcode(E_DUPLICATE_COLUMN),
		errmsg("\"%s\" is already an attribute of type %s",
			procedureName,
			format_type_be(parameterTypes->values[0]))));
	}

	if (parameterModes != PTR_TO_D(NULL)) {
		/*
		 * We expect the array to be a 1-D CHAR array; verify that. We don't
		 * need to use deconstruct_array() since the array data is just going
		 * to look like a C array of char values.
		 */
		array_s* modesArray;
		char* modes;

		modesArray = (array_s *) D_TO_PTR(parameterModes);
		if (ARR_NDIM(modesArray) != 1
			|| ARR_DIMS(modesArray)[0] != allParamCount
			|| ARR_HASNULL(modesArray)
			|| ARR_ELEMTYPE(modesArray) != CHAROID)
			elog(ERROR, "parameterModes is not a 1-D char array");

		modes = (char*) ARR_DATA_PTR(modesArray);

		/*
		 * Only the last input parameter can be variadic; if it is, save its
		 * element type.  Errors here are just elog since caller should have
		 * checked this already.
		 */
		for (i = 0; i < allParamCount; i++) {
			switch (modes[i]) {
			case PROARGMODE_IN:
			case PROARGMODE_INOUT:
				if (OID_VALID(variadicType))
					elog(ERROR, "variadic parameter must be last");
				break;

			case PROARGMODE_OUT:
			case PROARGMODE_TABLE:
				/* okay */
				break;

			case PROARGMODE_VARIADIC:
				if (OID_VALID(variadicType))
					elog(ERROR, "variadic parameter must be last");

				switch (allParams[i]) {
				case ANYOID:
					variadicType = ANYOID;
					break;

				case ANYARRAYOID:
					variadicType = ANYELEMENTOID;
					break;

				default:
					variadicType = get_element_type(allParams[i]);
					if (!OID_VALID(variadicType))
						elog(ERROR, "variadic parameter is not an array");
					break;
				}
				break;

			default:
				elog(ERROR, "invalid parameter mode '%c'", modes[i]);
				break;
			}
		}
	}

	/*
	 * All seems OK; prepare the data to be inserted into pg_proc.
	 */

	for (i = 0; i < Natts_pg_proc; ++i) {
		nulls[i] = false;
		values[i] = (datum_t) 0;
		replaces[i] = true;
	}

	namestrcpy(&procname, procedureName);
	values[Anum_pg_proc_proname - 1] = NAME_TO_D(&procname);
	values[Anum_pg_proc_pronamespace - 1] = OID_TO_D(procNamespace);
	values[Anum_pg_proc_proowner - 1] = OID_TO_D(proowner);
	values[Anum_pg_proc_prolang - 1] = OID_TO_D(languageObjectId);
	values[Anum_pg_proc_procost - 1] = FLOAT4_TO_D(procost);
	values[Anum_pg_proc_prorows - 1] = FLOAT4_TO_D(prorows);
	values[Anum_pg_proc_provariadic - 1] = OID_TO_D(variadicType);
	values[Anum_pg_proc_proisagg - 1] = BOOL_TO_D(isAgg);
	values[Anum_pg_proc_proiswindow - 1] = BOOL_TO_D(isWindowFunc);
	values[Anum_pg_proc_prosecdef - 1] = BOOL_TO_D(security_definer);
	values[Anum_pg_proc_proisstrict - 1] = BOOL_TO_D(isStrict);
	values[Anum_pg_proc_proretset - 1] = BOOL_TO_D(returnsSet);
	values[Anum_pg_proc_provolatile - 1] = CHAR_TO_D(volatility);
	values[Anum_pg_proc_pronargs - 1] = UINT16_TO_D(parameterCount);
	values[Anum_pg_proc_pronargdefaults - 1] = UINT16_TO_D(list_length(parameterDefaults));
	values[Anum_pg_proc_prorettype - 1] = OID_TO_D(returnType);
	values[Anum_pg_proc_proargtypes - 1] = PTR_TO_D(parameterTypes);

	if (allParameterTypes != PTR_TO_D(NULL))
		values[Anum_pg_proc_proallargtypes - 1] = allParameterTypes;
	else
		nulls[Anum_pg_proc_proallargtypes - 1] = true;

	if (parameterModes != PTR_TO_D(NULL))
		values[Anum_pg_proc_proargmodes - 1] = parameterModes;
	else
		nulls[Anum_pg_proc_proargmodes - 1] = true;

	if (parameterNames != PTR_TO_D(NULL))
		values[Anum_pg_proc_proargnames - 1] = parameterNames;
	else
		nulls[Anum_pg_proc_proargnames - 1] = true;

	if (parameterDefaults != NIL)
		values[Anum_pg_proc_proargdefaults - 1] = CStringGetTextDatum(
			node_to_string(parameterDefaults));
	else
		nulls[Anum_pg_proc_proargdefaults - 1] = true;

	values[Anum_pg_proc_prosrc - 1] = CStringGetTextDatum(prosrc);
	if (probin)
		values[Anum_pg_proc_probin - 1] = CStringGetTextDatum(probin);
	else
		nulls[Anum_pg_proc_probin - 1] = true;

	if (proconfig != PTR_TO_D(NULL))
		values[Anum_pg_proc_proconfig - 1] = proconfig;
	else
		nulls[Anum_pg_proc_proconfig - 1] = true;

	/* 
	 * proacl will be determined later
	 */

	rel = heap_open(ProcedureRelationId, ROW_EXCL_LOCK);
	tupDesc = REL_DESC(rel);

	/* Check for pre-existing definition */
	oldtup = search_syscache3(
		PROCNAMEARGSNSP,
		PTR_TO_D(procedureName),
		PTR_TO_D(parameterTypes),
		OID_TO_D(procNamespace));

	if (HT_VALID(oldtup)) {
		/* There is one; okay to replace it? */
		Form_pg_proc oldproc;
		datum_t	proargnames;
		bool isnull;

		oldproc = (Form_pg_proc) GET_STRUCT(oldtup);
		if (!replace) {
			ereport(ERROR, (
			errcode(E_DUPLICATE_FUNCTION),
			errmsg("function \"%s\" already exists with same argument types",
				procedureName)));
		}

		if (!pg_proc_ownercheck(HEAPTUP_OID(oldtup), proowner))
			aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC, procedureName);

		/*
		 * Not okay to change the return type of the existing proc, since
		 * existing rules, views, etc may depend on the return type.
		 */
		if (returnType != oldproc->prorettype
			|| returnsSet != oldproc->proretset) {
			ereport(ERROR, (
			errcode(E_INVALID_FUNCTION_DEFINITION),
			errmsg("cannot change return type of existing function"),
			errhint("Use DROP FUNCTION first.")));
		}

		/*
		 * If it returns RECORD, check for possible change of record type
		 * implied by OUT parameters
		 */
		if (returnType == RECORDOID) {
			struct tuple* olddesc;
			struct tuple* newdesc;

			olddesc = build_function_result_tupdesc_t(oldtup);
			newdesc = build_function_result_tupdesc_d(
				allParameterTypes,
				parameterModes,
				parameterNames);

			if (olddesc == NULL
				&& newdesc == NULL) {
				 /* ok, both are runtime-defined RECORDs */ ;
			} else if (olddesc == NULL
				|| newdesc == NULL
				|| !tupdesc_equal(olddesc, newdesc)) {
				ereport(ERROR, (
				errcode(E_INVALID_FUNCTION_DEFINITION),
				errmsg("cannot change return type of existing function"),
				errdetail("Row type defined by OUT parameters is different."),
				errhint("Use DROP FUNCTION first.")));
			}
		}

		/*
		 * If there were any named input parameters, check to make sure the
		 * names have not been changed, as this could break existing calls. We
		 * allow adding names to formerly unnamed parameters, though.
		 */
		proargnames = syscache_attr(
			PROCNAMEARGSNSP,
			oldtup,
			Anum_pg_proc_proargnames,
			&isnull);
		if (!isnull) {
			datum_t	proargmodes;
			char** old_arg_names;
			char** new_arg_names;
			int n_old_arg_names;
			int n_new_arg_names;
			int j;

			proargmodes = syscache_attr(
				PROCNAMEARGSNSP,
				oldtup,
				Anum_pg_proc_proargmodes,
				&isnull);
			if (isnull)
				proargmodes = PTR_TO_D(NULL);	/* just to be sure */

			n_old_arg_names = get_func_input_arg_names(
				proargnames,
				proargmodes,
				&old_arg_names);
			n_new_arg_names = get_func_input_arg_names(
				parameterNames,
				parameterModes,
				&new_arg_names);
			for (j = 0; j < n_old_arg_names; j++) {
				if (old_arg_names[j] == NULL)
					continue;

				if (j >= n_new_arg_names
					|| new_arg_names[j] == NULL
					|| strcmp(old_arg_names[j], new_arg_names[j]) != 0) {
					ereport(ERROR,(
					errcode(E_INVALID_FUNCTION_DEFINITION),
					errmsg("cannot change name of input parameter \"%s\"",
						old_arg_names[j]),
					errhint("Use DROP FUNCTION first.")));
				}
			}
		}

		/*
		 * If there are existing defaults, check compatibility: redefinition
		 * must not remove any defaults nor change their types.  (Removing a
		 * default might cause a function to fail to satisfy an existing call.
		 * Changing type would only be possible if the associated parameter is
		 * polymorphic, and in such cases a change of default type might alter
		 * the resolved output type of existing calls.)
		 */
		if (oldproc->pronargdefaults != 0) {
			datum_t	proargdefaults;
			struct list* oldDefaults;
			struct list_cell* oldlc;
			struct list_cell* newlc;

			if (list_length(parameterDefaults) < oldproc->pronargdefaults) {
				ereport(ERROR, (
				errcode(E_INVALID_FUNCTION_DEFINITION),
				errmsg("cannot remove parameter defaults from existing function"),
				errhint("Use DROP FUNCTION first.")));
			}

			proargdefaults = syscache_attr(
				PROCNAMEARGSNSP,
				oldtup,
				Anum_pg_proc_proargdefaults,
				&isnull);
			ASSERT(!isnull);

			oldDefaults = (struct list*) string_to_node(
				TextD_TO_CSTRING(proargdefaults));

			ASSERT(IS_A(oldDefaults, List));
			ASSERT(list_length(oldDefaults) == oldproc->pronargdefaults);

			/* new list can have more defaults than old, advance over 'em */
			newlc = list_head(parameterDefaults);
			for (i = list_length(parameterDefaults) - oldproc->pronargdefaults;
				i > 0;
				i--)
				newlc = lnext(newlc);

			foreach(oldlc, oldDefaults) {
				node_n* oldDef;
				node_n* newDef;

				oldDef = (node_n*) lfirst(oldlc);
				newDef = (node_n*) lfirst(newlc);
				if (expr_type(oldDef) != expr_type(newDef)) {
					ereport(ERROR,(
					errcode(E_INVALID_FUNCTION_DEFINITION),
					errmsg("cannot change data type of existing"
						" parameter default value"),
					errhint("Use DROP FUNCTION first.")));
				}

				newlc = lnext(newlc);
			}
		}
	}
	/* ... */
}
Example #8
struct sparc_rel_desc {
	unsigned char nbytes;
	unsigned char width;
	unsigned char shift;
	unsigned char sparc_rel_check: 2;
	unsigned char pc_relative:     1;
	unsigned char unaligned:       1;
};

#define REL_DESC(t, n, w, s, c, r, u) \
	[t] { nbytes: n, width: w, shift: s, sparc_rel_check: c, pc_relative: r, unaligned: u }

static struct sparc_rel_desc sparc_rels[] = {
	REL_DESC(R_SPARC_NONE,    0,  0,  0, sparc_rel_check_none, 0, 0),
	REL_DESC(R_SPARC_8,       1,  8,  0, sparc_rel_check_bits, 0, 0),
	REL_DESC(R_SPARC_16,      2, 16,  0, sparc_rel_check_bits, 0, 0),
	REL_DESC(R_SPARC_32,      4, 32,  0, sparc_rel_check_bits, 0, 0),
	REL_DESC(R_SPARC_DISP8,   1,  8,  0, sparc_rel_check_sign, 1, 0),
	REL_DESC(R_SPARC_DISP16,  2, 16,  0, sparc_rel_check_sign, 1, 0),
	REL_DESC(R_SPARC_DISP32,  4, 32,  0, sparc_rel_check_sign, 1, 0),
	REL_DESC(R_SPARC_WDISP30, 4, 30,  2, sparc_rel_check_sign, 1, 0),
	REL_DESC(R_SPARC_WDISP22, 4, 22,  2, sparc_rel_check_sign, 1, 0),
	REL_DESC(R_SPARC_HI22,    4, 22, 10, sparc_rel_check_none, 0, 0),
	REL_DESC(R_SPARC_22,      4, 22,  0, sparc_rel_check_bits, 0, 0),
	REL_DESC(R_SPARC_13,      4, 13,  0, sparc_rel_check_bits, 0, 0),
	REL_DESC(R_SPARC_LO10,    4, 10,  0, sparc_rel_check_none, 0, 0),
	REL_DESC(R_SPARC_PC10,    4, 10,  0, sparc_rel_check_none, 1, 0),
	REL_DESC(R_SPARC_PC22,    4, 22, 10, sparc_rel_check_bits, 1, 0),
	/* ... */
};
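/*
 * Illustrative helper (not from this excerpt) showing how the width and
 * shift fields are meant to be applied: e.g. for R_SPARC_HI22 the
 * relocated value is shifted right by 10 and truncated to 22 bits
 * before being patched into the instruction word.
 */
static unsigned long
sparc_rel_field(const struct sparc_rel_desc *d, unsigned long v)
{
	v >>= d->shift;				/* discard low-order bits */
	if (d->width < 8 * sizeof(v))
		v &= (1UL << d->width) - 1;	/* keep only .width bits */
	return v;
}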
Example #9
/*
 * EnumValuesCreate
 *		Create an entry in pg_enum for each of the supplied enum values.
 *
 * vals is a list of value_n strings.
 */
void EnumValuesCreate(oid_t enumTypeOid, struct list *vals)
{
	struct relation *pg_enum;
	struct name enumlabel;
	oid_t *oids;
	int elemno;
	int num_elems;
	datum_t	values[Natts_pg_enum];
	bool nulls[Natts_pg_enum];
	struct list_cell *lc;
	struct heap_tuple *tup;

	num_elems = list_length(vals);

	/*
	 * We do not bother to check the list of values for duplicates. If you
	 * have any, you'll get a less-than-friendly unique-index violation. It
	 * is probably not worth trying harder.
	 */
	pg_enum = heap_open(EnumRelationId, ROW_EXCL_LOCK);

	/*
	 * Allocate OIDs for the enum's members.
	 *
	 * While this method does not absolutely guarantee that we generate no
	 * duplicate OIDs (since we haven't entered each oid into the table before
	 * allocating the next), trouble could only occur if the OID counter wraps
	 * all the way around before we finish. Which seems unlikely.
	 */
	oids = (oid_t *) palloc(num_elems * sizeof(oid_t));
	for (elemno = 0; elemno < num_elems; elemno++) {
		/*
		 * We assign even-numbered OIDs to all the new enum labels.  This
		 * tells the comparison functions the OIDs are in the correct sort
		 * order and can be compared directly.
		 */
		oid_t new_oid;

		do {
			new_oid = get_new_oid(pg_enum);
		} while (new_oid & 1);
		oids[elemno] = new_oid;
	}

	/* sort them, just in case OID counter wrapped from high to low */
	qsort(oids, num_elems, sizeof(oid_t), oid_cmp);

	/* and make the entries */
	memset(nulls, false, sizeof(nulls));

	elemno = 0;
	foreach(lc, vals) {
		char *lab;

		lab = str_value(lfirst(lc));

		/*
		 * labels are stored in a name field, for easier syscache lookup, so
		 * check the length to make sure it's within range.
		 */
		if (strlen(lab) > (NAMEDATALEN - 1))
			ereport(ERROR, (
			errcode(E_INVALID_NAME),
			errmsg("invalid enum label \"%s\"", lab),
			errdetail("Labels must be %d characters or less.",
				NAMEDATALEN - 1)));

		values[Anum_pg_enum_enumtypid - 1] = OID_TO_D(enumTypeOid);
		values[Anum_pg_enum_enumsortorder - 1] = FLOAT4_TO_D(elemno + 1);
		namestrcpy(&enumlabel, lab);
		values[Anum_pg_enum_enumlabel - 1] = NAME_TO_D(&enumlabel);

		tup = heap_form_tuple(REL_DESC(pg_enum), values, nulls);
		HT_SET_OID(tup, oids[elemno]);

		simple_heap_insert(pg_enum, tup);
		cat_update_indexes(pg_enum, tup);
		heap_free_tuple(tup);

		elemno++;
	}

	/* clean up; heap_close() assumed as the counterpart of heap_open() */
	pfree(oids);
	heap_close(pg_enum, ROW_EXCL_LOCK);
}
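/*
 * Hypothetical caller sketch: create the pg_enum rows for a three-label
 * enum type.  The list-building helpers lappend() and makeString() are
 * assumed from the usual list API; only EnumValuesCreate() itself is
 * shown in this excerpt.
 */
static void
example_create_mood(oid_t enumTypeOid)
{
	struct list *vals = NIL;

	vals = lappend(vals, makeString("sad"));
	vals = lappend(vals, makeString("ok"));
	vals = lappend(vals, makeString("happy"));
	EnumValuesCreate(enumTypeOid, vals);	/* enumsortorder 1, 2, 3 */
}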
Example #10
/* ----------------------------------------------------------------
 *		ExecInitIndexScan
 *
 *		Initializes the index scan's state information, creates
 *		scan keys, and opens the base and index relations.
 *
 *		Note: index scans have 2 sets of state information because
 *			  we have to keep track of the base relation and the
 *			  index relation.
 * ----------------------------------------------------------------
 */
index_ss *
ExecInitIndexScan(index_scan_sc *node, exec_state_n *estate, int eflags)
{
	index_ss* indexstate;
	struct relation* currentRelation;
	bool relistarget;

	/*
	 * create state structure
	 */
	indexstate = MK_N(IndexScanState,index_ss);
	indexstate->ss.ps.plan = (plan_n *) node;
	indexstate->ss.ps.state = estate;

	/*
	 * Miscellaneous initialization
	 * create expression context for node
	 */
	ExecAssignExprContext(estate, &indexstate->ss.ps);
	indexstate->ss.ps.ps_TupFromTlist = false;

	/*
	 * initialize child expressions
	 *
	 * Note: we don't initialize all of the indexqual expression, only the
	 * sub-parts corresponding to runtime keys (see below).  Likewise for
	 * indexorderby, if any.  But the indexqualorig expression is always
	 * initialized even though it will only be used in some uncommon cases ---
	 * would be nice to improve that.  (Problem is that any SubPlans present
	 * in the expression must be found now...)
	 */
	indexstate->ss.ps.targetlist = (struct list*) exec_init_expr(
						(expr_n*) node->scan.plan.targetlist,
						(plan_state_n*) indexstate);
	indexstate->ss.ps.qual = (struct list*) exec_init_expr(
						(expr_n*) node->scan.plan.qual,
						(plan_state_n*) indexstate);
	indexstate->indexqualorig = (struct list*) exec_init_expr(
						(expr_n*) node->indexqualorig,
						(plan_state_n*) indexstate);

	/*
	 * tuple table initialization
	 */
	exec_init_result_tupslot(estate, &indexstate->ss.ps);
	exec_init_scan_tupslot(estate, &indexstate->ss);

	/*
	 * open the base relation and acquire appropriate lock on it.
	 */
	currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid);
	indexstate->ss.ss_currentRelation = currentRelation;
	indexstate->ss.ss_currentScanDesc = NULL;	/* no heap scan here */

	/*
	 * get the scan type from the relation descriptor.
	 */
	ExecAssignScanType(&indexstate->ss, REL_DESC(currentRelation));

	/*
	 * Initialize result tuple type and projection info.
	 */
	ExecAssignResultTypeFromTL(&indexstate->ss.ps);
	ExecAssignScanProjectionInfo(&indexstate->ss);

	/*
	 * If we are just doing EXPLAIN (ie, aren't going to run the plan), stop
	 * here.  This allows an index-advisor plugin to EXPLAIN a plan containing
	 * references to nonexistent indexes.
	 */
	if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
		return indexstate;

	/*
	 * Open the index relation.
	 *
	 * If the parent table is one of the target relations of the query, then
	 * init_plan already opened and write-locked the index, so we can avoid
	 * taking another lock here.  Otherwise we need a normal reader's lock.
	 */
	relistarget = ExecRelationIsTargetRelation(estate, node->scan.scanrelid);
	indexstate->iss_RelationDesc = index_open(node->indexid,
						relistarget ? NO_LOCK : ACCESS_SHR_LOCK);

	/*
	 * Initialize index-specific scan state
	 */
	indexstate->iss_RuntimeKeysReady = false;
	indexstate->iss_RuntimeKeys = NULL;
	indexstate->iss_NumRuntimeKeys = 0;

	/*
	 * build the index scan keys from the index qualification
	 */
	ExecIndexBuildScanKeys((plan_state_n *) indexstate,
		indexstate->iss_RelationDesc,
		node->scan.scanrelid,
		node->indexqual,
		false,
		&indexstate->iss_ScanKeys,
		&indexstate->iss_NumScanKeys,
		&indexstate->iss_RuntimeKeys,
		&indexstate->iss_NumRuntimeKeys,
		NULL,	/* no ArrayKeys */
		NULL);

	/*
	 * any ORDER BY exprs have to be turned into scankeys in the same way
	 */
	ExecIndexBuildScanKeys((plan_state_n *) indexstate,
						   indexstate->iss_RelationDesc,
						   node->scan.scanrelid,
						   node->indexorderby,
						   true,
						   &indexstate->iss_OrderByKeys,
						   &indexstate->iss_NumOrderByKeys,
						   &indexstate->iss_RuntimeKeys,
						   &indexstate->iss_NumRuntimeKeys,
						   NULL,	/* no ArrayKeys */
						   NULL);

	/*
	 * If we have runtime keys, we need an expr_ctx_n to evaluate them. The
	 * node's standard context won't do because we want to reset that context
	 * for every tuple.  So, build another context just like the other one...
	 * -tgl 7/11/00
	 */
	if (indexstate->iss_NumRuntimeKeys != 0) {
		expr_ctx_n *stdecontext = indexstate->ss.ps.ps_ExprContext;

		ExecAssignExprContext(estate, &indexstate->ss.ps);
		indexstate->iss_RuntimeContext = indexstate->ss.ps.ps_ExprContext;
		indexstate->ss.ps.ps_ExprContext = stdecontext;
	} else {
		indexstate->iss_RuntimeContext = NULL;
	}

	/*
	 * Initialize scan descriptor.
	 */
	indexstate->iss_ScanDesc = index_beginscan(currentRelation,
					 indexstate->iss_RelationDesc,
					 estate->es_snapshot,
					 indexstate->iss_NumScanKeys,
					 indexstate->iss_NumOrderByKeys);

	/*
	 * If no run-time keys to calculate, go ahead and pass the scankeys to the
	 * index AM.
	 */
	if (indexstate->iss_NumRuntimeKeys == 0)
		index_rescan(indexstate->iss_ScanDesc,
			indexstate->iss_ScanKeys,
			indexstate->iss_NumScanKeys,
			indexstate->iss_OrderByKeys,
			indexstate->iss_NumOrderByKeys);

	/*
	 * all done.
	 */
	return indexstate;
}
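/*
 * Sketch of the deferred work this sets up (illustrative; the actual
 * evaluation of the runtime key expressions is elided): at rescan time,
 * once the runtime keys have been computed in iss_RuntimeContext, the
 * scan keys are finally handed to the index AM.
 */
static void
example_rescan(index_ss *indexstate)
{
	if (indexstate->iss_NumRuntimeKeys != 0 &&
	    !indexstate->iss_RuntimeKeysReady) {
		/* ... evaluate each runtime key in iss_RuntimeContext ... */
		indexstate->iss_RuntimeKeysReady = true;
	}
	index_rescan(indexstate->iss_ScanDesc,
		indexstate->iss_ScanKeys, indexstate->iss_NumScanKeys,
		indexstate->iss_OrderByKeys, indexstate->iss_NumOrderByKeys);
}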