Example No. 1
// Evaluate a symbol: nil and t are self-evaluating; anything else is
// looked up in the given environment (falling back to the top-level one).
loliObj* loliSym::eval(loliObj* e){
    this->type = typeSYM;
    if(this->name == "nil" || this == nil){
      return nil;
    }
    if(this->name == "t" || this == t){
      return t;
    }
    if(e == NULL || e->nilp()){ // check e for NULL before dereferencing it
      loliObj* r = lookup_top_env(this);
      if(r->nilp()){
        loli_err("Symbol: " + this->toString() + " is unbound.");
        return nil;
      }else{
        return lcons(r)->head();
      }
    }else{
      loliObj* r = lookup_env(this, e);
      if(r->nilp()){
        loli_err("Symbol: " + this->toString() + " is unbound in its environment.");
        return nil;
      }else{
        return lcons(r)->head();
      }
    }
}
Example No. 2
static void validate_structure_layout(size_t slots, lref_t layout)
{
     if (!CONSP(layout))
          vmerror_wrong_type_n(2, layout);

     size_t len = (size_t) get_c_long(llength(layout));

     if (len != 2)
          vmerror_arg_out_of_range(layout, _T("bad structure layout, length<>2"));

     lref_t slot_layout = CAR(CDR(layout));

     if (get_c_long(llength(slot_layout)) != (long) slots)
          vmerror_arg_out_of_range(lcons(slot_layout, fixcons(slots)),
                                   _T("bad structure layout, wrong number of slots"));

     for (; CONSP(slot_layout); slot_layout = CDR(slot_layout))
     {
          if (!CONSP(CAR(slot_layout)))
               vmerror_arg_out_of_range(lcons(slot_layout, layout),
                                        _T("bad structure layout, bad slot layout"));

          if (!SYMBOLP(CAR(CAR(slot_layout))))
               vmerror_arg_out_of_range(layout,
                                        _T("bad structure layout, missing slot name"));
     }
}
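The checks above imply the shape the layout argument must have. As a quick reference, here is that shape reconstructed from the predicates alone (illustrative notation, inferred from the code rather than taken from project documentation):

/* Shape implied by validate_structure_layout:
 *
 *   layout      := (<unchecked> <slot-layout>)      ; proper list of length 2
 *   slot-layout := ((name-1 . ...) (name-2 . ...))  ; one cons per slot, and
 *                                                   ; CAR of each is a symbol
 */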
Example No. 3
File: main.c Project: mschaef/vcsh
static void accept_command_line_arguments(int argc, _TCHAR * argv[])
{
     lref_t arg_list = NIL;
     lref_t arg_list_bud = NIL;

     for (int ii = 0; ii < argc; ii++)
     {
          if (is_vm_argument(argv[ii]))
               continue;

          lref_t new_cell = lcons(strconsbuf(argv[ii]), NIL);

          if (NULLP(arg_list_bud))
          {
               arg_list = arg_list_bud = new_cell;
          }
          else
          {
               SET_CDR(arg_list_bud, new_cell);
               arg_list_bud = new_cell;
          }
     }

     interp.startup_args = arg_list;
}
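The arg_list/arg_list_bud pair above is the usual O(n) trick for appending to a singly linked cons list: keep a pointer to the last cell (the "bud") and splice each new cell onto it with SET_CDR, rather than re-walking the list for every append. A minimal sketch of the same idiom, assuming the lcons/SET_CDR/NULLP primitives behave as in the example (n and item[] are hypothetical names):

lref_t head = NIL;
lref_t bud = NIL;

for (size_t ii = 0; ii < n; ii++)
{
     lref_t cell = lcons(item[ii], NIL);  /* fresh one-element cell */

     if (NULLP(bud))
          head = bud = cell;              /* first cell starts the list */
     else
     {
          SET_CDR(bud, cell);             /* splice onto the end... */
          bud = cell;                     /* ...and advance the bud */
     }
}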
Example No. 4
lref_t lenvlookup(lref_t var, lref_t env)
{
     lref_t frame;

     for (frame = env; CONSP(frame); frame = CDR(frame))
     {
          lref_t tmp = CAR(frame);

          if (!CONSP(tmp))
               panic("damaged frame");

          lref_t al, fl;

          for (fl = CAR(tmp), al = CDR(tmp);
               CONSP(fl);
               fl = CDR(fl), al = CDR(al))
          {
               if (!CONSP(al))
                    vmerror_arg_out_of_range(NIL, _T("too few arguments"));

               if (EQ(CAR(fl), var))
                    return al;
          }

          if (SYMBOLP(fl) && EQ(fl, var))
               return lcons(al, NIL);
     }

     if (!NULLP(frame))
          panic("damaged env");

     return NIL;
}
Example No. 5
/*
 * Finish an incomplete split by inserting/updating the downlinks in parent
 * page. 'splitinfo' contains all the child pages involved in the split,
 * from left-to-right.
 *
 * On entry, the caller must hold a lock on stack->buffer and all the child
 * pages in 'splitinfo'. If 'unlockbuf' is true, the lock on stack->buffer is
 * released on return. The child pages are always unlocked and unpinned.
 */
static void
gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
				GISTSTATE *giststate, List *splitinfo, bool unlockbuf)
{
	ListCell   *lc;
	List	   *reversed;
	GISTPageSplitInfo *right;
	GISTPageSplitInfo *left;
	IndexTuple	tuples[2];

	/* A split always contains at least two halves */
	Assert(list_length(splitinfo) >= 2);

	/*
	 * We need to insert downlinks for each new page, and update the downlink
	 * for the original (leftmost) page in the split. Begin at the rightmost
	 * page, inserting one downlink at a time until there's only two pages
	 * left. Finally insert the downlink for the last new page and update the
	 * downlink for the original page as one operation.
	 */

	/* for convenience, create a copy of the list in reverse order */
	reversed = NIL;
	foreach(lc, splitinfo)
	{
		reversed = lcons(lfirst(lc), reversed);
	}
Example No. 6
/*
 * listCopy--
 *    this copy function only copies the "lcons-cells" of the list but not
 *    its contents. (good for list of pointers as well as list of integers).
 */
List *
listCopy(List *list)
{
    List *newlist=NIL;
    List *l, *nl;

    foreach(l, list) {
	if (newlist==NIL) {
	    newlist = nl = lcons(lfirst(l),NIL);
	}else {
	    lnext(nl) = lcons(lfirst(l),NIL);
	    nl = lnext(nl);
	}
    }
    return newlist; 
}
Example No. 7
/*
 * Async_Notify
 *
 *		This is executed by the SQL notify command.
 *
 *		Adds the relation to the list of pending notifies.
 *		Actual notification happens during transaction commit.
 *		^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 */
void
Async_Notify(const char *relname)
{
	if (Trace_notify)
		elog(DEBUG1, "Async_Notify(%s)", relname);

	/* no point in making duplicate entries in the list ... */
	if (!AsyncExistsPendingNotify(relname))
	{
		/*
		 * The name list needs to live until end of transaction, so store it
		 * in the transaction context.
		 */
		MemoryContext oldcontext;

		oldcontext = MemoryContextSwitchTo(CurTransactionContext);

		/*
		 * Ordering of the list isn't important.  We choose to put new entries
		 * on the front, as this might make duplicate-elimination a tad faster
		 * when the same condition is signaled many times in a row.
		 */
		pendingNotifies = lcons(pstrdup(relname), pendingNotifies);

		MemoryContextSwitchTo(oldcontext);
	}
}
Example No. 8
static lref_t arg_list_from_buffer(size_t argc, lref_t argv[])
{
     lref_t result = NIL;

     for (size_t ii = argc; ii > 0; ii--)
          result = lcons(argv[ii - 1], result);

     return result;
}
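Since lcons prepends, walking the buffer backwards yields a list in the buffer's original order; a three-element trace (hypothetical values) looks like this:

/* argc = 3:
 *   ii = 3: result = (argv[2])
 *   ii = 2: result = (argv[1] argv[2])
 *   ii = 1: result = (argv[0] argv[1] argv[2])
 */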
Example No. 9
/*
 * CreateAndPushRestrictionContext creates a new restriction context, inserts it to the
 * beginning of the context list, and returns the newly created context.
 */
RelationRestrictionContext *
CreateAndPushRestrictionContext(void)
{
	RelationRestrictionContext *restrictionContext =
		palloc0(sizeof(RelationRestrictionContext));
	relationRestrictionContextList = lcons(restrictionContext,
										   relationRestrictionContextList);

	return restrictionContext;
}
Example No. 10
/* ----------------
 *		CreateExprContext
 *
 *		Create a context for expression evaluation within an EState.
 *
 * An executor run may require multiple ExprContexts (we usually make one
 * for each Plan node, and a separate one for per-output-tuple processing
 * such as constraint checking).  Each ExprContext has its own "per-tuple"
 * memory context.
 *
 * Note we make no assumption about the caller's memory context.
 * ----------------
 */
ExprContext *
CreateExprContext(EState *estate)
{
	ExprContext *econtext;
	MemoryContext oldcontext;

	/* Create the ExprContext node within the per-query memory context */
	oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);

	econtext = makeNode(ExprContext);

	/* Initialize fields of ExprContext */
	econtext->ecxt_scantuple = NULL;
	econtext->ecxt_innertuple = NULL;
	econtext->ecxt_outertuple = NULL;

	econtext->ecxt_per_query_memory = estate->es_query_cxt;

	/*
	 * Create working memory for expression evaluation in this context.
	 */
	econtext->ecxt_per_tuple_memory =
		AllocSetContextCreate(estate->es_query_cxt,
							  "ExprContext",
							  ALLOCSET_DEFAULT_MINSIZE,
							  ALLOCSET_DEFAULT_INITSIZE,
							  ALLOCSET_DEFAULT_MAXSIZE);

	econtext->ecxt_param_exec_vals = estate->es_param_exec_vals;
	econtext->ecxt_param_list_info = estate->es_param_list_info;

	econtext->ecxt_aggvalues = NULL;
	econtext->ecxt_aggnulls = NULL;

	econtext->caseValue_datum = (Datum) 0;
	econtext->caseValue_isNull = true;

	econtext->domainValue_datum = (Datum) 0;
	econtext->domainValue_isNull = true;

	econtext->ecxt_estate = estate;

	econtext->ecxt_callbacks = NULL;

	/*
	 * Link the ExprContext into the EState to ensure it is shut down when the
	 * EState is freed.  Because we use lcons(), shutdowns will occur in
	 * reverse order of creation, which may not be essential but can't hurt.
	 */
	estate->es_exprcontexts = lcons(econtext, estate->es_exprcontexts);

	MemoryContextSwitchTo(oldcontext);

	return econtext;
}
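The comment about reverse-order shutdown follows directly from lcons() prepending: the most recently created ExprContext sits at the head of es_exprcontexts, so a head-to-tail walk at teardown visits contexts newest-first. A tiny trace (hypothetical contexts a, b, c):

/* estate->es_exprcontexts after three CreateExprContext calls:
 *   lcons(a, NIL)    => (a)
 *   lcons(b, (a))    => (b a)
 *   lcons(c, (b a))  => (c b a)   -- c, the newest, is shut down first
 */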
Example No. 11
lref_t lstress_lisp_heap(lref_t c)
{
     if (!FIXNUMP(c))
          vmerror_wrong_type_n(1, c);

     fixnum_t count = FIXNM(c);

     for (fixnum_t i = 0; i < count; i++)
          lcons(NIL, NIL);

     return NIL;
}
Example No. 12
static lref_t extend_env(lref_t actuals, lref_t formals, lref_t env)
{
     if (SYMBOLP(formals))
          return lcons(lcons(lcons(formals, NIL), lcons(actuals, NIL)), env);
     else
          return lcons(lcons(formals, actuals), env);
}
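Worked out in cons notation (with hypothetical formals/actuals), the two branches build frames in exactly the shape that lenvlookup in Example No. 4 consumes:

/* With formals = (x y) and actuals = (1 2):
 *   frame = lcons(formals, actuals) = ((x y) . (1 2))
 *   => lenvlookup walks both in step: x binds 1, y binds 2.
 *
 * With a bare rest symbol, formals = args:
 *   frame = ((args) (1 2))
 *   => CAR(frame) = (args), CDR(frame) = ((1 2)), so lenvlookup
 *      binds args to the entire actual list (1 2).
 */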
Example No. 13
// Evaluate a cons cell: handle the "if" special form here, otherwise
// treat the cell as a function application via eval_list.
loliObj* loliCons::eval(loliObj* env){
  this->type = typeCONS;
  //   std::cout<<this->type->toString()<<std::endl;
  if(this->head() == SYM("if")){
    if(this->tail()->nilp()){ // validate before extracting any subforms
      loli_err("Need at least one expression for if");
      return nil;
    }
    loliObj* cond = lcons(this->tail())->head();
    loliObj* wt = lcons(lcons(this->tail())->tail())->head();
    loliObj* wf = lcons(lcons(lcons(this->tail())->tail())->tail())->head();
    loliObj* cv = cond->eval(env); // evaluate the condition only once
    if(cv == boolt){
      return wt->eval(top_env);
    }else if(cv == boolf){
      if(wf){
        return wf->eval(top_env);
      }else{
        return nil;
      }
    }else{
      loli_err("Condition error");
      return nil;
    }
  }
  return eval_list(this, env);
}
Example No. 14
static void
pushIncompleteInsert(RelFileNode node, XLogRecPtr lsn, ItemPointerData key,
					 BlockNumber *blkno, int lenblk,
					 PageSplitRecord *xlinfo /* to extract blkno info */ )
{
	MemoryContext oldCxt;
	gistIncompleteInsert *ninsert;

	if (!ItemPointerIsValid(&key))

		/*
		 * If key is null then we should not store the insertion as
		 * incomplete, because it's a vacuum operation.
		 */
		return;

	oldCxt = MemoryContextSwitchTo(insertCtx);
	ninsert = (gistIncompleteInsert *) palloc(sizeof(gistIncompleteInsert));

	ninsert->node = node;
	ninsert->key = key;
	ninsert->lsn = lsn;

	if (lenblk && blkno)
	{
		ninsert->lenblk = lenblk;
		ninsert->blkno = (BlockNumber *) palloc(sizeof(BlockNumber) * ninsert->lenblk);
		memcpy(ninsert->blkno, blkno, sizeof(BlockNumber) * ninsert->lenblk);
		ninsert->origblkno = *blkno;
	}
	else
	{
		int			i;

		Assert(xlinfo);
		ninsert->lenblk = xlinfo->data->npage;
		ninsert->blkno = (BlockNumber *) palloc(sizeof(BlockNumber) * ninsert->lenblk);
		for (i = 0; i < ninsert->lenblk; i++)
			ninsert->blkno[i] = xlinfo->page[i].header->blkno;
		ninsert->origblkno = xlinfo->data->origblkno;
	}
	Assert(ninsert->lenblk > 0);

	/*
	 * Stick the new incomplete insert onto the front of the list, not the
	 * back.  This is so that gist_xlog_cleanup will process incompletions in
	 * last-in-first-out order.
	 */
	incomplete_inserts = lcons(ninsert, incomplete_inserts);

	MemoryContextSwitchTo(oldCxt);
}
Example No. 15
/*
 * Creates a new gang by logging on a session to each segDB involved.
 *
 * Either raises an ERROR via elog or returns a non-NULL gang.
 */
Gang *
AllocateGang(CdbDispatcherState *ds, GangType type, List *segments)
{
	MemoryContext	oldContext;
	SegmentType 	segmentType;
	Gang			*newGang = NULL;
	int				i;

	ELOG_DISPATCHER_DEBUG("AllocateGang begin.");

	if (Gp_role != GP_ROLE_DISPATCH)
	{
		elog(FATAL, "dispatch process called with role %d", Gp_role);
	}

	if (segments == NIL)
		return NULL;

	Assert(DispatcherContext);
	oldContext = MemoryContextSwitchTo(DispatcherContext);

	if (type == GANGTYPE_PRIMARY_WRITER)
		segmentType = SEGMENTTYPE_EXPLICT_WRITER;
	/* for extended query like cursor, must specify a reader */
	else if (ds->isExtendedQuery)
		segmentType = SEGMENTTYPE_EXPLICT_READER;
	else
		segmentType = SEGMENTTYPE_ANY;

	newGang = cdbgang_createGang(segments, segmentType);
	newGang->allocated = true;
	newGang->type = type;

	ds->allocatedGangs = lcons(newGang, ds->allocatedGangs);
	ds->largestGangSize = Max(ds->largestGangSize, newGang->size);

	ELOG_DISPATCHER_DEBUG("AllocateGang end.");

	if (type == GANGTYPE_PRIMARY_WRITER)
	{
		/*
		 * set "whoami" for utility statement. non-utility statement will
		 * overwrite it in function getCdbProcessList.
		 */
		for (i = 0; i < newGang->size; i++)
			cdbconn_setQEIdentifier(newGang->db_descriptors[i], -1);
	}

	MemoryContextSwitchTo(oldContext);

	return newGang;
}
Example No. 16
// Evaluate a function application: evaluate the head, then apply it to the tail.
loliObj* eval_list(loliObj* lst, loliObj* env){
  loliObj* car = lcons(lst)->head()->eval(env);
  loliObj* cdr = lcons(lst)->tail();
  std::cout<<"HEAD: "<<car->toString()<<"\tTAIL: "<<cdr->toString()<<std::endl;
  //    std::cout<<cdr->type->toString()<<std::endl;
  if(lfunc(car)->rtype){
    if(lcons(lcons(lcons(cdr)->tail())->head())->hd == NULL){
      std::cout<<lcons(lcons(lcons(cdr)->tail())->head())->toString()<<std::endl;
      return c_apply(car, cdr->eval(env), env);
    }
    return c_apply(car, cdr, env);
  }
  else{
    loli_err(car->toString() + " is not a function!");
  }
  return nil;
}
Example No. 17
/*
 * AtSubStart_Notify() --- Take care of subtransaction start.
 *
 * Push empty state for the new subtransaction.
 */
void
AtSubStart_Notify(void)
{
	MemoryContext old_cxt;

	/* Keep the list-of-lists in TopTransactionContext for simplicity */
	old_cxt = MemoryContextSwitchTo(TopTransactionContext);

	upperPendingNotifies = lcons(pendingNotifies, upperPendingNotifies);

	Assert(list_length(upperPendingNotifies) ==
		   GetCurrentTransactionNestLevel() - 1);

	pendingNotifies = NIL;

	MemoryContextSwitchTo(old_cxt);
}
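The matching pop at subtransaction end is not among these examples, but its shape follows from the push above; a hedged sketch of the commit path (the real AtSubCommit_Notify may differ in detail):

/* take the saved parent list back off the stack... */
List *parentNotifies = (List *) linitial(upperPendingNotifies);
upperPendingNotifies = list_delete_first(upperPendingNotifies);

/* ...and fold the subtransaction's entries into it */
pendingNotifies = list_concat(parentNotifies, pendingNotifies);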
Example No. 18
/* ----------------
 *		create_expr_ctx
 *
 *		Create a context for expression evaluation within an EState.
 *
 * An executor run may require multiple ExprContexts (we usually make one
 * for each plan_n node, and a separate one for per-output-tuple processing
 * such as constraint checking).  Each expr_ctx_n has its own "per-tuple"
 * memory context.
 *
 * Note we make no assumption about the caller's memory context.
 * ----------------
 */
expr_ctx_n*
create_expr_ctx(exec_state_n *estate)
{
	expr_ctx_n *econtext;
	struct mctx * oldcontext;

	/* Create the expr_ctx_n node within the per-query memory context */
	oldcontext = mctx_switch(estate->es_query_cxt);

	econtext = MK_N(ExprContext,expr_ctx_n);

	/* Initialize fields of expr_ctx_n */
	econtext->ecxt_scantuple = NULL;
	econtext->ecxt_innertuple = NULL;
	econtext->ecxt_outertuple = NULL;
	econtext->ecxt_per_query_memory = estate->es_query_cxt;

	/*
	 * Create working memory for expression evaluation in this context.
	 */
	econtext->ecxt_per_tuple_memory = aset_create_normal(estate->es_query_cxt, "ExprContext");
	econtext->ecxt_param_exec_vals = estate->es_param_exec_vals;
	econtext->ecxt_param_list_info = estate->es_param_list_info;
	econtext->ecxt_aggvalues = NULL;
	econtext->ecxt_aggnulls = NULL;
	econtext->caseValue_datum = (datum_t) 0;
	econtext->caseValue_isNull = true;
	econtext->domainValue_datum = (datum_t) 0;
	econtext->domainValue_isNull = true;
	econtext->ecxt_estate = estate;
	econtext->ecxt_callbacks = NULL;

	/*
	 * Link the expr_ctx_n into the exec_state_n to ensure it is shut down when the
	 * exec_state_n is freed.  Because we use lcons(), shutdowns will occur in
	 * reverse order of creation, which may not be essential but can't hurt.
	 */
	estate->es_exprcontexts = lcons(econtext, estate->es_exprcontexts);
	mctx_switch(oldcontext);

	return econtext;
}
Example No. 19
/*
 * subxact.__enter__() or subxact.enter()
 *
 * Start an explicit subtransaction.  SPI calls within an explicit
 * subtransaction will not start another one, so you can atomically
 * execute many SPI calls and still get a controllable exception if
 * one of them fails.
 */
static PyObject *
PLy_subtransaction_enter(PyObject *self, PyObject *unused)
{
	PLySubtransactionData *subxactdata;
	MemoryContext oldcontext;
	PLySubtransactionObject *subxact = (PLySubtransactionObject *) self;

	if (subxact->started)
	{
		PLy_exception_set(PyExc_ValueError, "this subtransaction has already been entered");
		return NULL;
	}

	if (subxact->exited)
	{
		PLy_exception_set(PyExc_ValueError, "this subtransaction has already been exited");
		return NULL;
	}

	subxact->started = true;
	oldcontext = CurrentMemoryContext;

	subxactdata = (PLySubtransactionData *)
		MemoryContextAlloc(TopTransactionContext,
						   sizeof(PLySubtransactionData));

	subxactdata->oldcontext = oldcontext;
	subxactdata->oldowner = CurrentResourceOwner;

	BeginInternalSubTransaction(NULL);

	/* Be sure that cells of explicit_subtransactions list are long-lived */
	MemoryContextSwitchTo(TopTransactionContext);
	explicit_subtransactions = lcons(subxactdata, explicit_subtransactions);

	/* Caller wants to stay in original memory context */
	MemoryContextSwitchTo(oldcontext);

	Py_INCREF(self);
	return self;
}
Example No. 20
/*
 * infer_tupledesc
 *
 * Given a stream, infer a TupleDesc based on the supertype of all
 * the casted types for each of the stream's columns
 */
static void
infer_tupledesc(StreamTargetsEntry *stream)
{
	HASH_SEQ_STATUS status;
	StreamColumnsEntry *entry;
	List *names = NIL;
	List *types = NIL;
	List *mods = NIL;
	List *collations = NIL;
	Const *preferred = makeConst(NUMERIC_OID, -1, 0, -1, 0, false, false);

	hash_seq_init(&status, stream->colstotypes);
	while ((entry = (StreamColumnsEntry *) hash_seq_search(&status)) != NULL)
	{
		char err[128];
		Oid supertype;
		char category;
		bool typispreferred;
		Oid t = exprType(linitial(entry->types));

		/*
		 * If there are any numeric types in our target types, we prepend a float8
		 * to the list of types to select from, as that is our preferred type when
		 * there is any ambiguity about how to interpret numeric types.
		 */
		get_type_category_preferred(t, &category, &typispreferred);
		if (category == TYPCATEGORY_NUMERIC)
			entry->types = lcons(preferred, entry->types);

		sprintf(err, "type conflict with stream \"%s\":", get_rel_name(stream->relid));
		supertype = select_common_type(NULL, entry->types, err, NULL);

		names = lappend(names, makeString(entry->name));
		types = lappend_int(types, supertype);
		mods = lappend_int(mods, -1);
		collations = lappend_int(collations, 0);
	}

	stream->desc = BuildDescFromLists(names, types, mods, collations);
}
Example No. 21
File: fasl.c Project: mschaef/vcsh
static void fast_read_list(lref_t reader, bool read_listd, lref_t * list)
{
    *list = NIL;
    lref_t list_bud = NIL;
    lref_t next_list_cell = NIL;

    lref_t list_length;
    fast_read(reader, &list_length, false);

    if (!FIXNUMP(list_length))
        vmerror_fast_read("expected fixnum for list length", reader, list_length);

    *list = NIL;

    for (fixnum_t ii = 0; ii < FIXNM(list_length); ii++)
    {
        next_list_cell = lcons(NIL, NIL);

        if (NULLP(*list))
            *list = next_list_cell;
        else
            SET_CDR(list_bud, next_list_cell);

        list_bud = next_list_cell;

        fast_read(reader, &(next_list_cell->as.cons.car), false);

        if (EOFP(CAR(next_list_cell)))
            vmerror_fast_read("incomplete list definition", reader, NIL);
    }

    if (read_listd)
    {
        fast_read(reader, &(list_bud->as.cons.cdr), false);

        if (EOFP(CDR(list_bud)))
            vmerror_fast_read("incomplete list defintion, missing cdr", reader, NIL);
    }
}
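The read_listd flag handles dotted (improper) lists: after the counted cars are read, one extra object is read into the cdr of the last cell. For instance, a serialized dotted list of length 2 (hypothetical stream contents) deserializes like this:

/* length = 2, read_listd = true, stream holds 1, 2, 3:
 *   after the loop:     *list = (1 2)       (list_bud is the last cell)
 *   final fast_read:    *list = (1 2 . 3)   (3 stored in list_bud's cdr)
 */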
Example No. 22
/*
 *--------------------------------------------------------------
 * Async_Notify
 *
 *		This is executed by the SQL notify command.
 *
 *		Adds the relation to the list of pending notifies.
 *		Actual notification happens during transaction commit.
 *		^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 *--------------------------------------------------------------
 */
void
Async_Notify(const char *relname)
{
	if (Trace_notify)
		elog(DEBUG1, "Async_Notify(%s)", relname);

	/* no point in making duplicate entries in the list ... */
	if (!AsyncExistsPendingNotify(relname))
	{
		/*
		 * The name list needs to live until end of transaction, so store it
		 * in the transaction context.
		 */
		MemoryContext oldcontext;

		oldcontext = MemoryContextSwitchTo(CurTransactionContext);

		pendingNotifies = lcons(pstrdup(relname), pendingNotifies);

		MemoryContextSwitchTo(oldcontext);
	}
}
Example No. 23
/*
 * Returns a List* of WorkloadElements.
 */
List *
ExtractWorkload(void){
	// For now, just get all the child nodes of the ModelGraph, simulating a single reference to every node.
	// TODO: extract the relevant nodes from the workload; a separate structure will be needed: WorkloadElement{Node, relativeCount, Horizon}
	List		*init = GetAllChildren(NULL, true),
				*res = NIL;
	ListCell	*lc;
	Horizon 	*h;

	foreach(lc, init)
	{
		WorkloadElement	*w = palloc0(sizeof(WorkloadElement));
		w->mgin = (ModelGraphIndexNode *)lfirst(lc);
		h = palloc0(sizeof(Horizon));
		h->error = 0.0;
		h->horizon = 1;
		h->frequency = 1.0/list_length(init);
		h->stillCheck = true;
		w->horizons = lcons(h, w->horizons);

//		res = lcons(w, res);
		res = lappend(res, w);
	}

	return res;
}
Example No. 24
/*
 * get_relation_info -
 *	  Retrieves catalog information for a given relation.
 *
 * Given the Oid of the relation, return the following info into fields
 * of the RelOptInfo struct:
 *
 *	min_attr	lowest valid AttrNumber
 *	max_attr	highest valid AttrNumber
 *	indexlist	list of IndexOptInfos for relation's indexes
 *	pages		number of pages
 *	tuples		number of tuples
 *
 * Also, initialize the attr_needed[] and attr_widths[] arrays.  In most
 * cases these are left as zeroes, but sometimes we need to compute attr
 * widths here, and we may as well cache the results for costsize.c.
 *
 * If inhparent is true, all we need to do is set up the attr arrays:
 * the RelOptInfo actually represents the appendrel formed by an inheritance
 * tree, and so the parent rel's physical size and index information isn't
 * important for it.
 */
void
get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
				  RelOptInfo *rel)
{
	Index		varno = rel->relid;
	Relation	relation;
	bool		hasindex;
	List	   *indexinfos = NIL;

	/*
	 * We need not lock the relation since it was already locked, either by
	 * the rewriter or when expand_inherited_rtentry() added it to the query's
	 * rangetable.
	 */
	relation = heap_open(relationObjectId, NoLock);

	rel->min_attr = FirstLowInvalidHeapAttributeNumber + 1;
	rel->max_attr = RelationGetNumberOfAttributes(relation);
	rel->reltablespace = RelationGetForm(relation)->reltablespace;

	Assert(rel->max_attr >= rel->min_attr);
	rel->attr_needed = (Relids *)
		palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(Relids));
	rel->attr_widths = (int32 *)
		palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32));

	/*
	 * Estimate relation size --- unless it's an inheritance parent, in which
	 * case the size will be computed later in set_append_rel_pathlist, and we
	 * must leave it zero for now to avoid bollixing the total_table_pages
	 * calculation.
	 */
	if (!inhparent)
		estimate_rel_size(relation, rel->attr_widths - rel->min_attr,
						  &rel->pages, &rel->tuples);

	/*
	 * Make list of indexes.  Ignore indexes on system catalogs if told to.
	 * Don't bother with indexes for an inheritance parent, either.
	 */
	if (inhparent ||
		(IgnoreSystemIndexes && IsSystemClass(relation->rd_rel)))
		hasindex = false;
	else
		hasindex = relation->rd_rel->relhasindex;

	if (hasindex)
	{
		List	   *indexoidlist;
		ListCell   *l;
		LOCKMODE	lmode;

		indexoidlist = RelationGetIndexList(relation);

		/*
		 * For each index, we get the same type of lock that the executor will
		 * need, and do not release it.  This saves a couple of trips to the
		 * shared lock manager while not creating any real loss of
		 * concurrency, because no schema changes could be happening on the
		 * index while we hold lock on the parent rel, and neither lock type
		 * blocks any other kind of index operation.
		 */
		if (rel->relid == root->parse->resultRelation)
			lmode = RowExclusiveLock;
		else
			lmode = AccessShareLock;

		foreach(l, indexoidlist)
		{
			Oid			indexoid = lfirst_oid(l);
			Relation	indexRelation;
			Form_pg_index index;
			IndexOptInfo *info;
			int			ncolumns;
			int			i;

			/*
			 * Extract info from the relation descriptor for the index.
			 */
			indexRelation = index_open(indexoid, lmode);
			index = indexRelation->rd_index;

			/*
			 * Ignore invalid indexes, since they can't safely be used for
			 * queries.  Note that this is OK because the data structure we
			 * are constructing is only used by the planner --- the executor
			 * still needs to insert into "invalid" indexes!
			 */
			if (!index->indisvalid)
			{
				index_close(indexRelation, NoLock);
				continue;
			}

			/*
			 * If the index is valid, but cannot yet be used, ignore it; but
			 * mark the plan we are generating as transient. See
			 * src/backend/access/heap/README.HOT for discussion.
			 */
			if (index->indcheckxmin &&
				!TransactionIdPrecedes(HeapTupleHeaderGetXmin(indexRelation->rd_indextuple->t_data),
									   TransactionXmin))
			{
				root->glob->transientPlan = true;
				index_close(indexRelation, NoLock);
				continue;
			}

			info = makeNode(IndexOptInfo);

			info->indexoid = index->indexrelid;
			info->reltablespace =
				RelationGetForm(indexRelation)->reltablespace;
			info->rel = rel;
			info->ncolumns = ncolumns = index->indnatts;

			/*
			 * Allocate per-column info arrays.  To save a few palloc cycles
			 * we allocate all the Oid-type arrays in one request.	Note that
			 * the opfamily array needs an extra, terminating zero at the end.
			 * We pre-zero the ordering info in case the index is unordered.
			 */
			info->indexkeys = (int *) palloc(sizeof(int) * ncolumns);
			info->opfamily = (Oid *) palloc0(sizeof(Oid) * (4 * ncolumns + 1));
			info->opcintype = info->opfamily + (ncolumns + 1);
			info->fwdsortop = info->opcintype + ncolumns;
			info->revsortop = info->fwdsortop + ncolumns;
			info->nulls_first = (bool *) palloc0(sizeof(bool) * ncolumns);

			for (i = 0; i < ncolumns; i++)
			{
				info->indexkeys[i] = index->indkey.values[i];
				info->opfamily[i] = indexRelation->rd_opfamily[i];
				info->opcintype[i] = indexRelation->rd_opcintype[i];
			}

			info->relam = indexRelation->rd_rel->relam;
			info->amcostestimate = indexRelation->rd_am->amcostestimate;
			info->amoptionalkey = indexRelation->rd_am->amoptionalkey;
			info->amsearchnulls = indexRelation->rd_am->amsearchnulls;
			info->amhasgettuple = OidIsValid(indexRelation->rd_am->amgettuple);
			info->amhasgetbitmap = OidIsValid(indexRelation->rd_am->amgetbitmap);

			/*
			 * Fetch the ordering operators associated with the index, if any.
			 * We expect that all ordering-capable indexes use btree's
			 * strategy numbers for the ordering operators.
			 */
			if (indexRelation->rd_am->amcanorder)
			{
				int			nstrat = indexRelation->rd_am->amstrategies;

				for (i = 0; i < ncolumns; i++)
				{
					int16		opt = indexRelation->rd_indoption[i];
					int			fwdstrat;
					int			revstrat;

					if (opt & INDOPTION_DESC)
					{
						fwdstrat = BTGreaterStrategyNumber;
						revstrat = BTLessStrategyNumber;
					}
					else
					{
						fwdstrat = BTLessStrategyNumber;
						revstrat = BTGreaterStrategyNumber;
					}

					/*
					 * Index AM must have a fixed set of strategies for it to
					 * make sense to specify amcanorder, so we need not allow
					 * the case amstrategies == 0.
					 */
					if (fwdstrat > 0)
					{
						Assert(fwdstrat <= nstrat);
						info->fwdsortop[i] = indexRelation->rd_operator[i * nstrat + fwdstrat - 1];
					}
					if (revstrat > 0)
					{
						Assert(revstrat <= nstrat);
						info->revsortop[i] = indexRelation->rd_operator[i * nstrat + revstrat - 1];
					}
					info->nulls_first[i] = (opt & INDOPTION_NULLS_FIRST) != 0;
				}
			}

			/*
			 * Fetch the index expressions and predicate, if any.  We must
			 * modify the copies we obtain from the relcache to have the
			 * correct varno for the parent relation, so that they match up
			 * correctly against qual clauses.
			 */
			info->indexprs = RelationGetIndexExpressions(indexRelation);
			info->indpred = RelationGetIndexPredicate(indexRelation);
			if (info->indexprs && varno != 1)
				ChangeVarNodes((Node *) info->indexprs, 1, varno, 0);
			if (info->indpred && varno != 1)
				ChangeVarNodes((Node *) info->indpred, 1, varno, 0);
			info->predOK = false;		/* set later in indxpath.c */
			info->unique = index->indisunique;

			/*
			 * Estimate the index size.  If it's not a partial index, we lock
			 * the number-of-tuples estimate to equal the parent table; if it
			 * is partial then we have to use the same methods as we would for
			 * a table, except we can be sure that the index is not larger
			 * than the table.
			 */
			if (info->indpred == NIL)
			{
				info->pages = RelationGetNumberOfBlocks(indexRelation);
				info->tuples = rel->tuples;
			}
			else
			{
				estimate_rel_size(indexRelation, NULL,
								  &info->pages, &info->tuples);
				if (info->tuples > rel->tuples)
					info->tuples = rel->tuples;
			}

			index_close(indexRelation, NoLock);

			indexinfos = lcons(info, indexinfos);
		}

		list_free(indexoidlist);
	}
Example No. 25
/*
 * get_relation_info -
 *	  Retrieves catalog information for a given relation.
 *
 * Given the Oid of the relation, return the following info into fields
 * of the RelOptInfo struct:
 *
 *	min_attr	lowest valid AttrNumber
 *	max_attr	highest valid AttrNumber
 *	indexlist	list of IndexOptInfos for relation's indexes
 *	pages		number of pages
 *	tuples		number of tuples
 *
 * Also, initialize the attr_needed[] and attr_widths[] arrays.  In most
 * cases these are left as zeroes, but sometimes we need to compute attr
 * widths here, and we may as well cache the results for costsize.c.
 *
 * If inhparent is true, all we need to do is set up the attr arrays:
 * the RelOptInfo actually represents the appendrel formed by an inheritance
 * tree, and so the parent rel's physical size and index information isn't
 * important for it.
 */
void
get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
				  RelOptInfo *rel)
{
	Index		varno = rel->relid;
	Relation	relation;
	bool		hasindex;
	List	   *indexinfos = NIL;

	/*
	 * We need not lock the relation since it was already locked, either by
	 * the rewriter or when expand_inherited_rtentry() added it to the query's
	 * rangetable.
	 */
	relation = heap_open(relationObjectId, NoLock);

	/* Temporary and unlogged relations are inaccessible during recovery. */
	if (!RelationNeedsWAL(relation) && RecoveryInProgress())
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("cannot access temporary or unlogged relations during recovery")));

	rel->min_attr = FirstLowInvalidHeapAttributeNumber + 1;
	rel->max_attr = RelationGetNumberOfAttributes(relation);
	rel->reltablespace = RelationGetForm(relation)->reltablespace;

	Assert(rel->max_attr >= rel->min_attr);
	rel->attr_needed = (Relids *)
		palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(Relids));
	rel->attr_widths = (int32 *)
		palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32));

	/*
	 * Estimate relation size --- unless it's an inheritance parent, in which
	 * case the size will be computed later in set_append_rel_pathlist, and we
	 * must leave it zero for now to avoid bollixing the total_table_pages
	 * calculation.
	 */
	if (!inhparent)
		estimate_rel_size(relation, rel->attr_widths - rel->min_attr,
						  &rel->pages, &rel->tuples, &rel->allvisfrac);

	/*
	 * Make list of indexes.  Ignore indexes on system catalogs if told to.
	 * Don't bother with indexes for an inheritance parent, either.
	 */
	if (inhparent ||
		(IgnoreSystemIndexes && IsSystemClass(relation->rd_rel)))
		hasindex = false;
	else
		hasindex = relation->rd_rel->relhasindex;

	if (hasindex)
	{
		List	   *indexoidlist;
		ListCell   *l;
		LOCKMODE	lmode;

		indexoidlist = RelationGetIndexList(relation);

		/*
		 * For each index, we get the same type of lock that the executor will
		 * need, and do not release it.  This saves a couple of trips to the
		 * shared lock manager while not creating any real loss of
		 * concurrency, because no schema changes could be happening on the
		 * index while we hold lock on the parent rel, and neither lock type
		 * blocks any other kind of index operation.
		 */
		if (rel->relid == root->parse->resultRelation)
			lmode = RowExclusiveLock;
		else
			lmode = AccessShareLock;

		foreach(l, indexoidlist)
		{
			Oid			indexoid = lfirst_oid(l);
			Relation	indexRelation;
			Form_pg_index index;
			IndexOptInfo *info;
			int			ncolumns;
			int			i;

			/*
			 * Extract info from the relation descriptor for the index.
			 */
			indexRelation = index_open(indexoid, lmode);
			index = indexRelation->rd_index;

			/*
			 * Ignore invalid indexes, since they can't safely be used for
			 * queries.  Note that this is OK because the data structure we
			 * are constructing is only used by the planner --- the executor
			 * still needs to insert into "invalid" indexes!
			 */
			if (!index->indisvalid)
			{
				index_close(indexRelation, NoLock);
				continue;
			}

			/*
			 * If the index is valid, but cannot yet be used, ignore it; but
			 * mark the plan we are generating as transient. See
			 * src/backend/access/heap/README.HOT for discussion.
			 */
			if (index->indcheckxmin &&
				!TransactionIdPrecedes(HeapTupleHeaderGetXmin(indexRelation->rd_indextuple->t_data),
									   TransactionXmin))
			{
				root->glob->transientPlan = true;
				index_close(indexRelation, NoLock);
				continue;
			}

			info = makeNode(IndexOptInfo);

			info->indexoid = index->indexrelid;
			info->reltablespace =
				RelationGetForm(indexRelation)->reltablespace;
			info->rel = rel;
			info->ncolumns = ncolumns = index->indnatts;
			info->indexkeys = (int *) palloc(sizeof(int) * ncolumns);
			info->indexcollations = (Oid *) palloc(sizeof(Oid) * ncolumns);
			info->opfamily = (Oid *) palloc(sizeof(Oid) * ncolumns);
			info->opcintype = (Oid *) palloc(sizeof(Oid) * ncolumns);

			for (i = 0; i < ncolumns; i++)
			{
				info->indexkeys[i] = index->indkey.values[i];
				info->indexcollations[i] = indexRelation->rd_indcollation[i];
				info->opfamily[i] = indexRelation->rd_opfamily[i];
				info->opcintype[i] = indexRelation->rd_opcintype[i];
			}

			info->relam = indexRelation->rd_rel->relam;
			info->amcostestimate = indexRelation->rd_am->amcostestimate;
			info->canreturn = index_can_return(indexRelation);
			info->amcanorderbyop = indexRelation->rd_am->amcanorderbyop;
			info->amoptionalkey = indexRelation->rd_am->amoptionalkey;
			info->amsearcharray = indexRelation->rd_am->amsearcharray;
			info->amsearchnulls = indexRelation->rd_am->amsearchnulls;
			info->amhasgettuple = OidIsValid(indexRelation->rd_am->amgettuple);
			info->amhasgetbitmap = OidIsValid(indexRelation->rd_am->amgetbitmap);

			/*
			 * Fetch the ordering information for the index, if any.
			 */
			if (info->relam == BTREE_AM_OID)
			{
				/*
				 * If it's a btree index, we can use its opfamily OIDs
				 * directly as the sort ordering opfamily OIDs.
				 */
				Assert(indexRelation->rd_am->amcanorder);

				info->sortopfamily = info->opfamily;
				info->reverse_sort = (bool *) palloc(sizeof(bool) * ncolumns);
				info->nulls_first = (bool *) palloc(sizeof(bool) * ncolumns);

				for (i = 0; i < ncolumns; i++)
				{
					int16		opt = indexRelation->rd_indoption[i];

					info->reverse_sort[i] = (opt & INDOPTION_DESC) != 0;
					info->nulls_first[i] = (opt & INDOPTION_NULLS_FIRST) != 0;
				}
			}
			else if (indexRelation->rd_am->amcanorder)
			{
				/*
				 * Otherwise, identify the corresponding btree opfamilies by
				 * trying to map this index's "<" operators into btree.  Since
				 * "<" uniquely defines the behavior of a sort order, this is
				 * a sufficient test.
				 *
				 * XXX This method is rather slow and also requires the
				 * undesirable assumption that the other index AM numbers its
				 * strategies the same as btree.  It'd be better to have a way
				 * to explicitly declare the corresponding btree opfamily for
				 * each opfamily of the other index type.  But given the lack
				 * of current or foreseeable amcanorder index types, it's not
				 * worth expending more effort on now.
				 */
				info->sortopfamily = (Oid *) palloc(sizeof(Oid) * ncolumns);
				info->reverse_sort = (bool *) palloc(sizeof(bool) * ncolumns);
				info->nulls_first = (bool *) palloc(sizeof(bool) * ncolumns);

				for (i = 0; i < ncolumns; i++)
				{
					int16		opt = indexRelation->rd_indoption[i];
					Oid			ltopr;
					Oid			btopfamily;
					Oid			btopcintype;
					int16		btstrategy;

					info->reverse_sort[i] = (opt & INDOPTION_DESC) != 0;
					info->nulls_first[i] = (opt & INDOPTION_NULLS_FIRST) != 0;

					ltopr = get_opfamily_member(info->opfamily[i],
												info->opcintype[i],
												info->opcintype[i],
												BTLessStrategyNumber);
					if (OidIsValid(ltopr) &&
						get_ordering_op_properties(ltopr,
												   &btopfamily,
												   &btopcintype,
												   &btstrategy) &&
						btopcintype == info->opcintype[i] &&
						btstrategy == BTLessStrategyNumber)
					{
						/* Successful mapping */
						info->sortopfamily[i] = btopfamily;
					}
					else
					{
						/* Fail ... quietly treat index as unordered */
						info->sortopfamily = NULL;
						info->reverse_sort = NULL;
						info->nulls_first = NULL;
						break;
					}
				}
			}
			else
			{
				info->sortopfamily = NULL;
				info->reverse_sort = NULL;
				info->nulls_first = NULL;
			}

			/*
			 * Fetch the index expressions and predicate, if any.  We must
			 * modify the copies we obtain from the relcache to have the
			 * correct varno for the parent relation, so that they match up
			 * correctly against qual clauses.
			 */
			info->indexprs = RelationGetIndexExpressions(indexRelation);
			info->indpred = RelationGetIndexPredicate(indexRelation);
			if (info->indexprs && varno != 1)
				ChangeVarNodes((Node *) info->indexprs, 1, varno, 0);
			if (info->indpred && varno != 1)
				ChangeVarNodes((Node *) info->indpred, 1, varno, 0);

			/* Build targetlist using the completed indexprs data */
			info->indextlist = build_index_tlist(root, info, relation);

			info->predOK = false;		/* set later in indxpath.c */
			info->unique = index->indisunique;
			info->immediate = index->indimmediate;
			info->hypothetical = false;

			/*
			 * Estimate the index size.  If it's not a partial index, we lock
			 * the number-of-tuples estimate to equal the parent table; if it
			 * is partial then we have to use the same methods as we would for
			 * a table, except we can be sure that the index is not larger
			 * than the table.
			 */
			if (info->indpred == NIL)
			{
				info->pages = RelationGetNumberOfBlocks(indexRelation);
				info->tuples = rel->tuples;
			}
			else
			{
				double		allvisfrac;				/* dummy */

				estimate_rel_size(indexRelation, NULL,
								  &info->pages, &info->tuples, &allvisfrac);
				if (info->tuples > rel->tuples)
					info->tuples = rel->tuples;
			}

			index_close(indexRelation, NoLock);

			indexinfos = lcons(info, indexinfos);
		}

		list_free(indexoidlist);
	}
Example No. 26
/*
 * Get any row security quals and check quals that should be applied to the
 * specified RTE.
 *
 * In addition, hasRowSecurity is set to true if row level security is enabled
 * (even if this RTE doesn't have any row security quals), and hasSubLinks is
 * set to true if any of the quals returned contain sublinks.
 */
void
get_row_security_policies(Query *root, CmdType commandType, RangeTblEntry *rte,
						  int rt_index, List **securityQuals,
						  List **withCheckOptions, bool *hasRowSecurity,
						  bool *hasSubLinks)
{
	Expr	   *rowsec_expr = NULL;
	Expr	   *rowsec_with_check_expr = NULL;
	Expr	   *hook_expr_restrictive = NULL;
	Expr	   *hook_with_check_expr_restrictive = NULL;
	Expr	   *hook_expr_permissive = NULL;
	Expr	   *hook_with_check_expr_permissive = NULL;

	List	   *rowsec_policies;
	List	   *hook_policies_restrictive = NIL;
	List	   *hook_policies_permissive = NIL;

	Relation	rel;
	Oid			user_id;
	int			rls_status;
	bool		defaultDeny = false;

	/* Defaults for the return values */
	*securityQuals = NIL;
	*withCheckOptions = NIL;
	*hasRowSecurity = false;
	*hasSubLinks = false;

	/* If this is not a normal relation, just return immediately */
	if (rte->relkind != RELKIND_RELATION)
		return;

	/* Switch to checkAsUser if it's set */
	user_id = rte->checkAsUser ? rte->checkAsUser : GetUserId();

	/* Determine the state of RLS for this, pass checkAsUser explicitly */
	rls_status = check_enable_rls(rte->relid, rte->checkAsUser, false);

	/* If there is no RLS on this table at all, nothing to do */
	if (rls_status == RLS_NONE)
		return;

	/*
	 * RLS_NONE_ENV means we are not doing any RLS now, but that may change
	 * with changes to the environment, so we mark it as hasRowSecurity to
	 * force a re-plan when the environment changes.
	 */
	if (rls_status == RLS_NONE_ENV)
	{
		/*
		 * Indicate that this query may involve RLS and must therefore be
		 * replanned if the environment changes (GUCs, role), but we are not
		 * adding anything here.
		 */
		*hasRowSecurity = true;

		return;
	}

	/* Grab the built-in policies which should be applied to this relation. */
	rel = heap_open(rte->relid, NoLock);

	rowsec_policies = pull_row_security_policies(commandType, rel,
												 user_id);

	/*
	 * Check if this is only the default-deny policy.
	 *
	 * Normally, if the table has row security enabled but there are no
	 * policies, we use a default-deny policy and not allow anything. However,
	 * when an extension uses the hook to add their own policies, we don't
	 * want to include the default deny policy or there won't be any way for a
	 * user to use an extension exclusively for the policies to be used.
	 */
	if (((RowSecurityPolicy *) linitial(rowsec_policies))->policy_id
		== InvalidOid)
		defaultDeny = true;

	/* Now that we have our policies, build the expressions from them. */
	process_policies(root, rowsec_policies, rt_index, &rowsec_expr,
					 &rowsec_with_check_expr, hasSubLinks, OR_EXPR);

	/*
	 * Also, allow extensions to add their own policies.
	 *
	 * extensions can add either permissive or restrictive policies.
	 *
	 * Note that, as with the internal policies, if multiple policies are
	 * returned then they will be combined into a single expression with all
	 * of them OR'd (for permissive) or AND'd (for restrictive) together.
	 *
	 * If only a USING policy is returned by the extension then it will be
	 * used for WITH CHECK as well, similar to how internal policies are
	 * handled.
	 *
	 * The only caveat to this is that if there are NO internal policies
	 * defined, there ARE policies returned by the extension, and RLS is
	 * enabled on the table, then we will ignore the internally-generated
	 * default-deny policy and use only the policies returned by the
	 * extension.
	 */
	if (row_security_policy_hook_restrictive)
	{
		hook_policies_restrictive = (*row_security_policy_hook_restrictive) (commandType, rel);

		/* Build the expression from any policies returned. */
		if (hook_policies_restrictive != NIL)
			process_policies(root, hook_policies_restrictive, rt_index,
							 &hook_expr_restrictive,
							 &hook_with_check_expr_restrictive,
							 hasSubLinks,
							 AND_EXPR);
	}

	if (row_security_policy_hook_permissive)
	{
		hook_policies_permissive = (*row_security_policy_hook_permissive) (commandType, rel);

		/* Build the expression from any policies returned. */
		if (hook_policies_permissive != NIL)
			process_policies(root, hook_policies_permissive, rt_index,
							 &hook_expr_permissive,
							 &hook_with_check_expr_permissive, hasSubLinks,
							 OR_EXPR);
	}

	/*
	 * If the only built-in policy is the default-deny one, and hook policies
	 * exist, then use the hook policies only and do not apply the
	 * default-deny policy.  Otherwise, we will apply both sets below.
	 */
	if (defaultDeny &&
		(hook_policies_restrictive != NIL || hook_policies_permissive != NIL))
	{
		rowsec_expr = NULL;
		rowsec_with_check_expr = NULL;
	}

	/*
	 * For INSERT or UPDATE, we need to add the WITH CHECK quals to Query's
	 * withCheckOptions to verify that any new records pass the WITH CHECK
	 * policy (this will be a copy of the USING policy, if no explicit WITH
	 * CHECK policy exists).
	 */
	if (commandType == CMD_INSERT || commandType == CMD_UPDATE)
	{
		/*
		 * WITH CHECK OPTIONS wants a WCO node which wraps each Expr, so
		 * create them as necessary.
		 */

		/*
		 * Handle any restrictive policies first.
		 *
		 * They can simply be added.
		 */
		if (hook_with_check_expr_restrictive)
		{
			WithCheckOption *wco;

			wco = (WithCheckOption *) makeNode(WithCheckOption);
			wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
				WCO_RLS_UPDATE_CHECK;
			wco->relname = pstrdup(RelationGetRelationName(rel));
			wco->qual = (Node *) hook_with_check_expr_restrictive;
			wco->cascaded = false;
			*withCheckOptions = lappend(*withCheckOptions, wco);
		}

		/*
		 * Handle built-in policies, if there are no permissive policies from
		 * the hook.
		 */
		if (rowsec_with_check_expr && !hook_with_check_expr_permissive)
		{
			WithCheckOption *wco;

			wco = (WithCheckOption *) makeNode(WithCheckOption);
			wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
				WCO_RLS_UPDATE_CHECK;
			wco->relname = pstrdup(RelationGetRelationName(rel));
			wco->qual = (Node *) rowsec_with_check_expr;
			wco->cascaded = false;
			*withCheckOptions = lappend(*withCheckOptions, wco);
		}
		/* Handle the hook policies, if there are no built-in ones. */
		else if (!rowsec_with_check_expr && hook_with_check_expr_permissive)
		{
			WithCheckOption *wco;

			wco = (WithCheckOption *) makeNode(WithCheckOption);
			wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
				WCO_RLS_UPDATE_CHECK;
			wco->relname = pstrdup(RelationGetRelationName(rel));
			wco->qual = (Node *) hook_with_check_expr_permissive;
			wco->cascaded = false;
			*withCheckOptions = lappend(*withCheckOptions, wco);
		}
		/* Handle the case where there are both. */
		else if (rowsec_with_check_expr && hook_with_check_expr_permissive)
		{
			WithCheckOption *wco;
			List	   *combined_quals = NIL;
			Expr	   *combined_qual_eval;

			combined_quals = lcons(copyObject(rowsec_with_check_expr),
								   combined_quals);

			combined_quals = lcons(copyObject(hook_with_check_expr_permissive),
								   combined_quals);

			combined_qual_eval = makeBoolExpr(OR_EXPR, combined_quals, -1);

			wco = (WithCheckOption *) makeNode(WithCheckOption);
			wco->kind = commandType == CMD_INSERT ? WCO_RLS_INSERT_CHECK :
				WCO_RLS_UPDATE_CHECK;
			wco->relname = pstrdup(RelationGetRelationName(rel));
			wco->qual = (Node *) combined_qual_eval;
			wco->cascaded = false;
			*withCheckOptions = lappend(*withCheckOptions, wco);
		}

		/*
		 * ON CONFLICT DO UPDATE has an RTE that is subject to both INSERT and
		 * UPDATE RLS enforcement.  Those are enforced (as a special, distinct
		 * kind of WCO) on the target tuple.
		 *
		 * Make a second, recursive pass over the RTE for this, gathering
		 * UPDATE-applicable RLS checks/WCOs, and gathering and converting
		 * UPDATE-applicable security quals into WCO_RLS_CONFLICT_CHECK RLS
		 * checks/WCOs.  Finally, these distinct kinds of RLS checks/WCOs are
		 * concatenated with our own INSERT-applicable list.
		 */
		if (root->onConflict && root->onConflict->action == ONCONFLICT_UPDATE &&
			commandType == CMD_INSERT)
		{
			List	   *conflictSecurityQuals = NIL;
			List	   *conflictWCOs = NIL;
			ListCell   *item;
			bool		conflictHasRowSecurity = false;
			bool		conflictHasSublinks = false;

			/* Assume that RTE is target resultRelation */
			get_row_security_policies(root, CMD_UPDATE, rte, rt_index,
									  &conflictSecurityQuals, &conflictWCOs,
									  &conflictHasRowSecurity,
									  &conflictHasSublinks);

			if (conflictHasRowSecurity)
				*hasRowSecurity = true;
			if (conflictHasSublinks)
				*hasSubLinks = true;

			/*
			 * Append WITH CHECK OPTIONs/RLS checks, which should not conflict
			 * between this INSERT and the auxiliary UPDATE
			 */
			*withCheckOptions = list_concat(*withCheckOptions,
											conflictWCOs);

			foreach(item, conflictSecurityQuals)
			{
				Expr	   *conflict_rowsec_expr = (Expr *) lfirst(item);
				WithCheckOption *wco;

				wco = (WithCheckOption *) makeNode(WithCheckOption);

				wco->kind = WCO_RLS_CONFLICT_CHECK;
				wco->relname = pstrdup(RelationGetRelationName(rel));
				wco->qual = (Node *) copyObject(conflict_rowsec_expr);
				wco->cascaded = false;
				*withCheckOptions = lappend(*withCheckOptions, wco);
			}
		}
Example No. 27
/*
 * Traverse the tree to find path from root page to specified "child" block.
 *
 * returns a new insertion stack, starting from the parent of "child", up
 * to the root. *downlinkoffnum is set to the offset of the downlink in the
 * direct parent of child.
 *
 * To prevent deadlocks, this should lock only one page at a time.
 */
static GISTInsertStack *
gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
{
	Page		page;
	Buffer		buffer;
	OffsetNumber i,
				maxoff;
	ItemId		iid;
	IndexTuple	idxtuple;
	List	   *fifo;
	GISTInsertStack *top,
			   *ptr;
	BlockNumber blkno;

	top = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
	top->blkno = GIST_ROOT_BLKNO;
	top->downlinkoffnum = InvalidOffsetNumber;

	fifo = list_make1(top);
	while (fifo != NIL)
	{
		/* Get next page to visit */
		top = linitial(fifo);
		fifo = list_delete_first(fifo);

		buffer = ReadBuffer(r, top->blkno);
		LockBuffer(buffer, GIST_SHARE);
		gistcheckpage(r, buffer);
		page = (Page) BufferGetPage(buffer);

		if (GistPageIsLeaf(page))
		{
			/*
			 * Because we scan the index top-down, all the rest of the pages
			 * in the queue must be leaf pages as well.
			 */
			UnlockReleaseBuffer(buffer);
			break;
		}

		top->lsn = PageGetLSN(page);

		/*
		 * If F_FOLLOW_RIGHT is set, the page to the right doesn't have a
		 * downlink. This should not normally happen.
		 */
		if (GistFollowRight(page))
			elog(ERROR, "concurrent GiST page split was incomplete");

		if (top->parent && top->parent->lsn < GistPageGetNSN(page) &&
			GistPageGetOpaque(page)->rightlink != InvalidBlockNumber /* sanity check */ )
		{
			/*
			 * Page was split while we looked elsewhere. We didn't see the
			 * downlink to the right page when we scanned the parent, so add
			 * it to the queue now.
			 *
			 * Put the right page ahead of the queue, so that we visit it
			 * next. That's important, because if this is the lowest internal
			 * level, just above leaves, we might already have queued up some
			 * leaf pages, and we assume that there can't be any non-leaf
			 * pages behind leaf pages.
			 */
			ptr = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
			ptr->blkno = GistPageGetOpaque(page)->rightlink;
			ptr->downlinkoffnum = InvalidOffsetNumber;
			ptr->parent = top->parent;

			fifo = lcons(ptr, fifo);
		}

		maxoff = PageGetMaxOffsetNumber(page);

		for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
		{
			iid = PageGetItemId(page, i);
			idxtuple = (IndexTuple) PageGetItem(page, iid);
			blkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
			if (blkno == child)
			{
				/* Found it! */
				UnlockReleaseBuffer(buffer);
				*downlinkoffnum = i;
				return top;
			}
			else
			{
				/* Append this child to the list of pages to visit later */
				ptr = (GISTInsertStack *) palloc0(sizeof(GISTInsertStack));
				ptr->blkno = blkno;
				ptr->downlinkoffnum = i;
				ptr->parent = top;

				fifo = lappend(fifo, ptr);
			}
		}

		UnlockReleaseBuffer(buffer);
	}

	elog(ERROR, "failed to re-find parent of a page in index \"%s\", block %u",
		 RelationGetRelationName(r), child);
	return NULL;				/* keep compiler quiet */
}
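Note how the search queue mixes both list primitives: lappend enqueues ordinary children at the back for breadth-first order, while lcons pushes a just-discovered right sibling onto the front so it is visited next. Together with linitial/list_delete_first, the List effectively works as a deque:

/* back of the deque:   fifo = lappend(fifo, ptr);   -- normal BFS order
 * front of the deque:  fifo = lcons(ptr, fifo);     -- visit this one next
 * dequeue:             top  = linitial(fifo);
 *                      fifo = list_delete_first(fifo);
 */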
Example No. 28
static lref_t execute_fast_op(lref_t fop, lref_t env)
{
     lref_t retval = NIL;
     lref_t sym;
     lref_t binding;
     lref_t fn;
     lref_t args;
     size_t argc;
     lref_t argv[ARG_BUF_LEN];
     lref_t after;
     lref_t tag;
     lref_t cell;
     lref_t escape_retval;
     jmp_buf *jmpbuf;

     STACK_CHECK(&fop);
     _process_interrupts();

     fstack_enter_eval_frame(&fop, fop, env);

     while(!NULLP(fop)) {
          switch(fop->header.opcode)
          {
          case FOP_LITERAL:
               retval = fop->as.fast_op.arg1;
               fop = fop->as.fast_op.next;
               break;

          case FOP_GLOBAL_REF:
               sym = fop->as.fast_op.arg1;
               binding = SYMBOL_VCELL(sym);

               if (UNBOUND_MARKER_P(binding))
                    vmerror_unbound(sym);

               retval = binding;

               fop = fop->as.fast_op.next;
               break;

          case FOP_GLOBAL_SET:
               sym = fop->as.fast_op.arg1;
               binding = SYMBOL_VCELL(sym);

               if (UNBOUND_MARKER_P(binding))
                    vmerror_unbound(sym);

               SET_SYMBOL_VCELL(sym, retval);

               fop = fop->as.fast_op.next;
               break;

          case FOP_APPLY_GLOBAL:
               sym = fop->as.fast_op.arg1;
               fn = SYMBOL_VCELL(sym);

               if (UNBOUND_MARKER_P(fn))
                    vmerror_unbound(sym);

               argc = 0;
               args = fop->as.fast_op.arg2;

               while (CONSP(args)) {
                    if (argc >= ARG_BUF_LEN) {
                         vmerror_unsupported(_T("too many actual arguments"));
                         break;
                    }

                    argv[argc] = execute_fast_op(CAR(args), env);

                    args = CDR(args);
                    argc++;
               }

               if (!NULLP(args))
                    vmerror_arg_out_of_range(fop->as.fast_op.arg2,
                                             _T("bad formal argument list"));

               fop = apply(fn, argc, argv, &env, &retval);
               break;

          case FOP_APPLY:
               argc = 0;
               fn = execute_fast_op(fop->as.fast_op.arg1, env);
               args = fop->as.fast_op.arg2;

               while (CONSP(args)) {
                    if (argc >= ARG_BUF_LEN) {
                         vmerror_unsupported(_T("too many actual arguments"));
                         break;
                    }

                    argv[argc] = execute_fast_op(CAR(args), env);

                    args = CDR(args);
                    argc++;
               }

               if (!NULLP(args))
                    vmerror_arg_out_of_range(fop->as.fast_op.arg2,
                                             _T("bad formal argument list"));

               fop = apply(fn, argc, argv, &env, &retval);
               break;

          case FOP_IF_TRUE:
               if (TRUEP(retval))
                    fop = fop->as.fast_op.arg1;
               else
                    fop = fop->as.fast_op.arg2;
               break;

          case FOP_RETVAL:
               fop = fop->as.fast_op.next;
               break;

          case FOP_SEQUENCE:
               retval = execute_fast_op(fop->as.fast_op.arg1, env);

               fop = fop->as.fast_op.arg2;
               break;

          case FOP_THROW:
               tag = execute_fast_op(fop->as.fast_op.arg1, env);
               escape_retval = execute_fast_op(fop->as.fast_op.arg2, env);

               dscwritef(DF_SHOW_THROWS, (_T("; DEBUG: throw ~a, retval = ~a\n"), tag, escape_retval));

               CURRENT_TIB()->escape_frame = find_matching_escape(CURRENT_TIB()->frame, tag);
               CURRENT_TIB()->escape_value = escape_retval;

               if (CURRENT_TIB()->escape_frame == NULL) {
                    /* If we don't find a matching catch for the throw, we have a
                     * problem and need to invoke a trap. */
                    vmtrap(TRAP_UNCAUGHT_THROW,
                           (enum vmt_options_t)(VMT_MANDATORY_TRAP | VMT_HANDLER_MUST_ESCAPE),
                           2, tag, escape_retval);
               }

               unwind_stack_for_throw();

               fop = fop->as.fast_op.next;
               break;

          case FOP_CATCH:
               tag = execute_fast_op(fop->as.fast_op.arg1, env);

               jmpbuf = fstack_enter_catch_frame(tag, CURRENT_TIB()->frame);

               dscwritef(DF_SHOW_THROWS, (_T("; DEBUG: setjmp tag: ~a, frame: ~c&, jmpbuf: ~c&\n"), tag, CURRENT_TIB()->frame, jmpbuf));

               if (setjmp(*jmpbuf) == 0) {
                    retval = execute_fast_op(fop->as.fast_op.arg2, env);
               } else {
                    dscwritef(DF_SHOW_THROWS, (_T("; DEBUG: catch, retval = ~a\n"), CURRENT_TIB()->escape_value));

                    retval = CURRENT_TIB()->escape_value;
                    CURRENT_TIB()->escape_value = NIL;
               }

               fstack_leave_frame();

               fop = fop->as.fast_op.next;
               break;

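          /* unwind-protect style construct: arg1 evaluates to an "after"
           * thunk recorded in the unwind frame. On normal exit from arg2 it
           * is applied here; on a non-local exit, unwind_stack_for_throw()
           * is presumably responsible for running it during unwinding. */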
          case FOP_WITH_UNWIND_FN:
               fstack_enter_unwind_frame(execute_fast_op(fop->as.fast_op.arg1, env));

               retval = execute_fast_op(fop->as.fast_op.arg2, env);

               after = CURRENT_TIB()->frame[FOFS_UNWIND_AFTER];

               fstack_leave_frame();

               apply1(after, 0, NULL);

               fop = fop->as.fast_op.next;
               break;

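          /* Construct a closure over the current environment. arg1 appears
           * to be a pair of (lambda-list . property-list), with arg2 the
           * compiled body; lclosurecons captures env alongside them. */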
          case FOP_CLOSURE:
               retval = lclosurecons(env,
                                     lcons(lcar(fop->as.fast_op.arg1),
                                           fop->as.fast_op.arg2),
                                     lcdr(fop->as.fast_op.arg1));
               fop = fop->as.fast_op.next;
               break;

          case FOP_CAR:
               retval = lcar(retval);
               fop = fop->as.fast_op.next;
               break;

          case FOP_CDR:
               retval = lcdr(retval);
               fop = fop->as.fast_op.next;
               break;

          case FOP_NOT:
               retval = boolcons(!TRUEP(retval));
               fop = fop->as.fast_op.next;
               break;

          case FOP_NULLP:
               retval = boolcons(NULLP(retval));
               fop = fop->as.fast_op.next;
               break;

          case FOP_EQP:
               retval = boolcons(EQ(execute_fast_op(fop->as.fast_op.arg1, env),
                                    execute_fast_op(fop->as.fast_op.arg2, env)));
               fop = fop->as.fast_op.next;
               break;

          case FOP_GET_ENV:
               retval = env;
               fop = fop->as.fast_op.next;
               break;

          case FOP_GLOBAL_DEF: /* three args; the third was genv, but is currently unused */
               retval = lidefine_global(fop->as.fast_op.arg1, fop->as.fast_op.arg2);
               fop = fop->as.fast_op.next;
               break;

          case FOP_GET_FSP:
               retval = fixcons((fixnum_t)CURRENT_TIB()->fsp);
               fop = fop->as.fast_op.next;
               break;

          case FOP_GET_FRAME:
               retval = fixcons((fixnum_t)CURRENT_TIB()->frame);
               fop = fop->as.fast_op.next;
               break;

          case FOP_GET_HFRAMES:
               retval = CURRENT_TIB()->handler_frames;
               fop = fop->as.fast_op.next;
               break;

          case FOP_SET_HFRAMES:
               CURRENT_TIB()->handler_frames = execute_fast_op(fop->as.fast_op.arg1, env);
               fop = fop->as.fast_op.next;
               break;

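          /* Store the current frame pointer into the (already bound)
           * global named by arg1, then evaluate arg2; presumably this lets
           * the frame be located again after arg2 has run. */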
          case FOP_GLOBAL_PRESERVE_FRAME:
               sym = fop->as.fast_op.arg1;
               binding = SYMBOL_VCELL(sym);

               if (UNBOUND_MARKER_P(binding))
                    vmerror_unbound(sym);

               SET_SYMBOL_VCELL(sym, fixcons((fixnum_t)CURRENT_TIB()->frame));

               retval = execute_fast_op(fop->as.fast_op.arg2, env);
               fop = fop->as.fast_op.next;
               break;

          case FOP_STACK_BOUNDARY:
               sym = execute_fast_op(fop->as.fast_op.arg1, env);

               fstack_enter_boundary_frame(sym);

               retval = execute_fast_op(fop->as.fast_op.arg2, env);

               fstack_leave_frame();

               fop = fop->as.fast_op.next;
               break;

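          /* Destructively enqueue a cell: retval appears to be a queue
           * header whose CAR points at the current tail cell. Splice the
           * new cell in after the old tail, then advance the tail pointer. */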
          case FOP_FAST_ENQUEUE_CELL:
               retval = execute_fast_op(fop->as.fast_op.arg2, env);

               cell = execute_fast_op(fop->as.fast_op.arg1, env);

               SET_CDR(CAR(retval), cell);
               SET_CAR(retval, cell);

               fop = fop->as.fast_op.next;
               break;

          case FOP_WHILE_TRUE:
               while(TRUEP(execute_fast_op(fop->as.fast_op.arg1, env))) {
                    retval = execute_fast_op(fop->as.fast_op.arg2, env);
               }
               fop = fop->as.fast_op.next;
               break;

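          /* The next three ops use lexically-addressed locals: arg1 and
           * arg2 are fixnums, presumably the enclosing-frame depth and the
           * slot offset within that frame. */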
          case FOP_LOCAL_REF_BY_INDEX:
               retval = lenvlookup_by_index(FIXNM(fop->as.fast_op.arg1),
                                            FIXNM(fop->as.fast_op.arg2),
                                            env);
               fop = fop->as.fast_op.next;
               break;

          case FOP_LOCAL_REF_RESTARG:
               retval = lenvlookup_restarg_by_index(FIXNM(fop->as.fast_op.arg1),
                                                    FIXNM(fop->as.fast_op.arg2),
                                                    env);
               fop = fop->as.fast_op.next;
               break;

          case FOP_LOCAL_SET_BY_INDEX:
               lenvlookup_set_by_index(FIXNM(fop->as.fast_op.arg1),
                                       FIXNM(fop->as.fast_op.arg2),
                                       env,
                                       retval);
               fop = fop->as.fast_op.next;
               break;

          default:
               panic("Unsupported fast-op");
          }
     }

     fstack_leave_frame();

     return retval;
}
Example no. 29
0
/*
 * sort_inner_and_outer
 *	  Create mergejoin join paths by explicitly sorting both the outer and
 *	  inner join relations on each available merge ordering.
 *
 * 'joinrel' is the join relation
 * 'outerrel' is the outer join relation
 * 'innerrel' is the inner join relation
 * 'restrictlist' contains all of the RestrictInfo nodes for restriction
 *		clauses that apply to this join
 * 'mergeclause_list' is a list of RestrictInfo nodes for available
 *		mergejoin clauses in this join
 * 'jointype' is the type of join to do
 */
static void
sort_inner_and_outer(PlannerInfo *root,
					 RelOptInfo *joinrel,
					 RelOptInfo *outerrel,
					 RelOptInfo *innerrel,
					 List *restrictlist,
					 List *mergeclause_list,
					 JoinType jointype)
{
	bool		useallclauses;
	Path	   *outer_path;
	Path	   *inner_path;
	List	   *all_pathkeys;
	ListCell   *l;

	/*
	 * If we are doing a right or full join, we must use *all* the
	 * mergeclauses as join clauses, else we will not have a valid plan.
	 */
	switch (jointype)
	{
		case JOIN_INNER:
		case JOIN_LEFT:
		case JOIN_IN:
		case JOIN_UNIQUE_OUTER:
		case JOIN_UNIQUE_INNER:
			useallclauses = false;
			break;
		case JOIN_RIGHT:
		case JOIN_FULL:
			useallclauses = true;
			break;
		default:
			elog(ERROR, "unrecognized join type: %d",
				 (int) jointype);
			useallclauses = false;		/* keep compiler quiet */
			break;
	}

	/*
	 * We only consider the cheapest-total-cost input paths, since we are
	 * assuming here that a sort is required.  We will consider
	 * cheapest-startup-cost input paths later, and only if they don't need a
	 * sort.
	 *
	 * If unique-ification is requested, do it and then handle as a plain
	 * inner join.
	 */
	outer_path = outerrel->cheapest_total_path;
	inner_path = innerrel->cheapest_total_path;
	if (jointype == JOIN_UNIQUE_OUTER)
	{
		outer_path = (Path *) create_unique_path(root, outerrel, outer_path);
		jointype = JOIN_INNER;
	}
	else if (jointype == JOIN_UNIQUE_INNER)
	{
		inner_path = (Path *) create_unique_path(root, innerrel, inner_path);
		jointype = JOIN_INNER;
	}

	/*
	 * Each possible ordering of the available mergejoin clauses will generate
	 * a differently-sorted result path at essentially the same cost.  We have
	 * no basis for choosing one over another at this level of joining, but
	 * some sort orders may be more useful than others for higher-level
	 * mergejoins, so it's worth considering multiple orderings.
	 *
	 * Actually, it's not quite true that every mergeclause ordering will
	 * generate a different path order, because some of the clauses may be
	 * partially redundant (refer to the same EquivalenceClasses).	Therefore,
	 * what we do is convert the mergeclause list to a list of canonical
	 * pathkeys, and then consider different orderings of the pathkeys.
	 *
	 * Generating a path for *every* permutation of the pathkeys doesn't seem
	 * like a winning strategy; the cost in planning time is too high. For
	 * now, we generate one path for each pathkey, listing that pathkey first
	 * and the rest in random order.  This should allow at least a one-clause
	 * mergejoin without re-sorting against any other possible mergejoin
	 * partner path.  But if we've not guessed the right ordering of secondary
	 * keys, we may end up evaluating clauses as qpquals when they could have
	 * been done as mergeclauses.  (In practice, it's rare that there's more
	 * than two or three mergeclauses, so expending a huge amount of thought
	 * on that is probably not worth it.)
	 *
	 * The pathkey order returned by select_outer_pathkeys_for_merge() has
	 * some heuristics behind it (see that function), so be sure to try it
	 * exactly as-is as well as making variants.
	 */
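	/*
	 * For example, with three canonical pathkeys a, b and c, this yields
	 * just the three orderings (a b c), (b a c) and (c a b), with the
	 * trailing keys in arbitrary order, rather than all six permutations.
	 */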
	all_pathkeys = select_outer_pathkeys_for_merge(root,
												   mergeclause_list,
												   joinrel);

	foreach(l, all_pathkeys)
	{
		List	   *front_pathkey = (List *) lfirst(l);
		List	   *cur_mergeclauses;
		List	   *outerkeys;
		List	   *innerkeys;
		List	   *merge_pathkeys;

		/* Make a pathkey list with this guy first */
		if (l != list_head(all_pathkeys))
			outerkeys = lcons(front_pathkey,
							  list_delete_ptr(list_copy(all_pathkeys),
											  front_pathkey));
		else
			outerkeys = all_pathkeys;	/* no work at first one... */

		/* Sort the mergeclauses into the corresponding ordering */
		cur_mergeclauses = find_mergeclauses_for_pathkeys(root,
														  outerkeys,
														  true,
														  mergeclause_list);

		/* Should have used them all... */
		Assert(list_length(cur_mergeclauses) == list_length(mergeclause_list));

		/* Build sort pathkeys for the inner side */
		innerkeys = make_inner_pathkeys_for_merge(root,
												  cur_mergeclauses,
												  outerkeys);

		/* Build pathkeys representing output sort order */
		merge_pathkeys = build_join_pathkeys(root, joinrel, jointype,
											 outerkeys);

		/*
		 * And now we can make the path.
		 *
		 * Note: it's possible that the cheapest paths will already be sorted
		 * properly.  create_mergejoin_path will detect that case and suppress
		 * an explicit sort step, so we needn't do so here.
		 */
		add_path(joinrel, (Path *)
				 create_mergejoin_path(root,
									   joinrel,
									   jointype,
									   outer_path,
									   inner_path,
									   restrictlist,
									   merge_pathkeys,
									   cur_mergeclauses,
									   outerkeys,
									   innerkeys));
	}
}
Example no. 30
0
/*
 * sepgsql_avc_compute
 *
 * A fallback path, used on a cache miss. It asks SELinux for its access
 * control decision on the supplied security contexts and object class.
 */
static avc_cache *
sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
{
	char	   *ucontext = NULL;
	char	   *ncontext = NULL;
	MemoryContext oldctx;
	avc_cache  *cache;
	uint32		hash;
	int			index;
	struct av_decision avd;

	hash = sepgsql_avc_hash(scontext, tcontext, tclass);
	index = hash % AVC_NUM_SLOTS;
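	/* The cache appears to be a simple hash table: each of AVC_NUM_SLOTS
	 * buckets holds a list of avc_cache entries chained via lcons(). */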

	/*
	 * Validate the supplied security context. Because validation always
	 * involves a system call, frequent checks should be avoided. The
	 * validation status stays accurate until the security policy is
	 * reloaded, so we also cache whether the supplied security context
	 * was valid or not.
	 */
	if (security_check_context_raw((security_context_t) tcontext) != 0)
		ucontext = sepgsql_avc_unlabeled();

	/*
	 * Ask SELinux for its access control decision
	 */
	if (!ucontext)
		sepgsql_compute_avd(scontext, tcontext, tclass, &avd);
	else
		sepgsql_compute_avd(scontext, ucontext, tclass, &avd);

	/*
	 * Besides the access control decision on the procedure itself, we also
	 * cache the security label to be switched to when a client labeled as
	 * 'scontext' executes a procedure labeled as 'tcontext'. That label is
	 * computed uniquely from the pair of 'scontext' and 'tcontext', so it
	 * is reasonable to cache the new label in the avc and thereby avoid
	 * unnecessary system calls. It is referenced by sepgsql_needs_fmgr_hook
	 * to check whether the supplied function is a trusted procedure or not.
	 */
	if (tclass == SEPG_CLASS_DB_PROCEDURE)
	{
		if (!ucontext)
			ncontext = sepgsql_compute_create(scontext, tcontext,
											  SEPG_CLASS_PROCESS, NULL);
		else
			ncontext = sepgsql_compute_create(scontext, ucontext,
											  SEPG_CLASS_PROCESS, NULL);
		if (strcmp(scontext, ncontext) == 0)
		{
			pfree(ncontext);
			ncontext = NULL;
		}
	}

	/*
	 * Set up an avc_cache object
	 */
	oldctx = MemoryContextSwitchTo(avc_mem_cxt);

	cache = palloc0(sizeof(avc_cache));

	cache->hash = hash;
	cache->scontext = pstrdup(scontext);
	cache->tcontext = pstrdup(tcontext);
	cache->tclass = tclass;

	cache->allowed = avd.allowed;
	cache->auditallow = avd.auditallow;
	cache->auditdeny = avd.auditdeny;
	cache->hot_cache = true;
	if (avd.flags & SELINUX_AVD_FLAGS_PERMISSIVE)
		cache->permissive = true;
	if (!ucontext)
		cache->tcontext_is_valid = true;
	if (ncontext)
		cache->ncontext = pstrdup(ncontext);

	avc_num_caches++;

	if (avc_num_caches > avc_threshold)
		sepgsql_avc_reclaim();

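	/* Prepend to the bucket's chain, so recently computed entries are
	 * seen first when the bucket is scanned on later lookups. */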
	avc_slots[index] = lcons(cache, avc_slots[index]);

	MemoryContextSwitchTo(oldctx);

	return cache;
}