Example 1
/* ----------------------------------------------------------------
 *		ForeignNext
 *
 *		This is a workhorse for ExecForeignScan
 * ----------------------------------------------------------------
 */
static struct tupslot *
ForeignNext(foreign_ss *node)
{
	struct tupslot *slot;
	foreign_scan_sc *plan = (foreign_scan_sc *) node->ss.ps.plan;
	expr_ctx_n *econtext = node->ss.ps.ps_ExprContext;
	struct mctx * oldcontext;

	/* Call the Iterate function in short-lived context */
	oldcontext = mctx_switch(econtext->ecxt_per_tuple_memory);
	slot = node->fdwroutine->IterateForeignScan(node);
	mctx_switch(oldcontext);

	/*
	 * If any system columns are requested, we have to force the tuple into
	 * physical-tuple form to avoid "cannot extract system attribute from
	 * virtual tuple" errors later.  We also insert a valid value for
	 * tableoid, which is the only actually-useful system column.
	 */
	if (plan->fsSystemCol
		&& !TUPSLOT_NULL(slot)) {
		struct heap_tuple* tup;

		tup = exec_materialize_slot(slot);
		tup->t_tableOid = REL_ID(node->ss.ss_currentRelation);
	}

	return slot;
}
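
The pattern just shown recurs in almost every example below: mctx_switch installs a new current allocation context and hands back the previous one, so everything palloc'd inside the window lands in the chosen context. A minimal sketch of the idiom in isolation (mctx_switch and pstrdup follow this codebase's naming; the helper and its payload are made up for illustration):

/*
 * Sketch only: allocate a copy of 'src' in a caller-chosen memory
 * context, then restore whatever context was current before.
 */
static char *
copy_into_ctx(struct mctx *target, const char *src)
{
	struct mctx *oldcontext;
	char	   *copy;

	oldcontext = mctx_switch(target);	/* returns the previous context */
	copy = pstrdup(src);				/* allocated in 'target' */
	mctx_switch(oldcontext);			/* back to the caller's context */

	return copy;
}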
Example 2
/* --------------------------------
 *		exec_materialize_slot
 *			Force a slot into the "materialized" state.
 *
 *		This causes the slot's tuple to be a local copy not dependent on
 *		any external storage.  A pointer to the contained tuple is returned.
 *
 *		A typical use for this operation is to prepare a computed tuple
 *		for being stored on disk.  The original data may or may not be
 *		virtual, but in any case we need a private copy for heap_insert
 *		to scribble on.
 * --------------------------------
 */
struct heap_tuple*
exec_materialize_slot(struct tupslot *slot)
{
	struct mctx* oldContext;

	/*
	 * sanity checks
	 */
	ASSERT(slot != NULL);
	ASSERT(!slot->tts_isempty);

	/*
	 * If we have a regular physical tuple, and it's locally palloc'd, we have
	 * nothing to do.
	 */
	if (slot->tts_tuple && slot->tts_shouldFree)
		return slot->tts_tuple;

	/*
	 * Otherwise, copy or build a physical tuple, and store it into the slot.
	 *
	 * We may be called in a context that is shorter-lived than the tuple
	 * slot, but we have to ensure that the materialized tuple will survive
	 * anyway.
	 */
	oldContext = mctx_switch(slot->tts_mcxt);
	slot->tts_tuple = exec_copy_slot_tuple(slot);
	slot->tts_shouldFree = true;
	mctx_switch(oldContext);

	/*
	 * Drop the pin on the referenced buffer, if there is one.
	 */
	if (BUF_VALID(slot->tts_buffer))
		release_buf(slot->tts_buffer);

	slot->tts_buffer = INVALID_BUF;

	/*
	 * Mark extracted state invalid.  This is important because the slot is
	 * not supposed to depend any more on the previous external data; we
	 * mustn't leave any dangling pass-by-reference datums in tts_values.
	 * However, we have not actually invalidated any such datums, if there
	 * happen to be any previously fetched from the slot.  (Note in particular
	 * that we have not pfree'd tts_mintuple, if there is one.)
	 */
	slot->tts_nvalid = 0;

	/*
	 * On the same principle of not depending on previous remote storage,
	 * forget the mintuple if it's not local storage.  (If it is local
	 * storage, we must not pfree it now, since callers might have already
	 * fetched datum pointers referencing it.)
	 */
	if (!slot->tts_shouldFreeMin)
		slot->tts_mintuple = NULL;

	return slot->tts_tuple;
}
Example 3
/*
 * connectby - does the real work for connectby_text()
 */
static struct tupstore *
connectby(char *relname,
		  char *key_fld,
		  char *parent_key_fld,
		  char *orderby_fld,
		  char *branch_delim,
		  char *start_with,
		  int max_depth,
		  bool show_branch,
		  bool show_serial,
		  struct mctx * per_query_ctx,
		  bool randomAccess,
		  AttInMetadata *attinmeta)
{
	struct tupstore *tupstore = NULL;
	int			ret;
	struct mctx * oldcontext;

	int			serial = 1;

	/* Connect to SPI manager */
	if ((ret = SPI_connect()) < 0)
		/* internal error */
		elog(ERROR, "connectby: SPI_connect returned %d", ret);

	/* switch to longer term context to create the tuple store */
	oldcontext = mctx_switch(per_query_ctx);

	/* initialize our tuplestore */
	tupstore = tts_begin_heap(randomAccess, false, work_mem);

	mctx_switch(oldcontext);

	/* now go get the whole tree */
	tupstore = build_tuplestore_recursively(key_fld,
											parent_key_fld,
											relname,
											orderby_fld,
											branch_delim,
											start_with,
											start_with, /* current_branch */
											0,	/* initial level is 0 */
											&serial,	/* initial serial is 1 */
											max_depth,
											show_branch,
											show_serial,
											per_query_ctx,
											attinmeta,
											tupstore);

	SPI_finish();

	return tupstore;
}
Example 4
/*
 * ExecIndexEvalRuntimeKeys
 *		Evaluate any runtime key values, and update the scankeys.
 */
void
ExecIndexEvalRuntimeKeys(
	expr_ctx_n *econtext,
	index_runtime_key_info_s *runtimeKeys,
	int numRuntimeKeys)
{
	int j;
	struct mctx* oldContext;

	/* We want to keep the key values in per-tuple memory */
	oldContext = mctx_switch(econtext->ecxt_per_tuple_memory);

	for (j = 0; j < numRuntimeKeys; j++) {
		struct scankey*	scan_key = runtimeKeys[j].scan_key;
		expr_state_n* key_expr = runtimeKeys[j].key_expr;
		datum_t	scanvalue;
		bool isNull;

		/*
		 * For each run-time key, extract the run-time expression and evaluate
		 * it with respect to the current context.	We then stick the result
		 * into the proper scan key.
		 *
		 * Note: the result of the eval could be a pass-by-ref value that's
		 * stored in some outer scan's tuple, not in
		 * econtext->ecxt_per_tuple_memory.  We assume that the outer tuple
		 * will stay put throughout our scan.  If this is wrong, we could copy
		 * the result into our context explicitly, but I think that's not
		 * necessary.
		 *
		 * It's also entirely possible that the result of the eval is a
		 * toasted value.  In this case we should forcibly detoast it, to
		 * avoid repeat detoastings each time the value is examined by an
		 * index support function.
		 */
		scanvalue = EXEC_EVAL_EXPR(key_expr, econtext, &isNull, NULL);
		if (isNull) {
			scan_key->sk_argument = scanvalue;
			scan_key->sk_flags |= SK_ISNULL;
		} else {
			if (runtimeKeys[j].key_toastable)
				scanvalue = PTR_TO_D(PG_DETOAST_DATUM(scanvalue));

			scan_key->sk_argument = scanvalue;
			scan_key->sk_flags &= ~SK_ISNULL;
		}
	}

	mctx_switch(oldContext);
}
Example 5
File: worker.c Project: jsyk/lpel
/**
 * Worker loop
 */
static void WorkerLoop( workerctx_t *wc)
{
  lpel_task_t *t = NULL;

  do {
    /* before executing a task, handle all pending requests! */
    LpelSpmdHandleRequests(wc->wid);

    t = LpelSchedFetchReady( wc->sched);
    if (t != NULL) {
      /* execute task */
      wc->current_task = t;
      mctx_switch(&wc->mctx, &t->mctx);

      WORKER_DBGMSG(wc, "Back on worker %d context.\n", wc->wid);

      /* check if there are any contexts marked for deletion. */
      LpelCollectTask(wc, NULL);

    } else {
      /* no ready tasks */
      WaitForNewMessage( wc);
    }
    /* fetch (remaining) messages */
    FetchAllMessages( wc);
  } while ( !( 0==wc->num_tasks && wc->terminate) );

}
Example 6
/* --------------------------------
 *		exec_copy_slot
 *			Copy the source slot's contents into the destination slot.
 *
 *		The destination acquires a private copy that will not go away
 *		if the source is cleared.
 *
 *		The caller must ensure the slots have compatible tupdescs.
 * --------------------------------
 */
struct tupslot*
exec_copy_slot(struct tupslot* dstslot, struct tupslot* srcslot)
{
	struct heap_tuple* newTuple;
	struct mctx * oldContext;

	/*
	 * There might be ways to optimize this when the source is virtual, but
	 * for now just always build a physical copy.  Make sure it is in the
	 * right context.
	 */
	oldContext = mctx_switch(dstslot->tts_mcxt);
	newTuple = exec_copy_slot_tuple(srcslot);
	mctx_switch(oldContext);

	return exec_store_tuple(newTuple, dstslot, INVALID_BUF, true);
}
Example 7
static void WrapperLoop(workerctx_t *wp)
{
	lpel_task_t *t = NULL;
	workermsg_t msg;

	do {
		t = wp->current_task;
		if (t != NULL) {
			/* execute task */
			mctx_switch(&wp->mctx, &t->mctx);
		} else {
			/* no ready tasks */
			LpelMailboxRecv(wp->mailbox, &msg);
			switch(msg.type) {
			case WORKER_MSG_ASSIGN:
				t = msg.body.task;
				WORKER_DBG("wrapper: get task %d\n", t->uid);
				assert(t->state == TASK_CREATED);
				t->state = TASK_READY;
				wp->current_task = t;
#ifdef USE_LOGGING
				if (t->mon) {
					if (MON_CB(worker_create_wrapper)) {
						wp->mon = MON_CB(worker_create_wrapper)(t->mon);
					} else {
						wp->mon = NULL;
					}
				}
				if (t->mon && MON_CB(task_assign)) {
					MON_CB(task_assign)(t->mon, wp->mon);
				}
#endif
				break;

			case WORKER_MSG_WAKEUP:
				t = msg.body.task;
				WORKER_DBG("wrapper: unblock task %d\n", t->uid);
				assert (t->state == TASK_BLOCKED);
				t->state = TASK_READY;
				wp->current_task = t;
#ifdef USE_LOGGING
				if (t->mon && MON_CB(task_assign)) {
					MON_CB(task_assign)(t->mon, wp->mon);
				}
#endif
				break;
			default:
				assert(0);
				break;
			}
		}
	} while (!wp->terminate);
	/* clean up the task context marked for deletion */
	LpelTaskDestroy(wp->current_task);
}
Example 8
File: worker.c Project: jsyk/lpel
/**
 * Dispatch next ready task
 *
 * This dispatcher function is called upon blocking a task.
 * It executes on the same exec-stack as the task itself, calls the FetchReady
 * function from the scheduler module, and, if there is a ready task, makes
 * a continuation to that ready task. If no task is ready, execution returns to the
 * worker loop (wc->mctx). If the task runs on a wrapper, execution returns to the
 * wrapper loop in either case.
 * This way, in the optimal case, task switching only requires a single context switch
 * instead of two.
 *
 * @param t   the current task context
 */
void LpelWorkerDispatcher( lpel_task_t *t)
{
  workerctx_t *wc = t->worker_context;

  /* dependent of worker or wrapper */
  if (wc->wid != -1) {
    lpel_task_t *next;

    /* before picking the next task, process messages to consider
     * also newly arrived READY tasks
     */
    FetchAllMessages( wc);

    /* before executing a task, handle all pending requests! */
    LpelSpmdHandleRequests(wc->wid);

    next = LpelSchedFetchReady( wc->sched);
    if (next != NULL) {
      /* short circuit */
      if (next==t) { return; }

      /* execute task */
      wc->current_task = next;
      mctx_switch(&t->mctx, &next->mctx); /*SWITCH*/
    } else {
      /* no ready task! -> back to worker context */
      wc->current_task = NULL;
      mctx_switch(&t->mctx, &wc->mctx); /*SWITCH*/
    }
  } else {
    /* we are on a wrapper.
     * back to wrapper context
     */
    wc->current_task = NULL;
    mctx_switch(&t->mctx, &wc->mctx); /*SWITCH*/
    /* nothing to finalize on a wrapper */
  }
  /*********************************
   * ... CTX SWITCH ...
   *********************************/
  /* let task continue its business ... */
}
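
Note that mctx_switch in the LPEL examples is a different animal from the executor examples above: it is a raw machine-context switch between cooperatively scheduled tasks, taking the context to save and the context to resume. LPEL ships its own low-level backends; purely as a sketch under that assumption, the same contract could be expressed with POSIX ucontext:

#include <ucontext.h>

/*
 * Hypothetical stand-in for LPEL's machine-context type and
 * mctx_switch(): save the calling task's state in *from and resume
 * *to.  The call "returns" only when a later switch targets *from.
 */
typedef struct mctx { ucontext_t uctx; } mctx_t;

static inline void
mctx_switch(mctx_t *from, mctx_t *to)
{
	swapcontext(&from->uctx, &to->uctx);
}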
Example 9
/* ----------------
 *		create_expr_ctx
 *
 *		Create a context for expression evaluation within an EState.
 *
 * An executor run may require multiple ExprContexts (we usually make one
 * for each plan_n node, and a separate one for per-output-tuple processing
 * such as constraint checking).  Each expr_ctx_n has its own "per-tuple"
 * memory context.
 *
 * Note we make no assumption about the caller's memory context.
 * ----------------
 */
expr_ctx_n*
create_expr_ctx(exec_state_n *estate)
{
	expr_ctx_n *econtext;
	struct mctx * oldcontext;

	/* Create the expr_ctx_n node within the per-query memory context */
	oldcontext = mctx_switch(estate->es_query_cxt);

	econtext = MK_N(ExprContext,expr_ctx_n);

	/* Initialize fields of expr_ctx_n */
	econtext->ecxt_scantuple = NULL;
	econtext->ecxt_innertuple = NULL;
	econtext->ecxt_outertuple = NULL;
	econtext->ecxt_per_query_memory = estate->es_query_cxt;

	/*
	 * Create working memory for expression evaluation in this context.
	 */
	econtext->ecxt_per_tuple_memory = aset_create_normal(estate->es_query_cxt, "ExprContext");
	econtext->ecxt_param_exec_vals = estate->es_param_exec_vals;
	econtext->ecxt_param_list_info = estate->es_param_list_info;
	econtext->ecxt_aggvalues = NULL;
	econtext->ecxt_aggnulls = NULL;
	econtext->caseValue_datum = (datum_t) 0;
	econtext->caseValue_isNull = true;
	econtext->domainValue_datum = (datum_t) 0;
	econtext->domainValue_isNull = true;
	econtext->ecxt_estate = estate;
	econtext->ecxt_callbacks = NULL;

	/*
	 * Link the expr_ctx_n into the exec_state_n to ensure it is shut down when the
	 * exec_state_n is freed.  Because we use lcons(), shutdowns will occur in
	 * reverse order of creation, which may not be essential but can't hurt.
	 */
	estate->es_exprcontexts = lcons(econtext, estate->es_exprcontexts);
	mctx_switch(oldcontext);

	return econtext;
}
Example 10
/*
 * regexp_matches()
 *		Return a table of matches of a pattern within a string.
 */
datum_t regexp_matches(PG_FUNC_ARGS)
{
	struct fcall_ctx *funcctx;
	regexp_matches_ctx *matchctx;

	if (SRF_IS_FIRSTCALL()) {
		text *pattern = ARG_TEXT_PP(1);
		text *flags = ARG_TEXT_PP_IF_EXISTS(2);
		struct mctx * oldcontext;

		funcctx = SRF_FIRSTCALL_INIT();
		oldcontext = mctx_switch(funcctx->multi_call_memory_ctx);

		/* be sure to copy the input string into the multi-call ctx */
		matchctx = setup_regexp_matches(ARG_TEXT_P_COPY(0), pattern,
			flags, PG_COLLATION(), false, true, false);

		/* Pre-create workspace that build_regexp_matches_result needs */
		matchctx->elems = (datum_t *) palloc(sizeof(datum_t) * matchctx->npatterns);
		matchctx->nulls = (bool *) palloc(sizeof(bool) * matchctx->npatterns);

		mctx_switch(oldcontext);
		funcctx->user_fctx = (void *)matchctx;
	}

	funcctx = SRF_PERCALL_SETUP();
	matchctx = (regexp_matches_ctx *) funcctx->user_fctx;

	if (matchctx->next_match < matchctx->nmatches) {
		array_s *result_ary;

		result_ary = build_regexp_matches_result(matchctx);
		matchctx->next_match++;
		SRF_RETURN_NEXT(funcctx, PTR_TO_D(result_ary));
	}

	/* release space in multi-call ctx to avoid intraquery memory leak */
	cleanup_regexp_matches(matchctx);

	SRF_RETURN_DONE(funcctx);
}
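
This example and several later ones (regexp_split_to_table, heap_page_items, bt_page_items, normal_rand) all follow the same set-returning-function shape. Distilled to a skeleton, with macro names mirroring those used above and a hypothetical counting payload, the multi-call protocol is:

typedef struct { uint32 current; } my_state;	/* hypothetical state */

datum_t
my_srf(PG_FUNC_ARGS)
{
	struct fcall_ctx *funcctx;
	my_state   *state;

	if (SRF_IS_FIRSTCALL()) {
		struct mctx *oldcontext;

		funcctx = SRF_FIRSTCALL_INIT();
		/* cross-call state must live in the multi-call context */
		oldcontext = mctx_switch(funcctx->multi_call_memory_ctx);
		state = (my_state *) palloc(sizeof(my_state));
		state->current = 0;
		funcctx->max_calls = ARG_UINT32(0);
		funcctx->user_fctx = state;
		mctx_switch(oldcontext);
	}

	funcctx = SRF_PERCALL_SETUP();
	state = (my_state *) funcctx->user_fctx;

	if (funcctx->call_cntr < funcctx->max_calls)
		SRF_RETURN_NEXT(funcctx, UINT32_TO_D(state->current++));

	SRF_RETURN_DONE(funcctx);
}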
Example 11
void LpelWorkerTaskBlock(lpel_task_t *t){
	workerctx_t *wc = t->worker_context;
	if (wc->wid < 0) {	// wrapper
		wc->current_task = NULL;
	} else {
		WORKER_DBG("worker %d: block task %d\n", wc->wid, t->uid);
		//sendUpdatePrior(t);		//update prior for neighbor
		requestTask(wc);
	}
	wc->current_task = NULL;
	mctx_switch(&t->mctx, &wc->mctx);		// switch back to the worker/wrapper
}
Example 12
void LpelWorkerTaskExit(lpel_task_t *t) {
	workerctx_t *wc = t->worker_context;
	WORKER_DBG("worker %d: task %d exit\n", wc->wid, t->uid);
	if (wc->wid >= 0) {
		requestTask(wc);	// FIXME: should have requested before
		wc->current_task = NULL;
	}
	else
		wc->terminate = 1;		// wrapper: terminate

	mctx_switch(&t->mctx, &wc->mctx);		// switch back to the worker
}
Example 13
void LpelWorkerTaskYield(lpel_task_t *t){
	workerctx_t *wc = t->worker_context;
	if (wc->wid < 0) {	// wrapper
		WORKER_DBG("wrapper: task %d yields\n", t->uid);
	}
	else {
		//sendUpdatePrior(t);		//update prior for neighbor
		requestTask(wc);
		WORKER_DBG("worker %d: return task %d\n", wc->wid, t->uid);
		wc->current_task = NULL;
	}
	mctx_switch(&t->mctx, &wc->mctx);		// switch back to the worker/wrapper
}
Example 14
/* --------------------------------
 *		exec_fetch_slot_min_tuple
 *			Fetch the slot's minimal physical tuple.
 *
 *		If the slot contains a virtual tuple, we convert it to minimal
 *		physical form.	The slot retains ownership of the minimal tuple.
 *		If it contains a regular tuple we convert to minimal form and store
 *		that in addition to the regular tuple (not instead of, because
 *		callers may hold pointers to Datums within the regular tuple).
 *
 * As above, the result must be treated as read-only.
 * --------------------------------
 */
struct min_tuple*
exec_fetch_slot_min_tuple(struct tupslot *slot)
{
	struct mctx* oldContext;

	/*
	 * sanity checks
	 */
	ASSERT(slot != NULL);
	ASSERT(!slot->tts_isempty);

	/*
	 * If we have a minimal physical tuple (local or not) then just return it.
	 */
	if (slot->tts_mintuple)
		return slot->tts_mintuple;

	/*
	 * Otherwise, copy or build a minimal tuple, and store it into the slot.
	 *
	 * We may be called in a context that is shorter-lived than the tuple
	 * slot, but we have to ensure that the materialized tuple will survive
	 * anyway.
	 */
	oldContext = mctx_switch(slot->tts_mcxt);
	slot->tts_mintuple = exec_copy_slot_mintup(slot);
	slot->tts_shouldFreeMin = true;
	mctx_switch(oldContext);

	/*
	 * Note: we may now have a situation where we have a local minimal tuple
	 * attached to a virtual or non-local physical tuple.  There seems no harm
	 * in that at the moment, but if any materializes, we should change this
	 * function to force the slot into minimal-tuple-only state.
	 */

	return slot->tts_mintuple;
}
Example 15
/*
 * regexp_split_to_table()
 *		Split the string at matches of the pattern, returning the
 *		split-out substrings as a table.
 */
datum_t regexp_split_to_table(PG_FUNC_ARGS)
{
	struct fcall_ctx *funcctx;
	regexp_matches_ctx *splitctx;

	if (SRF_IS_FIRSTCALL()) {
		text *pattern = ARG_TEXT_PP(1);
		text *flags = ARG_TEXT_PP_IF_EXISTS(2);
		struct mctx * oldcontext;

		funcctx = SRF_FIRSTCALL_INIT();
		oldcontext = mctx_switch(funcctx->multi_call_memory_ctx);

		/* be sure to copy the input string into the multi-call ctx */
		splitctx = setup_regexp_matches(ARG_TEXT_P_COPY(0), pattern,
			flags, PG_COLLATION(), true, false, true);

		mctx_switch(oldcontext);
		funcctx->user_fctx = (void*)splitctx;
	}

	funcctx = SRF_PERCALL_SETUP();
	splitctx = (regexp_matches_ctx*) funcctx->user_fctx;

	if (splitctx->next_match <= splitctx->nmatches) {
		datum_t result = build_regexp_split_result(splitctx);

		splitctx->next_match++;
		SRF_RETURN_NEXT(funcctx, result);
	}

	/* release space in multi-call ctx to avoid intraquery memory leak */
	cleanup_regexp_matches(splitctx);

	SRF_RETURN_DONE(funcctx);
}
Example 16
/*
 * executor_start hook: start up logging if needed
 */
static void
explain_executor_start(struct qry_desc *queryDesc, int eflags)
{
	if (auto_explain_enabled())
	{
		/* Enable per-node instrumentation iff log_analyze is required. */
		if (auto_explain_log_analyze && (eflags & EXEC_FLAG_EXPLAIN_ONLY) == 0)
		{
			queryDesc->instrument_options |= INSTRUMENT_TIMER;
			if (auto_explain_log_buffers)
				queryDesc->instrument_options |= INSTRUMENT_BUFFERS;
		}
	}

	if (prev_executor_start)
		prev_executor_start(queryDesc, eflags);
	else
		standard_executor_start(queryDesc, eflags);

	if (auto_explain_enabled())
	{
		/*
		 * Set up to track total elapsed time in executor_run.  Make sure the
		 * space is allocated in the per-query context so it will go away at
		 * executor_end.
		 */
		if (queryDesc->totaltime == NULL)
		{
			struct mctx * oldcxt;

			oldcxt = mctx_switch(queryDesc->estate->es_query_cxt);
			queryDesc->totaltime = instr_alloc(1, INSTRUMENT_ALL);
			mctx_switch(oldcxt);
		}
	}
}
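
The prev_executor_start / standard_executor_start chaining above presumes the hook was installed at module load time. A sketch of the conventional install/uninstall code (executor_start_hook_type and executor_start_hook are assumptions carried over from stock PostgreSQL naming; this codebase may spell them differently):

/* assumed global hook pointer saved at load time */
static executor_start_hook_type prev_executor_start = NULL;

void
_PG_init(void)
{
	/* remember whoever was hooked before us, then install our hook */
	prev_executor_start = executor_start_hook;
	executor_start_hook = explain_executor_start;
}

void
_PG_fini(void)
{
	/* restore the previous hook on unload */
	executor_start_hook = prev_executor_start;
}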
Example 17
static void WorkerLoop(workerctx_t *wc)
{
	WORKER_DBG("start worker %d\n", wc->wid);

  lpel_task_t *t = NULL;
  requestTask(wc);		// ask for the first time

  workermsg_t msg;
  do {
  	  LpelMailboxRecv(wc->mailbox, &msg);

  	  switch(msg.type) {
  	  case WORKER_MSG_ASSIGN:
  	  	t = msg.body.task;
  	  	WORKER_DBG("worker %d: get task %d\n", wc->wid, t->uid);
  	  	assert(t->state == TASK_READY);
  	  	t->worker_context = wc;
  	  	wc->current_task = t;

#ifdef USE_LOGGING
  	  	if (wc->mon && MON_CB(worker_waitstop)) {
  	  		MON_CB(worker_waitstop)(wc->mon);
  	  	}
  	  	if (t->mon && MON_CB(task_assign)) {
  	  		MON_CB(task_assign)(t->mon, wc->mon);
  	  	}
#endif
  	  	mctx_switch(&wc->mctx, &t->mctx);
  	  	// task returns here: hand it back to the master
  	  	// (a finished task could instead be destroyed rather than returned)
  	  	assert(t->state != TASK_RUNNING);
  	  	wc->current_task = NULL;
  	  	t->worker_context = NULL;
  	  	returnTask(t);
  	  	break;
  	  case WORKER_MSG_TERMINATE:
  	  	wc->terminate = 1;
  	  	break;
  	  default:
  	  	assert(0);
  	  	break;
  	  }
  	  // reach here --> message request for task has been sent
  } while (!(wc->terminate) );
}
Example 18
File: worker.c Project: jsyk/lpel
static void WrapperLoop( workerctx_t *wc)
{
  lpel_task_t *t = NULL;

  do {
    t = wc->wraptask;
    if (t != NULL) {
      /* execute task */
      wc->current_task = t;
      wc->wraptask = NULL;
      mctx_switch(&wc->mctx, &t->mctx);

    } else {
      /* no ready tasks */
      WaitForNewMessage( wc);
    }
    /* fetch (remaining) messages */
    FetchAllMessages( wc);
  } while ( !wc->terminate);
}
Example 19
void switch_to_scheduler()
{
	th_queue_t* foundQueueTh;
	th_queue_t* queueSche;
	int tid = getpid();
	if((queueSche = find_thread_by_tid(sched_queueHead, tid)) == NULL)
	{
		fprintf(stderr, "Can't find the scheduler, kernel id = %d\n", getpid());
		abort();
	}
		
	if((foundQueueTh = find_thread_by_tid(ready_queueHead, queueSche->thread->current_tid)) == NULL)
	{
	
		fprintf(stderr, "Can't find the thread, thread id = %d\n", queueSche->thread->current_tid);
		abort();
	}
		
	//printf("%d is on duty, from %d\n", tid, foundQueueTh->thread->tid);fflush(stdout);

	mctx_switch(&(foundQueueTh->thread->mctx), &(queueSche->thread->mctx));
}
Example 20
datum_t
heap_page_items(PG_FUNC_ARGS)
{
	bytea	   *raw_page = ARG_BYTEA_P(0);
	heap_page_items_state *inter_call_data = NULL;
	struct fcall_ctx *fctx;
	int			raw_page_size;

	if (!superuser())
		ereport(ERROR,
				(errcode(E_INSUFFICIENT_PRIVILEGE),
				 (errmsg("must be superuser to use raw page functions"))));

	raw_page_size = VLA_SZ(raw_page) - VAR_HDR_SZ;

	if (SRF_IS_FIRSTCALL())
	{
		struct tuple *	tupdesc;
		struct mctx * mctx;

		if (raw_page_size < PAGE_HDR_SZ)
			ereport(ERROR,
					(errcode(E_INVALID_PARAMETER_VALUE),
				  errmsg("input page too small (%d bytes)", raw_page_size)));

		fctx = SRF_FIRSTCALL_INIT();
		mctx = mctx_switch(fctx->multi_call_memory_ctx);

		inter_call_data = palloc(sizeof(heap_page_items_state));

		/* Build a tuple descriptor for our result type */
		if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
			elog(ERROR, "return type must be a row type");

		inter_call_data->tupd = tupdesc;

		inter_call_data->offset = FIRST_ITEM_ID;
		inter_call_data->page = VLA_DATA(raw_page);

		fctx->max_calls = PAGE_MAX_ITEM_ID(inter_call_data->page);
		fctx->user_fctx = inter_call_data;

		mctx_switch(mctx);
	}

	fctx = SRF_PERCALL_SETUP();
	inter_call_data = fctx->user_fctx;

	if (fctx->call_cntr < fctx->max_calls)
	{
		page_p		page = inter_call_data->page;
		struct heap_tuple *	resultTuple;
		datum_t		result;
		struct item_id *		id;
		datum_t		values[13];
		bool		nulls[13];
		uint16		lp_offset;
		uint16		lp_flags;
		uint16		lp_len;

		memset(nulls, 0, sizeof(nulls));

		/* Extract information from the line pointer */

		id = PAGE_ITEM_ID(page, inter_call_data->offset);

		lp_offset = ITEMID_OFFSET(id);
		lp_flags = ITEMID_FLAGS(id);
		lp_len = ITEMID_LENGTH(id);

		values[0] = UINT16_TO_D(inter_call_data->offset);
		values[1] = UINT16_TO_D(lp_offset);
		values[2] = UINT16_TO_D(lp_flags);
		values[3] = UINT16_TO_D(lp_len);

		/*
		 * We do just enough validity checking to make sure we don't reference
		 * data outside the page passed to us. The page could be corrupt in
		 * many other ways, but at least we won't crash.
		 */
		if (ITEMID_HAS_STORAGE(id) &&
			lp_len >= sizeof(struct htup_header *) &&
			lp_offset == MAX_ALIGN(lp_offset) &&
			lp_offset + lp_len <= raw_page_size)
		{
			struct htup_header * tuphdr;
			int			bits_len;

			/* Extract information from the tuple header */

			tuphdr = (struct htup_header *) PAGE_GET_ITEM(page, id);

			values[4] = UINT32_TO_D(HTH_GET_XMIN(tuphdr));
			values[5] = UINT32_TO_D(HTH_GET_XMAX(tuphdr));
			values[6] = UINT32_TO_D(HTH_GET_RAW_CMDID(tuphdr)); /* shared with xvac */
			values[7] = PTR_TO_D(&tuphdr->t_ctid);
			values[8] = UINT32_TO_D(tuphdr->t_infomask2);
			values[9] = UINT32_TO_D(tuphdr->t_infomask);
			values[10] = UINT8_TO_D(tuphdr->t_hoff);

			/*
			 * We already checked that the item is completely within the
			 * raw page passed to us, with the length given in the line
			 * pointer.  Let's check that t_hoff doesn't point over lp_len,
			 * before using it to access t_bits and oid.
			 */
			if (tuphdr->t_hoff >= sizeof(struct htup_header *) &&
				tuphdr->t_hoff <= lp_len)
			{
				if (tuphdr->t_infomask & HEAP_HAS_NULL)
				{
					bits_len = tuphdr->t_hoff -
						(((char *) tuphdr->t_bits) -((char *) tuphdr));

					values[11] = CStringGetTextDatum(
								 bits_to_text(tuphdr->t_bits, bits_len * 8));
				}
				else
					nulls[11] = true;

				if (tuphdr->t_infomask & HEAP_HAS_OID)
					values[12] = HTH_GET_OID(tuphdr);
				else
					nulls[12] = true;
			}
			else
			{
				nulls[11] = true;
				nulls[12] = true;
			}
		}
		else
		{
			/*
			 * The line pointer is not used, or it's invalid. Set the rest of
			 * the fields to NULL
			 */
			int			i;

			for (i = 4; i <= 12; i++)
				nulls[i] = true;
		}

		/* Build and return the result tuple. */
		resultTuple = heap_form_tuple(inter_call_data->tupd, values, nulls);
		result = HeapTupleGetDatum(resultTuple);

		inter_call_data->offset++;

		SRF_RETURN_NEXT(fctx, result);
	}
	else
		SRF_RETURN_DONE(fctx);
}
Example 21
datum_t
bt_page_items(PG_FUNC_ARGS)
{
	text	   *relname = ARG_TEXT_P(0);
	uint32		blkno = ARG_UINT32(1);
	datum_t		result;
	char	   *values[6];
	struct heap_tuple *	tuple;
	struct fcall_ctx *fctx;
	struct mctx * mctx;
	struct user_args *uargs;

	if (!superuser())
		ereport(ERROR,
				(errcode(E_INSUFFICIENT_PRIVILEGE),
				 (errmsg("must be superuser to use pageinspect functions"))));

	if (SRF_IS_FIRSTCALL())
	{
		range_var_n   *relrv;
		struct relation *	rel;
		buf_id_t		buffer;
		struct bt_page_opaque * opaque;
		struct tuple *	tupleDesc;

		fctx = SRF_FIRSTCALL_INIT();

		relrv = nl_to_range_var(textToQualifiedNameList(relname));
		rel = relation_openrv(relrv, ACCESS_SHR_LOCK);

		if (!IS_INDEX(rel) || !IS_BTREE(rel))
			elog(ERROR, "relation \"%s\" is not a btree index",
				 REL_NAME(rel));

		/*
		 * Reject attempts to read non-local temporary relations; we would be
		 * likely to get wrong data since we have no visibility into the
		 * owning session's local buffers.
		 */
		if (REL_IS_OTHER_TMP(rel))
			ereport(ERROR,
					(errcode(E_FEATURE_NOT_SUPPORTED),
				errmsg("cannot access temporary tables of other sessions")));

		if (blkno == 0)
			elog(ERROR, "block 0 is a meta page");

		CHECK_RELATION_BLOCK_RANGE(rel, blkno);

		buffer = read_buf(rel, blkno);

		/*
		 * We copy the page into local storage to avoid holding pin on the
		 * buffer longer than we must, and possibly failing to release it at
		 * all if the calling query doesn't fetch all rows.
		 */
		mctx = mctx_switch(fctx->multi_call_memory_ctx);

		uargs = palloc(sizeof(struct user_args));

		uargs->page = palloc(BLK_SZ);
		memcpy(uargs->page, BUF_PAGE(buffer), BLK_SZ);

		release_buf(buffer);
		relation_close(rel, ACCESS_SHR_LOCK);

		uargs->offset = FIRST_ITEM_ID;

		opaque = (struct bt_page_opaque *) PAGE_SPECIAL_PTR(uargs->page);

		if (P_ISDELETED(opaque))
			elog(NOTICE, "page is deleted");

		fctx->max_calls = PAGE_MAX_ITEM_ID(uargs->page);

		/* Build a tuple descriptor for our result type */
		if (get_call_result_type(fcinfo, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE)
			elog(ERROR, "return type must be a row type");

		fctx->attinmeta = TupleDescGetAttInMetadata(tupleDesc);

		fctx->user_fctx = uargs;

		mctx_switch(mctx);
	}

	fctx = SRF_PERCALL_SETUP();
	uargs = fctx->user_fctx;

	if (fctx->call_cntr < fctx->max_calls)
	{
		struct item_id *		id;
		struct index_tuple *	itup;
		int			j;
		int			off;
		int			dlen;
		char	   *dump;
		char	   *ptr;

		id = PAGE_ITEM_ID(uargs->page, uargs->offset);

		if (!ITEMID_VALID(id))
			elog(ERROR, "invalid ItemId");

		itup = (struct index_tuple *) PAGE_GET_ITEM(uargs->page, id);

		j = 0;
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%d", uargs->offset);
		values[j] = palloc(32);
		snprintf(values[j++], 32, "(%u,%u)",
				 BLK_ID_TO_BLK_NR(&(itup->t_tid.ip_blkid)),
				 itup->t_tid.ip_posid);
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%d", (int) INDEX_TUPLE_SZ(itup));
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%c", INDEX_TUPLE_HAS_NULLS(itup) ? 't' : 'f');
		values[j] = palloc(32);
		snprintf(values[j++], 32, "%c", INDEX_TUPLE_HAS_VAR(itup) ? 't' : 'f');

		ptr = (char *) itup + INDEX_TUPLE_DATA_OFFSET(itup->t_info);
		dlen = INDEX_TUPLE_SZ(itup) - INDEX_TUPLE_DATA_OFFSET(itup->t_info);
		dump = pzalloc(dlen * 3 + 1);
		values[j] = dump;
		for (off = 0; off < dlen; off++)
		{
			if (off > 0)
				*dump++ = ' ';
			sprintf(dump, "%02x", *(ptr + off) & 0xff);
			dump += 2;
		}

		tuple = build_tuple_from_cstrings(fctx->attinmeta, values);
		result = HeapTupleGetDatum(tuple);

		uargs->offset = uargs->offset + 1;

		SRF_RETURN_NEXT(fctx, result);
	}
	else
	{
		pfree(uargs->page);
		pfree(uargs);
		SRF_RETURN_DONE(fctx);
	}
}
Example 22
void* scheduler(void* p)
{
//TODO: also set up a signal mask here to block incoming interrupts
	th_queue_t* queueSche;
	th_queue_t* nextQueueTh;
//	queueSche = th_queue_head(ready_queueHead);	
//	srand(time(NULL));		//seed rand()
	//disable_timer();
	pthread_mutex_lock(&output_lock);
	printf("I'm in %d, %d\n", getpid(), sched_queueHead==NULL);fflush(stdout);
	pthread_mutex_unlock(&output_lock);
	for(;;)	//infinite loop
	{
		if((queueSche = find_thread_by_tid(sched_queueHead, getpid())) == NULL)
			abort();
		pthread_mutex_lock(&kill_lock);
		if(wait_for_kill>0)
		{
			if(getpid() != main_kernel_id)
			{
				th_queue_delete(sched_queueHead, queueSche->thread);
				wait_for_kill--;
				
				pthread_mutex_unlock(&kill_lock);
				pthread_mutex_lock(&output_lock);
				printf("kill %d, wait = %d\n", getpid(),wait_for_kill );fflush(stdout);
				pthread_mutex_unlock(&output_lock);
				exit(0);
				abort();	
			}
		}
		pthread_mutex_unlock(&kill_lock);
		
		pthread_mutex_lock(&findthread_lock);
		for(;;)
		{
			if((nextQueueTh = find_next_thread(ready_queueHead, queueSche->thread->current_tid)) == NULL)
				abort();
			queueSche->thread->current_tid = nextQueueTh->thread->tid;

			if(nextQueueTh->thread->mctx.status == TH_WAITING)
			{
				break;
			}
		}
		pthread_mutex_lock(&output_lock);
		printf("I`m %d find out %d, %dn", getpid(), nextQueueTh->thread->tid, nextQueueTh->thread->mctx.status);fflush(stdout);
		pthread_mutex_unlock(&output_lock);
	
		nextQueueTh->thread->mctx.status = TH_RUNNING;
		queueSche->thread->current_tid = nextQueueTh->thread->tid;
		pthread_mutex_unlock(&findthread_lock);
		
		enable_timer();
		mctx_switch(&(queueSche->thread->mctx), &(nextQueueTh->thread->mctx));
		
		if(nextQueueTh->thread->mctx.status != TH_EXITED &&
		   nextQueueTh->thread->mctx.status != TH_KILLED)
			nextQueueTh->thread->mctx.status = TH_WAITING;
		
		pthread_mutex_lock(&output_lock);
		//printf("I`m %d comeback from %d\n", getpid(), nextQueueTh->thread->tid);fflush(stdout);
		
		pthread_mutex_unlock(&output_lock);
		
		/*
		mctx_list[currentTid].status=TH_RUNNING;
		mctx_switch(&mctx_list[0],&mctx_list[currentTid]);
		*/
	}
	return NULL;
}
Example 23
datum_t
connectby_text(PG_FUNC_ARGS)
{
	char	   *relname = text_to_cstring(ARG_TEXT_PP(0));
	char	   *key_fld = text_to_cstring(ARG_TEXT_PP(1));
	char	   *parent_key_fld = text_to_cstring(ARG_TEXT_PP(2));
	char	   *start_with = text_to_cstring(ARG_TEXT_PP(3));
	int			max_depth = ARG_INT32(4);
	char	   *branch_delim = NULL;
	bool		show_branch = false;
	bool		show_serial = false;
	return_set_info_n *rsinfo = (return_set_info_n *) fcinfo->resultinfo;
	struct tuple *	tupdesc;
	AttInMetadata *attinmeta;
	struct mctx * per_query_ctx;
	struct mctx * oldcontext;

	/* check to see if caller supports us returning a tuplestore */
	if (rsinfo == NULL || !IS_A(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(E_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (!(rsinfo->allowedModes & SFRM_Materialize) ||
		rsinfo->expectedDesc == NULL)
		ereport(ERROR,
				(errcode(E_FEATURE_NOT_SUPPORTED),
				 errmsg("materialize mode required, but it is not " \
						"allowed in this context")));

	if (fcinfo->nargs == 6)
	{
		branch_delim = text_to_cstring(ARG_TEXT_PP(5));
		show_branch = true;
	}
	else
		/* default is no show, tilde for the delimiter */
		branch_delim = pstrdup("~");

	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
	oldcontext = mctx_switch(per_query_ctx);

	/* get the requested return tuple description */
	tupdesc = tupdesc_copy(rsinfo->expectedDesc);

	/* does it meet our needs */
	validateConnectbyTupleDesc(tupdesc, show_branch, show_serial);

	/* OK, use it then */
	attinmeta = TupleDescGetAttInMetadata(tupdesc);

	/* OK, go to work */
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setResult = connectby(relname,
								  key_fld,
								  parent_key_fld,
								  NULL,
								  branch_delim,
								  start_with,
								  max_depth,
								  show_branch,
								  show_serial,
								  per_query_ctx,
							  rsinfo->allowedModes & SFRM_Materialize_Random,
								  attinmeta);
	rsinfo->setDesc = tupdesc;

	mctx_switch(oldcontext);

	/*
	 * SFRM_Materialize mode expects us to return a NULL Datum. The actual
	 * tuples are in our tuplestore and passed back through rsinfo->setResult.
	 * rsinfo->setDesc is set to the tuple description that we actually used
	 * to build our tuples with, so the caller can verify we did what it was
	 * expecting.
	 */
	return (datum_t) 0;
}
Example 24
/*
 * load up the categories hash table
 */
static struct htab *
load_categories_hash(char *cats_sql, struct mctx * per_query_ctx)
{
	struct htab	   *crosstab_hash;
	struct hash_ctl		ctl;
	int			ret;
	int			proc;
	struct mctx * SPIcontext;

	/* initialize the category hash table */
	pg_memset(&ctl, 0, sizeof(ctl));
	ctl.keysize = MAX_CATNAME_LEN;
	ctl.entrysize = sizeof(crosstab_HashEnt);
	ctl.hcxt = per_query_ctx;

	/*
	 * use INIT_CATS, defined above as a guess of how many hash table entries
	 * to create, initially
	 */
	crosstab_hash = hcreate("crosstab hash",
								INIT_CATS,
								&ctl,
								H_ELEM | HASH_CONTEXT);

	/* Connect to SPI manager */
	if ((ret = SPI_connect()) < 0)
		/* internal error */
		elog(ERROR, "load_categories_hash: SPI_connect returned %d", ret);

	/* Retrieve the category name rows */
	ret = SPI_execute(cats_sql, true, 0);
	proc = SPI_processed;

	/* Check for qualifying tuples */
	if ((ret == SPI_OK_SELECT) && (proc > 0))
	{
		struct SPI_tuple_table *spi_tuptable = SPI_tuptable;
		struct tuple *	spi_tupdesc = spi_tuptable->tupdesc;
		int			i;

		/*
		 * The provided categories SQL query must always return one column:
		 * category - the label or identifier for each column
		 */
		if (spi_tupdesc->natts != 1)
			ereport(ERROR,
					(errcode(E_SYNTAX_ERROR),
					 errmsg("provided \"categories\" SQL must " \
							"return 1 column of at least one row")));

		for (i = 0; i < proc; i++)
		{
			crosstab_cat_desc *catdesc;
			char	   *catname;
			struct heap_tuple *	spi_tuple;

			/* get the next sql result tuple */
			spi_tuple = spi_tuptable->vals[i];

			/* get the category from the current sql result tuple */
			catname = SPI_getvalue(spi_tuple, spi_tupdesc, 1);

			SPIcontext = mctx_switch(per_query_ctx);

			catdesc = (crosstab_cat_desc *) palloc(sizeof(crosstab_cat_desc));
			catdesc->catname = catname;
			catdesc->attidx = i;

			/* Add the proc description block to the hashtable */
			crosstab_HashTableInsert(crosstab_hash, catdesc);

			mctx_switch(SPIcontext);
		}
	}

	if (SPI_finish() != SPI_OK_FINISH)
		/* internal error */
		elog(ERROR, "load_categories_hash: SPI_finish() failed");

	return crosstab_hash;
}
Example 25
datum_t
crosstab_hash(PG_FUNC_ARGS)
{
	char	   *sql = text_to_cstring(ARG_TEXT_PP(0));
	char	   *cats_sql = text_to_cstring(ARG_TEXT_PP(1));
	return_set_info_n *rsinfo = (return_set_info_n *) fcinfo->resultinfo;
	struct tuple *	tupdesc;
	struct mctx * per_query_ctx;
	struct mctx * oldcontext;
	struct htab	   *crosstab_hash;

	/* check to see if caller supports us returning a tuplestore */
	if (rsinfo == NULL || !IS_A(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(E_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (!(rsinfo->allowedModes & SFRM_Materialize) ||
		rsinfo->expectedDesc == NULL)
		ereport(ERROR,
				(errcode(E_FEATURE_NOT_SUPPORTED),
				 errmsg("materialize mode required, but it is not " \
						"allowed in this context")));

	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
	oldcontext = mctx_switch(per_query_ctx);

	/* get the requested return tuple description */
	tupdesc = tupdesc_copy(rsinfo->expectedDesc);

	/*
	 * Check to make sure we have a reasonable tuple descriptor
	 *
	 * Note we will attempt to coerce the values into whatever the return
	 * attribute type is and depend on the "in" function to complain if
	 * needed.
	 */
	if (tupdesc->natts < 2)
		ereport(ERROR,
				(errcode(E_SYNTAX_ERROR),
				 errmsg("query-specified return tuple and " \
						"crosstab function are not compatible")));

	/* load up the categories hash table */
	crosstab_hash = load_categories_hash(cats_sql, per_query_ctx);

	/* let the caller know we're sending back a tuplestore */
	rsinfo->returnMode = SFRM_Materialize;

	/* now go build it */
	rsinfo->setResult = get_crosstab_tuplestore(sql,
												crosstab_hash,
												tupdesc,
												per_query_ctx,
							 rsinfo->allowedModes & SFRM_Materialize_Random);

	/*
	 * SFRM_Materialize mode expects us to return a NULL Datum. The actual
	 * tuples are in our tuplestore and passed back through rsinfo->setResult.
	 * rsinfo->setDesc is set to the tuple description that we actually used
	 * to build our tuples with, so the caller can verify we did what it was
	 * expecting.
	 */
	rsinfo->setDesc = tupdesc;
	mctx_switch(oldcontext);

	return (datum_t) 0;
}
Example 26
datum_t
crosstab(PG_FUNC_ARGS)
{
	char	   *sql = text_to_cstring(ARG_TEXT_PP(0));
	return_set_info_n *rsinfo = (return_set_info_n *) fcinfo->resultinfo;
	struct tupstore *tupstore;
	struct tuple *	tupdesc;
	int			call_cntr;
	int			max_calls;
	AttInMetadata *attinmeta;
	struct SPI_tuple_table *spi_tuptable;
	struct tuple *	spi_tupdesc;
	bool		firstpass;
	char	   *lastrowid;
	int			i;
	int			num_categories;
	struct mctx * per_query_ctx;
	struct mctx * oldcontext;
	int			ret;
	int			proc;

	/* check to see if caller supports us returning a tuplestore */
	if (rsinfo == NULL || !IS_A(rsinfo, ReturnSetInfo))
		ereport(ERROR,
				(errcode(E_FEATURE_NOT_SUPPORTED),
				 errmsg("set-valued function called in context that cannot accept a set")));
	if (!(rsinfo->allowedModes & SFRM_Materialize))
		ereport(ERROR,
				(errcode(E_FEATURE_NOT_SUPPORTED),
				 errmsg("materialize mode required, but it is not " \
						"allowed in this context")));

	per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;

	/* Connect to SPI manager */
	if ((ret = SPI_connect()) < 0)
		/* internal error */
		elog(ERROR, "crosstab: SPI_connect returned %d", ret);

	/* Retrieve the desired rows */
	ret = SPI_execute(sql, true, 0);
	proc = SPI_processed;

	/* If no qualifying tuples, fall out early */
	if (ret != SPI_OK_SELECT || proc <= 0)
	{
		SPI_finish();
		rsinfo->isDone = ExprEndResult;
		RET_NULL();
	}

	spi_tuptable = SPI_tuptable;
	spi_tupdesc = spi_tuptable->tupdesc;

	/*----------
	 * The provided SQL query must always return three columns.
	 *
	 * 1. rowname
	 *	the label or identifier for each row in the final result
	 * 2. category
	 *	the label or identifier for each column in the final result
	 * 3. values
	 *	the value for each column in the final result
	 *----------
	 */
	if (spi_tupdesc->natts != 3)
		ereport(ERROR,
				(errcode(E_INVALID_PARAMETER_VALUE),
				 errmsg("invalid source data SQL statement"),
				 errdetail("The provided SQL must return 3 "
						   "columns: rowid, category, and values.")));

	/* get a tuple descriptor for our result type */
	switch (get_call_result_type(fcinfo, NULL, &tupdesc))
	{
		case TYPEFUNC_COMPOSITE:
			/* success */
			break;
		case TYPEFUNC_RECORD:
			/* failed to determine actual type of RECORD */
			ereport(ERROR,
					(errcode(E_FEATURE_NOT_SUPPORTED),
					 errmsg("function returning record called in context "
							"that cannot accept type record")));
			break;
		default:
			/* result type isn't composite */
			elog(ERROR, "return type must be a row type");
			break;
	}

	/*
	 * Check that return tupdesc is compatible with the data we got from SPI,
	 * at least based on number and type of attributes
	 */
	if (!compatCrosstabTupleDescs(tupdesc, spi_tupdesc))
		ereport(ERROR,
				(errcode(E_SYNTAX_ERROR),
				 errmsg("return and sql tuple descriptions are " \
						"incompatible")));

	/*
	 * switch to long-lived memory context
	 */
	oldcontext = mctx_switch(per_query_ctx);

	/* make sure we have a persistent copy of the result tupdesc */
	tupdesc = tupdesc_copy(tupdesc);

	/* initialize our tuplestore in long-lived context */
	tupstore =
		tts_begin_heap(rsinfo->allowedModes & SFRM_Materialize_Random,
							  false, work_mem);

	mctx_switch(oldcontext);

	/*
	 * Generate attribute metadata needed later to produce tuples from raw C
	 * strings
	 */
	attinmeta = TupleDescGetAttInMetadata(tupdesc);

	/* total number of tuples to be examined */
	max_calls = proc;

	/* the return tuple always must have 1 rowid + num_categories columns */
	num_categories = tupdesc->natts - 1;

	firstpass = true;
	lastrowid = NULL;

	for (call_cntr = 0; call_cntr < max_calls; call_cntr++)
	{
		bool		skip_tuple = false;
		char	  **values;

		/* allocate and zero space */
		values = (char **) pzalloc((1 + num_categories) * sizeof(char *));

		/*
		 * now loop through the sql results and assign each value in sequence
		 * to the next category
		 */
		for (i = 0; i < num_categories; i++)
		{
			struct heap_tuple *	spi_tuple;
			char	   *rowid;

			/* see if we've gone too far already */
			if (call_cntr >= max_calls)
				break;

			/* get the next sql result tuple */
			spi_tuple = spi_tuptable->vals[call_cntr];

			/* get the rowid from the current sql result tuple */
			rowid = SPI_getvalue(spi_tuple, spi_tupdesc, 1);

			/*
			 * If this is the first pass through the values for this rowid,
			 * set the first column to rowid
			 */
			if (i == 0)
			{
				xpstrdup(values[0], rowid);

				/*
				 * Check to see if the rowid is the same as that of the last
				 * tuple sent -- if so, skip this tuple entirely
				 */
				if (!firstpass && xstreq(lastrowid, rowid))
				{
					xpfree(rowid);
					skip_tuple = true;
					break;
				}
			}

			/*
			 * If rowid hasn't changed on us, continue building the output
			 * tuple.
			 */
			if (xstreq(rowid, values[0]))
			{
				/*
				 * Get the next category item value, which is always attribute
				 * number three.
				 *
				 * Be careful to assign the value to the array index based on
				 * which category we are presently processing.
				 */
				values[1 + i] = SPI_getvalue(spi_tuple, spi_tupdesc, 3);

				/*
				 * increment the counter since we consume a row for each
				 * category, but not for last pass because the outer loop will
				 * do that for us
				 */
				if (i < (num_categories - 1))
					call_cntr++;
				xpfree(rowid);
			}
			else
			{
				/*
				 * We'll fill in NULLs for the missing values, but we need to
				 * decrement the counter since this sql result row doesn't
				 * belong to the current output tuple.
				 */
				call_cntr--;
				xpfree(rowid);
				break;
			}
		}

		if (!skip_tuple)
		{
			struct heap_tuple *	tuple;

			/* build the tuple and store it */
			tuple = build_tuple_from_cstrings(attinmeta, values);
			tts_put_tuple(tupstore, tuple);
			heap_free_tuple(tuple);
		}

		/* Remember current rowid */
		xpfree(lastrowid);
		xpstrdup(lastrowid, values[0]);
		firstpass = false;

		/* Clean up */
		for (i = 0; i < num_categories + 1; i++)
			if (values[i] != NULL)
				pfree(values[i]);
		pfree(values);
	}

	/* let the caller know we're sending back a tuplestore */
	rsinfo->returnMode = SFRM_Materialize;
	rsinfo->setResult = tupstore;
	rsinfo->setDesc = tupdesc;

	/* release SPI related resources (and return to caller's context) */
	SPI_finish();

	return (datum_t) 0;
}
Example 27
datum_t
normal_rand(PG_FUNC_ARGS)
{
	struct fcall_ctx *funcctx;
	int			call_cntr;
	int			max_calls;
	normal_rand_fctx *fctx;
	float8		mean;
	float8		stddev;
	float8		carry_val;
	bool		use_carry;
	struct mctx * oldcontext;

	/* stuff done only on the first call of the function */
	if (SRF_IS_FIRSTCALL())
	{
		/* create a function context for cross-call persistence */
		funcctx = SRF_FIRSTCALL_INIT();

		/*
		 * switch to memory context appropriate for multiple function calls
		 */
		oldcontext = mctx_switch(funcctx->multi_call_memory_ctx);

		/* total number of tuples to be returned */
		funcctx->max_calls = ARG_UINT32(0);

		/* allocate memory for user context */
		fctx = (normal_rand_fctx *) palloc(sizeof(normal_rand_fctx));

		/*
		 * Use fctx to keep track of upper and lower bounds from call to call.
		 * It will also be used to carry over the spare value we get from the
		 * Box-Muller algorithm so that we only actually calculate a new value
		 * every other call.
		 */
		fctx->mean = ARG_FLOAT8(1);
		fctx->stddev = ARG_FLOAT8(2);
		fctx->carry_val = 0;
		fctx->use_carry = false;

		funcctx->user_fctx = fctx;

		mctx_switch(oldcontext);
	}

	/* stuff done on every call of the function */
	funcctx = SRF_PERCALL_SETUP();

	call_cntr = funcctx->call_cntr;
	max_calls = funcctx->max_calls;
	fctx = funcctx->user_fctx;
	mean = fctx->mean;
	stddev = fctx->stddev;
	carry_val = fctx->carry_val;
	use_carry = fctx->use_carry;

	if (call_cntr < max_calls)	/* do when there is more left to send */
	{
		float8		result;

		if (use_carry)
		{
			/*
			 * reset use_carry and use second value obtained on last pass
			 */
			fctx->use_carry = false;
			result = carry_val;
		}
		else
		{
			float8		normval_1;
			float8		normval_2;

			/* Get the next two normal values */
			get_normal_pair(&normval_1, &normval_2);

			/* use the first */
			result = mean + (stddev * normval_1);

			/* and save the second */
			fctx->carry_val = mean + (stddev * normval_2);
			fctx->use_carry = true;
		}

		/* send the result */
		SRF_RETURN_NEXT(funcctx, FLOAT8_TO_D(result));
	}
	else
		/* do when there is no more left */
		SRF_RETURN_DONE(funcctx);
}
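
The carry_val bookkeeping above works because the Box-Muller transform yields two independent standard-normal deviates per pair of uniforms; normal_rand returns one immediately and saves the other for the next call. get_normal_pair itself is not part of this excerpt, so the following is only an illustrative sketch of what such a function computes (the rand()-based uniforms are an assumption):

#include <math.h>
#include <stdlib.h>

/*
 * Illustrative Box-Muller pair generator: fill *x1 and *x2 with two
 * independent draws from the standard normal distribution.
 */
static void
get_normal_pair_sketch(double *x1, double *x2)
{
	double		u1, u2, r, theta;

	u1 = (rand() + 1.0) / ((double) RAND_MAX + 2.0);	/* in (0,1) */
	u2 = rand() / ((double) RAND_MAX + 1.0);			/* in [0,1) */

	r = sqrt(-2.0 * log(u1));
	theta = 2.0 * M_PI * u2;

	*x1 = r * cos(theta);		/* used immediately as 'result' */
	*x2 = r * sin(theta);		/* stashed as fctx->carry_val */
}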
Example 28
datum_t gistrescan(PG_FUNC_ARGS)
{
	struct index_scan *scan;
	struct scankey *key;
	struct scankey *orderbys;

	/* nkeys and norderbys arguments are ignored */
	struct gist_scan_opaque *so;
	int i;
	struct mctx *oldCxt;
	size_t size;

	scan = (struct index_scan *)ARG_POINTER(0);
	key = (struct scankey *)ARG_POINTER(1);
	orderbys = (struct scankey *)ARG_POINTER(3);
	so = (struct gist_scan_opaque *)scan->opaque;

	/* rescan an existing indexscan --- reset state */
	mctx_reset(so->queueCxt);
	so->curTreeItem = NULL;

	/* create new, empty RBTree for search queue */
	oldCxt = mctx_switch(so->queueCxt);
	size = GIST_ITEM_HDR_SZ + sizeof(double) * scan->numberOfOrderBys;
	so->queue = rb_create(
		size,
		GISTSearchTreeItemComparator,
		GISTSearchTreeItemCombiner,
		GISTSearchTreeItemAllocator,
		GISTSearchTreeItemDeleter,
		scan);
	mctx_switch(oldCxt);

	so->firstCall = true;

	/* Update scan key, if a new one is given */
	if (key && scan->numberOfKeys > 0) {
		size = scan->numberOfKeys * sizeof(struct scankey);
		memmove(scan->keyData, key, size);

		/*
		 * Modify the scan key so that the Consistent method is called for all
		 * comparisons. The original operator is passed to the Consistent
		 * function in the form of its strategy number, which is available
		 * from the sk_strategy field, and its subtype from the sk_subtype
		 * field.
		 *
		 * Next, if any of the keys is NULL and that key is not marked with
		 * SK_SEARCHNULL/SK_SEARCHNOTNULL then nothing can be found (ie, we
		 * assume all indexable operators are strict).
		 */
		so->qual_ok = true;
		for (i = 0; i < scan->numberOfKeys; i++) {
			struct scankey *skey;

			skey = scan->keyData + i;
			skey->sk_func = so->giststate->consistentFn[skey->sk_attno - 1];
			if (skey->sk_flags & SK_ISNULL) {
				if (!(skey->sk_flags & (SK_SEARCHNULL | SK_SEARCHNOTNULL)))
					so->qual_ok = false;
			}
		}
	}

	/* Update order-by key, if a new one is given */
	if (orderbys 
		&& scan->numberOfOrderBys > 0) {
		size = scan->numberOfOrderBys * sizeof(struct scankey);
		memmove(scan->orderByData, orderbys, size);

		/*
		 * Modify the order-by key so that the Distance method is called for
		 * all comparisons. The original operator is passed to the Distance
		 * function in the form of its strategy number, which is available
		 * from the sk_strategy field, and its subtype from the sk_subtype
		 * field.
		 */
		for (i = 0; i < scan->numberOfOrderBys; i++) {
			struct scankey *skey;

			skey = scan->orderByData + i;
			skey->sk_func = so->giststate->distanceFn[skey->sk_attno - 1];

			/* Check we actually have a distance function ... */
			if (!OID_VALID(skey->sk_func.fn_oid))
				elog(ERROR,
					"missing support function %d for attribute %d"
					" of index \"%s\"",
					GIST_DISTANCE_PROC,
					skey->sk_attno,
					REL_NAME(scan->indexRelation));
		}
	}

	RET_VOID();
}
Example 29
datum_t
tsa_rewrite_accum(PG_FUNC_ARGS)
{
	TSQuery		acc;
	array_s  *qa;
	TSQuery		q;
	QTNode	   *qex = NULL,
			   *subs = NULL,
			   *acctree = NULL;
	bool		isfind = false;
	datum_t	   *elemsp;
	int			nelemsp;
	struct mctx * aggcontext;
	struct mctx * oldcontext;

	if (!AggCheckCallContext(fcinfo, &aggcontext))
		elog(ERROR, "tsa_rewrite_accum called in non-aggregate context");

	if (PG_ARG_ISNULL(0) || ARG_POINTER(0) == NULL)
	{
		acc = (TSQuery) mctx_alloc(aggcontext, HDRSIZETQ);
		VLA_SET_SZ_STND(acc, HDRSIZETQ);
		acc->size = 0;
	}
	else
		acc = ARG_TSQUERY(0);

	if (PG_ARG_ISNULL(1) || ARG_POINTER(1) == NULL)
		RET_TSQUERY(acc);
	else
		qa = ARG_ARRAY_P_COPY(1);

	if (ARR_NDIM(qa) != 1)
		elog(ERROR, "array must be one-dimensional, not %d dimensions",
			 ARR_NDIM(qa));
	if (ArrayGetNItems(ARR_NDIM(qa), ARR_DIMS(qa)) != 3)
		elog(ERROR, "array must have three elements");
	if (ARR_ELEMTYPE(qa) != TSQUERYOID)
		elog(ERROR, "array must contain tsquery elements");

	deconstruct_array(qa, TSQUERYOID, -1, false, 'i', &elemsp, NULL, &nelemsp);

	q = DatumGetTSQuery(elemsp[0]);
	if (q->size == 0)
	{
		pfree(elemsp);
		RET_POINTER(acc);
	}

	if (!acc->size)
	{
		if (VLA_SZ(acc) > HDRSIZETQ)
		{
			pfree(elemsp);
			RET_POINTER(acc);
		}
		else
			acctree = QT2QTN(GETQUERY(q), GETOPERAND(q));
	}
	else
		acctree = QT2QTN(GETQUERY(acc), GETOPERAND(acc));

	QTNTernary(acctree);
	QTNSort(acctree);

	q = DatumGetTSQuery(elemsp[1]);
	if (q->size == 0)
	{
		pfree(elemsp);
		RET_POINTER(acc);
	}
	qex = QT2QTN(GETQUERY(q), GETOPERAND(q));
	QTNTernary(qex);
	QTNSort(qex);

	q = DatumGetTSQuery(elemsp[2]);
	if (q->size)
		subs = QT2QTN(GETQUERY(q), GETOPERAND(q));

	acctree = findsubquery(acctree, qex, subs, &isfind);

	if (isfind || !acc->size)
	{
		/* pfree( acc ); do not pfree(p), because nodeAgg.c will */
		if (acctree)
		{
			QTNBinary(acctree);
			oldcontext = mctx_switch(aggcontext);
			acc = QTN2QT(acctree);
			mctx_switch(oldcontext);
		}
		else
		{
			acc = (TSQuery) mctx_alloc(aggcontext, HDRSIZETQ);
			VLA_SET_SZ_STND(acc, HDRSIZETQ);
			acc->size = 0;
		}
	}

	pfree(elemsp);
	QTNFree(qex);
	QTNFree(subs);
	QTNFree(acctree);

	RET_TSQUERY(acc);
}
Example 30
/* ----------------
 *		create_exec_state
 *
 *		Create and initialize an exec_state_n node, which is the root of
 *		working storage for an entire Executor invocation.
 *
 * Principally, this creates the per-query memory context that will be
 * used to hold all working data that lives till the end of the query.
 * Note that the per-query context will become a child of the caller's
 * current_mctx.
 * ----------------
 */
exec_state_n *
create_exec_state(void)
{
	exec_state_n* estate;
	struct mctx* qcontext;
	struct mctx* oldcontext;

	/*
	 * Create the per-query context for this Executor run.
	 */
	qcontext = aset_create_normal(current_mctx, "ExecutorState");

	/*
	 * Make the exec_state_n node within the per-query context.  This way, we don't
	 * need a separate pfree() operation for it at shutdown.
	 */
	oldcontext = mctx_switch(qcontext);
	estate = MK_N(EState,exec_state_n);

	/*
	 * Initialize all fields of the Executor State structure
	 */
	estate->es_direction = FORWARD_SCANDIR;
	estate->es_snapshot = snap_now;
	estate->es_crosscheck_snapshot = INVALID_SNAPSHOT;	/* no crosscheck */
	estate->es_range_table = NIL;
	estate->es_plannedstmt = NULL;
	estate->es_junkFilter = NULL;
	estate->es_output_cid = (cmd_id_t) 0;
	estate->es_result_relations = NULL;
	estate->es_num_result_relations = 0;
	estate->es_result_relation_info = NULL;
	estate->es_trig_target_relations = NIL;
	estate->es_trig_tuple_slot = NULL;
	estate->es_trig_oldtup_slot = NULL;
	estate->es_trig_newtup_slot = NULL;
	estate->es_param_list_info = NULL;
	estate->es_param_exec_vals = NULL;
	estate->es_query_cxt = qcontext;
	estate->es_tupleTable = NIL;
	estate->es_rowMarks = NIL;
	estate->es_processed = 0;
	estate->es_lastoid = INVALID_OID;
	estate->es_top_eflags = 0;
	estate->es_instrument = 0;
	estate->es_select_into = false;
	estate->es_into_oids = false;
	estate->es_finished = false;
	estate->es_exprcontexts = NIL;
	estate->es_subplanstates = NIL;
	estate->es_auxmodifytables = NIL;
	estate->es_per_tuple_exprcontext = NULL;
	estate->es_epqTuple = NULL;
	estate->es_epqTupleSet = NULL;
	estate->es_epqScanDone = NULL;

	/*
	 * Return the executor state structure
	 */
	mctx_switch(oldcontext);

	return estate;
}