Example #1
0
/*
 * The administrator should be initialized to enable interpretation of
 * the command line arguments, before it starts servicing statements
 */
int
MCinitClientThread(Client c)
{
	Thread t;
	char cname[11 + 1];

	/* Render the client's user oid as the thread name.  Pass the full
	 * buffer size: the old code passed 11, which needlessly truncated
	 * the oid to 10 digits and required a manual terminator (snprintf
	 * always NUL-terminates when size > 0). */
	snprintf(cname, sizeof(cname), OIDFMT, c->user);
	t = THRnew(cname);
	if (t == 0) {
		/* report the failure on the client's output stream and give up */
		showException(c->fdout, MAL, "initClientThread",
				"Failed to initialize client");
		MPresetProfiler(c->fdout);
		return -1;
	}
	/*
	 * The GDK thread administration should be set to reflect use of
	 * the proper IO descriptors.
	 */
	t->data[1] = c->fdin;
	t->data[0] = c->fdout;
	c->mythread = t;
	c->errbuf = GDKerrbuf;
	if (c->errbuf == NULL) {
		/* allocate a per-thread error buffer for this client;
		 * NOTE(review): GDKzalloc may fail, leaving c->errbuf NULL —
		 * confirm downstream code tolerates a NULL errbuf */
		GDKsetbuf(GDKzalloc(GDKMAXERRLEN));
		c->errbuf = GDKerrbuf;
	} else
		c->errbuf[0] = 0;
	return 0;
}
Example #2
0
/*
 * Run the WAL/store manager loop inside a thread registered with the
 * GDK thread administration.
 */
void
mvc_logmanager(void)
{
	/* THRnew can fail and return NULL (see the check in
	 * MCinitClientThread); still run the manager loop, but do not hand
	 * a NULL handle to THRdel afterwards */
	Thread thr = THRnew("logmanager");

	store_manager();
	if (thr)
		THRdel(thr);
}
Example #3
0
/*
 * Run the min/max property manager loop inside a thread registered
 * with the GDK thread administration.
 */
void
mvc_minmaxmanager(void)
{
	/* THRnew can fail and return NULL (see the check in
	 * MCinitClientThread); still run the manager loop, but do not hand
	 * a NULL handle to THRdel afterwards */
	Thread thr = THRnew("minmaxmanager");

	minmax_manager();
	if (thr)
		THRdel(thr);
}
Example #4
0
/*
 * Body of a dataflow worker thread.  Repeatedly dequeues flow events
 * from the global 'todo' queue, executes the corresponding MAL
 * instruction, and, on success, tries to claim an eligible successor
 * instruction so the thread can continue without going back through
 * the queue.  The loop exits when the queue returns no event and no
 * client is attached, or when the global 'exiting' flag is raised.
 */
static void
DFLOWworker(void *T)
{
	struct worker *t = (struct worker *) T;
	DataFlow flow;
	FlowEvent fe = 0, fnxt = 0;	/* fnxt: successor event claimed by this thread */
	int id = (int) (t - workers);	/* worker slot index, used for tracing only */
	Thread thr;
	str error = 0;
	int i,last;
	Client cntxt;
	InstrPtr p;

	/* NOTE(review): THRnew's result is not checked here, yet thr is
	 * dereferenced below (thr->tid) — confirm THRnew cannot fail in
	 * this context (MCinitClientThread does check it) */
	thr = THRnew("DFLOWworker");

	/* NOTE(review): GDKmalloc's result is not checked before the
	 * GDKerrbuf[0] = 0 write below — confirm allocation cannot fail */
	GDKsetbuf(GDKmalloc(GDKMAXERRLEN)); /* where to leave errors */
	GDKerrbuf[0] = 0;
	/* t->cntxt is shared with the scheduler; read it under dataflowLock */
	MT_lock_set(&dataflowLock, "DFLOWworker");
	cntxt = t->cntxt;
	MT_lock_unset(&dataflowLock, "DFLOWworker");
	if (cntxt) {
		/* wait until we are allowed to start working */
		MT_sema_down(&t->s, "DFLOWworker");
	}
	while (1) {
		if (fnxt == 0) {
			/* no successor claimed last round: re-read the client
			 * binding and fetch the next event from the shared queue */
			MT_lock_set(&dataflowLock, "DFLOWworker");
			cntxt = t->cntxt;
			MT_lock_unset(&dataflowLock, "DFLOWworker");
			fe = q_dequeue(todo, cntxt);
			if (fe == NULL) {
				if (cntxt) {
					/* we're not done yet with work for the current
					 * client (as far as we know), so give up the CPU
					 * and let the scheduler enter some more work, but
					 * first compensate for the down we did in
					 * dequeue */
					MT_sema_up(&todo->s, "DFLOWworker");
					MT_sleep_ms(1);
					continue;
				}
				/* no more work to be done: exit */
				break;
			}
		} else
			fe = fnxt;
		/* global shutdown request: stop even if work remains */
		if (ATOMIC_GET(exiting, exitingLock, "DFLOWworker")) {
			break;
		}
		fnxt = 0;
		assert(fe);
		flow = fe->flow;
		assert(flow);

		/* whenever we have a (concurrent) error, skip it */
		if (flow->error) {
			q_enqueue(flow->done, fe);
			continue;
		}

		/* skip all instructions when we have encontered an error */
		/* NOTE(review): this condition is always true here — the
		 * flow->error case was already handled by the continue above */
		if (flow->error == 0) {
#ifdef USE_MAL_ADMISSION
			/* memory admission control: if the claim is denied,
			 * requeue the event and try again later */
			if (MALadmission(fe->argclaim, fe->hotclaim)) {
				fe->hotclaim = 0;   /* don't assume priority anymore */
				if (todo->last == 0)
					MT_sleep_ms(DELAYUNIT);
				q_requeue(todo, fe);
				continue;
			}
#endif
			/* execute the single instruction at fe->pc */
			error = runMALsequence(flow->cntxt, flow->mb, fe->pc, fe->pc + 1, flow->stk, 0, 0);
			PARDEBUG fprintf(stderr, "#executed pc= %d wrk= %d claim= " LLFMT "," LLFMT " %s\n",
							 fe->pc, id, fe->argclaim, fe->hotclaim, error ? error : "");
#ifdef USE_MAL_ADMISSION
			/* release the memory claim */
			MALadmission(-fe->argclaim, -fe->hotclaim);
#endif
			/* update the numa information. keep the thread-id producing the value */
			p= getInstrPtr(flow->mb,fe->pc);
			for( i = 0; i < p->argc; i++)
				flow->mb->var[getArg(p,i)]->worker = thr->tid;

			MT_lock_set(&flow->flowlock, "DFLOWworker");
			fe->state = DFLOWwrapup;
			MT_lock_unset(&flow->flowlock, "DFLOWworker");
			if (error) {
				MT_lock_set(&flow->flowlock, "DFLOWworker");
				/* only collect one error (from one thread, needed for stable testing) */
				if (!flow->error)
					flow->error = error;
				MT_lock_unset(&flow->flowlock, "DFLOWworker");
				/* after an error we skip the rest of the block */
				q_enqueue(flow->done, fe);
				continue;
			}
		}

		/* see if you can find an eligible instruction that uses the
		 * result just produced. Then we can continue with it right away.
		 * We are just looking forward for the last block, which means we
		 * are safe from concurrent actions. No other thread can steal it,
		 * because we hold the logical lock.
		 * All eligible instructions are queued
		 */
#ifdef USE_MAL_ADMISSION
		{
		/* compute the memory claim of the results just produced; it is
		 * inherited by the successor claimed below */
		InstrPtr p = getInstrPtr(flow->mb, fe->pc);
		assert(p);
		fe->hotclaim = 0;
		for (i = 0; i < p->retc; i++)
			fe->hotclaim += getMemoryClaim(flow->mb, flow->stk, p, i, FALSE);
		}
#endif
		MT_lock_set(&flow->flowlock, "DFLOWworker");

		/* walk the dependency edge list of the finished instruction and
		 * claim the first pending successor that only waited on us */
		for (last = fe->pc - flow->start; last >= 0 && (i = flow->nodes[last]) > 0; last = flow->edges[last])
			if (flow->status[i].state == DFLOWpending &&
				flow->status[i].blocks == 1) {
				flow->status[i].state = DFLOWrunning;
				flow->status[i].blocks = 0;
				flow->status[i].hotclaim = fe->hotclaim;
				flow->status[i].argclaim += fe->hotclaim;
				fnxt = flow->status + i;
				break;
			}
		MT_lock_unset(&flow->flowlock, "DFLOWworker");

		q_enqueue(flow->done, fe);
		if ( fnxt == 0) {
			/* nothing claimed: if the todo queue is empty, record that
			 * this worker is idle for the profiler */
			int last;
			MT_lock_set(&todo->l, "DFLOWworker");
			last = todo->last;
			MT_lock_unset(&todo->l, "DFLOWworker");
			if (last == 0)
				profilerHeartbeatEvent("wait", 0);
		}
	}
	/* release the per-thread error buffer and deregister the thread */
	GDKfree(GDKerrbuf);
	GDKsetbuf(0);
	THRdel(thr);
	/* mark this worker slot as exited, under the scheduler lock */
	MT_lock_set(&dataflowLock, "DFLOWworker");
	t->flag = EXITED;
	MT_lock_unset(&dataflowLock, "DFLOWworker");
}