Example #1
File: gdk_utils.c Project: lajus/monetinr
void *
THRgetdata(int n)
{
	Thread s = GDK_find_thread(MT_getpid());

	return (s ? s->data[n] : THRdata[n]);
}
Example #2
File: gdk_utils.c Project: lajus/monetinr
int
THRgettid(void)
{
	Thread s = GDK_find_thread(MT_getpid());

	return s ? s->tid : 1;
}
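
A minimal usage sketch (not from the project): tagging diagnostic output with the GDK thread id. It assumes gdk.h is on the include path; log_with_tid is a hypothetical helper.

#include <stdio.h>
#include "gdk.h"

/* hypothetical helper: prefix a diagnostic line with the caller's GDK thread id */
static void
log_with_tid(const char *msg)
{
	/* THRgettid() falls back to 1 when the calling thread was never registered */
	fprintf(stderr, "#[%d] %s\n", THRgettid(), msg);
}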
Example #3
File: gdk_utils.c Project: lajus/monetinr
void
THRsetdata(int n, ptr val)
{
	Thread s = GDK_find_thread(MT_getpid());

	if (s)
		s->data[n] = val;
}
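
Examples #1 and #3 form a get/set pair over the per-thread data slots. Below is a minimal sketch of using them together, assuming gdk.h is available; the slot index and helper names are illustrative, and the assumption that the slot is otherwise unused is not taken from the source.

#include "gdk.h"

/* illustrative slot index; assumes this entry of the per-thread data array
 * is unused in this build */
#define MY_SLOT 3

static void
remember_context(ptr ctx)
{
	THRsetdata(MY_SLOT, ctx);	/* silently ignored if the thread is unregistered */
}

static ptr
recall_context(void)
{
	/* unregistered threads fall back to the global THRdata[MY_SLOT] */
	return THRgetdata(MY_SLOT);
}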
Example #4
File: gdk_utils.c Project: lajus/monetinr
int
THRhighwater(void)
{
	size_t c;
	Thread s;
	size_t diff;

	s = GDK_find_thread(MT_getpid());
	if (s == NULL)
		return 0;
	c = THRsp();
	diff = (c < s->sp) ? s->sp - c : c - s->sp;
	if (diff > (THREAD_STACK_SIZE - 16 * 1024)) {
		return 1;
	}
	return 0;
}
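
THRhighwater() reports whether the stack pointer has drifted to within 16 KiB of THREAD_STACK_SIZE from the position recorded for the thread. A hedged sketch of the usual guard in a recursive traversal follows; Node and walk are hypothetical names.

#include "gdk.h"

typedef struct Node {		/* hypothetical recursive structure */
	struct Node *left, *right;
} Node;

/* returns -1 when the traversal is abandoned because the stack is nearly full */
static int
walk(Node *n)
{
	if (n == NULL)
		return 0;
	if (THRhighwater())	/* stack pointer within 16 KiB of THREAD_STACK_SIZE */
		return -1;
	if (walk(n->left) < 0 || walk(n->right) < 0)
		return -1;
	return 0;
}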
Example #5
File: gdk_utils.c Project: lajus/monetinr
Thread
THRnew(str name)
{
	int tid = 0;
	Thread t;
	Thread s;
	MT_Id pid = MT_getpid();

	MT_lock_set(&GDKthreadLock, "THRnew");
	s = GDK_find_thread(pid);
	if (s == NULL) {
		for (s = GDKthreads, t = s + THREADS; s < t; s++) {
			if (s->pid == pid) {
				MT_lock_unset(&GDKthreadLock, "THRnew");
				IODEBUG THRprintf(GDKstdout, "#THRnew:duplicate " SZFMT "\n", (size_t) pid);
				return s;
			}
		}
		for (s = GDKthreads, t = s + THREADS; s < t; s++) {
			if (s->pid == 0) {
				break;
			}
		}
		if (s == t) {
			MT_lock_unset(&GDKthreadLock, "THRnew");
			IODEBUG THRprintf(GDKstdout, "#THRnew: too many threads\n");
			return NULL;
		}
		tid = s->tid;
		memset((char *) s, 0, sizeof(*s));
		s->pid = pid;
		s->tid = tid;
		s->data[1] = THRdata[1];
		s->data[0] = THRdata[0];
		s->sp = THRsp();

		PARDEBUG THRprintf(GDKstdout, "#%x " SZFMT " sp = " SZFMT "\n", s->tid, (size_t) pid, s->sp);
		PARDEBUG THRprintf(GDKstdout, "#nrofthreads %d\n", GDKnrofthreads);

		GDKnrofthreads++;
	}
	s->name = name;
	MT_lock_unset(&GDKthreadLock, "THRnew");

	return s;
}
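
A sketch of registering the calling thread so that the accessors above resolve to a per-thread slot. Note that this version stores the name pointer as-is (s->name = name), so a literal or otherwise long-lived string should be passed; the name and wrapper below are illustrative.

#include "gdk.h"

/* returns 0 on success, -1 when all THREADS slots are already taken */
static int
attach_current_thread(void)
{
	Thread t = THRnew("worker");	/* the name is illustrative */

	return t == NULL ? -1 : 0;
}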
Example #6
File: gdk_utils.c Project: lajus/monetinr
/* coverity[+kill] */
void
GDKexit(int status)
{
	if (ATOMIC_CAS_int(GDKstopped, 0, 1, GDKstoppedLock, "GDKexit") == 0) {
		MT_lock_set(&GDKthreadLock, "GDKexit");
		GDKnrofthreads = 0;
		MT_lock_unset(&GDKthreadLock, "GDKexit");
		if (GDKvmtrim_id)
			MT_join_thread(GDKvmtrim_id);
		MT_sleep_ms(CATNAP);

		/* Kill all threads except myself */
		if (status == 0) {
			MT_Id pid = MT_getpid();
			Thread t, s;

			for (t = GDKthreads, s = t + THREADS; t < s; t++) {
				if (t->pid) {
					MT_Id victim = t->pid;

					if (t->pid != pid)
						MT_kill_thread(victim);
				}
			}
		}
		(void) GDKgetHome();
#if 0
		/* we can't clean up after killing threads */
		BBPexit();
#endif
		GDKlog(GDKLOGOFF);
		GDKunlockHome();
#if !defined(ATOMIC_LOCK) && !defined(NDEBUG)
		TEMDEBUG GDKlockstatistics(1);
#endif
		MT_global_exit(status);
	}
}
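
A sketch of a fatal-error path handing control to GDKexit. As the code above shows, only a zero status triggers the kill pass over the thread table; any status releases the log and home lock and ends in MT_global_exit. The wrapper name is illustrative.

#include "gdk.h"

/* hypothetical error path: report the failure and shut the GDK layer down */
static void
die(const char *reason)
{
	GDKsyserror("! %s\n", reason);
	GDKexit(1);	/* non-zero status skips the kill pass above, then MT_global_exit(1) */
}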
Example #7
File: gdk_utils.c Project: lajus/monetinr
/*
 * @+ Interrupt handling
 * The current version simply catches signals and prints a warning.
 * It should be extended to cope with the specifics of the interrupt
 * received.
 */
#if 0				/* these are unused */
static void
BATSIGignore(int nr)
{
	GDKsyserror("! ERROR signal %d caught by thread " SZFMT "\n", nr, (size_t) MT_getpid());
}
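
The handler above is compiled out (#if 0) in this version. If it were enabled, wiring it up could follow the standard signal() pattern; since BATSIGignore is static, a sketch like the one below would have to live in the same file. The wiring itself is not part of the excerpt.

#include <signal.h>

/* illustrative wiring, not part of the excerpt above */
static void
install_interrupt_handler(void)
{
	(void) signal(SIGINT, BATSIGignore);	/* report the signal, then continue */
}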
Example #8
/* We create a pool of GDKnr_threads-1 generic workers, that is,
 * workers that will take on jobs from any clients.  In addition, we
 * create a single specific worker per client (i.e. each time we enter
 * here).  This specific worker will only do work for the client for
 * which it was started.  In this way we can guarantee that there will
 * always be progress for the client, even if all other workers are
 * doing something big.
 *
 * When all jobs for a client have been done (there are no more
 * entries for the client in the queue), the specific worker turns
 * itself into a generic worker.  At the same time, we signal that one
 * generic worker should exit and this function returns.  In this way
 * we make sure that there are once again GDKnr_threads-1 generic
 * workers. */
str
runMALdataflow(Client cntxt, MalBlkPtr mb, int startpc, int stoppc, MalStkPtr stk)
{
	DataFlow flow = NULL;
	str msg = MAL_SUCCEED;
	int size;
	int *ret;
	int i;

#ifdef DEBUG_FLOW
	fprintf(stderr, "#runMALdataflow for block %d - %d\n", startpc, stoppc);
	printFunction(GDKstdout, mb, 0, LIST_MAL_STMT | LIST_MAPI);
#endif

	/* in debugging mode we should not start multiple threads */
	if (stk == NULL)
		throw(MAL, "dataflow", "runMALdataflow(): Called with stk == NULL");
	ret = (int*) getArgReference(stk, getInstrPtr(mb, startpc), 0);
	*ret = FALSE;
	if (stk->cmd) {
		*ret = TRUE;
		return MAL_SUCCEED;
	}

	assert(stoppc > startpc);

	/* check existence of workers */
	if (todo == NULL) {
		/* create thread pool */
		if (GDKnr_threads <= 1 || DFLOWinitialize() < 0) {
			/* no threads created, run serially */
			*ret = TRUE;
			return MAL_SUCCEED;
		}
		i = THREADS;			/* we didn't create an extra thread */
	}
	assert(todo);
	/* in addition, create one more worker that will only execute
	 * tasks for the current client to compensate for our waiting
	 * until all work is done */
	MT_lock_set(&dataflowLock, "runMALdataflow");
	/* join with already exited threads */
	{
		int joined;
		do {
			joined = 0;
			for (i = 0; i < THREADS; i++) {
				if (workers[i].flag == EXITED) {
					workers[i].flag = JOINING;
					workers[i].cntxt = NULL;
					joined = 1;
					MT_lock_unset(&dataflowLock, "runMALdataflow");
					MT_join_thread(workers[i].id);
					MT_lock_set(&dataflowLock, "runMALdataflow");
					workers[i].flag = IDLE;
				}
			}
		} while (joined);
	}
	for (i = 0; i < THREADS; i++) {
		if (workers[i].flag == IDLE) {
			/* only create specific worker if we are not doing a
			 * recursive call */
			if (stk->calldepth > 1) {
				int j;
				MT_Id pid = MT_getpid();

				/* doing a recursive call: copy specificity from
				 * current worker to new worker */
				workers[i].cntxt = NULL;
				for (j = 0; j < THREADS; j++) {
					if (workers[j].flag == RUNNING && workers[j].id == pid) {
						workers[i].cntxt = workers[j].cntxt;
						break;
					}
				}
			} else {
				/* not doing a recursive call: create specific worker */
				workers[i].cntxt = cntxt;
			}
			workers[i].flag = RUNNING;
			if (MT_create_thread(&workers[i].id, DFLOWworker, (void *) &workers[i], MT_THR_JOINABLE) < 0) {
				/* cannot start new thread, run serially */
				*ret = TRUE;
				workers[i].flag = IDLE;
				MT_lock_unset(&dataflowLock, "runMALdataflow");
				return MAL_SUCCEED;
			}
			break;
		}
	}
	MT_lock_unset(&dataflowLock, "runMALdataflow");
	if (i == THREADS) {
		/* no empty thread slots found, run serially */
		*ret = TRUE;
		return MAL_SUCCEED;
	}

	flow = (DataFlow)GDKzalloc(sizeof(DataFlowRec));
	if (flow == NULL)
		throw(MAL, "dataflow", "runMALdataflow(): Failed to allocate flow");

	flow->cntxt = cntxt;
	flow->mb = mb;
	flow->stk = stk;
	flow->error = 0;

	/* keep real block count, exclude brackets */
	flow->start = startpc + 1;
	flow->stop = stoppc;

	MT_lock_init(&flow->flowlock, "flow->flowlock");
	flow->done = q_create(stoppc - startpc + 1, "flow->done");
	if (flow->done == NULL) {
		MT_lock_destroy(&flow->flowlock);
		GDKfree(flow);
		throw(MAL, "dataflow", "runMALdataflow(): Failed to create flow->done queue");
	}

	flow->status = (FlowEvent)GDKzalloc((stoppc - startpc + 1) * sizeof(FlowEventRec));
	if (flow->status == NULL) {
		q_destroy(flow->done);
		MT_lock_destroy(&flow->flowlock);
		GDKfree(flow);
		throw(MAL, "dataflow", "runMALdataflow(): Failed to allocate flow->status");
	}
	size = DFLOWgraphSize(mb, startpc, stoppc);
	size += stoppc - startpc;
	flow->nodes = (int*)GDKzalloc(sizeof(int) * size);
	if (flow->nodes == NULL) {
		GDKfree(flow->status);
		q_destroy(flow->done);
		MT_lock_destroy(&flow->flowlock);
		GDKfree(flow);
		throw(MAL, "dataflow", "runMALdataflow(): Failed to allocate flow->nodes");
	}
	flow->edges = (int*)GDKzalloc(sizeof(int) * size);
	if (flow->edges == NULL) {
		GDKfree(flow->nodes);
		GDKfree(flow->status);
		q_destroy(flow->done);
		MT_lock_destroy(&flow->flowlock);
		GDKfree(flow);
		throw(MAL, "dataflow", "runMALdataflow(): Failed to allocate flow->edges");
	}
	msg = DFLOWinitBlk(flow, mb, size);

	if (msg == MAL_SUCCEED)
		msg = DFLOWscheduler(flow, &workers[i]);

	GDKfree(flow->status);
	GDKfree(flow->edges);
	GDKfree(flow->nodes);
	q_destroy(flow->done);
	MT_lock_destroy(&flow->flowlock);
	GDKfree(flow);

	if (i != THREADS) {
		/* we created one worker, now tell one worker to exit again */
		MT_lock_set(&todo->l, "runMALdataflow");
		todo->exitcount++;
		MT_lock_unset(&todo->l, "runMALdataflow");
		MT_sema_up(&todo->s, "runMALdataflow");
	}
	return msg;
}
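
A hedged sketch of how a caller in the MAL interpreter might invoke runMALdataflow for a dataflow barrier block; the wrapper name and pc arguments are illustrative, and the header names are assumptions.

#include "mal_interpreter.h"
#include "mal_dataflow.h"	/* header name is an assumption */

/* hypothetical wrapper: run one (dataflow) barrier block and pass errors on */
static str
run_barrier_block(Client cntxt, MalBlkPtr mb, MalStkPtr stk, int startpc, int stoppc)
{
	str msg = runMALdataflow(cntxt, mb, startpc, stoppc, stk);

	if (msg != MAL_SUCCEED)
		return msg;	/* scheduler or worker reported an error */
	/* on success, the instruction's return argument has been set to TRUE
	 * when the caller still has to execute the block serially itself */
	return MAL_SUCCEED;
}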