static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b, struct bio *bio)
{
	unsigned hi;
	dm_oblock_t hb = to_hblock(mq, b);	/* origin block -> coarser hotspot block */
	struct entry *e = h_lookup(&mq->hotspot_table, hb);

	if (e) {
		/* Hit: account for the access at this entry's level. */
		stats_level_accessed(&mq->hotspot_stats, e->level);

		/*
		 * Only the first hit in a period (hit bit previously
		 * clear) promotes the entry by hotspot_level_jump
		 * levels; repeat hits requeue it at its current level.
		 */
		hi = get_index(&mq->hotspot_alloc, e);
		q_requeue(&mq->hotspot, e,
			  test_and_set_bit(hi, mq->hotspot_hit_bits) ?
			  0u : mq->hotspot_level_jump);

	} else {
		stats_miss(&mq->hotspot_stats);

		e = alloc_entry(&mq->hotspot_alloc);
		if (!e) {
			/*
			 * Allocator exhausted: recycle an entry popped
			 * from the queue, unhashing it and clearing
			 * its hit bit.
			 */
			e = q_pop(&mq->hotspot);
			if (e) {
				h_remove(&mq->hotspot_table, e);
				hi = get_index(&mq->hotspot_alloc, e);
				clear_bit(hi, mq->hotspot_hit_bits);
			}
		}

		if (e) {
			e->oblock = hb;
			q_push(&mq->hotspot, e);
			h_insert(&mq->hotspot_table, e);
		}
	}

	return e;
}
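
A note on the requeue above: the promotion rule hides in the ternary around test_and_set_bit(). The standalone sketch below shows the same rule in isolation. It is not dm-cache code; NLEVELS, LEVEL_JUMP, struct hs_entry and requeue_on_hit are illustrative stand-ins for the multiqueue parameters, the hotspot_hit_bits bitmap and the q_requeue() call.

#include <stdbool.h>
#include <stdio.h>

#define NLEVELS    64	/* illustrative stand-in for the queue's level count */
#define LEVEL_JUMP  4	/* illustrative stand-in for mq->hotspot_level_jump */

struct hs_entry {
	unsigned level;
	bool hit_this_period;	/* stands in for one bit of hotspot_hit_bits */
};

static void requeue_on_hit(struct hs_entry *e)
{
	/* like test_and_set_bit(): only the first hit of a period promotes */
	unsigned jump = e->hit_this_period ? 0u : LEVEL_JUMP;

	e->hit_this_period = true;
	e->level += jump;
	if (e->level >= NLEVELS)
		e->level = NLEVELS - 1;
}

int main(void)
{
	struct hs_entry e = { .level = 10, .hit_this_period = false };

	requeue_on_hit(&e);	/* first hit: level 10 -> 14 */
	requeue_on_hit(&e);	/* repeat hit: level stays at 14 */
	printf("level after two hits: %u\n", e.level);
	return 0;
}

Assuming the hit bits are cleared at period boundaries elsewhere in the policy, this bounds each entry's climb to one jump per period, no matter how many hits it takes in between.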
Example #2
static void
DFLOWworker(void *T)
{
	struct worker *t = (struct worker *) T;
	DataFlow flow;
	FlowEvent fe = 0, fnxt = 0;
	int id = (int) (t - workers);
	Thread thr;
	str error = 0;
	int i, last;
	Client cntxt;
	InstrPtr p;

	thr = THRnew("DFLOWworker");

	GDKsetbuf(GDKmalloc(GDKMAXERRLEN)); /* where to leave errors */
	GDKerrbuf[0] = 0;
	MT_lock_set(&dataflowLock, "DFLOWworker");
	cntxt = t->cntxt;
	MT_lock_unset(&dataflowLock, "DFLOWworker");
	if (cntxt) {
		/* wait until we are allowed to start working */
		MT_sema_down(&t->s, "DFLOWworker");
	}
	while (1) {
		if (fnxt == 0) {
			MT_lock_set(&dataflowLock, "DFLOWworker");
			cntxt = t->cntxt;
			MT_lock_unset(&dataflowLock, "DFLOWworker");
			fe = q_dequeue(todo, cntxt);
			if (fe == NULL) {
				if (cntxt) {
					/* we're not done yet with work for the current
					 * client (as far as we know), so give up the CPU
					 * and let the scheduler enter some more work, but
					 * first compensate for the down we did in
					 * dequeue */
					MT_sema_up(&todo->s, "DFLOWworker");
					MT_sleep_ms(1);
					continue;
				}
				/* no more work to be done: exit */
				break;
			}
		} else
			fe = fnxt;
		if (ATOMIC_GET(exiting, exitingLock, "DFLOWworker")) {
			break;
		}
		fnxt = 0;
		assert(fe);
		flow = fe->flow;
		assert(flow);

		/* whenever we have a (concurrent) error, skip it */
		if (flow->error) {
			q_enqueue(flow->done, fe);
			continue;
		}

		/* skip all instructions once we have encountered an error */
		if (flow->error == 0) {
#ifdef USE_MAL_ADMISSION
			if (MALadmission(fe->argclaim, fe->hotclaim)) {
				fe->hotclaim = 0;   /* don't assume priority anymore */
				if (todo->last == 0)
					MT_sleep_ms(DELAYUNIT);
				q_requeue(todo, fe);
				continue;
			}
#endif
			error = runMALsequence(flow->cntxt, flow->mb, fe->pc, fe->pc + 1, flow->stk, 0, 0);
			PARDEBUG fprintf(stderr, "#executed pc= %d wrk= %d claim= " LLFMT "," LLFMT " %s\n",
							 fe->pc, id, fe->argclaim, fe->hotclaim, error ? error : "");
#ifdef USE_MAL_ADMISSION
			/* release the memory claim */
			MALadmission(-fe->argclaim, -fe->hotclaim);
#endif
			/* update the NUMA information: remember the thread id that produced each value */
			p = getInstrPtr(flow->mb, fe->pc);
			for (i = 0; i < p->argc; i++)
				flow->mb->var[getArg(p, i)]->worker = thr->tid;

			MT_lock_set(&flow->flowlock, "DFLOWworker");
			fe->state = DFLOWwrapup;
			MT_lock_unset(&flow->flowlock, "DFLOWworker");
			if (error) {
				MT_lock_set(&flow->flowlock, "DFLOWworker");
				/* only collect one error (from one thread, needed for stable testing) */
				if (!flow->error)
					flow->error = error;
				MT_lock_unset(&flow->flowlock, "DFLOWworker");
				/* after an error we skip the rest of the block */
				q_enqueue(flow->done, fe);
				continue;
			}
		}

		/* see if you can find an eligible instruction that uses the
		 * result just produced. Then we can continue with it right away.
		 * We are just looking forward for the last block, which means we
		 * are safe from concurrent actions. No other thread can steal it,
		 * because we hold the logical lock.
		 * All eligible instructions are queued
		 */
#ifdef USE_MAL_ADMISSION
		{
			InstrPtr p = getInstrPtr(flow->mb, fe->pc);
			assert(p);
			fe->hotclaim = 0;
			for (i = 0; i < p->retc; i++)
				fe->hotclaim += getMemoryClaim(flow->mb, flow->stk, p, i, FALSE);
		}
#endif
		MT_lock_set(&flow->flowlock, "DFLOWworker");

		for (last = fe->pc - flow->start; last >= 0 && (i = flow->nodes[last]) > 0; last = flow->edges[last])
			if (flow->status[i].state == DFLOWpending &&
				flow->status[i].blocks == 1) {
				flow->status[i].state = DFLOWrunning;
				flow->status[i].blocks = 0;
				flow->status[i].hotclaim = fe->hotclaim;
				flow->status[i].argclaim += fe->hotclaim;
				fnxt = flow->status + i;
				break;
			}
		MT_lock_unset(&flow->flowlock, "DFLOWworker");

		q_enqueue(flow->done, fe);
		if (fnxt == 0) {
			int qlen;

			/* if the todo queue is empty, report that this worker is idle */
			MT_lock_set(&todo->l, "DFLOWworker");
			qlen = todo->last;
			MT_lock_unset(&todo->l, "DFLOWworker");
			if (qlen == 0)
				profilerHeartbeatEvent("wait", 0);
		}
	}
	GDKfree(GDKerrbuf);
	GDKsetbuf(0);
	THRdel(thr);
	MT_lock_set(&dataflowLock, "DFLOWworker");
	t->flag = EXITED;
	MT_lock_unset(&dataflowLock, "DFLOWworker");
}
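
The fnxt hand-off in the loop above is the interesting part: after finishing an instruction, the worker scans its dependents under flowlock and, when a pending dependent has exactly one block left, claims it directly as its next event instead of paying a round trip through the shared todo queue. Below is a distilled sketch of that hand-off pattern. It is not MonetDB code: struct task, finish_and_claim and enqueue_todo are illustrative, it generalizes by enqueueing the remaining ready dependents itself, and a real version needs the same locking as the function above.

#include <stddef.h>
#include <stdio.h>

struct task {
	const char *name;
	int blocks;		/* number of unfinished prerequisites */
	struct task **deps;	/* tasks that wait on this one */
	size_t ndeps;
};

/* Stand-in for q_enqueue(todo, ...): here we just log the hand-off. */
static void enqueue_todo(struct task *t)
{
	printf("enqueued %s for another worker\n", t->name);
}

/* Mark `done` finished; claim one newly ready dependent to run next
 * (the fnxt of the worker loop) and hand any others to the pool. */
static struct task *finish_and_claim(struct task *done)
{
	struct task *next = NULL;

	for (size_t i = 0; i < done->ndeps; i++) {
		struct task *d = done->deps[i];

		if (--d->blocks == 0) {
			if (!next)
				next = d;	/* run this one ourselves */
			else
				enqueue_todo(d);
		}
	}
	return next;
}

int main(void)
{
	struct task b = { "b", 1, NULL, 0 };
	struct task c = { "c", 1, NULL, 0 };
	struct task *adeps[] = { &b, &c };
	struct task a = { "a", 0, adeps, 2 };

	/* finishing `a` unblocks both dependents: keep `b`, enqueue `c` */
	struct task *fnxt = finish_and_claim(&a);
	printf("running %s next, skipping the queue\n", fnxt->name);
	return 0;
}

Keeping one dependent on the worker's own plate avoids the queue's lock and semaphore traffic for the common pipeline case, and presumably also helps locality, since the dependent consumes the value this thread just produced.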