Example #1
/* Not allowed to do work if it could cause a deadlock.
 * The async_calls invoked from this function are expected to
 * run in normal kernel context (not interrupt context, etc.),
 * so no work is done if:
 *  - preemption is disabled (treated as a caller bug: see the
 *    ASSERT below)
 *  - any locks are held (causes an early return)
 */
int workqueue_dowork(struct workqueue *wq)
{
	int old = cpu_interrupt_set(0); /* mask interrupts, saving the old state */
	ASSERT(__current_cpu->preempt_disable == 0);
	if(current_thread->held_locks) {
		cpu_interrupt_set(old);
		return -1;
	}
	struct async_call *call;
	spinlock_acquire(&wq->lock);
	if(heap_pop(&wq->tasks, 0, (void **)&call) == 0) {
		call->queue = 0;
		spinlock_release(&wq->lock);
		atomic_fetch_sub(&wq->count, 1);
		cpu_interrupt_set(old);
		/* run the call with the lock dropped and interrupts restored */
		async_call_execute(call);
		return 0;
	}
	spinlock_release(&wq->lock);
	cpu_interrupt_set(old);
	return -1;
}
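
The heap, spinlock, and async_call types used above are not shown on this page. As a rough userspace analog, with every name in it hypothetical, the core pattern (pop the task under the lock, run its callback only after the lock is dropped) might be sketched with pthreads like so:

/* Hypothetical stand-ins for the kernel types; a linked list
 * replaces the kernel's heap for brevity. */
#include <pthread.h>

struct async_call {
	void (*func)(void *data);
	void *data;
	struct async_call *next;
};

struct workqueue {
	pthread_mutex_t lock;
	struct async_call *head;
};

/* Pop one queued call and execute it outside the lock.
 * Returns 0 if work was done, -1 if the queue was empty. */
int workqueue_dowork_sketch(struct workqueue *wq)
{
	pthread_mutex_lock(&wq->lock);
	struct async_call *call = wq->head;
	if(call)
		wq->head = call->next;
	pthread_mutex_unlock(&wq->lock);
	if(!call)
		return -1;
	call->func(call->data); /* no locks held here */
	return 0;
}

Executing the call only after the lock is released is what lets the callback itself take locks or queue more work without deadlocking against wq->lock, which is the same invariant the header comment enforces with the held_locks check.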
Example #2
void ticker_dowork(struct ticker *ticker)
{
	uint64_t key;
	void *data;
	int old = cpu_interrupt_set(0);
	assert(!current_thread->blocklist);
	if(__current_cpu->preempt_disable > 0) {
		cpu_interrupt_set(old);
		return;
	}
	if(current_thread->held_locks) {
		cpu_interrupt_set(old);
		return;
	}
	while(heap_peek(&ticker->heap, &key, &data) == 0) {
		if(key < ticker->tick) {
			/* The earliest event is due. Re-read it under the
			 * lock: it's cheap, and necessary in case something
			 * bubbled up through the heap between the peek
			 * above and now. */
			spinlock_acquire(&ticker->lock);
			int res = heap_pop(&ticker->heap, &key, &data);
			if(!res)
				tm_thread_lower_flag(current_thread, THREAD_TICKER_DOWORK);
			spinlock_release(&ticker->lock);
			if(res == 0) {
				/* handle the timer event: run its callback with the lock released */
				struct async_call *call = (struct async_call *)data;
				call->queue = 0;
				async_call_execute(call);
			}
		} else {
			break;
		}
	}
	cpu_interrupt_set(old);
}
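
As with the first example, the ticker and heap types are assumed rather than shown. A minimal sketch of the same drain pattern, with hypothetical names and a sorted list standing in for the heap, peeks at the earliest deadline without the lock as a cheap hint, re-takes the entry under the lock, and executes it only after the lock is released:

#include <pthread.h>
#include <stdint.h>

struct timer_event {
	uint64_t deadline;
	void (*func)(void *data);
	void *data;
	struct timer_event *next; /* list kept sorted by deadline */
};

struct ticker_sketch {
	pthread_mutex_t lock;
	struct timer_event *head;
	uint64_t tick; /* advanced elsewhere, e.g. by a timer interrupt */
};

void ticker_dowork_sketch(struct ticker_sketch *t)
{
	for(;;) {
		/* unlocked peek, as in the original: a cheap hint only */
		struct timer_event *ev = t->head;
		if(!ev || ev->deadline >= t->tick)
			break;
		/* re-check and pop under the lock, since the head may
		 * have changed since the peek */
		pthread_mutex_lock(&t->lock);
		ev = t->head;
		if(ev && ev->deadline < t->tick)
			t->head = ev->next;
		else
			ev = 0;
		pthread_mutex_unlock(&t->lock);
		if(ev)
			ev->func(ev->data); /* run with no locks held */
	}
}

The sketch re-checks the deadline under the lock before popping, for the same reason the original re-reads the entry in its locked heap_pop: the head of the queue may have changed between the peek and the pop.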