Example #1
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		entry = llist_reverse_order(entry);
		do {
			struct request_queue *q = NULL;

			cmd = container_of(entry, struct nullb_cmd, ll_list);
			entry = entry->next;
			if (cmd->rq)
				q = cmd->rq->q;
			end_cmd(cmd);

			if (q && !q->mq_ops && blk_queue_stopped(q)) {
				spin_lock(q->queue_lock);
				if (blk_queue_stopped(q))
					blk_start_queue(q);
				spin_unlock(q->queue_lock);
			}
		} while (entry);
	}

	return HRTIMER_NORESTART;
}
Example #2
/*---------------------------------------------------------------------------*/
static void priv_ev_loop_run_tasklet(unsigned long data)
{
	struct xio_ev_loop *loop = (struct xio_ev_loop *) data;
	struct xio_ev_data	*tev;
	struct llist_node	*node;

	while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
		node = llist_reverse_order(node);
		while (node) {
			tev = llist_entry(node, struct xio_ev_data, ev_llist);
			node = llist_next(node);
			tev->handler(tev->data);
		}
	}
}
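
A note on the shared pattern: every consumer above first detaches the whole list atomically with llist_del_all() and then calls llist_reverse_order(). The reversal is needed because llist_add() pushes new entries at the head, so deletion hands them back newest-first; reversing restores submission (FIFO) order. Below is a minimal sketch of the producer side these loops pair with. The xio_ev_loop/xio_ev_data fields come from Example #2, but the helper name and the tasklet field are assumptions for illustration, not code from the source:

/* Producer-side sketch (hypothetical helper, not from the source):
 * publish an event on the lock-free list and kick the tasklet only
 * when the list was empty, i.e. when no kick is already pending. */
static void priv_ev_loop_add_event(struct xio_ev_loop *loop,
				   struct xio_ev_data *tev)
{
	/* llist_add() returns true iff the list was empty before. */
	if (llist_add(&tev->ev_llist, &loop->ev_llist))
		tasklet_schedule(&loop->tasklet);	/* assumed field */
}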
Example #3
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		entry = llist_reverse_order(entry);
		do {
			cmd = container_of(entry, struct nullb_cmd, ll_list);
			entry = entry->next;
			end_cmd(cmd);
		} while (entry);
	}

	return HRTIMER_NORESTART;
}
Example #4
/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	call_single_data_t *csd, *csd_next;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}
}
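
For context, a condensed sketch of the enqueue side that feeds call_single_queue (simplified from the kernel's generic_exec_single(); CSD locking and the synchronous-wait path are omitted here). The boolean returned by llist_add() tells the sender whether the remote list was empty, i.e. whether an IPI is needed at all:

/* Enqueue-side sketch, simplified: add the csd to the target CPU's
 * lock-free queue and send an IPI only on the empty->non-empty
 * transition; a CPU with entries already pending has been kicked. */
if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
	arch_send_call_function_single_ipi(cpu);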
Example #5
/*---------------------------------------------------------------------------*/
int priv_ev_loop_run(void *loop_hndl)
{
	struct xio_ev_loop	*loop = loop_hndl;
	struct xio_ev_data	*tev;
	struct llist_node	*node;
	int cpu;

	clear_bit(XIO_EV_LOOP_STOP, &loop->states);

	switch (loop->flags) {
	case XIO_LOOP_GIVEN_THREAD:
		if (loop->ctx->worker != (uint64_t) get_current()) {
			ERROR_LOG("worker kthread(%p) is not current(%p).\n",
				  (void *) loop->ctx->worker, get_current());
			goto cleanup0;
		}
		/* no need to disable preemption */
		cpu = raw_smp_processor_id();
		if (loop->ctx->cpuid != cpu) {
			TRACE_LOG("worker on core(%d) scheduled to(%d).\n",
				  cpu, loop->ctx->cpuid);
			set_cpus_allowed_ptr(get_current(),
					     cpumask_of(loop->ctx->cpuid));
		}
		break;
	case XIO_LOOP_TASKLET:
		/* were events added to the list while in STOP state? */
		if (!llist_empty(&loop->ev_llist))
			priv_kick_tasklet(loop_hndl);
		return 0;
	case XIO_LOOP_WORKQUEUE:
		/* were events added to the list while in STOP state? */
		while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
			node = llist_reverse_order(node);
			while (node) {
				tev = llist_entry(node, struct xio_ev_data,
						  ev_llist);
				node = llist_next(node);
				tev->work.func = priv_ev_loop_run_work;
				queue_work_on(loop->ctx->cpuid, loop->workqueue,
					      &tev->work);
			}
		}
		return 0;
	default:
		/* undo */
		set_bit(XIO_EV_LOOP_STOP, &loop->states);
		return -1;
	}

retry_wait:
	wait_event_interruptible(loop->wait,
				 test_bit(XIO_EV_LOOP_WAKE, &loop->states));

retry_dont_wait:

	while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
		node = llist_reverse_order(node);
		while (node) {
			tev = llist_entry(node, struct xio_ev_data, ev_llist);
			node = llist_next(node);
			tev->handler(tev->data);
		}
	}

	/* "race point" */
	clear_bit(XIO_EV_LOOP_WAKE, &loop->states);

	if (unlikely(test_bit(XIO_EV_LOOP_STOP, &loop->states)))
		return 0;

	/* if a new entry was added while we were at the "race point",
	 * then wait_event might block forever as its condition is false */
	if (llist_empty(&loop->ev_llist))
		goto retry_wait;

	/* race detected */
	if (!test_and_set_bit(XIO_EV_LOOP_WAKE, &loop->states))
		goto retry_dont_wait;

	/* a wakeup was already issued */
	goto retry_wait;

cleanup0:
	set_bit(XIO_EV_LOOP_STOP, &loop->states);
	return -1;
}
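
Example #5's tail implements a sleep/wake handshake around the lock-free list: the consumer clears XIO_EV_LOOP_WAKE only after draining, then re-checks the list to catch entries added at the "race point". Below is a minimal sketch of the matching producer (an assumption, not shown in the source): it must publish the entry before setting the wake bit, so that an event is always either drained or re-signalled.

/* Wakeup-side sketch (hypothetical helper, not from the source). */
static void priv_ev_loop_add(struct xio_ev_loop *loop,
			     struct xio_ev_data *tev)
{
	llist_add(&tev->ev_llist, &loop->ev_llist);
	/* Order matters: entry first, wake bit second.  The consumer
	 * clears the bit and then re-checks ev_llist, so an entry
	 * published here can never be missed. */
	set_bit(XIO_EV_LOOP_WAKE, &loop->states);
	wake_up_interruptible(&loop->wait);
}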