Example #1
static void irq_work_run_list(struct llist_head *list)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}
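
All of these examples share the same shape: producers push entries onto a lock-free llist from any context, and a consumer atomically detaches the whole chain with llist_del_all() and walks it without locks. A minimal sketch of that shared pattern follows; the struct and function names (my_item, my_queue, my_drain) are hypothetical, while the llist calls are the real <linux/llist.h> API:

#include <linux/llist.h>
#include <linux/slab.h>

struct my_item {
	struct llist_node node;
	int payload;
};

static LLIST_HEAD(pending);

/* Producer: llist_add() is safe from any context, even NMI. */
static void my_queue(struct my_item *item)
{
	llist_add(&item->node, &pending);
}

/* Consumer: detach everything atomically, then walk locklessly.
 * Entries come back newest-first (LIFO).
 */
static void my_drain(void)
{
	struct llist_node *first = llist_del_all(&pending);
	struct my_item *item, *next;

	llist_for_each_entry_safe(item, next, first, node)
		kfree(item);
}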
Example #2
File: null_blk.c Project: matthewlui/linux
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		entry = llist_reverse_order(entry);
		do {
			struct request_queue *q = NULL;

			cmd = container_of(entry, struct nullb_cmd, ll_list);
			entry = entry->next;
			if (cmd->rq)
				q = cmd->rq->q;
			end_cmd(cmd);

			if (q && !q->mq_ops && blk_queue_stopped(q)) {
				spin_lock(q->queue_lock);
				if (blk_queue_stopped(q))
					blk_start_queue(q);
				spin_unlock(q->queue_lock);
			}
		} while (entry);
	}

	return HRTIMER_NORESTART;
}
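
Two details in this example are easy to miss: llist_del_all() returns entries newest-first, so llist_reverse_order() is needed to complete commands in submission order, and entry->next is read before end_cmd() because end_cmd() may free the command that embeds the llist_node. A condensed sketch of the same drain loop, reusing the names from the example above:

	struct llist_node *batch = llist_del_all(&cq->list);

	batch = llist_reverse_order(batch);	/* oldest command first */
	while (batch) {
		struct nullb_cmd *cmd = llist_entry(batch, struct nullb_cmd, ll_list);

		batch = batch->next;	/* advance before end_cmd() frees cmd */
		end_cmd(cmd);
	}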
Example #3
/*
 * Run the irq_work entries on this cpu. Must be run from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
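
The PENDING/BUSY dance in Examples #1 and #3 pairs with a claim function on the enqueue side: cmpxchg() tries to set both bits at once, and if PENDING was already set someone else owns the entry. A sketch modeled on the kernel's irq_work_claim() of the same era; IRQ_WORK_FLAGS is assumed here to be (IRQ_WORK_PENDING | IRQ_WORK_BUSY), and exact names may differ between kernel versions:

static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/* Start from the current flags, minus PENDING. */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;		/* claimed it */
		if (oflags & IRQ_WORK_PENDING)
			return false;	/* someone else got there first */
		flags = oflags;		/* raced with BUSY clearing; retry */
	}
	return true;
}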
Example #4
File: main.c Project: UNwS/rtl8192su
static int r92su_stop(struct net_device *ndev)
{
	struct r92su *r92su = ndev->ml_priv;
	struct cfg80211_bss *tmp_bss;
	struct llist_node *node;
	int err = -EINVAL, i;

	mutex_lock(&r92su->lock);

	if (r92su_is_connected(r92su)) {
		err = __r92su_disconnect(r92su);
		WARN_ONCE(err, "disconnect failed");
	}

	r92su_set_power(r92su, false);

	if (r92su_is_initializing(r92su)) {
		err = r92su_hw_mac_deinit(r92su);
		WARN_ONCE(err, "failed to deinitialize MAC");
	}

	if (r92su_is_initializing(r92su))
		r92su_set_state(r92su, R92SU_STOP);

	if (r92su->scan_request)
		cfg80211_scan_done(r92su->scan_request, true);

	tmp_bss = r92su->want_connect_bss;
	r92su->want_connect_bss = NULL;
	r92su_bss_free(r92su, tmp_bss);

	r92su->scan_request = NULL;

	for (i = 0; i < MAX_STA; i++)
		r92su_sta_del(r92su, i);

	mutex_unlock(&r92su->lock);

	cancel_delayed_work_sync(&r92su->survey_done_work);
	cancel_delayed_work_sync(&r92su->service_work);
	cancel_work_sync(&r92su->add_bss_work);
	cancel_work_sync(&r92su->connect_bss_work);
	cancel_work_sync(&r92su->disconnect_work);

	node = llist_del_all(&r92su->add_bss_list);
	while (node) {
		struct r92su_add_bss *bss_priv =
			llist_entry(node, struct r92su_add_bss, head);
		node = ACCESS_ONCE(node->next);
		kfree(bss_priv);
	}

	/* wait for keys and stas to be freed */
	synchronize_rcu();
	rcu_barrier();

	return err;
}
Example #5
static void delayed_fput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_fput_list);
	struct llist_node *next;

	for (; node; node = next) {
		next = llist_next(node);
		__fput(llist_entry(node, struct file, f_u.fu_llist));
	}
}
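
The producer feeding delayed_fput_list exploits a useful llist property: llist_add() returns true only when the list was previously empty, so exactly one producer per batch needs to schedule the draining work. A hedged sketch of that enqueue side (my_fput_deferred and my_delayed_fput_work are hypothetical names; the real code lives in fs/file_table.c):

static DECLARE_WORK(my_delayed_fput_work, delayed_fput);

static void my_fput_deferred(struct file *file)
{
	/* true => list was empty, so this caller kicks the worker */
	if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
		schedule_work(&my_delayed_fput_work);
}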
Example #6
File: main.c Project: chunkeey/rtl8192su
static void r92su_bss_add_work(struct work_struct *work)
{
	struct r92su *r92su;
	struct llist_node *node;

	r92su = container_of(work, struct r92su, add_bss_work);
	node = llist_del_all(&r92su->add_bss_list);
	while (node) {
		const struct h2cc2h_bss *c2h_bss;
		struct r92su_add_bss *bss_priv;
		struct cfg80211_bss *bss;
		int chan_idx;
		int ie_len;

		bss_priv = llist_entry(node, struct r92su_add_bss, head);
		c2h_bss = &bss_priv->fw_bss;

		chan_idx = le32_to_cpu(c2h_bss->config.frequency) - 1;
		if (chan_idx < 0 || chan_idx >= r92su->band_2GHZ.n_channels) {
			R92SU_ERR(r92su,
				  "received survey event on bad channel.");
			goto next;
		}

		ie_len = le32_to_cpu(c2h_bss->ie_length) - 12;
		if (ie_len < 0)
			goto next;

		bss = cfg80211_inform_bss(r92su->wdev.wiphy,
			&r92su->band_2GHZ.channels[chan_idx],
			CFG80211_BSS_FTYPE_UNKNOWN, c2h_bss->bssid,
			le64_to_cpu(c2h_bss->ies.timestamp),
			le16_to_cpu(c2h_bss->ies.caps),
			le32_to_cpu(c2h_bss->config.beacon_period),
			c2h_bss->ies.ie, ie_len,
			le32_to_cpu(c2h_bss->rssi), GFP_KERNEL);

		if (bss) {
			r92su_bss_init(r92su, bss, c2h_bss);
			cfg80211_put_bss(r92su->wdev.wiphy, bss);
		}
next:
		node = ACCESS_ONCE(node->next);

		/* these bss_priv have been generated by "c2h_survey_event";
		 * they are not part of the cfg80211 framework, which is
		 * why we have to manage & destroy them ourselves.
		 */
		kfree(bss_priv);
	}
}
Example #7
static
void tracker_call_rcu_workqueue(struct work_struct *work)
{
       struct latency_tracker *tracker;
       struct llist_node *list;
       struct latency_tracker_event *e, *n;

       tracker = container_of(work, struct latency_tracker,
		       tracker_call_rcu_w.work);
       list = llist_del_all(&tracker->to_release);
       synchronize_sched();
       llist_for_each_entry_safe(e, n, list, llist)
	       wrapper_freelist_put_event(tracker, e);
}
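
Here llist_del_all() snapshots the to-release list before synchronize_sched(), so every entry on the detached chain is guaranteed to predate the grace period and no RCU reader can still hold a reference when it is freed. A hypothetical producer side for this pattern (tracker_defer_free is not in the source; llist_add and queue_delayed_work are real APIs):

static void tracker_defer_free(struct latency_tracker *tracker,
			       struct latency_tracker_event *e)
{
	/* Push from atomic context; the workqueue above frees the
	 * entry only after an RCU-sched grace period has elapsed.
	 */
	llist_add(&e->llist, &tracker->to_release);
	queue_delayed_work(system_wq, &tracker->tracker_call_rcu_w, HZ);
}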
Example #8
/*---------------------------------------------------------------------------*/
static void priv_ev_loop_run_tasklet(unsigned long data)
{
	struct xio_ev_loop *loop = (struct xio_ev_loop *) data;
	struct xio_ev_data	*tev;
	struct llist_node	*node;

	while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
		node = llist_reverse_order(node);
		while (node) {
			tev = llist_entry(node, struct xio_ev_data, ev_llist);
			node = llist_next(node);
			tev->handler(tev->data);
		}
	}
}
Example #9
static void null_ipi_cmd_end_io(void *data)
{
	struct completion_queue *cq;
	struct llist_node *entry, *next;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	entry = llist_del_all(&cq->list);

	while (entry) {
		next = entry->next;
		cmd = llist_entry(entry, struct nullb_cmd, ll_list);
		end_cmd(cmd);
		entry = next;
	}
}
Example #10
static void __irq_work_run(void)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;


	/*
	 * Reset the "raised" state right before we check the list because
	 * an NMI may enqueue after we find the list empty from the runner.
	 */
	__this_cpu_write(irq_work_raised, 0);
	barrier();

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty_relaxed(this_list))
		return;

	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}
Example #11
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		do {
			cmd = container_of(entry, struct nullb_cmd, ll_list);
			end_cmd(cmd);
			entry = entry->next;
		} while (entry);
	}

	return HRTIMER_NORESTART;
}
Example #12
File: smp.c Project: Lyude/linux
/**
 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
 *
 * Flush any pending smp-call-function callbacks queued on this CPU. This is
 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
 * to ensure that all pending IPI callbacks are run before it goes completely
 * offline.
 *
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
static void flush_smp_call_function_queue(bool warn_cpu_offline)
{
	struct llist_head *head;
	struct llist_node *entry;
	call_single_data_t *csd, *csd_next;
	static bool warned;

	lockdep_assert_irqs_disabled();

	head = this_cpu_ptr(&call_single_queue);
	entry = llist_del_all(head);
	entry = llist_reverse_order(entry);

	/* There shouldn't be any pending callbacks on an offline CPU. */
	if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
		     !warned && !llist_empty(head))) {
		warned = true;
		WARN(1, "IPI on offline CPU %d\n", smp_processor_id());

		/*
		 * We don't have to use the _safe() variant here
		 * because we are not invoking the IPI handlers yet.
		 */
		llist_for_each_entry(csd, entry, llist)
			pr_warn("IPI callback %pS sent to offline CPU\n",
				csd->func);
	}

	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
		smp_call_func_t func = csd->func;
		void *info = csd->info;

		/* Do we wait until *after* callback? */
		if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
			func(info);
			csd_unlock(csd);
		} else {
			csd_unlock(csd);
			func(info);
		}
	}
}
Example #13
File: tty_buffer.c Project: 19Dan01/linux
void tty_buffer_free_all(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *p, *next;
	struct llist_node *llist;

	while ((p = buf->head) != NULL) {
		buf->head = p->next;
		if (p->size > 0)
			kfree(p);
	}
	llist = llist_del_all(&buf->free);
	llist_for_each_entry_safe(p, next, llist, free)
		kfree(p);

	tty_buffer_reset(&buf->sentinel, 0);
	buf->head = &buf->sentinel;
	buf->tail = &buf->sentinel;

	atomic_set(&buf->mem_used, 0);
}
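
Unlike the message-queue uses above, the tty layer uses an llist purely as a lock-free free list: release paths push buffers with llist_add(), and tty_buffer_free_all() drains the whole list with a single llist_del_all(). A hypothetical release helper illustrating the push side (my_buffer_release is not in the source; the free members are those used by the example above):

static void my_buffer_release(struct tty_bufhead *buf, struct tty_buffer *p)
{
	/* No lock needed: llist_add() is a single cmpxchg loop. */
	llist_add(&p->free, &buf->free);
}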
Example #14
File: main.c Project: chunkeey/rtl8192su
static void r92su_survey_done_work(struct work_struct *work)
{
	struct cfg80211_scan_request *req;
	struct r92su *r92su = container_of(work, struct r92su,
					   survey_done_work.work);

	mutex_lock(&r92su->lock);
	if (!r92su_is_open(r92su))
		goto out;

	req = r92su->scan_request;
	r92su->scan_request = NULL;

	if (req) {
		struct cfg80211_scan_info info = {
			.aborted = false,
		};
		cfg80211_scan_done(req, &info);
	}

	r92su->scanned = true;
	complete(&r92su->scan_done);
out:
	mutex_unlock(&r92su->lock);
}

static int r92su_stop(struct net_device *ndev)
{
	struct r92su *r92su = ndev->ml_priv;
	struct cfg80211_bss *tmp_bss;
	struct llist_node *node;
	int err = -EINVAL, i;

	mutex_lock(&r92su->lock);

	if (r92su_is_connected(r92su)) {
		err = __r92su_disconnect(r92su);
		WARN_ONCE(err, "disconnect failed");
	}

	r92su_set_power(r92su, false);

	if (r92su_is_initializing(r92su)) {
		err = r92su_hw_mac_deinit(r92su);
		WARN_ONCE(err, "failed to deinitialize MAC");
	}

	if (r92su_is_initializing(r92su))
		r92su_set_state(r92su, R92SU_STOP);

	if (r92su->scan_request) {
		struct cfg80211_scan_info info = {
			.aborted = true,
		};
		cfg80211_scan_done(r92su->scan_request, &info);
	}

	tmp_bss = r92su->want_connect_bss;
	r92su->want_connect_bss = NULL;
	r92su_bss_free(r92su, tmp_bss);

	r92su->scan_request = NULL;

	for (i = 0; i < MAX_STA; i++)
		r92su_sta_del(r92su, i);

	mutex_unlock(&r92su->lock);

	cancel_delayed_work_sync(&r92su->survey_done_work);
	cancel_delayed_work_sync(&r92su->service_work);
	cancel_work_sync(&r92su->add_bss_work);
	cancel_work_sync(&r92su->connect_bss_work);
	cancel_work_sync(&r92su->disconnect_work);

	node = llist_del_all(&r92su->add_bss_list);
	while (node) {
		struct r92su_add_bss *bss_priv =
			llist_entry(node, struct r92su_add_bss, head);
		node = ACCESS_ONCE(node->next);
		kfree(bss_priv);
	}

	/* wait for keys and stas to be freed */
	synchronize_rcu();
	rcu_barrier();

	return err;
}

static netdev_tx_t r92su_start_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct r92su *r92su = ndev->ml_priv;

	switch (r92su->wdev.iftype) {
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_ADHOC:
		if (skb->len >= ETH_ALEN + ETH_ALEN + 2)
			r92su_tx(r92su, skb, false);
		break;

	case NL80211_IFTYPE_MONITOR:
		r92su_tx_monitor(r92su, skb);
		break;

	default:
		dev_kfree_skb_any(skb);
		break;
	}
	return NETDEV_TX_OK;
}

static const struct net_device_ops r92su_netdevice_ops = {
	.ndo_open = r92su_open,
	.ndo_stop = r92su_stop,
	.ndo_start_xmit = r92su_start_xmit,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_set_rx_mode = r92su_set_rx_mode,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
};

static void *devm_dup(struct device *dev, void *src, size_t len)
{
	void *tmp;

	tmp = devm_kzalloc(dev, len, GFP_KERNEL);
	if (tmp)
		memcpy(tmp, src, len);
	return tmp;
}

static int r92su_init_band(struct r92su *r92su)
{
	struct ieee80211_supported_band *band;

	band = &r92su->band_2GHZ;
	band->channels = devm_dup(&r92su->wdev.wiphy->dev,
		r92su_channeltable, sizeof(r92su_channeltable));
	if (!band->channels)
		return -ENOMEM;

	band->bitrates = devm_dup(&r92su->wdev.wiphy->dev,
		r92su_ratetable, sizeof(r92su_ratetable));
	if (!band->bitrates)
		return -ENOMEM;

	band->n_channels = ARRAY_SIZE(r92su_channeltable);
	band->n_bitrates = ARRAY_SIZE(r92su_ratetable);

	memcpy(&band->ht_cap, &r92su_ht_info, sizeof(r92su_ht_info));
	band->ht_cap.ht_supported = !r92su->disable_ht;

	switch (r92su->rf_type) {
	case R92SU_1T1R:
		/* nothing needs to be done. The default ht_cap
		 * contains all the necessary bits for just 1T1R
		 * devices */
		break;

	case R92SU_1T2R:
	case R92SU_2T2R:
		band->ht_cap.mcs.rx_mask[1] = 0xff;
		band->ht_cap.mcs.rx_highest = cpu_to_le16(300);
		break;
	}

	r92su->wdev.wiphy->bands[NL80211_BAND_2GHZ] = &r92su->band_2GHZ;

	return 0;
}

static const struct ieee80211_txrx_stypes
r92su_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
	[NL80211_IFTYPE_ADHOC] = {
		.tx = 0xffff,
		.rx = 0,
	},
};
Example #15
/*---------------------------------------------------------------------------*/
int priv_ev_loop_run(void *loop_hndl)
{
	struct xio_ev_loop	*loop = loop_hndl;
	struct xio_ev_data	*tev;
	struct llist_node	*node;
	int cpu;

	clear_bit(XIO_EV_LOOP_STOP, &loop->states);

	switch (loop->flags) {
	case XIO_LOOP_GIVEN_THREAD:
		if (loop->ctx->worker != (uint64_t) get_current()) {
			ERROR_LOG("worker kthread(%p) is not current(%p).\n",
				  (void *) loop->ctx->worker, get_current());
			goto cleanup0;
		}
		/* no need to disable preemption */
		cpu = raw_smp_processor_id();
		if (loop->ctx->cpuid != cpu) {
			TRACE_LOG("worker on core(%d) scheduled to(%d).\n",
				  cpu, loop->ctx->cpuid);
			set_cpus_allowed_ptr(get_current(),
					     cpumask_of(loop->ctx->cpuid));
		}
		break;
	case XIO_LOOP_TASKLET:
		/* were events added to list while in STOP state ? */
		if (!llist_empty(&loop->ev_llist))
			priv_kick_tasklet(loop_hndl);
		return 0;
	case XIO_LOOP_WORKQUEUE:
		/* were events added to list while in STOP state ? */
		while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
			node = llist_reverse_order(node);
			while (node) {
				tev = llist_entry(node, struct xio_ev_data,
						  ev_llist);
				node = llist_next(node);
				tev->work.func = priv_ev_loop_run_work;
				queue_work_on(loop->ctx->cpuid, loop->workqueue,
					      &tev->work);
			}
		}
		return 0;
	default:
		/* undo */
		set_bit(XIO_EV_LOOP_STOP, &loop->states);
		return -1;
	}

retry_wait:
	wait_event_interruptible(loop->wait,
				 test_bit(XIO_EV_LOOP_WAKE, &loop->states));

retry_dont_wait:

	while ((node = llist_del_all(&loop->ev_llist)) != NULL) {
		node = llist_reverse_order(node);
		while (node) {
			tev = llist_entry(node, struct xio_ev_data, ev_llist);
			node = llist_next(node);
			tev->handler(tev->data);
		}
	}

	/* "race point" */
	clear_bit(XIO_EV_LOOP_WAKE, &loop->states);

	if (unlikely(test_bit(XIO_EV_LOOP_STOP, &loop->states)))
		return 0;

	/* if a new entry was added while we were at "race point"
	 * then wait_event might block forever as the condition is false */
	if (llist_empty(&loop->ev_llist))
		goto retry_wait;

	/* race detected */
	if (!test_and_set_bit(XIO_EV_LOOP_WAKE, &loop->states))
		goto retry_dont_wait;

	/* a wakeup was already signalled */
	goto retry_wait;

cleanup0:
	set_bit(XIO_EV_LOOP_STOP, &loop->states);
	return -1;
}
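
The retry_wait/retry_dont_wait handshake above only works if the enqueue side sets XIO_EV_LOOP_WAKE after adding the event. A hypothetical enqueue function showing the other half of that protocol (my_ev_loop_add is not in the source; the members it touches are the ones used by priv_ev_loop_run above):

static void my_ev_loop_add(struct xio_ev_loop *loop, struct xio_ev_data *tev)
{
	llist_add(&tev->ev_llist, &loop->ev_llist);

	/* If WAKE was clear, the loop may be (about to go) to sleep:
	 * wake it. If WAKE was already set, the loop is guaranteed to
	 * re-check ev_llist after its "race point".
	 */
	if (!test_and_set_bit(XIO_EV_LOOP_WAKE, &loop->states))
		wake_up_interruptible(&loop->wait);
}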