static void hci_ibs_tx_idle_timeout(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *) arg;
	struct ibs_struct *ibs = hu->priv;
	unsigned long flags;
	unsigned long vote_tx_sleep = 0;

	BT_DBG("hu %p idle timeout in %lu state", hu, ibs->tx_ibs_state);

	spin_lock_irqsave_nested(&ibs->hci_ibs_lock,
					flags, SINGLE_DEPTH_NESTING);

	switch (ibs->tx_ibs_state) {
	default:
	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_WAKING:
		BT_ERR("spurrious timeout in tx state %ld", ibs->tx_ibs_state);
		goto out;
	case HCI_IBS_TX_AWAKE: /* TX_IDLE, go to SLEEP */
		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
			BT_ERR("cannot send SLEEP to device");
			goto out;
		}
		ibs->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		ibs->ibs_sent_slps++; /* debug */
		vote_tx_sleep = 1;
		break;
	}

	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);

	hci_uart_tx_wakeup(hu);  /* run HCI tx handling unlocked */

	if (!vote_tx_sleep)
		return;
	/*
	 * Now that the message is queued to the tty driver, vote for the
	 * tty clocks to be turned off.  It is up to the tty driver to hold
	 * off the clock-off vote until its tx is done.
	 */

	spin_lock_irqsave_nested(&ibs->hci_ibs_lock,
					flags, SINGLE_DEPTH_NESTING);
	/* Qualcomm baseline update 1030 to 1040 begin */
	/* vote off tx clock */
	ibs->ibs_wq_state = HCI_IBS_WQ_TX_VOTE_OFF;
	queue_work(ibs->workqueue, &ibs->ws_ibs);
	/* Qualcomm baseline update 1030 to 1040 end */
out:
	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
}
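
For context, this example uses the pre-4.15 timer API, where the callback receives an opaque unsigned long argument. Below is a minimal sketch of how such a timer would typically be bound and armed; the tx_idle_timer and tx_idle_delay field names and the init helper are assumptions for illustration, not taken from the example above.

static void ibs_init_tx_idle_timer(struct hci_uart *hu)
{
	struct ibs_struct *ibs = hu->priv;

	/* Bind the legacy-style callback, passing the hci_uart as the
	 * opaque unsigned long cookie (field names assumed). */
	setup_timer(&ibs->tx_idle_timer, hci_ibs_tx_idle_timeout,
		    (unsigned long)hu);

	/* Arm (or re-arm) the idle timer; tx_idle_delay assumed in jiffies */
	mod_timer(&ibs->tx_idle_timer, jiffies + ibs->tx_idle_delay);
}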
Example #2
static void hci_ibs_wake_retrans_timeout(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *) arg;
	struct ibs_struct *ibs = hu->priv;
	unsigned long flags;
	unsigned long retransmit = 0;

	BT_DBG("hu %p wake retransmit timeout in %lu state",
		hu, ibs->tx_ibs_state);

	spin_lock_irqsave_nested(&ibs->hci_ibs_lock,
					flags, SINGLE_DEPTH_NESTING);

	switch (ibs->tx_ibs_state) {
	default:
	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_AWAKE:
		BT_ERR("spurrious timeout tx state %ld", ibs->tx_ibs_state);
		goto out;
	case HCI_IBS_TX_WAKING: /* No WAKE_ACK, retransmit WAKE */
		retransmit = 1;
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
			BT_ERR("cannot acknowledge device wake up");
			goto out;
		}
		ibs->ibs_sent_wakes++; /* debug */
		mod_timer(&ibs->wake_retrans_timer, jiffies + wake_retrans);
		break;
	}
out:
	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
	if (retransmit)
		hci_uart_tx_wakeup(hu);
}
Example #3
static void hci_ibs_tx_idle_timeout(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *) arg;
	struct ibs_struct *ibs = hu->priv;
	unsigned long flags;

	BT_DBG("hu %p idle timeout in %lu state", hu, ibs->tx_ibs_state);

	spin_lock_irqsave_nested(&ibs->hci_ibs_lock,
					flags, SINGLE_DEPTH_NESTING);

	switch (ibs->tx_ibs_state) {
	default:
	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_WAKING:
		BT_ERR("spurrious timeout in tx state %ld", ibs->tx_ibs_state);
		goto out;
	case HCI_IBS_TX_AWAKE: /* TX_IDLE, go to SLEEP */
		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
			BT_ERR("cannot send SLEEP to device");
			goto out;
		}
		ibs->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		ibs->ibs_sent_slps++; /* debug */
		break;
	}

	queue_work(ibs->workqueue, &ibs->ws_tx_vote_off);

out:
	spin_unlock_irqrestore(&ibs->hci_ibs_lock, flags);
}
Example #4
static void hci_ibs_tx_idle_timeout(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct qca_data *qca = hu->priv;
	unsigned long flags;

	BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_AWAKE:
		/* TX_IDLE, go to SLEEP */
		if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) {
			BT_ERR("Failed to send SLEEP to device");
			break;
		}
		qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
		qca->ibs_sent_slps++;
		queue_work(qca->workqueue, &qca->ws_tx_vote_off);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_WAKING:
		/* Fall through */

	default:
		BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
}
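
The timer handler above runs in atomic context, so it only queues ws_tx_vote_off and lets the actual clock vote happen later in process context. A rough sketch of what that deferred work might look like follows; the handler name, the serial_clock_vote()/HCI_IBS_TX_VOTE_CLOCK_OFF helpers, and the qca->hu back-pointer are assumptions for illustration.

static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work)
{
	struct qca_data *qca = container_of(work, struct qca_data,
					    ws_tx_vote_off);

	/* Drop the TX clock vote; this may sleep, hence the workqueue
	 * (helper and constant names assumed). */
	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, qca->hu);
}

The work item would be registered once during setup, e.g.:

	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);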
Example #5
static void hci_ibs_wake_retrans_timeout(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct qca_data *qca = hu->priv;
	unsigned long flags, retrans_delay;
	bool retransmit = false;

	BT_DBG("hu %p wake retransmit timeout in %d state",
		hu, qca->tx_ibs_state);

	spin_lock_irqsave_nested(&qca->hci_ibs_lock,
				 flags, SINGLE_DEPTH_NESTING);

	switch (qca->tx_ibs_state) {
	case HCI_IBS_TX_WAKING:
		/* No WAKE_ACK, retransmit WAKE */
		retransmit = true;
		if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) {
			BT_ERR("Failed to acknowledge device wake up");
			break;
		}
		qca->ibs_sent_wakes++;
		retrans_delay = msecs_to_jiffies(qca->wake_retrans);
		mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
		break;

	case HCI_IBS_TX_ASLEEP:
	case HCI_IBS_TX_AWAKE:
		/* Fall through */

	default:
		BT_ERR("Spurrious timeout tx state %d", qca->tx_ibs_state);
		break;
	}

	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);

	if (retransmit)
		hci_uart_tx_wakeup(hu);
}
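
All of these callbacks still use the legacy unsigned long signature. For comparison, here is a hedged sketch of how the same retransmit handler would be wired up with the timer_setup()/from_timer() API (kernel 4.15+); it assumes wake_retrans_timer is embedded in struct qca_data and that qca keeps a back-pointer to its hci_uart.

static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
{
	struct qca_data *qca = from_timer(qca, t, wake_retrans_timer);
	struct hci_uart *hu = qca->hu;	/* assumed back-pointer field */

	/* ... same locking and TX state machine as in the example above ... */
}

/* Registered during setup instead of setup_timer(): */
timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);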
Example #6
/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 * @locked_q: request_queue the caller is holding queue_lock of (hint)
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.  If the caller is holding queue_lock of a queue, it can indicate
 * that with @locked_q.  This is an optimization hint and the caller is
 * allowed to pass in %NULL even when it's holding a queue_lock.
 */
void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
{
	struct request_queue *last_q = locked_q;
	unsigned long flags;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	if (locked_q)
		lockdep_assert_held(locked_q->queue_lock);

	if (!atomic_long_dec_and_test(&ioc->refcount))
		return;

	/*
	 * Destroy @ioc.  This is a bit messy because icq's are chained
	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
	 * The inner ioc->lock should be held to walk our icq_list and then
	 * for each icq the outer matching queue_lock should be grabbed.
	 * i.e., we need to do reverse-order double-lock dancing.
	 *
	 * Another twist is that we are often called with one of the
	 * matching queue_locks held as indicated by @locked_q, which
	 * prevents performing double-lock dance for other queues.
	 *
	 * So, we do it in two stages.  The fast path uses the queue_lock
	 * the caller is holding and, if other queues need to be accessed,
	 * uses trylock to avoid introducing locking dependency.  This can
	 * handle most cases, especially if @ioc was performing IO on only
	 * single device.
	 *
	 * If trylock doesn't cut it, we defer to @ioc->release_work which
	 * can do all the double-locking dancing.
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags,
				 ioc_release_depth(locked_q));

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *this_q = icq->q;

		if (this_q != last_q) {
			if (last_q && last_q != locked_q)
				spin_unlock(last_q->queue_lock);
			last_q = NULL;

			if (!spin_trylock(this_q->queue_lock))
				break;
			last_q = this_q;
			continue;
		}
		ioc_exit_icq(icq);
	}

	if (last_q && last_q != locked_q)
		spin_unlock(last_q->queue_lock);

	spin_unlock_irqrestore(&ioc->lock, flags);

	/* if no icq is left, we're done; otherwise, kick release_work */
	if (hlist_empty(&ioc->icq_list))
		kmem_cache_free(iocontext_cachep, ioc);
	else
		schedule_work(&ioc->release_work);
}
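
Every example here passes SINGLE_DEPTH_NESTING so that lockdep accepts taking a lock while another lock of the same class (or one that nests under it) is already held. A stripped-down, illustrative sketch of that annotation pattern follows; the structure and function names are invented for the illustration, and a real caller still has to guarantee a consistent acquisition order (for example by comparing addresses) to avoid a genuine deadlock.

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/lockdep.h>

struct my_list {
	spinlock_t lock;
	struct list_head head;
};

/* Illustrative only: src->lock and dst->lock belong to the same lock
 * class, so the inner acquisition is annotated with SINGLE_DEPTH_NESTING
 * to keep lockdep from reporting a false self-deadlock. */
static void move_first_item(struct my_list *src, struct my_list *dst)
{
	unsigned long flags;

	spin_lock_irqsave(&src->lock, flags);
	spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);

	if (!list_empty(&src->head))
		list_move_tail(src->head.next, &dst->head);

	spin_unlock(&dst->lock);
	spin_unlock_irqrestore(&src->lock, flags);
}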