/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q: the request queue for the device
 *
 * Description:
 *  Hardware conditions may dictate a need to stop all pending requests.
 *  In this case, we will safely clear the block side of the tag queue and
 *  readd all requests to the request queue in the right order.
 *
 * Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *pos, *next;

	/*
	 * Use the _safe walker: blk_requeue_request() unlinks the current
	 * entry from q->tag_busy_list, so @pos cannot be used to advance.
	 */
	list_for_each_safe(pos, next, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(pos));
}
/*
 * Put @rq (if any) back on @q, and schedule a queue run if there is
 * still work outstanding.
 */
static void __ide_requeue_and_plug(struct request_queue *q, struct request *rq)
{
	if (rq)
		blk_requeue_request(q, rq);

	/*
	 * Kick the queue when we requeued something, or when other requests
	 * are already pending.  Use 3ms as that was the old plug delay.
	 */
	if (rq || blk_peek_request(q))
		blk_delay_queue(q, 3);
}
/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms)
{
	unsigned long flags;
	struct request_queue *q = rq->q;

	/* The old request_fn path needs the queue lock for requeueing. */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	/* Rerun the queue after @delay_ms so the request is retried. */
	blk_delay_queue(q, delay_ms);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq)
{
	unsigned long flags;
	struct request_queue *q = rq->q;

	/* The old request_fn path needs the queue lock for requeueing. */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	/* Kick the queue so the requeued request gets redispatched. */
	blk_run_queue_async(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Put @rq (if any) back on the drive's queue, and plug the device when
 * the elevator still has requests pending.
 */
static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;
	struct request_queue *q = drive->queue;

	spin_lock_irqsave(q->queue_lock, flags);

	if (rq)
		blk_requeue_request(q, rq);
	if (!elv_queue_empty(q))
		blk_plug_device(q);

	spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
 * Put @rq (if any) back on the drive's queue and arrange for the queue
 * to be run again shortly so it gets redispatched.
 */
void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;
	struct request_queue *q = drive->queue;

	/* Requeueing must happen under the queue lock. */
	spin_lock_irqsave(q->queue_lock, flags);
	if (rq)
		blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* Use 3ms as that was the old plug delay */
	if (rq)
		blk_delay_queue(q, 3);
}
/*
 * do_blkif_request
 *  read a block; request is in a request queue
 *
 * Pulls requests off @rq and hands them to blkif_queue_request() until the
 * shared ring fills up or the queue is drained.  Non-fs requests are failed
 * with -EIO.  On a full ring (or a failed queue attempt) the request is
 * requeued and the queue is stopped; the queue is presumably restarted from
 * the ring-response path -- not visible here, confirm against caller.
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;		/* number of requests placed on the ring */

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		/* No room on the shared ring: stop and retry later. */
		if (RING_FULL(&info->ring))
			goto wait;

		/* Dequeue the request we just peeked at. */
		blk_start_request(req);

		/* Only filesystem requests are handled; fail the rest. */
		if (!blk_fs_request(req)) {
			__blk_end_request_all(req, -EIO);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 req->buffer, rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			/* Could not place it on the ring: put it back. */
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	/* Notify the backend only if we actually queued something. */
	if (queued != 0)
		flush_requests(info);
}
static void mbox_tx_tasklet(unsigned long tx_data) { int ret; struct request *rq; struct omap_mbox *mbox = (struct omap_mbox *)tx_data; struct request_queue *q = mbox->txq->queue; while (1) { rq = blk_fetch_request(q); if (!rq) break; ret = __mbox_msg_send(mbox, (mbox_msg_t)rq->special); if (ret) { omap_mbox_enable_irq(mbox, IRQ_TX); blk_requeue_request(q, rq); return; } blk_end_request_all(rq, 0); } }
/*
 * Request will be added back to the request queue and retried if
 * it cannot be immediately dispatched to the taskq for handling
 */
static inline void zvol_dispatch(task_func_t func, struct request *req)
{
	if (taskq_dispatch(zvol_taskq, func, (void *)req, TQ_NOSLEEP))
		return;

	/* Taskq dispatch failed (no sleep allowed): retry via requeue. */
	blk_requeue_request(req->q, req);
}
/* * Issue a new request to a device. */ void do_ide_request(struct request_queue *q) { ide_drive_t *drive = q->queuedata; ide_hwif_t *hwif = drive->hwif; struct ide_host *host = hwif->host; struct request *rq = NULL; ide_startstop_t startstop; unsigned long queue_run_ms = 3; /* old plug delay */ spin_unlock_irq(q->queue_lock); /* HLD do_request() callback might sleep, make sure it's okay */ might_sleep(); if (ide_lock_host(host, hwif)) goto plug_device_2; spin_lock_irq(&hwif->lock); if (!ide_lock_port(hwif)) { ide_hwif_t *prev_port; WARN_ON_ONCE(hwif->rq); repeat: prev_port = hwif->host->cur_port; if (drive->dev_flags & IDE_DFLAG_SLEEPING && time_after(drive->sleep, jiffies)) { unsigned long left = jiffies - drive->sleep; queue_run_ms = jiffies_to_msecs(left + 1); ide_unlock_port(hwif); goto plug_device; } if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) && hwif != prev_port) { ide_drive_t *cur_dev = prev_port ? prev_port->cur_dev : NULL; /* * set nIEN for previous port, drives in the * quirk list may not like intr setups/cleanups */ if (cur_dev && (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0) prev_port->tp_ops->write_devctl(prev_port, ATA_NIEN | ATA_DEVCTL_OBS); hwif->host->cur_port = hwif; } hwif->cur_dev = drive; drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED); spin_unlock_irq(&hwif->lock); spin_lock_irq(q->queue_lock); /* * we know that the queue isn't empty, but this can happen * if the q->prep_rq_fn() decides to kill a request */ if (!rq) rq = blk_fetch_request(drive->queue); spin_unlock_irq(q->queue_lock); spin_lock_irq(&hwif->lock); if (!rq) { ide_unlock_port(hwif); goto out; } /* * Sanity: don't accept a request that isn't a PM request * if we are currently power managed. This is very important as * blk_stop_queue() doesn't prevent the blk_fetch_request() * above to return us whatever is in the queue. Since we call * ide_do_request() ourselves, we end up taking requests while * the queue is blocked... 
* * We let requests forced at head of queue with ide-preempt * though. I hope that doesn't happen too much, hopefully not * unless the subdriver triggers such a thing in its own PM * state machine. */ if ((drive->dev_flags & IDE_DFLAG_BLOCKED) && blk_pm_request(rq) == 0 && (rq->cmd_flags & REQ_PREEMPT) == 0) { /* there should be no pending command at this point */ ide_unlock_port(hwif); goto plug_device; } hwif->rq = rq; spin_unlock_irq(&hwif->lock); startstop = start_request(drive, rq); spin_lock_irq(&hwif->lock); if (startstop == ide_stopped) { rq = hwif->rq; hwif->rq = NULL; goto repeat; } } else goto plug_device; out: spin_unlock_irq(&hwif->lock); if (rq == NULL) ide_unlock_host(host); spin_lock_irq(q->queue_lock); return; plug_device: spin_unlock_irq(&hwif->lock); ide_unlock_host(host); plug_device_2: spin_lock_irq(q->queue_lock); if (rq) { blk_requeue_request(q, rq); blk_delay_queue(q, queue_run_ms); } }
/*
 * Issue a new request to a device.
 *
 * Entered with q->queue_lock held.  The lock is dropped while the host and
 * port locks are acquired, and re-taken before returning.  On any path
 * where the port/host cannot be acquired, the fetched request (if any) is
 * requeued and the device is plugged so the queue is retried later.
 */
void do_ide_request(struct request_queue *q)
{
	ide_drive_t *drive = q->queuedata;
	ide_hwif_t *hwif = drive->hwif;
	struct ide_host *host = hwif->host;
	struct request *rq = NULL;
	ide_startstop_t startstop;

	/*
	 * drive is doing pre-flush, ordered write, post-flush sequence. even
	 * though that is 3 requests, it must be seen as a single transaction.
	 * we must not preempt this drive until that is complete
	 */
	if (blk_queue_flushing(q))
		/*
		 * small race where queue could get replugged during
		 * the 3-request flush cycle, just yank the plug since
		 * we want it to finish asap
		 */
		blk_remove_plug(q);

	spin_unlock_irq(q->queue_lock);

	/* HLD do_request() callback might sleep, make sure it's okay */
	might_sleep();

	if (ide_lock_host(host, hwif))
		goto plug_device_2;

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
		ide_hwif_t *prev_port;

		/* the port is ours; there must be no request in flight */
		WARN_ON_ONCE(hwif->rq);
repeat:
		prev_port = hwif->host->cur_port;

		/* drive still in its sleep window: back off and retry later */
		if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
		    time_after(drive->sleep, jiffies)) {
			ide_unlock_port(hwif);
			goto plug_device;
		}

		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
		    hwif != prev_port) {
			ide_drive_t *cur_dev =
				prev_port ? prev_port->cur_dev : NULL;

			/*
			 * set nIEN for previous port, drives in the
			 * quirk list may not like intr setups/cleanups
			 */
			if (cur_dev &&
			    (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
				prev_port->tp_ops->write_devctl(prev_port,
								ATA_NIEN |
								ATA_DEVCTL_OBS);

			hwif->host->cur_port = hwif;
		}
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		/* swap hwif->lock for q->queue_lock to fetch a request */
		spin_unlock_irq(&hwif->lock);
		spin_lock_irq(q->queue_lock);
		/*
		 * we know that the queue isn't empty, but this can happen
		 * if the q->prep_rq_fn() decides to kill a request
		 */
		if (!rq)
			rq = blk_fetch_request(drive->queue);

		spin_unlock_irq(q->queue_lock);
		spin_lock_irq(&hwif->lock);

		if (!rq) {
			ide_unlock_port(hwif);
			goto out;
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the blk_fetch_request()
		 * above to return us whatever is in the queue. Since we call
		 * ide_do_request() ourselves, we end up taking requests while
		 * the queue is blocked...
		 *
		 * We let requests forced at head of queue with ide-preempt
		 * though. I hope that doesn't happen too much, hopefully not
		 * unless the subdriver triggers such a thing in its own PM
		 * state machine.
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    blk_pm_request(rq) == 0 &&
		    (rq->cmd_flags & REQ_PREEMPT) == 0) {
			/* there should be no pending command at this point */
			ide_unlock_port(hwif);
			goto plug_device;
		}

		hwif->rq = rq;

		/* start_request() may sleep; drop hwif->lock around it */
		spin_unlock_irq(&hwif->lock);
		startstop = start_request(drive, rq);
		spin_lock_irq(&hwif->lock);

		/* ide_stopped: the request did not start; try to fetch more */
		if (startstop == ide_stopped) {
			rq = hwif->rq;
			hwif->rq = NULL;
			goto repeat;
		}
	} else
		goto plug_device;
out:
	spin_unlock_irq(&hwif->lock);
	if (rq == NULL)
		ide_unlock_host(host);
	spin_lock_irq(q->queue_lock);
	return;

plug_device:
	spin_unlock_irq(&hwif->lock);
	ide_unlock_host(host);
plug_device_2:
	spin_lock_irq(q->queue_lock);

	/* could not dispatch: put the request back and plug the device */
	if (rq)
		blk_requeue_request(q, rq);
	if (!elv_queue_empty(q))
		blk_plug_device(q);
}