/*
 * cdrom_queue_request_sense - queue a REQUEST SENSE packet command
 * @drive:		drive the failed command was issued to
 * @sense:		buffer to receive the sense data, or NULL to use the
 *			driver's own info->sense_data buffer
 * @failed_command:	the request that triggered this sense fetch (may be
 *			NULL); saved in rq->buffer for completion handling
 *
 * Re-initializes the drive's static sense request and inserts it at the
 * front of the elevator queue so it is issued before anything else.
 */
static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
				      struct request *failed_command)
{
	struct cdrom_info *info = drive->driver_data;
	struct request *rq = &drive->request_sense_rq;

	ide_debug_log(IDE_DBG_SENSE, "enter");

	if (sense == NULL)
		sense = &info->sense_data;

	/* stuff the sense request in front of our current request */
	blk_rq_init(NULL, rq);
	rq->rq_disk = info->disk;
	rq->data = sense;

	rq->cmd[0] = GPCMD_REQUEST_SENSE;
	rq->cmd[4] = 18;
	rq->data_len = 18;

	/*
	 * Fix: the original assigned REQ_TYPE_ATA_PC to rq->cmd_type and
	 * then unconditionally overwrote it here without any intervening
	 * read — the dead store has been removed.
	 */
	rq->cmd_type = REQ_TYPE_SENSE;
	rq->cmd_flags |= REQ_PREEMPT;

	/* NOTE! Save the failed command in "rq->buffer" */
	rq->buffer = (void *) failed_command;

	if (failed_command)
		ide_debug_log(IDE_DBG_SENSE, "failed_cmd: 0x%x",
			      failed_command->cmd[0]);

	drive->hwif->rq = NULL;

	elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
}
/*
 * blk_do_rq - issue @rq on @q and wait synchronously for its completion.
 * @q:    request queue to submit on
 * @bdev: block device the request targets (supplies rq->rq_disk)
 * @rq:   fully prepared request to execute
 *
 * Inserts the request at the back of the elevator queue, unplugs the
 * queue and sleeps until the completion fires.  Returns 0 on success or
 * -EIO if the request completed with errors.
 */
static int blk_do_rq(request_queue_t *q, struct block_device *bdev,
		     struct request *rq)
{
	char sense[SCSI_SENSE_BUFFERSIZE];
	DECLARE_COMPLETION(wait);
	int err = 0;

	rq->rq_disk = bdev->bd_disk;

	/*
	 * we need an extra reference to the request, so we can look at
	 * it after io completion
	 */
	rq->ref_count++;

	/*
	 * If the caller supplied no sense buffer, borrow this function's
	 * on-stack array.
	 * NOTE(review): rq->sense is left pointing at stack memory after
	 * this function returns — confirm no completion-side code reads
	 * it once blk_do_rq() has unwound.
	 */
	if (!rq->sense) {
		memset(sense, 0, sizeof(sense));
		rq->sense = sense;
		rq->sense_len = 0;
	}

	/* REQ_NOMERGE: this request must not be merged with neighbours. */
	rq->flags |= REQ_NOMERGE;
	rq->waiting = &wait;
	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
	generic_unplug_device(q);
	wait_for_completion(&wait);

	if (rq->errors)
		err = -EIO;

	return err;
}
static void emc_pg_init(struct hw_handler *hwh, unsigned bypassed, struct path *path) { struct request *rq; struct request_queue *q = bdev_get_queue(path->dev->bdev); /* * We can either blindly init the pg (then look at the sense), * or we can send some commands to get the state here (then * possibly send the fo cmnd), or we can also have the * initial state passed into us and then get an update here. */ if (!q) { DMINFO("dm-emc: emc_pg_init: no queue"); goto fail_path; } /* FIXME: The request should be pre-allocated. */ rq = emc_trespass_get(hwh->context, path); if (!rq) { DMERR("dm-emc: emc_pg_init: no rq"); goto fail_path; } DMINFO("dm-emc: emc_pg_init: sending switch-over command"); elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1); return; fail_path: dm_pg_init_complete(path, MP_FAIL_PATH); }
/*
 * send_request - build a packet command from user parameters, queue it
 * and wait synchronously for completion.
 * @q:       request queue of the target device
 * @bio:     pre-built bio carrying the data pages for the transfer
 * @bdev:    target block device (supplies rq->rq_disk)
 * @inter:   user-space command descriptor (cmd, cmd_len, data_len)
 * @writing: non-zero to allocate a WRITE request, zero for READ
 *
 * Returns 0 on success, -EINVAL if the command length exceeds the
 * request's command buffer, -EFAULT if the command bytes cannot be
 * copied from user space, or -EIO if the device reported errors.
 */
static int send_request(request_queue_t * q, struct bio *bio,
			struct block_device *bdev,
			struct tbio_interface *inter, int writing)
{
	struct request *rq;
	void *buffer = NULL;
	int err = 0;
	DECLARE_COMPLETION(wait);

	rq = blk_get_request(q, writing ? WRITE : READ, __GFP_WAIT);

	/*
	 * Fix: validate the user-supplied length before using it.  A
	 * cmd_len larger than sizeof(rq->cmd) would overflow rq->cmd in
	 * copy_from_user() and make the zero-pad size below wrap around.
	 */
	if (inter->cmd_len > sizeof(rq->cmd)) {
		blk_put_request(rq);
		return -EINVAL;
	}
	rq->cmd_len = inter->cmd_len;

	if (copy_from_user(rq->cmd, inter->cmd, inter->cmd_len))
		goto out_request;

	/* Zero-fill the unused tail of the command buffer. */
	if (sizeof(rq->cmd) != inter->cmd_len)
		memset(rq->cmd + inter->cmd_len, 0,
		       sizeof(rq->cmd) - inter->cmd_len);

	rq->bio = rq->biotail = NULL;

	blk_rq_bio_prep(q, rq, bio);

	rq->data = buffer;
	rq->data_len = inter->data_len;

	/* Prefer the queue's SG timeout, fall back to the block default. */
	rq->timeout = 0;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_TIMEOUT;

	rq->rq_disk = bdev->bd_disk;
	rq->waiting = &wait;
	elv_add_request(q, rq, 1, 1);
	generic_unplug_device(q);
	wait_for_completion(&wait);

	if (rq->errors) {
		printk("tbio: rq->errors\n");
		err = -EIO;
	}

	/* Fix: the original leaked rq on the error path above. */
	blk_put_request(rq);
	return err;

out_request:
	blk_put_request(rq);
	return -EFAULT;
}
/*
 * issue_park_cmd - park the drive's heads for @timeout jiffies.
 * @drive:   drive to park
 * @timeout: park duration in jiffies (relative; converted to an
 *           absolute deadline below)
 *
 * If the drive is already parked, only the wakeup deadline is updated
 * (shortening it restarts the queue so the new deadline takes effect).
 * Otherwise a REQ_PARK_HEADS command is executed synchronously, followed
 * by a queued REQ_UNPARK_HEADS to guarantee the drive is woken again.
 */
static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request_queue *q = drive->queue;
	struct request *rq;
	int rc;

	timeout += jiffies;
	spin_lock_irq(&hwif->lock);
	if (drive->dev_flags & IDE_DFLAG_PARKED) {
		int reset_timer = time_before(timeout, drive->sleep);
		int start_queue = 0;

		drive->sleep = timeout;
		wake_up_all(&ide_park_wq);
		/* A shorter deadline must preempt the pending timer. */
		if (reset_timer && del_timer(&hwif->timer))
			start_queue = 1;
		spin_unlock_irq(&hwif->lock);

		if (start_queue)
			blk_run_queue(q);
		return;
	}
	spin_unlock_irq(&hwif->lock);

	rq = blk_get_request(q, READ, __GFP_WAIT);
	rq->cmd[0] = REQ_PARK_HEADS;
	rq->cmd_len = 1;
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->special = &timeout;
	rc = blk_execute_rq(q, NULL, rq, 1);
	blk_put_request(rq);
	if (rc)
		goto out;

	/*
	 * Make sure that *some* command is sent to the drive after the
	 * timeout has expired, so power management will be reenabled.
	 */
	rq = blk_get_request(q, READ, GFP_NOWAIT);
	if (unlikely(!rq))
		goto out;

	rq->cmd[0] = REQ_UNPARK_HEADS;
	rq->cmd_len = 1;
	rq->cmd_type = REQ_TYPE_SPECIAL;
	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);

out:
	return;
}
/*
 * issue_park_cmd - park the drive's heads for @timeout jiffies.
 * @drive:   drive to park
 * @timeout: park duration in jiffies (relative; converted to an
 *           absolute deadline below)
 *
 * If the drive is already parked, only the wakeup deadline is updated
 * (shortening it restarts the queue so the new deadline takes effect).
 * Otherwise a REQ_PARK_HEADS command is executed synchronously, followed
 * by a queued REQ_UNPARK_HEADS to guarantee the drive is woken again.
 */
static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request_queue *q = drive->queue;
	struct request *rq;
	int rc;

	timeout += jiffies;
	spin_lock_irq(&hwif->lock);
	if (drive->dev_flags & IDE_DFLAG_PARKED) {
		int reset_timer = time_before(timeout, drive->sleep);
		int start_queue = 0;

		drive->sleep = timeout;
		wake_up_all(&ide_park_wq);
		/* A shorter deadline must preempt the pending timer. */
		if (reset_timer && del_timer(&hwif->timer))
			start_queue = 1;
		spin_unlock_irq(&hwif->lock);

		if (start_queue)
			blk_run_queue(q);
		return;
	}
	spin_unlock_irq(&hwif->lock);

	rq = blk_get_request(q, READ, __GFP_WAIT);
	rq->cmd[0] = REQ_PARK_HEADS;
	rq->cmd_len = 1;
	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->special = &timeout;
	rc = blk_execute_rq(q, NULL, rq, 1);
	blk_put_request(rq);
	if (rc)
		goto out;

	/*
	 * Make sure that *some* command is sent to the drive after the
	 * timeout has expired, so power management will be reenabled.
	 */
	rq = blk_get_request(q, READ, GFP_NOWAIT);
	if (IS_ERR(rq))
		goto out;

	rq->cmd[0] = REQ_UNPARK_HEADS;
	rq->cmd_len = 1;
	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);

out:
	return;
}
/*
 * ide_queue_sense_rq - queue the drive's pre-armed sense request.
 * @drive:   drive whose sense_rq should be issued
 * @special: payload stored in sense_rq.special for the completion side
 *
 * The sense request must have been armed beforehand by ide_prep_sense();
 * if arming failed there, report it here and return -ENOMEM.  On success
 * the request is disarmed, the hwif's current request pointer is cleared
 * and the sense request is pushed to the front of the elevator queue.
 * Returns 0 on success.
 */
int ide_queue_sense_rq(ide_drive_t *drive, void *special)
{
	/* deferred failure from ide_prep_sense() */
	if (!drive->sense_rq_armed) {
		printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
		       drive->name);
		return -ENOMEM;
	}

	drive->sense_rq.special = special;
	/* Disarm so the same static request is not queued twice. */
	drive->sense_rq_armed = false;

	drive->hwif->rq = NULL;

	elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT);
	return 0;
}