Code example #1
File: ide-cd.c Project: mecke/linux-2.6
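Queues a REQUEST SENSE packet ahead of a failed command: the sense request embedded in the drive structure is reset with blk_rq_init(NULL, rq), the packet is built by hand, and elv_add_request() inserts it at the front of the queue so it runs before anything else.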
static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
				      struct request *failed_command)
{
	struct cdrom_info *info		= drive->driver_data;
	struct request *rq		= &drive->request_sense_rq;

	ide_debug_log(IDE_DBG_SENSE, "enter");

	if (sense == NULL)
		sense = &info->sense_data;

	/* stuff the sense request in front of our current request */
	blk_rq_init(NULL, rq);
	rq->cmd_type = REQ_TYPE_ATA_PC;
	rq->rq_disk = info->disk;

	rq->data = sense;
	rq->cmd[0] = GPCMD_REQUEST_SENSE;
	rq->cmd[4] = 18;
	rq->data_len = 18;

	rq->cmd_type = REQ_TYPE_SENSE;	/* supersedes the REQ_TYPE_ATA_PC set above */
	rq->cmd_flags |= REQ_PREEMPT;

	/* NOTE! Save the failed command in "rq->buffer" */
	rq->buffer = (void *) failed_command;

	if (failed_command)
		ide_debug_log(IDE_DBG_SENSE, "failed_cmd: 0x%x",
					     failed_command->cmd[0]);

	drive->hwif->rq = NULL;

	elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
}
Code example #2
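Issues a taskfile command synchronously. The struct request lives on the stack rather than coming from the queue's allocator, so blk_rq_init(NULL, &rq) is used to zero it before the fields are filled in.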
int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
{
	struct request rq;

	blk_rq_init(NULL, &rq);
	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
	rq.buffer = buf;

	/*
	 * (ks) We currently transfer only whole sectors.
	 * This is sufficient for now, but it would be great
	 * to find a way to transfer any size, to support
	 * special commands like READ LONG.
	 */
	rq.hard_nr_sectors = rq.nr_sectors = nsect;
	rq.hard_cur_sectors = rq.current_nr_sectors = nsect;

	if (task->tf_flags & IDE_TFLAG_WRITE)
		rq.cmd_flags |= REQ_RW;

	rq.special = task;
	task->rq = &rq;

	return ide_do_drive_cmd(drive, &rq, ide_wait);
}
Code example #3
File: ide-atapi.c Project: ramlaxman/linux
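A later revision of the sense-request setup from example #1: the sense buffer is mapped into the request with blk_rq_map_kern() instead of being assigned to rq->data, and the prepared request is only armed here (drive->sense_rq_armed); queuing happens later.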
void ide_prep_sense(ide_drive_t *drive, struct request *rq)
{
	struct request_sense *sense = &drive->sense_data;
	struct request *sense_rq = &drive->sense_rq;
	unsigned int cmd_len, sense_len;
	int err;

	switch (drive->media) {
	case ide_floppy:
		cmd_len = 255;
		sense_len = 18;
		break;
	case ide_tape:
		cmd_len = 20;
		sense_len = 20;
		break;
	default:
		cmd_len = 18;
		sense_len = 18;
	}

	BUG_ON(sense_len > sizeof(*sense));

	if (rq->cmd_type == REQ_TYPE_ATA_SENSE || drive->sense_rq_armed)
		return;

	memset(sense, 0, sizeof(*sense));

	blk_rq_init(rq->q, sense_rq);

	err = blk_rq_map_kern(drive->queue, sense_rq, sense, sense_len,
			      GFP_NOIO);
	if (unlikely(err)) {
		if (printk_ratelimit())
			printk(KERN_WARNING PFX "%s: failed to map sense "
					    "buffer\n", drive->name);
		return;
	}

	sense_rq->rq_disk = rq->rq_disk;
	sense_rq->cmd[0] = GPCMD_REQUEST_SENSE;
	sense_rq->cmd[4] = cmd_len;
	sense_rq->cmd_type = REQ_TYPE_ATA_SENSE;
	sense_rq->cmd_flags |= REQ_PREEMPT;

	if (drive->media == ide_tape)
		sense_rq->cmd[13] = REQ_IDETAPE_PC1;

	drive->sense_rq_armed = true;
}
Code example #4
File: blk-barrier.c Project: 274914765/C
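From the old barrier machinery: the original barrier request is dequeued and stashed in q->orig_bar_rq, and the embedded q->bar_rq is recycled as a proxy via blk_rq_init(q, rq) before the flush sequence is stacked at the head of the queue.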
static inline struct request *start_ordered(struct request_queue *q,
                        struct request *rq)
{
    q->orderr = 0;
    q->ordered = q->next_ordered;
    q->ordseq |= QUEUE_ORDSEQ_STARTED;

    /*
     * Prep proxy barrier request.
     */
    blkdev_dequeue_request(rq);
    q->orig_bar_rq = rq;
    rq = &q->bar_rq;
    blk_rq_init(q, rq);
    if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
        rq->cmd_flags |= REQ_RW;
    if (q->ordered & QUEUE_ORDERED_FUA)
        rq->cmd_flags |= REQ_FUA;
    init_request_from_bio(rq, q->orig_bar_rq->bio);
    rq->end_io = bar_end_io;

    /*
     * Queue ordered sequence.  As we stack them at the head, we
     * need to queue in reverse order.  Note that we rely on the
     * fact that no fs request uses ELEVATOR_INSERT_FRONT, so no fs
     * request gets in between the ordered sequence.  If this request is
     * an empty barrier, we don't need to do a postflush ever since
     * there will be no data written between the pre and post flush.
     * Hence a single flush will suffice.
     */
    if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
        queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
    else
        q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;

    elv_insert(q, rq, ELEVATOR_INSERT_FRONT);

    if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
        queue_flush(q, QUEUE_ORDERED_PREFLUSH);
        rq = &q->pre_flush_rq;
    } else
        q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;

    if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
        q->ordseq |= QUEUE_ORDSEQ_DRAIN;
    else
        rq = NULL;

    return rq;
}
Code example #5
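Request-based device-mapper: a freshly allocated clone is reset with blk_rq_init(NULL, clone) before setup_clone() copies the relevant fields from the original request.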
static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
				    struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	/*
	 * Create clone for use with .request_fn request_queue
	 */
	struct request *clone;

	clone = alloc_old_clone_request(md, gfp_mask);
	if (!clone)
		return NULL;

	blk_rq_init(NULL, clone);
	if (setup_clone(clone, rq, tio, gfp_mask)) {
		/* -ENOMEM */
		free_old_clone_request(md, clone);
		return NULL;
	}

	return clone;
}
Code example #6
File: blk-barrier.c Project: johnny/CobraDroidBeta
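The pre_flush_rq/post_flush_rq embedded in the queue are reused for every barrier, so each flush starts with blk_rq_init(q, rq) to wipe the state left by the previous use before the request is set up and inserted at the queue head.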
static void queue_flush(struct request_queue *q, unsigned which)
{
	struct request *rq;
	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
		rq = &q->pre_flush_rq;
		end_io = pre_flush_end_io;
	} else {
		rq = &q->post_flush_rq;
		end_io = post_flush_end_io;
	}

	blk_rq_init(q, rq);
	rq->cmd_flags = REQ_HARDBARRIER;
	rq->rq_disk = q->bar_rq.rq_disk;
	rq->end_io = end_io;
	q->prepare_flush_fn(q, rq);

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}
Code example #7
File: blk-flush.c Project: 710leo/LVS
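The successor of the barrier code: a single embedded q->flush_rq is re-initialized with blk_rq_init() for each step of the flush sequence (preflush, data, postflush) and re-queued at the front.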
static struct request *queue_next_fseq(struct request_queue *q)
{
	struct request *orig_rq = q->orig_flush_rq;
	struct request *rq = &q->flush_rq;

	blk_rq_init(q, rq);

	switch (blk_flush_cur_seq(q)) {
	case QUEUE_FSEQ_PREFLUSH:
		rq->end_io = pre_flush_end_io;
		init_flush_request(q, rq, orig_rq->rq_disk);
		break;
	case QUEUE_FSEQ_DATA:
		init_request_from_bio(rq, orig_rq->bio);
		/*
		 * orig_rq->rq_disk may be different from
		 * bio->bi_bdev->bd_disk if orig_rq got here through
		 * remapping drivers.  Make sure rq->rq_disk points
		 * to the same one as orig_rq.
		 */
		rq->rq_disk = orig_rq->rq_disk;
		rq->cmd_flags &= ~(REQ_FLUSH | REQ_FUA);
		rq->cmd_flags |= orig_rq->cmd_flags & (REQ_FLUSH | REQ_FUA);
		rq->end_io = flush_data_end_io;
		break;
	case QUEUE_FSEQ_POSTFLUSH:
		rq->end_io = post_flush_end_io;
		init_flush_request(q, rq, orig_rq->rq_disk);
		break;
	default:
		BUG();
	}

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	return rq;
}
Code example #8
File: blk-barrier.c Project: johnny/CobraDroidBeta
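A later version of start_ordered() from example #4. It returns whether the ordered sequence still has work pending, and again uses blk_rq_init(q, rq) to reset the embedded bar_rq before it stands in for the original barrier request.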
static inline bool start_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	unsigned skip = 0;

	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * For an empty barrier, there's no actual BAR request, which
	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
	 */
	if (!rq->hard_nr_sectors) {
		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
				QUEUE_ORDERED_DO_POSTFLUSH);
		/*
		 * Empty barrier on a write-through device w/ ordered
		 * tag has no command to issue and without any command
		 * to issue, ordering by tag can't be used.  Drain
		 * instead.
		 */
		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
		}
	}

	/* stash away the original request */
	elv_dequeue_request(q, rq);
	q->orig_bar_rq = rq;
	rq = NULL;

	/*
	 * Queue ordered sequence.  As we stack them at the head, we
	 * need to queue in reverse order.  Note that we rely on the
	 * fact that no fs request uses ELEVATOR_INSERT_FRONT, so no
	 * fs request gets in between the ordered sequence.
	 */
	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
		rq = &q->post_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_POSTFLUSH;

	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
		rq = &q->bar_rq;

		/* initialize proxy request and queue it */
		blk_rq_init(q, rq);
		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
			rq->cmd_flags |= REQ_RW;
		if (q->ordered & QUEUE_ORDERED_DO_FUA)
			rq->cmd_flags |= REQ_FUA;
		init_request_from_bio(rq, q->orig_bar_rq->bio);
		rq->end_io = bar_end_io;

		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
	} else
		skip |= QUEUE_ORDSEQ_BAR;

	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		skip |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
		rq = NULL;
	else
		skip |= QUEUE_ORDSEQ_DRAIN;

	*rqp = rq;

	/*
	 * Complete skipped sequences.  If whole sequence is complete,
	 * return false to tell elevator that this request is gone.
	 */
	return !blk_ordered_complete_seq(q, skip, 0);
}