Example #1
void xsysace_end_request(struct request *req, int uptodate)
{
	/* end_that_request_first() returns nonzero while part of the
	 * request remains; once it is fully completed, take the request
	 * off the queue and finish it. */
	if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
		blkdev_dequeue_request(req);
		end_that_request_last(req);
	}
}
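A minimal sketch (not part of the driver above) of how such a two-phase completion helper is typically driven from a request_fn loop of the same era, with the queue lock held by the block layer; my_request_fn() and my_transfer() are hypothetical names:

static void my_request_fn(struct request_queue *q)
{
	struct request *req;

	/* the block layer calls request_fn with the queue lock held */
	while ((req = elv_next_request(q)) != NULL) {
		int uptodate = my_transfer(req);	/* hypothetical data mover */
		xsysace_end_request(req, uptodate);
	}
}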
Example #2
void sbull_request(request_queue_t *q)
{
    Sbull_Dev *device;
    struct request *req;
    int status;

    /* Find our device */
    device = sbull_locate_device (blkdev_entry_next_request(&q->queue_head));
    if (device->busy) /* no race here - io_request_lock held */
        return;
    device->busy = 1;

    /* Process requests in the queue */
    while(! list_empty(&q->queue_head)) {

        /* Pull the next request off the list. */
        req = blkdev_entry_next_request(&q->queue_head);
        blkdev_dequeue_request(req);
        spin_unlock_irq (&io_request_lock);
        spin_lock(&device->lock);

        /* Process all of the buffers in this (possibly clustered) request. */
        do {
            status = sbull_transfer(device, req);
        } while (end_that_request_first(req, status, DEVICE_NAME));
        spin_unlock(&device->lock);
        spin_lock_irq (&io_request_lock);
        end_that_request_last(req);
    }
    device->busy = 0;
}
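For context, a hedged sketch of the sbull_transfer() step the loop above calls, modeled on the LDD2 RAM-disk example (2.4-era request fields; device->data and device->size are assumed to be the simulated storage and its length in bytes):

static int sbull_transfer(Sbull_Dev *device, struct request *req)
{
    int size = req->current_nr_sectors * 512;
    u8 *ptr = device->data + req->sector * 512;

    if ((req->sector + req->current_nr_sectors) * 512 > device->size)
        return 0;   /* transfer beyond end of device; fail it */

    if (rq_data_dir(req) == READ)
        memcpy(req->buffer, ptr, size);  /* read: storage -> buffer */
    else
        memcpy(ptr, req->buffer, size);  /* write: buffer -> storage */
    return 1;   /* success */
}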
Example #3
int ide_end_request (ide_drive_t *drive, int uptodate)
{
	struct request *rq;
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&io_request_lock, flags);
	rq = HWGROUP(drive)->rq;

	/*
	 * decide whether to re-enable DMA -- 3 is a random magic number for
	 * now; if we time out on DMA more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_first(rq, uptodate, drive->name)) {
		add_blkdev_randomness(MAJOR(rq->rq_dev));
		blkdev_dequeue_request(rq);
		HWGROUP(drive)->rq = NULL;
		end_that_request_last(rq);
		ret = 0;
	}

	spin_unlock_irqrestore(&io_request_lock, flags);
	return ret;
}
Example #4
/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	int ret, count = 0;

	if (!mq) {
		printk(KERN_ERR "MMC: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL) {
			do {
				ret = end_that_request_chunk(req, 0,
					req->current_nr_sectors << 9);
				count++;
				if (count > 100) {
					/* too many failed chunks; drop the
					 * request from the queue and give up */
					printk(KERN_ERR "dequeue req\n");
					blkdev_dequeue_request(req);
					break;
				}
			} while (ret);
		}
		return;
	}
	if (!mq->req)
		wake_up_process(mq->thread);
}
Example #5
static inline struct request *start_ordered(struct request_queue *q,
					    struct request *rq)
{
	q->orderr = 0;
	q->ordered = q->next_ordered;
	q->ordseq |= QUEUE_ORDSEQ_STARTED;

	/*
	 * Prep proxy barrier request.
	 */
	blkdev_dequeue_request(rq);
	q->orig_bar_rq = rq;
	rq = &q->bar_rq;
	rq->cmd_flags = 0;
	rq_init(q, rq);
	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
		rq->cmd_flags |= REQ_RW;
	if (q->ordered & QUEUE_ORDERED_FUA)
		rq->cmd_flags |= REQ_FUA;
	rq->elevator_private = NULL;
	rq->elevator_private2 = NULL;
	init_request_from_bio(rq, q->orig_bar_rq->bio);
	rq->end_io = bar_end_io;

	/*
	 * Queue ordered sequence.  As we stack them at the head, we
	 * need to queue in reverse order.  Note that we rely on the
	 * fact that no fs request uses ELEVATOR_INSERT_FRONT, and thus
	 * no fs request gets in between the ordered sequence. If this
	 * request is an empty barrier, we don't need a postflush, since
	 * no data is written between the pre and post flush; a single
	 * flush suffices.
	 */
	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
	else
		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);

	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
		rq = &q->pre_flush_rq;
	} else
		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;

	if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
	else
		rq = NULL;

	return rq;
}
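queue_flush(), the helper start_ordered() calls for the pre- and postflush, looked roughly like this in kernels of that vintage (a sketch from memory, so treat it as approximate; pre_flush_end_io/post_flush_end_io are the block layer's own completion callbacks):

static void queue_flush(struct request_queue *q, unsigned which)
{
	struct request *rq;
	rq_end_io_fn *end_io;

	if (which == QUEUE_ORDERED_PREFLUSH) {
		rq = &q->pre_flush_rq;
		end_io = pre_flush_end_io;
	} else {
		rq = &q->post_flush_rq;
		end_io = post_flush_end_io;
	}

	rq->cmd_flags = REQ_HARDBARRIER;
	rq_init(q, rq);
	rq->elevator_private = NULL;
	rq->elevator_private2 = NULL;
	rq->rq_disk = q->bar_rq.rq_disk;
	rq->end_io = end_io;
	q->prepare_flush_fn(q, rq);	/* driver fills in the flush command */

	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
}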
Example #6
int blk_do_ordered(struct request_queue *q, struct request **rqp)
{
	struct request *rq = *rqp;
	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);

	if (!q->ordseq) {
		if (!is_barrier)
			return 1;

		if (q->next_ordered != QUEUE_ORDERED_NONE) {
			*rqp = start_ordered(q, rq);
			return 1;
		} else {
			/*
			 * This can happen when the queue switches to
			 * ORDERED_NONE while this request is on it.
			 */
			blkdev_dequeue_request(rq);
			if (__blk_end_request(rq, -EOPNOTSUPP,
					      blk_rq_bytes(rq)))
				BUG();
			*rqp = NULL;
			return 0;
		}
	}

	/*
	 * Ordered sequence in progress
	 */

	/* Special requests are not subject to ordering rules. */
	if (!blk_fs_request(rq) &&
	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
		return 1;

	if (q->ordered & QUEUE_ORDERED_TAG) {
		/* Ordered by tag.  Blocking the next barrier is enough. */
		if (is_barrier && rq != &q->bar_rq)
			*rqp = NULL;
	} else {
		/* Ordered by draining.  Wait for turn. */
		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
			*rqp = NULL;
	}

	return 1;
}
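blk_do_ordered() was driven from the block layer's own dispatch path; a sketch of the era's __elv_next_request() loop shows how a cleared *rqp stalls dispatch while a barrier sequence drains (reconstructed from memory, hedged):

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			/* may substitute the proxy barrier for rq, or
			 * clear it to hold dispatch until the ordered
			 * sequence completes */
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}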
Example #7
File: blk-tag.c Project: E-LLP/n900
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it is the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth, offset;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (rq_is_sync(rq))
		offset = 0;
	else
		offset = max_depth >> 2;

	do {
		tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
		if (tag >= max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blkdev_dequeue_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
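A hedged sketch of how a driver's request_fn might use this helper (queue lock held, as the notes require; my_request_fn() and my_hw_submit() are hypothetical):

static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		/* nonzero return means no tag is free: stop dispatching
		 * and retry once a tag is released */
		if (blk_queue_start_tag(q, rq))
			break;
		my_hw_submit(rq);	/* rq is already off the queue */
	}
}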
Example #8
/**
 *	ide_complete_pm_request - end the current Power Management request
 *	@drive: target drive
 *	@rq: request
 *
 *	This function cleans up the current PM request and stops the queue
 *	if necessary.
 */
static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
{
	unsigned long flags;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(&ide_lock, flags);
	if (blk_pm_suspend_request(rq)) {
		blk_stop_queue(drive->queue);
	} else {
		drive->blocked = 0;
		blk_start_queue(drive->queue);
	}
	blkdev_dequeue_request(rq);
	HWGROUP(drive)->rq = NULL;
	end_that_request_last(rq);
	spin_unlock_irqrestore(&ide_lock, flags);
}
Example #9
int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
					  int nr_sectors)
{
	int ret = 1;

	BUG_ON(!(rq->flags & REQ_STARTED));

	/*
	 * if failfast is set on a request, override number of sectors and
	 * complete the whole request right now
	 */
	if (blk_noretry_request(rq) && end_io_error(uptodate))
		nr_sectors = rq->hard_nr_sectors;

	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
		rq->errors = -EIO;
	/*
	 * decide whether to re-enable DMA -- 3 is a random magic number for
	 * now; if we time out on DMA more than 3 times, just stay in PIO
	 */
	if (drive->state == DMA_PIO_RETRY && drive->retry_pio <= 3) {
		drive->state = 0;
		HWGROUP(drive)->hwif->ide_dma_on(drive);
	}

	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
		add_disk_randomness(rq->rq_disk);

		if (blk_rq_tagged(rq))
			blk_queue_end_tag(drive->queue, rq);

		blkdev_dequeue_request(rq);
		HWGROUP(drive)->rq = NULL;
		end_that_request_last(rq);
		ret = 0;
	}
	return ret;
}
Example #10
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it is the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 */
	do {
		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
		if (tag >= bqt->max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blkdev_dequeue_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	bqt->busy++;
	return 0;
}
Example #11
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request brq;
	int ret = 1;

	if (mmc_card_claim_host(card))
		goto flush_queue;

	do {
		struct mmc_command cmd;
		u32 readcmd, writecmd;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		brq.cmd.arg = req->sector;
		if (!mmc_card_blockaddr(card))
			brq.cmd.arg <<= 9;
		brq.cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		brq.data.blksz = 1 << md->block_bits;
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
		if (brq.data.blocks > card->host->max_blk_count)
			brq.data.blocks = card->host->max_blk_count;

		mmc_set_data_timeout(&brq.data, card, rq_data_dir(req) != READ);

#ifdef CONFIG_MMC_SUPPORT_MOVINAND
		if (mmc_card_movinand(card)) {
			if ((brq.data.blocks > 1) || (rq_data_dir(req) == WRITE)) {
				cmd.opcode = MMC_SET_BLOCK_COUNT;
				cmd.arg = req->nr_sectors;
				cmd.flags = MMC_RSP_R1;
				ret = mmc_wait_for_cmd(card->host, &cmd, 2);
			}
			if (rq_data_dir(req) == READ) {
				if (brq.data.blocks > 1) {
					brq.cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
					brq.data.flags |= (MMC_DATA_READ | MMC_DATA_MULTI);
//					brq.mrq.stop = &brq.stop;
				} else {
					brq.cmd.opcode = MMC_READ_SINGLE_BLOCK;
					brq.data.flags |= MMC_DATA_READ;
					brq.mrq.stop = NULL;
				}
			} else {
				brq.cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
				brq.data.flags |= MMC_DATA_WRITE | MMC_DATA_MULTI;
//				brq.mrq.stop = &brq.stop;
			}
		} else {
#endif

		/*
		 * If the host doesn't support multiple block writes, force
		 * block writes to single block. SD cards are excepted from
		 * this rule as they support querying the number of
		 * successfully written sectors.
		 */
		if (rq_data_dir(req) != READ &&
		    !(card->host->caps & MMC_CAP_MULTIWRITE) &&
		    !mmc_card_sd(card))
			brq.data.blocks = 1;

		if (brq.data.blocks > 1) {
			brq.data.flags |= MMC_DATA_MULTI;
			brq.mrq.stop = &brq.stop;
			readcmd = MMC_READ_MULTIPLE_BLOCK;
			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
		} else {
			brq.mrq.stop = NULL;
			readcmd = MMC_READ_SINGLE_BLOCK;
			writecmd = MMC_WRITE_BLOCK;
		}

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = readcmd;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = writecmd;
			brq.data.flags |= MMC_DATA_WRITE;
		}
#ifdef CONFIG_MMC_SUPPORT_MOVINAND
		}
#endif

		brq.data.sg = mq->sg;
		brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);

		mmc_wait_for_req(card->host, &brq.mrq);
		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write command\n",
			       req->rq_disk->disk_name, brq.cmd.error);
			goto cmd_err;
		}

		if (brq.data.error) {
			printk(KERN_ERR "%s: error %d transferring data\n",
			       req->rq_disk->disk_name, brq.data.error);
			goto cmd_err;
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, brq.stop.error);
			goto cmd_err;
		}

		if (rq_data_dir(req) != READ) {
			do {
				int err;

				cmd.opcode = MMC_SEND_STATUS;
				cmd.arg = card->rca << 16;
				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
				err = mmc_wait_for_cmd(card->host, &cmd, 5);
				if (err) {
					printk(KERN_ERR "%s: error %d requesting status\n",
					       req->rq_disk->disk_name, err);
					goto cmd_err;
				}
#ifdef CONFIG_MMC_SUPPORT_MOVINAND
				/* Work-around for broken cards setting READY_FOR_DATA
				 * when not actually ready.
				 */
				if (mmc_card_movinand(card)) {
					if (R1_CURRENT_STATE(cmd.resp[0]) == 7)
						cmd.resp[0] &= ~R1_READY_FOR_DATA;
				}
#endif
			} while (!(cmd.resp[0] & R1_READY_FOR_DATA));

#if 0
			if (cmd.resp[0] & ~0x00000900)
				printk(KERN_ERR "%s: status = %08x\n",
				       req->rq_disk->disk_name, cmd.resp[0]);
			if (mmc_decode_status(cmd.resp))
				goto cmd_err;
#endif
		}

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		if (!ret) {
			/*
			 * The whole request completed successfully.
			 */
			add_disk_randomness(req->rq_disk);
			blkdev_dequeue_request(req);
			end_that_request_last(req, 1);
		}
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_card_release_host(card);

	return 1;

cmd_err:
	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * if the controller can do proper error reporting.
	 *
	 * For reads we just fail the entire chunk as that should
	 * be safe in all cases.
	 */
	if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
		u32 blocks;
		unsigned int bytes;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1) {
			if (card->csd.write_partial)
				bytes = blocks << md->block_bits;
			else
				bytes = blocks << 9;
			spin_lock_irq(&md->lock);
			ret = end_that_request_chunk(req, 1, bytes);
			spin_unlock_irq(&md->lock);
		}
	} else if (rq_data_dir(req) != READ &&
		   (card->host->caps & MMC_CAP_MULTIWRITE)) {
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		spin_unlock_irq(&md->lock);
	}

flush_queue:

	mmc_card_release_host(card);

	spin_lock_irq(&md->lock);
	while (ret) {
		ret = end_that_request_chunk(req, 0,
				req->current_nr_sectors << 9);
	}

	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req, 0);
	spin_unlock_irq(&md->lock);

	return 0;
}
Example #12
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret;

	if (mmc_card_claim_host(card))
		goto cmd_err;

	do {
		struct mmc_blk_request brq;
		struct mmc_command cmd;

		memset(&brq, 0, sizeof(struct mmc_blk_request));
		brq.mrq.cmd = &brq.cmd;
		brq.mrq.data = &brq.data;

		brq.cmd.arg = req->sector << 9;
		brq.cmd.flags = MMC_RSP_R1;
		brq.data.timeout_ns = card->csd.tacc_ns * 10;
		brq.data.timeout_clks = card->csd.tacc_clks * 10;
		brq.data.blksz_bits = md->block_bits;
		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
		brq.stop.opcode = MMC_STOP_TRANSMISSION;
		brq.stop.arg = 0;
		brq.stop.flags = MMC_RSP_R1B;

		if (rq_data_dir(req) == READ) {
			brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
			brq.data.flags |= MMC_DATA_READ;
		} else {
			brq.cmd.opcode = MMC_WRITE_BLOCK;
			brq.cmd.flags = MMC_RSP_R1B;
			brq.data.flags |= MMC_DATA_WRITE;
			brq.data.blocks = 1;
		}
		brq.mrq.stop = brq.data.blocks > 1 ? &brq.stop : NULL;

		brq.data.sg = mq->sg;
		brq.data.sg_len = blk_rq_map_sg(req->q, req, brq.data.sg);

		mmc_wait_for_req(card->host, &brq.mrq);
		if (brq.cmd.error) {
			printk(KERN_ERR "%s: error %d sending read/write command\n",
			       req->rq_disk->disk_name, brq.cmd.error);
			goto cmd_err;
		}

		if (brq.data.error) {
			printk(KERN_ERR "%s: error %d transferring data\n",
			       req->rq_disk->disk_name, brq.data.error);
			goto cmd_err;
		}

		if (brq.stop.error) {
			printk(KERN_ERR "%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, brq.stop.error);
			goto cmd_err;
		}

		do {
			int err;

			cmd.opcode = MMC_SEND_STATUS;
			cmd.arg = card->rca << 16;
			cmd.flags = MMC_RSP_R1;
			err = mmc_wait_for_cmd(card->host, &cmd, 5);
			if (err) {
				printk(KERN_ERR "%s: error %d requesting status\n",
				       req->rq_disk->disk_name, err);
				goto cmd_err;
			}
		} while (!(cmd.resp[0] & R1_READY_FOR_DATA));

#if 0
		if (cmd.resp[0] & ~0x00000900)
			printk(KERN_ERR "%s: status = %08x\n",
			       req->rq_disk->disk_name, cmd.resp[0]);
		if (mmc_decode_status(cmd.resp))
			goto cmd_err;
#endif

		/*
		 * A block was successfully transferred.
		 */
		spin_lock_irq(&md->lock);
		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
		if (!ret) {
			/*
			 * The whole request completed successfully.
			 */
			add_disk_randomness(req->rq_disk);
			blkdev_dequeue_request(req);
			end_that_request_last(req);
		}
		spin_unlock_irq(&md->lock);
	} while (ret);

	mmc_card_release_host(card);

	return 1;

 cmd_err:
	mmc_card_release_host(card);

	/*
	 * This is a little draconian, but until we get proper
	 * error handling sorted out here, it's the best we can
	 * do - especially as some hosts have no idea how much
	 * data was transferred before the error occurred.
	 */
	spin_lock_irq(&md->lock);
	do {
		ret = end_that_request_chunk(req, 0,
				req->current_nr_sectors << 9);
	} while (ret);

	add_disk_randomness(req->rq_disk);
	blkdev_dequeue_request(req);
	end_that_request_last(req);
	spin_unlock_irq(&md->lock);

	return 0;
}
Example #13
/*
 * Returns:
 * 0: if the request should be continued.
 * 1: if the request will be going through error recovery.
 * 2: if the request should be ended.
 */
static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	int stat, err, sense_key;

	/* check for errors */
	stat = hwif->tp_ops->read_status(hwif);

	if (stat_ret)
		*stat_ret = stat;

	if (OK_STAT(stat, good_stat, BAD_R_STAT))
		return 0;

	/* get the IDE error register */
	err = ide_read_error(drive);
	sense_key = err >> 4;

	ide_debug_log(IDE_DBG_RQ, "stat: 0x%x, good_stat: 0x%x, cmd[0]: 0x%x, "
				  "rq->cmd_type: 0x%x, err: 0x%x",
				  stat, good_stat, rq->cmd[0], rq->cmd_type,
				  err);

	if (blk_sense_request(rq)) {
		/*
		 * We got an error trying to get sense info from the drive
		 * (probably while trying to recover from a former error).
		 * Just give up.
		 */
		rq->cmd_flags |= REQ_FAILED;
		return 2;
	} else if (blk_pc_request(rq) || rq->cmd_type == REQ_TYPE_ATA_PC) {
		/* All other functions, except for READ. */

		/*
		 * if we have an error, pass back CHECK_CONDITION as the
		 * scsi status byte
		 */
		if (blk_pc_request(rq) && !rq->errors)
			rq->errors = SAM_STAT_CHECK_CONDITION;

		/* check for tray open */
		if (sense_key == NOT_READY) {
			cdrom_saw_media_change(drive);
		} else if (sense_key == UNIT_ATTENTION) {
			/* check for media change */
			cdrom_saw_media_change(drive);
			return 0;
		} else if (sense_key == ILLEGAL_REQUEST &&
			   rq->cmd[0] == GPCMD_START_STOP_UNIT) {
			/*
			 * Don't print error message for this condition--
			 * SFF8090i indicates that 5/24/00 is the correct
			 * response to a request to close the tray if the
			 * drive doesn't have that capability.
			 * cdrom_log_sense() knows this!
			 */
		} else if (!(rq->cmd_flags & REQ_QUIET)) {
			/* otherwise, print an error */
			ide_dump_status(drive, "packet command error", stat);
		}

		rq->cmd_flags |= REQ_FAILED;

		/*
		 * instead of playing games with moving completions around,
		 * remove failed request completely and end it when the
		 * request sense has completed
		 */
		goto end_request;

	} else if (blk_fs_request(rq)) {
		int do_end_request = 0;

		/* handle errors from READ and WRITE requests */

		if (blk_noretry_request(rq))
			do_end_request = 1;

		if (sense_key == NOT_READY) {
			/* tray open */
			if (rq_data_dir(rq) == READ) {
				cdrom_saw_media_change(drive);

				/* fail the request */
				printk(KERN_ERR PFX "%s: tray open\n",
						drive->name);
				do_end_request = 1;
			} else {
				struct cdrom_info *info = drive->driver_data;

				/*
				 * Allow the drive 5 seconds to recover, some
				 * devices will return this error while flushing
				 * data from cache.
				 */
				if (!rq->errors)
					info->write_timeout = jiffies +
							ATAPI_WAIT_WRITE_BUSY;
				rq->errors = 1;
				if (time_after(jiffies, info->write_timeout))
					do_end_request = 1;
				else {
					struct request_queue *q = drive->queue;
					unsigned long flags;

					/*
					 * take a breather relying on the unplug
					 * timer to kick us again
					 */
					spin_lock_irqsave(q->queue_lock, flags);
					blk_plug_device(q);
					spin_unlock_irqrestore(q->queue_lock, flags);

					return 1;
				}
			}
		} else if (sense_key == UNIT_ATTENTION) {
			/* media change */
			cdrom_saw_media_change(drive);

			/*
			 * Arrange to retry the request but be sure to give up
			 * if we've retried too many times.
			 */
			if (++rq->errors > ERROR_MAX)
				do_end_request = 1;
		} else if (sense_key == ILLEGAL_REQUEST ||
			   sense_key == DATA_PROTECT) {
			/*
			 * No point in retrying after an illegal request or data
			 * protect error.
			 */
			ide_dump_status(drive, "command error", stat);
			do_end_request = 1;
		} else if (sense_key == MEDIUM_ERROR) {
			/*
			 * No point in re-trying a zillion times on a bad
			 * sector. If we got here the error is not correctable.
			 */
			ide_dump_status(drive, "media error (bad sector)",
					stat);
			do_end_request = 1;
		} else if (sense_key == BLANK_CHECK) {
			/* disk appears blank ?? */
			ide_dump_status(drive, "media error (blank)", stat);
			do_end_request = 1;
		} else if ((err & ~ATA_ABORTED) != 0) {
			/* go to the default handler for other errors */
			ide_error(drive, "cdrom_decode_status", stat);
			return 1;
		} else if ((++rq->errors > ERROR_MAX)) {
			/* we've racked up too many retries, abort */
			do_end_request = 1;
		}

		/*
		 * End a request through request sense analysis when we have
		 * sense data. We need this in order to perform end of media
		 * processing.
		 */
		if (do_end_request)
			goto end_request;

		/*
		 * If we got a CHECK_CONDITION status, queue
		 * a request sense command.
		 */
		if (stat & ATA_ERR)
			cdrom_queue_request_sense(drive, NULL, NULL);
		return 1;
	} else {
		blk_dump_rq_flags(rq, PFX "bad rq");
		return 2;
	}

end_request:
	if (stat & ATA_ERR) {
		struct request_queue *q = drive->queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		blkdev_dequeue_request(rq);
		spin_unlock_irqrestore(q->queue_lock, flags);

		hwif->rq = NULL;

		cdrom_queue_request_sense(drive, rq->sense, rq);
		return 1;
	} else
		return 2;
}
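Per the comment block at the top, callers dispatch on the three return values; below is a hedged sketch of that pattern, loosely modeled on an ide-cd interrupt handler (my_cd_intr() and my_end_rq() are hypothetical names):

static ide_startstop_t my_cd_intr(ide_drive_t *drive)
{
	int stat, rc;

	rc = cdrom_decode_status(drive, 0, &stat);
	if (rc) {
		if (rc == 2)
			return my_end_rq(drive);   /* hypothetical: finish the rq */
		return ide_stopped;                /* error recovery is under way */
	}

	/* rc == 0: carry on servicing the request */
	return ide_started;
}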
Example #14
static void ace_fsm_dostate(struct ace_device *ace)
{
	struct request *req;
	u32 status;
	u16 val;
	int count;
	int i;

#if defined(DEBUG)
	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
		ace->fsm_state, ace->id_req_count);
#endif

	switch (ace->fsm_state) {
	case ACE_FSM_STATE_IDLE:
		/* See if there is anything to do */
		if (ace->id_req_count || ace_get_next_request(ace->queue)) {
			ace->fsm_iter_num++;
			ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
			/* mod_timer() (re)arms the timer whether or not
			 * it is already pending */
			mod_timer(&ace->stall_timer, jiffies + HZ);
			break;
		}
		del_timer(&ace->stall_timer);
		ace->fsm_continue_flag = 0;
		break;

	case ACE_FSM_STATE_REQ_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* Already have the lock, jump to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* Request the lock */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
		ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
		break;

	case ACE_FSM_STATE_WAIT_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* got the lock; move to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* wait a bit for the lock */
		ace_fsm_yield(ace);
		break;

	case ACE_FSM_STATE_WAIT_CFREADY:
		status = ace_in32(ace, ACE_STATUS);
		if (!(status & ACE_STATUS_RDYFORCFCMD) ||
		    (status & ACE_STATUS_CFBSY)) {
			/* CF card isn't ready; it needs to be polled */
			ace_fsm_yield(ace);
			break;
		}

		/* Device is ready for command; determine what to do next */
		if (ace->id_req_count)
			ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
		else
			ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
		break;

	case ACE_FSM_STATE_IDENTIFY_PREPARE:
		/* Send identify command */
		ace->fsm_task = ACE_TASK_IDENTIFY;
		ace->data_ptr = &ace->cf_id;
		ace->data_count = ACE_BUF_PER_SECTOR;
		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* irq handler takes over from this point; wait for the
		 * transfer to complete */
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
		ace_fsm_yieldirq(ace);
		break;

	case ACE_FSM_STATE_IDENTIFY_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->data_count);
			ace_fsm_yield(ace);
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			ace_fsm_yield(ace);
			break;
		}

		/* Transfer the next buffer */
		ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred, jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* transfer finished; kick state machine */
		dev_dbg(ace->dev, "identify finished\n");
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
		break;

	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
		ace_fix_driveid(&ace->cf_id);
		ace_dump_mem(&ace->cf_id, 512);	/* Debug: Dump out disk ID */

		if (ace->data_result) {
			/* Error occurred; disable the disk */
			ace->media_change = 1;
			set_capacity(ace->gd, 0);
			dev_err(ace->dev, "error fetching CF id (%i)\n",
				ace->data_result);
		} else {
			ace->media_change = 0;

			/* Record disk parameters */
			set_capacity(ace->gd, ace->cf_id.lba_capacity);
			dev_info(ace->dev, "capacity: %i sectors\n",
				 ace->cf_id.lba_capacity);
		}

		/* We're done, drop to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = ace->data_result;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
		break;

	case ACE_FSM_STATE_REQ_PREPARE:
		req = ace_get_next_request(ace->queue);
		if (!req) {
			ace->fsm_state = ACE_FSM_STATE_IDLE;
			break;
		}

		/* Okay, it's a data request, set it up for transfer */
		dev_dbg(ace->dev,
			"request: sec=%lx hcnt=%lx, ccnt=%x, dir=%i\n",
			req->sector, req->hard_nr_sectors,
			req->current_nr_sectors, rq_data_dir(req));

		ace->req = req;
		ace->data_ptr = req->buffer;
		ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
		ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);

		count = req->hard_nr_sectors;
		if (rq_data_dir(req)) {
			/* Kick off write request */
			dev_dbg(ace->dev, "write data\n");
			ace->fsm_task = ACE_TASK_WRITE;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_WRITE_DATA);
		} else {
			/* Kick off read request */
			dev_dbg(ace->dev, "read data\n");
			ace->fsm_task = ACE_TASK_READ;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_READ_DATA);
		}

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* Move to the transfer state.  The systemace will raise
		 * an interrupt once there is something to do
		 */
		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
		if (ace->fsm_task == ACE_TASK_READ)
			ace_fsm_yieldirq(ace);	/* wait for data ready */
		break;

	case ACE_FSM_STATE_REQ_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev,
				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->req->current_nr_sectors * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			dev_dbg(ace->dev,
				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->req->current_nr_sectors * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yieldirq(ace);
			break;
		}

		/* Transfer the next buffer */
		if (ace->fsm_task == ACE_TASK_WRITE)
			ace->reg_ops->dataout(ace);
		else
			ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred, jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* bio finished; is there another one? */
		i = ace->req->current_nr_sectors;
		if (end_that_request_first(ace->req, 1, i)) {
			/* dev_dbg(ace->dev, "next block; h=%li c=%i\n",
			 *      ace->req->hard_nr_sectors,
			 *      ace->req->current_nr_sectors);
			 */
			ace->data_ptr = ace->req->buffer;
			ace->data_count = ace->req->current_nr_sectors * 16;
			ace_fsm_yieldirq(ace);
			break;
		}

		ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
		break;

	case ACE_FSM_STATE_REQ_COMPLETE:
		/* Complete the block request */
		blkdev_dequeue_request(ace->req);
		end_that_request_last(ace->req, 1);
		ace->req = NULL;

		/* Finished request; go to idle state */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;

	default:
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;
	}
}
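ace_get_next_request(), used by the IDLE and REQ_PREPARE states above, is a small filter over elv_next_request(); roughly (a sketch, hedged):

static struct request *ace_get_next_request(struct request_queue *q)
{
	struct request *req;

	/* immediately fail anything that is not a filesystem request */
	while ((req = elv_next_request(q)) != NULL) {
		if (blk_fs_request(req))
			break;
		end_request(req, 0);
	}
	return req;
}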