Example #1
static inline void setup_transfer(struct floppy_state *fs)
{
	int n;
	struct swim3 __iomem *sw = fs->swim3;
	struct dbdma_cmd *cp = fs->dma_cmd;
	struct dbdma_regs __iomem *dr = fs->dma;
	struct request *req = fs->cur_req;

	if (blk_rq_cur_sectors(req) <= 0) {
		swim3_warn("%s", "Transfer 0 sectors ?\n");
		return;
	}
	if (rq_data_dir(req) == WRITE)
		n = 1;
	else {
		n = fs->secpertrack - fs->req_sector + 1;
		if (n > blk_rq_cur_sectors(req))
			n = blk_rq_cur_sectors(req);
	}

	swim3_dbg("  setup xfer at sect %d (of %d) head %d for %d\n",
		  fs->req_sector, fs->secpertrack, fs->head, n);

	fs->scount = n;
	swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
	out_8(&sw->sector, fs->req_sector);
	out_8(&sw->nsect, n);
	out_8(&sw->gap3, 0);
	out_le32(&dr->cmdptr, virt_to_bus(cp));
	if (rq_data_dir(req) == WRITE) {
		/* Set up 3 dma commands: write preamble, data, postamble */
		init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
		++cp;
		init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512);
		++cp;
		init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
	} else {
		init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512);
	}
	++cp;
	out_le16(&cp->command, DBDMA_STOP);
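	/* control_bic clears the named bits, control_bis sets them
	 * (paired bit-clear / bit-set registers on the SWIM3) */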
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	in_8(&sw->error);
	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
	if (rq_data_dir(req) == WRITE)
		out_8(&sw->control_bis, WRITE_SECTORS);
	in_8(&sw->intr);
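	/* DBDMA control writes take (mask << 16) | bits; this sets RUN */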
	out_le32(&dr->control, (RUN << 16) | RUN);
	/* enable intr when transfer complete */
	out_8(&sw->intr_enable, TRANSFER_DONE);
	out_8(&sw->control_bis, DO_ACTION);
	set_timeout(fs, 2*HZ, xfer_timeout);	/* enable timeout */
}
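
The init_dma() helper used above fills in a single DBDMA descriptor. A minimal sketch of such a helper, modeled on the swim3 driver's own (descriptor field names follow the Mac DBDMA layout; treat the details as an approximation rather than the exact source):

static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count)
{
	out_le16(&cp->req_count, count);		/* bytes to transfer */
	out_le16(&cp->command, cmd);			/* OUTPUT_MORE, INPUT_LAST, ... */
	out_le32(&cp->phy_addr, virt_to_bus(buf));	/* bus address of the buffer */
	out_le32(&cp->xfer_status, 0);			/* cleared; hardware fills it in */
}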
Example #2
static void tbio_transfer(struct request *req, struct tbio_device *dev)
{
	struct bio *bio = req->bio;

	//printk("tbio: bio_data(bio) %s\n" , (char *)bio_data(bio));
	if (bio_data_dir(bio)) {
		printk("tbio: write \"%s\" to dev\n", (char *)bio_data(bio));
		memcpy(dev->data, bio_data(bio), bio->bi_size);
	} else {
		memcpy(bio_data(bio), dev->data, bio->bi_size);
		printk("tbio: read \"%s\" from dev\n", (char *)bio_data(bio));
	}

}
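
tbio_transfer copies bi_size bytes starting at bio_data(bio), which is only safe when the bio consists of a single segment: bio_data() returns the kernel virtual address of the first bvec only. For kernels of this vintage the helper in <linux/bio.h> is roughly:

static inline void *bio_data(struct bio *bio)
{
	return page_address(bio_page(bio)) + bio_offset(bio);
}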
Example #3
static void drv_chn_receive(struct msg_buffer* buffer)
{
    struct ramdisk_message* rep = buffer->buffer;
    struct req_data* rdata;
    struct ramdisk_dev *dev;
    struct request *req;
    char* buff;
    unsigned long offset = rep->sector*KERNEL_SECTOR_SIZE;
    unsigned long nbytes = rep->nsect*KERNEL_SECTOR_SIZE;

    printk("response: %ld, sector: %ld, nsect: %ld, write:%d\n",
        rep->req_number, rep->sector, rep->nsect, rep->write);

    /* req_number carried the driver's req_data pointer; recover it */
    rdata = (struct req_data*)rep->req_number;
    dev = rdata->dev;
    req = rdata->req;
    buff = bio_data(req->bio);

    if (!rep->write) {
        printk("Writing from %p to %p, %ld nbytes\n", buffer->buffer, buff, nbytes);
        memcpy(buff, buffer->buffer + sizeof(struct ramdisk_message), nbytes);
    }

    if(blk_end_request_cur(req, 0)){
        ramdisk_transfer(dev, req);
    }

    buffer->release(buffer);
}
Example #4
void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
	if (blk_fs_request(rq) || blk_discard_rq(rq)) {
		rq->hard_sector += nsect;
		rq->hard_nr_sectors -= nsect;

		/*
		 * Move the I/O submission pointers ahead if required.
		 */
		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
		    (rq->sector <= rq->hard_sector)) {
			rq->sector = rq->hard_sector;
			rq->nr_sectors = rq->hard_nr_sectors;
			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
			rq->current_nr_sectors = rq->hard_cur_sectors;
			rq->buffer = bio_data(rq->bio);
		}

		/*
		 * if total number of sectors is less than the first segment
		 * size, something has gone terribly wrong
		 */
		if (rq->nr_sectors < rq->current_nr_sectors) {
			printk(KERN_ERR "blk: request botched\n");
			rq->nr_sectors = rq->current_nr_sectors;
		}
	}
}
Example #5
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
{
	struct request_queue *q = pblk->dev->q;
	struct pblk_w_ctx w_ctx;
	sector_t lba = pblk_get_lba(bio);
	unsigned long start_time = jiffies;
	unsigned int bpos, pos;
	int nr_entries = pblk_get_secs(bio);
	int i, ret;

	generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0);

	/* Update the write buffer head (mem) with the entries that we can
	 * write. The write in itself cannot fail, so there is no need to
	 * rollback from here on.
	 */
retry:
	ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
	switch (ret) {
	case NVM_IO_REQUEUE:
		io_schedule();
		goto retry;
	case NVM_IO_ERR:
		pblk_pipeline_stop(pblk);
		goto out;
	}

	if (unlikely(!bio_has_data(bio)))
		goto out;

	pblk_ppa_set_empty(&w_ctx.ppa);
	w_ctx.flags = flags;
	if (bio->bi_opf & REQ_PREFLUSH)
		w_ctx.flags |= PBLK_FLUSH_ENTRY;

	for (i = 0; i < nr_entries; i++) {
		void *data = bio_data(bio);

		w_ctx.lba = lba + i;

		pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + i);
		pblk_rb_write_entry_user(&pblk->rwb, data, w_ctx, pos);

		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}

	atomic64_add(nr_entries, &pblk->user_wa);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_add(nr_entries, &pblk->inflight_writes);
	atomic_long_add(nr_entries, &pblk->req_writes);
#endif

	pblk_rl_inserted(&pblk->rl, nr_entries);

out:
	generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time);
	pblk_write_should_kick(pblk);
	return ret;
}
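
The per-entry loop above shows the usual idiom for walking a bio one exposed page at a time with bio_data() and bio_advance(). A stripped-down sketch of the same pattern (process_page() is a hypothetical consumer; each step is assumed to stay within one segment, as pblk guarantees for its exposed pages):

	while (bio->bi_iter.bi_size) {
		void *data = bio_data(bio);	/* kernel address of the current segment */

		process_page(data);		/* hypothetical per-page consumer */
		bio_advance(bio, PAGE_SIZE);	/* step the bio iterator forward */
	}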
Example #6
static int htifblk_segment(struct htifblk_device *dev,
	struct request *req)
{
	static struct htifblk_request pkt __aligned(HTIF_ALIGN);
	u64 offset, size, end;

	offset = (blk_rq_pos(req) << SECTOR_SIZE_SHIFT);
	size = (blk_rq_cur_sectors(req) << SECTOR_SIZE_SHIFT);

	end = offset + size;
	if (unlikely(end < offset || end > dev->size)) {
		dev_err(&dev->dev->dev, "out-of-bounds access:"
			" offset=%llu size=%llu\n", offset, size);
		return -EINVAL;
	}

	rmb();
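	/* bio_data() returns a linear-map address, so __pa() below yields
	 * the physical address of the current segment; the transfer covers
	 * only this one physically contiguous segment */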
	pkt.addr = __pa(bio_data(req->bio));
	pkt.offset = offset;
	pkt.size = size;
	pkt.tag = dev->tag;

	dev->req = req;
	dev->msg_buf.dev = dev->dev->index;
	dev->msg_buf.cmd = (rq_data_dir(req) == READ) ?
		HTIF_CMD_READ : HTIF_CMD_WRITE;
	dev->msg_buf.data = __pa(&pkt);
	htif_tohost(&dev->msg_buf);
	return 0;
}
Example #7
static struct request *emc_trespass_get(struct emc_handler *h,
					struct path *path)
{
	struct bio *bio;
	struct request *rq;
	unsigned char *page22;
	unsigned char long_trespass_pg[] = {
		0, 0, 0, 0,
		TRESPASS_PAGE,        /* Page code */
		0x09,                 /* Page length - 2 */
		h->hr ? 0x01 : 0x81,  /* Trespass code + Honor reservation bit */
		0xff, 0xff,           /* Trespass target */
		0, 0, 0, 0, 0, 0      /* Reserved bytes / unknown */
		};
	unsigned char short_trespass_pg[] = {
		0, 0, 0, 0,
		TRESPASS_PAGE,        /* Page code */
		0x02,                 /* Page length - 2 */
		h->hr ? 0x01 : 0x81,  /* Trespass code + Honor reservation bit */
		0xff,                 /* Trespass target */
		};
	unsigned data_size = h->short_trespass ? sizeof(short_trespass_pg) :
				sizeof(long_trespass_pg);

	/* get bio backing */
	if (data_size > PAGE_SIZE)
		/* this should never happen */
		return NULL;

	bio = get_failover_bio(path, data_size);
	if (!bio) {
		DMERR("dm-emc: emc_trespass_get: no bio");
		return NULL;
	}

	page22 = (unsigned char *)bio_data(bio);
	memset(page22, 0, data_size);

	memcpy(page22, h->short_trespass ?
		short_trespass_pg : long_trespass_pg, data_size);

	/* get request for block layer packet command */
	rq = get_failover_req(h, bio, path);
	if (!rq) {
		DMERR("dm-emc: emc_trespass_get: no rq");
		free_bio(bio);
		return NULL;
	}

	/* Prepare the MODE SELECT(6) CDB: byte 1 = 0x10 sets the PF
	 * (page format) bit; byte 4 is the parameter list length. */
	rq->cmd[0] = MODE_SELECT;
	rq->cmd[1] = 0x10;
	rq->cmd[4] = data_size;
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	return rq;
}
Example #8
/*
 * un-busy the port etc, and clear any pending DMA status. we want to
 * retry the current request in pio mode instead of risking tossing it
 * all away
 */
static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	ide_startstop_t ret = ide_stopped;

	/*
	 * end current dma transaction
	 */

	if (error < 0) {
		printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
		(void)hwif->dma_ops->dma_end(drive);
		ret = ide_error(drive, "dma timeout error",
				hwif->tp_ops->read_status(hwif));
	} else {
		printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
		hwif->dma_ops->dma_timeout(drive);
	}

	/*
	 * disable dma for now, but remember that we did so because of
	 * a timeout -- we'll reenable after we finish this next request
	 * (or rather the first chunk of it) in pio.
	 */
	drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
	drive->retry_pio++;
	ide_dma_off_quietly(drive);

	/*
	 * un-busy drive etc and make sure request is sane
	 */

	rq = hwif->rq;
	if (!rq)
		goto out;

	hwif->rq = NULL;

	rq->errors = 0;

	if (!rq->bio)
		goto out;

	/* rewind the request's bookkeeping to the start of its first bio */
	rq->sector = rq->bio->bi_sector;
	rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
	rq->hard_cur_sectors = rq->current_nr_sectors;
	rq->buffer = bio_data(rq->bio);
out:
	return ret;
}
Example #9
/*
 * Handle an I/O request.
 */
static void ramdisk_transfer(struct ramdisk_dev *dev, struct request *req)
{
    unsigned long sector = blk_rq_pos(req);
    unsigned long nsect = blk_rq_cur_sectors(req);
    char* buffer = bio_data(req->bio);
    int write = rq_data_dir(req);
    unsigned long offset = sector*KERNEL_SECTOR_SIZE;
    unsigned long nbytes = nsect*KERNEL_SECTOR_SIZE;

    struct req_data* rdata;
    struct ramdisk_message* msg;
    char* payload;
    int payload_length;

    struct msg_buffer* snd_buff;

    if ((offset + nbytes) > dev->size) {
        printk (KERN_NOTICE "Beyond-end write (%ld %ld)\n", offset, nbytes);
        return;
    }

    /* rq_data_dir() is 1 for writes, so only writes carry a payload */
    payload_length = nbytes * write;
    rdata = kmalloc(sizeof(struct req_data), GFP_KERNEL);
    rdata->dev = dev;
    rdata->req = req;
    rdata->buffer = buffer;
    msg = kmalloc(sizeof(struct ramdisk_message) + payload_length, GFP_KERNEL);
    msg->req_number = (unsigned long)rdata;
    msg->sector = sector;
    msg->nsect = nsect;
    msg->write = write;
    payload = ((void*)msg) + sizeof(struct ramdisk_message);

    /* Copy payload if it is a write request */
    if (write) {
        memcpy(payload, buffer, nbytes);
    }

    /* Create sending buffer */
    snd_buff = kmalloc(sizeof(struct msg_buffer), GFP_KERNEL);
    snd_buff->buffer = msg;
    snd_buff->length = sizeof(struct ramdisk_message) + payload_length;
    snd_buff->capacity = sizeof(struct ramdisk_message) + payload_length;
    snd_buff->release = drv_sent;
    snd_buff->channel = &chn;

    msg_channel_send(snd_buff);
}
Example #10
static int ide_floppy_callback(ide_drive_t *drive, int dsc)
{
	struct ide_disk_obj *floppy = drive->driver_data;
	struct ide_atapi_pc *pc = drive->pc;
	struct request *rq = pc->rq;
	int uptodate = pc->error ? 0 : 1;

	ide_debug_log(IDE_DBG_FUNC, "enter");

	if (drive->failed_pc == pc)
		drive->failed_pc = NULL;

	if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 ||
	    (rq && blk_pc_request(rq)))
		uptodate = 1; 
	else if (pc->c[0] == GPCMD_REQUEST_SENSE) {

		u8 *buf = bio_data(rq->bio);

		if (!pc->error) {
			/* fixed-format sense data: byte 2 holds the sense
			 * key, bytes 12/13 the ASC/ASCQ pair */
			floppy->sense_key = buf[2] & 0x0F;
			floppy->asc = buf[12];
			floppy->ascq = buf[13];
			floppy->progress_indication = buf[15] & 0x80 ?
				(u16)get_unaligned((u16 *)&buf[16]) : 0x10000;

			if (drive->failed_pc)
				ide_debug_log(IDE_DBG_PC, "pc = %x",
					      drive->failed_pc->c[0]);

			ide_debug_log(IDE_DBG_SENSE, "sense key = %x, asc = %x,"
				      "ascq = %x", floppy->sense_key,
				      floppy->asc, floppy->ascq);
		} else
			printk(KERN_ERR PFX "Error in REQUEST SENSE itself - "
			       "Aborting request!\n");
	}

	if (blk_special_request(rq))
		rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL;

	return uptodate;
}
Example #11
static int nbdx_request(struct request *req, struct nbdx_queue *xq)
{
	struct nbdx_file *xdev;
	unsigned long start = blk_rq_pos(req) << NBDX_SECT_SHIFT;
	unsigned long len  = blk_rq_cur_bytes(req);
	int write = rq_data_dir(req) == WRITE;
	int err;
	void* buffer = bio_data(req->bio);

	pr_debug("%s called\n", __func__);

	xdev = req->rq_disk->private_data;

	err = nbdx_transfer(xdev, buffer, start, len, write, req, xq);
	if (unlikely(err))
		pr_err("transfer failed for req %p\n", req);

	return err;

}
Example #12
/*
 * The simple form of the request function.
 */
static void sbull_request(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		struct sbull_dev *dev = req->rq_disk->private_data;
		if (req->cmd_type != REQ_TYPE_FS) {
			printk (KERN_NOTICE "Skip non-fs request\n");
			__blk_end_request_cur(req, -EIO);
			continue;
		}
    //    	printk (KERN_NOTICE "Req dev %d dir %ld sec %ld, nr %d f %lx\n",
    //    			dev - Devices, rq_data_dir(req),
    //    			req->sector, req->current_nr_sectors,
    //    			req->flags);
		sbull_transfer(dev, blk_rq_pos(req), blk_rq_cur_sectors(req),
				bio_data(req->bio), rq_data_dir(req));
		__blk_end_request_cur(req, 0);
	}
}
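
sbull_transfer() itself is not shown on this page; a sketch of what it does in the LDD3 sbull driver this example comes from (a straight memcpy between the ramdisk's data area and the request buffer; field names are per that driver):

static void sbull_transfer(struct sbull_dev *dev, unsigned long sector,
		unsigned long nsect, char *buffer, int write)
{
	unsigned long offset = sector * KERNEL_SECTOR_SIZE;
	unsigned long nbytes = nsect * KERNEL_SECTOR_SIZE;

	if ((offset + nbytes) > dev->size) {
		printk(KERN_NOTICE "Beyond-end write (%ld %ld)\n", offset, nbytes);
		return;
	}
	if (write)
		memcpy(dev->data + offset, buffer, nbytes);
	else
		memcpy(buffer, dev->data + offset, nbytes);
}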
Example #13
static void do_z2_request(struct request_queue *q)
{
	struct request *req;

	req = blk_fetch_request(q);
	while (req) {
		unsigned long start = blk_rq_pos(req) << 9;
		unsigned long len  = blk_rq_cur_bytes(req);
		blk_status_t err = BLK_STS_OK;

		if (start + len > z2ram_size) {
			pr_err(DEVICE_NAME ": bad access: block=%llu, "
			       "count=%u\n",
			       (unsigned long long)blk_rq_pos(req),
			       blk_rq_cur_sectors(req));
			err = BLK_STS_IOERR;
			goto done;
		}
		while (len) {
			unsigned long addr = start & Z2RAM_CHUNKMASK;
			unsigned long size = Z2RAM_CHUNKSIZE - addr;
			void *buffer = bio_data(req->bio);
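			/* addr is the offset inside the current chunk;
			 * z2ram_map[] supplies each chunk's base address */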

			if (len < size)
				size = len;
			addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
			if (rq_data_dir(req) == READ)
				memcpy(buffer, (char *)addr, size);
			else
				memcpy((char *)addr, buffer, size);
			start += size;
			len -= size;
		}
	done:
		if (!__blk_end_request_cur(req, err))
			req = blk_fetch_request(q);
	}
}
Example #14
static void ace_fsm_dostate(struct ace_device *ace)
{
	struct request *req;
	u32 status;
	u16 val;
	int count;

#if defined(DEBUG)
	dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n",
		ace->fsm_state, ace->id_req_count);
#endif

	/* Verify that there is actually a CF in the slot. If not, then
	 * bail out back to the idle state and wake up all the waiters */
	status = ace_in32(ace, ACE_STATUS);
	if ((status & ACE_STATUS_CFDETECT) == 0) {
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->media_change = 1;
		set_capacity(ace->gd, 0);
		dev_info(ace->dev, "No CF in slot\n");

		/* Drop all in-flight and pending requests */
		if (ace->req) {
			__blk_end_request_all(ace->req, -EIO);
			ace->req = NULL;
		}
		while ((req = blk_fetch_request(ace->queue)) != NULL)
			__blk_end_request_all(req, -EIO);

		/* Drop back to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = -EIO;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
	}

	switch (ace->fsm_state) {
	case ACE_FSM_STATE_IDLE:
		/* See if there is anything to do */
		if (ace->id_req_count || ace_get_next_request(ace->queue)) {
			ace->fsm_iter_num++;
			ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
			mod_timer(&ace->stall_timer, jiffies + HZ);
			if (!timer_pending(&ace->stall_timer))
				add_timer(&ace->stall_timer);
			break;
		}
		del_timer(&ace->stall_timer);
		ace->fsm_continue_flag = 0;
		break;

	case ACE_FSM_STATE_REQ_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* Already have the lock, jump to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* Request the lock */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ);
		ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK;
		break;

	case ACE_FSM_STATE_WAIT_LOCK:
		if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) {
			/* got the lock; move to next state */
			ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY;
			break;
		}

		/* wait a bit for the lock */
		ace_fsm_yield(ace);
		break;

	case ACE_FSM_STATE_WAIT_CFREADY:
		status = ace_in32(ace, ACE_STATUS);
		if (!(status & ACE_STATUS_RDYFORCFCMD) ||
		    (status & ACE_STATUS_CFBSY)) {
			/* CF card isn't ready; it needs to be polled */
			ace_fsm_yield(ace);
			break;
		}

		/* Device is ready for command; determine what to do next */
		if (ace->id_req_count)
			ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE;
		else
			ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE;
		break;

	case ACE_FSM_STATE_IDENTIFY_PREPARE:
		/* Send identify command */
		ace->fsm_task = ACE_TASK_IDENTIFY;
		ace->data_ptr = ace->cf_id;
		ace->data_count = ACE_BUF_PER_SECTOR;
		ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY);

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* irq handler takes over from this point; wait for the
		 * transfer to complete */
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER;
		ace_fsm_yieldirq(ace);
		break;

	case ACE_FSM_STATE_IDENTIFY_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				ace->data_count);
			ace_fsm_yield(ace);
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			ace_fsm_yield(ace);
			break;
		}

		/* Transfer the next buffer */
		ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred, jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* transfer finished; kick state machine */
		dev_dbg(ace->dev, "identify finished\n");
		ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE;
		break;

	case ACE_FSM_STATE_IDENTIFY_COMPLETE:
		ace_fix_driveid(ace->cf_id);
		ace_dump_mem(ace->cf_id, 512);	/* Debug: Dump out disk ID */

		if (ace->data_result) {
			/* Error occurred, disable the disk */
			ace->media_change = 1;
			set_capacity(ace->gd, 0);
			dev_err(ace->dev, "error fetching CF id (%i)\n",
				ace->data_result);
		} else {
			ace->media_change = 0;

			/* Record disk parameters */
			set_capacity(ace->gd,
				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
			dev_info(ace->dev, "capacity: %i sectors\n",
				ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY));
		}

		/* We're done, drop to IDLE state and notify waiters */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		ace->id_result = ace->data_result;
		while (ace->id_req_count) {
			complete(&ace->id_completion);
			ace->id_req_count--;
		}
		break;

	case ACE_FSM_STATE_REQ_PREPARE:
		req = ace_get_next_request(ace->queue);
		if (!req) {
			ace->fsm_state = ACE_FSM_STATE_IDLE;
			break;
		}
		blk_start_request(req);

		/* Okay, it's a data request, set it up for transfer */
		dev_dbg(ace->dev,
			"request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
			(unsigned long long)blk_rq_pos(req),
			blk_rq_sectors(req), blk_rq_cur_sectors(req),
			rq_data_dir(req));

		ace->req = req;
		ace->data_ptr = bio_data(req->bio);
		ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
		ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);

		count = blk_rq_sectors(req);
		if (rq_data_dir(req)) {
			/* Kick off write request */
			dev_dbg(ace->dev, "write data\n");
			ace->fsm_task = ACE_TASK_WRITE;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_WRITE_DATA);
		} else {
			/* Kick off read request */
			dev_dbg(ace->dev, "read data\n");
			ace->fsm_task = ACE_TASK_READ;
			ace_out(ace, ACE_SECCNTCMD,
				count | ACE_SECCNTCMD_READ_DATA);
		}

		/* As per datasheet, put config controller in reset */
		val = ace_in(ace, ACE_CTRL);
		ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET);

		/* Move to the transfer state.  The systemace will raise
		 * an interrupt once there is something to do
		 */
		ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER;
		if (ace->fsm_task == ACE_TASK_READ)
			ace_fsm_yieldirq(ace);	/* wait for data ready */
		break;

	case ACE_FSM_STATE_REQ_TRANSFER:
		/* Check that the sysace is ready to receive data */
		status = ace_in32(ace, ACE_STATUS);
		if (status & ACE_STATUS_CFBSY) {
			dev_dbg(ace->dev,
				"CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yield(ace);	/* need to poll CFBSY bit */
			break;
		}
		if (!(status & ACE_STATUS_DATABUFRDY)) {
			dev_dbg(ace->dev,
				"DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
				ace->fsm_task, ace->fsm_iter_num,
				blk_rq_cur_sectors(ace->req) * 16,
				ace->data_count, ace->in_irq);
			ace_fsm_yieldirq(ace);
			break;
		}

		/* Transfer the next buffer */
		if (ace->fsm_task == ACE_TASK_WRITE)
			ace->reg_ops->dataout(ace);
		else
			ace->reg_ops->datain(ace);
		ace->data_count--;

		/* If there are still buffers to be transferred, jump out here */
		if (ace->data_count != 0) {
			ace_fsm_yieldirq(ace);
			break;
		}

		/* bio finished; is there another one? */
		if (__blk_end_request_cur(ace->req, 0)) {
			/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
			 *      blk_rq_sectors(ace->req),
			 *      blk_rq_cur_sectors(ace->req));
			 */
			ace->data_ptr = bio_data(ace->req->bio);
			ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
			ace_fsm_yieldirq(ace);
			break;
		}

		ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE;
		break;

	case ACE_FSM_STATE_REQ_COMPLETE:
		ace->req = NULL;

		/* Finished request; go to idle state */
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;

	default:
		ace->fsm_state = ACE_FSM_STATE_IDLE;
		break;
	}
}
Example #15
static void start_request(struct floppy_state *fs)
{
	struct request *req;
	unsigned long x;

	swim3_dbg("start request, initial state=%d\n", fs->state);

	if (fs->state == idle && fs->wanted) {
		fs->state = available;
		wake_up(&fs->wait);
		return;
	}
	while (fs->state == idle) {
		swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
		if (!fs->cur_req) {
			fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
			swim3_dbg("  fetched request %p\n", fs->cur_req);
			if (!fs->cur_req)
				break;
		}
		req = fs->cur_req;

		if (fs->mdev->media_bay &&
		    check_media_bay(fs->mdev->media_bay) != MB_FD) {
			swim3_dbg("%s", "  media bay absent, dropping req\n");
			swim3_end_request(fs, BLK_STS_IOERR, 0);
			continue;
		}

#if 0 /* This is really too verbose */
		swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
			  req->rq_disk->disk_name, req->cmd,
			  (long)blk_rq_pos(req), blk_rq_sectors(req),
			  bio_data(req->bio));
		swim3_dbg("           current_nr_sectors=%u\n",
			  blk_rq_cur_sectors(req));
#endif

		if (blk_rq_pos(req) >= fs->total_secs) {
			swim3_dbg("  pos out of bounds (%ld, max is %ld)\n",
				  (long)blk_rq_pos(req), (long)fs->total_secs);
			swim3_end_request(fs, BLK_STS_IOERR, 0);
			continue;
		}
		if (fs->ejected) {
			swim3_dbg("%s", "  disk ejected\n");
			swim3_end_request(fs, BLK_STS_IOERR, 0);
			continue;
		}

		if (rq_data_dir(req) == WRITE) {
			if (fs->write_prot < 0)
				fs->write_prot = swim3_readbit(fs, WRITE_PROT);
			if (fs->write_prot) {
				swim3_dbg("%s", "  try to write, disk write protected\n");
				swim3_end_request(fs, BLK_STS_IOERR, 0);
				continue;
			}
		}

		/* Do not remove the cast. blk_rq_pos(req) is now a
		 * sector_t and can be 64 bits, but it will never go
		 * past 32 bits for this driver anyway, so we can
		 * safely cast it down and not have to do a 64/32
		 * division
		 */
		fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
		x = ((long)blk_rq_pos(req)) % fs->secpercyl;
		fs->head = x / fs->secpertrack;
		fs->req_sector = x % fs->secpertrack + 1;
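		/* e.g. with secpercyl = 36 and secpertrack = 18, sector 1000
		 * gives req_cyl = 27, x = 28, head = 1, req_sector = 11 */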
		fs->state = do_transfer;
		fs->retries = 0;

		act(fs);
	}
}