Example #1
0
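/*
 * ide_special_rq() executes special (non-fs) requests: head park/unpark
 * commands are translated into taskfile commands, device-setting and
 * drive-reset requests are handled directly, and anything else is
 * failed with a flag dump.
 */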
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = rq->cmd[0];

	if (cmd == REQ_PARK_HEADS || cmd == REQ_UNPARK_HEADS) {
		ide_task_t task;
		struct ide_taskfile *tf = &task.tf;

		memset(&task, 0, sizeof(task));
		if (cmd == REQ_PARK_HEADS) {
			drive->sleep = *(unsigned long *)rq->special;
			drive->dev_flags |= IDE_DFLAG_SLEEPING;
			tf->command = ATA_CMD_IDLEIMMEDIATE;
			tf->feature = 0x44;
			tf->lbal = 0x4c;
			tf->lbam = 0x4e;
			tf->lbah = 0x55;
			task.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER;
		} else		/* cmd == REQ_UNPARK_HEADS */
			tf->command = ATA_CMD_CHK_POWER;

		task.tf_flags |= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
		task.rq = rq;
		drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA;
		return do_rw_taskfile(drive, &task);
	}

	switch (cmd) {
	case REQ_DEVSET_EXEC:
	{
		int err, (*setfunc)(ide_drive_t *, int) = rq->special;

		err = setfunc(drive, *(int *)&rq->cmd[1]);
		if (err)
			rq->errors = err;
		else
			err = 1;
		ide_end_request(drive, err, 0);
		return ide_stopped;
	}
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		blk_dump_rq_flags(rq, "ide_special_rq - bad request");
		ide_end_request(drive, 0, 0);
		return ide_stopped;
	}
}
Example #2
0
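/*
 * redwood_ide_intr() winds up one DMA chunk of the current request and,
 * if sectors remain, reads the last LBA back from the taskfile registers
 * and reprograms them for the next chunk.
 */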
ide_startstop_t redwood_ide_intr (ide_drive_t *drive)
{
	int i;
	byte dma_stat;
	unsigned int nsect;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	struct request *rq = hwgroup->rq;
	unsigned long block,b1,b2,b3,b4;

	nsect = rq->current_nr_sectors;

	dma_stat = HWIF(drive)->dmaproc(ide_dma_end, drive);

	rq->sector += nsect;
	rq->buffer += nsect<<9;
	rq->errors = 0;
	i = (rq->nr_sectors -= nsect);
	ide_end_request(1, HWGROUP(drive));
	if (i > 0) {
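		/* more sectors remain: read the LBA of the last completed
		 * sector back from the taskfile and advance it by one */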
		b1 = IN_BYTE(IDE_SECTOR_REG);
		b2 = IN_BYTE(IDE_LCYL_REG);
		b3 = IN_BYTE(IDE_HCYL_REG);
		b4 = IN_BYTE(IDE_SELECT_REG);
		block = ((b4 & 0x0f) << 24) + (b3 << 16) + (b2 << 8) + (b1);
		block++;
		if (drive->select.b.lba) {
			OUT_BYTE(block,IDE_SECTOR_REG);
			OUT_BYTE(block>>=8,IDE_LCYL_REG);
			OUT_BYTE(block>>=8,IDE_HCYL_REG);
			OUT_BYTE(((block>>8)&0x0f)|drive->select.all,IDE_SELECT_REG);
		} else {
Example #3
0
static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs)
{
	idescsi_scsi_t *scsi = drive_to_idescsi(drive);
	struct request *rq = HWGROUP(drive)->rq;
	struct ide_atapi_pc *pc = (struct ide_atapi_pc *) rq->special;
	int log = test_bit(IDESCSI_LOG_CMD, &scsi->log);
	struct Scsi_Host *host;
	int errors = rq->errors;
	unsigned long flags;

	if (!blk_special_request(rq) && !blk_sense_request(rq)) {
		ide_end_request(drive, uptodate, nrsecs);
		return 0;
	}
	ide_end_drive_cmd (drive, 0, 0);
	if (blk_sense_request(rq)) {
		struct ide_atapi_pc *opc = (struct ide_atapi_pc *) rq->buffer;
		if (log) {
			printk ("ide-scsi: %s: wrap up check %lu, rst = ", drive->name, opc->scsi_cmd->serial_number);
			ide_scsi_hex_dump(pc->buf, 16);
		}
		memcpy((void *) opc->scsi_cmd->sense_buffer, pc->buf,
			SCSI_SENSE_BUFFERSIZE);
		kfree(pc->buf);
		kfree(pc);
		blk_put_request(rq);
		pc = opc;
		rq = pc->rq;
		pc->scsi_cmd->result = (CHECK_CONDITION << 1) |
				(((pc->flags & PC_FLAG_TIMEDOUT) ?
				  DID_TIME_OUT :
				  DID_OK) << 16);
	} else if (pc->flags & PC_FLAG_TIMEDOUT) {
		if (log)
			printk (KERN_WARNING "ide-scsi: %s: timed out for %lu\n",
					drive->name, pc->scsi_cmd->serial_number);
		pc->scsi_cmd->result = DID_TIME_OUT << 16;
	} else if (errors >= ERROR_MAX) {
		pc->scsi_cmd->result = DID_ERROR << 16;
		if (log)
			printk ("ide-scsi: %s: I/O error for %lu\n", drive->name, pc->scsi_cmd->serial_number);
	} else if (errors) {
		if (log)
			printk ("ide-scsi: %s: check condition for %lu\n", drive->name, pc->scsi_cmd->serial_number);
		if (!idescsi_check_condition(drive, rq))
			/* we started a request sense, so we'll be back, exit for now */
			return 0;
		pc->scsi_cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
	} else {
		pc->scsi_cmd->result = DID_OK << 16;
	}
	host = pc->scsi_cmd->device->host;
	spin_lock_irqsave(host->host_lock, flags);
	pc->done(pc->scsi_cmd);
	spin_unlock_irqrestore(host->host_lock, flags);
	kfree(pc);
	blk_put_request(rq);
	scsi->pc = NULL;
	return 0;
}
Example #4
0
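/*
 * ide_kill_rq() fails the current request, going through the owning
 * driver's ->end_request() when the request belongs to a disk,
 * otherwise falling back to ide_end_request().
 */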
static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	if (rq->rq_disk) {
		struct ide_driver *drv;

		drv = *(struct ide_driver **)rq->rq_disk->private_data;
		drv->end_request(drive, 0, 0);
	} else
		ide_end_request(drive, 0, 0);
}
Example #5
0
/*
 * promise_read_intr() is the handler for disk read/multread interrupts
 */
static ide_startstop_t promise_read_intr (ide_drive_t *drive)
{
	byte stat;
	int i;
	unsigned int sectors_left, sectors_avail, nsect;
	struct request *rq;

	if (!OK_STAT(stat=GET_STAT(),DATA_READY,BAD_R_STAT)) {
		return ide_error(drive, "promise_read_intr", stat);
	}

read_again:
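	/* re-read the remaining-sector count until two consecutive samples agree */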
	do {
	    sectors_left = IN_BYTE(IDE_NSECTOR_REG);
	    IN_BYTE(IDE_SECTOR_REG);
	} while (IN_BYTE(IDE_NSECTOR_REG) != sectors_left);
	rq = HWGROUP(drive)->rq;
	sectors_avail = rq->nr_sectors - sectors_left;

read_next:
	rq = HWGROUP(drive)->rq;
	if ((nsect = rq->current_nr_sectors) > sectors_avail)
		nsect = sectors_avail;
	sectors_avail -= nsect;
	ide_input_data(drive, rq->buffer, nsect * SECTOR_WORDS);
#ifdef DEBUG
	printk("%s:  promise_read: sectors(%ld-%ld), buffer=0x%08lx, "
	       "remaining=%ld\n", drive->name, rq->sector, rq->sector+nsect-1, 
	       (unsigned long) rq->buffer+(nsect<<9), rq->nr_sectors-nsect);
#endif
	rq->sector += nsect;
	rq->buffer += nsect<<9;
	rq->errors = 0;
	i = (rq->nr_sectors -= nsect);
	if ((rq->current_nr_sectors -= nsect) <= 0)
		ide_end_request(1, HWGROUP(drive));
	if (i > 0) {
		if (sectors_avail)
		    goto read_next;
		stat = GET_STAT();
		if(stat & DRQ_STAT)
		    goto read_again;
		if(stat & BUSY_STAT) {
		    ide_set_handler (drive, &promise_read_intr, WAIT_CMD, NULL);
		    return ide_started;
		}
		printk("Ah! promise read intr: sectors left !DRQ !BUSY\n");
		return ide_error(drive, "promise read intr", stat);
	}
	return ide_stopped;
}
Example #6
0
/*
 * do_pdc4030_io() is called from do_rw_disk, having had the block number
 * already set up. It issues a READ or WRITE command to the Promise
 * controller, assuming LBA has been used to set up the block number.
 */
ide_startstop_t do_pdc4030_io (ide_drive_t *drive, struct request *rq)
{
	unsigned long timeout;
	byte stat;

	if (rq->cmd == READ) {
	    ide_set_handler(drive, &promise_read_intr, WAIT_CMD, NULL);
	    OUT_BYTE(PROMISE_READ, IDE_COMMAND_REG);
/* The card's behaviour is odd at this point. If the data is
   available, DRQ will be true, and no interrupt will be
   generated by the card. If this is the case, we need to simulate
   an interrupt. Ugh! Otherwise, if an interrupt will occur, bit0
   of the SELECT register will be high, so we can just return and
   be interrupted.*/
	    timeout = jiffies + HZ/20; /* 50ms wait */
	    do {
		stat=GET_STAT();
		if(stat & DRQ_STAT) {
                    disable_irq(HWIF(drive)->irq);
		    ide_intr(HWIF(drive)->irq,HWGROUP(drive),NULL);
                    enable_irq(HWIF(drive)->irq);
		    return ide_stopped;
		}
		if(IN_BYTE(IDE_SELECT_REG) & 0x01)
		    return ide_started;
		udelay(1);
	    } while (time_before(jiffies, timeout));
	    printk("%s: reading: No DRQ and not waiting - Odd!\n",
		   drive->name);
	    return ide_started;
	}
	if (rq->cmd == WRITE) {
	    ide_startstop_t startstop;
	    OUT_BYTE(PROMISE_WRITE, IDE_COMMAND_REG);
	    if (ide_wait_stat(&startstop, drive, DATA_READY, drive->bad_wstat, WAIT_DRQ)) {
		printk("%s: no DRQ after issuing PROMISE_WRITE\n", drive->name);
		return startstop;
	    }
	    if (!drive->unmask)
		__cli();	/* local CPU only */
	    HWGROUP(drive)->wrq = *rq; /* scratchpad */
	    return promise_write(drive);
	}
	printk("%s: bad command: %d\n", drive->name, rq->cmd);
	ide_end_request(0, HWGROUP(drive));
	return ide_stopped;
}
Example #7
0
void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{
	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		u8 err = ide_read_error(drive);

		ide_end_drive_cmd(drive, stat, err);
		return;
	}

	if (rq->rq_disk) {
		struct ide_driver *drv;

		drv = *(struct ide_driver **)rq->rq_disk->private_data;
		drv->end_request(drive, 1, rq->nr_sectors);
	} else
		ide_end_request(drive, 1, rq->nr_sectors);
}
Example #8
0
/*
 * promise_write() transfers a block of one or more sectors of data to a
 * drive as part of a disk write operation. All but 4 sectors are transferred
 * in the first attempt, then the interface is polled (nicely!) for completion
 * before the final 4 sectors are transferred. Don't ask me why, but this is
 * how it's done in the drivers for other O/Ses. There is no interrupt
 * generated on writes, which is why we have to do it like this.
 */
static ide_startstop_t promise_write (ide_drive_t *drive)
{
    ide_hwgroup_t *hwgroup = HWGROUP(drive);
    struct request *rq = &hwgroup->wrq;
    int i;

    if (rq->nr_sectors > 4) {
        if (ide_multwrite(drive, rq->nr_sectors - 4))
		return ide_stopped;
        hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
        ide_set_handler (drive, &promise_write_pollfunc, 1, NULL);
        return ide_started;
    } else {
        if (ide_multwrite(drive, rq->nr_sectors))
		return ide_stopped;
        rq = hwgroup->rq;
        for (i = rq->nr_sectors; i > 0;) {
            i -= rq->current_nr_sectors;
            ide_end_request(1, hwgroup);
        }
    }
    return ide_stopped;
}
Example #9
0
/*
 * dma_intr() is the handler for disk read/write DMA interrupts
 */
static void dma_intr (ide_drive_t *drive)
{
	byte stat, dma_stat;
	int i;
	struct request *rq = HWGROUP(drive)->rq;
	unsigned short dma_base = HWIF(drive)->dma_base;

	dma_stat = inb(dma_base+2);		/* get DMA status */
	outb(inb(dma_base)&~1, dma_base);	/* stop DMA operation */
	stat = GET_STAT();			/* get drive status */
	if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
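		/* DMA status bits: 0 = active, 1 = error, 2 = interrupt;
		   0x04 means the transfer completed without error */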
		if ((dma_stat & 7) == 4) {	/* verify good DMA status */
			rq = HWGROUP(drive)->rq;
			for (i = rq->nr_sectors; i > 0;) {
				i -= rq->current_nr_sectors;
				ide_end_request(1, HWGROUP(drive));
			}
			return;
		}
		printk("%s: bad DMA status: 0x%02x\n", drive->name, dma_stat);
	}
	sti();
	ide_error(drive, "dma_intr", stat);
}
Example #10
0
/*
 * promise_write_pollfunc() is the handler for disk write completion polling.
 */
static ide_startstop_t promise_write_pollfunc (ide_drive_t *drive)
{
	int i;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	struct request *rq;

        if (IN_BYTE(IDE_NSECTOR_REG) != 0) {
            if (time_before(jiffies, hwgroup->poll_timeout)) {
                ide_set_handler (drive, &promise_write_pollfunc, 1, NULL);
                return ide_started; /* continue polling... */
            }
            printk("%s: write timed-out!\n",drive->name);
            return ide_error (drive, "write timeout", GET_STAT());
        }
        
	if (ide_multwrite(drive, 4))
		return ide_stopped;
        rq = hwgroup->rq;
        for (i = rq->nr_sectors; i > 0;) {
            i -= rq->current_nr_sectors;
            ide_end_request(1, hwgroup);
        }
        return ide_stopped;
}
Example #11
0
/*
 * dma_intr() is the handler for disk read/write DMA interrupts
 */
void ide_dma_intr (ide_drive_t *drive)
{
	int i;
	byte stat, dma_stat;

	DPRINT("ide_dma_intr\n");
	dma_stat = HWIF(drive)->dmaproc(ide_dma_end, drive);
	stat = GET_STAT();			/* get drive status */
	DPRINT("stat=%02x\n", stat);
	if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
		if (!dma_stat) {
			struct request *rq = HWGROUP(drive)->rq;
			for (i = rq->nr_sectors; i > 0;) {
				i -= rq->current_nr_sectors;
				ide_end_request(1, HWGROUP(drive));
			}
			return;
		}
		printk("%s: dma_intr: bad DMA status\n", drive->name);
	}
	ide__sti();	/* local CPU only */
	ide_error(drive, "dma_intr", stat);
}