Example 1
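/*
 * qc_issue hook for the inic162x SATA controller: adjust the port IRQ
 * mask before issuing, and fast-fail IDENTIFY commands sent to a port
 * whose status reads as uninitialized (0x7f or 0xff).
 */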
static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* ATA IRQ doesn't wait for DMA transfer completion and vice
	 * versa.  Mask IRQ selectively to detect command completion.
	 * Without it, ATA DMA read command can cause data corruption.
	 *
	 * Something similar might be needed for ATAPI writes.  I
	 * tried a lot of combinations but couldn't find the solution.
	 */
	if (qc->tf.protocol == ATA_PROT_DMA &&
	    !(qc->tf.flags & ATA_TFLAG_WRITE))
		inic_set_pirq_mask(ap, PIRQ_MASK_DMA_READ);
	else
		inic_set_pirq_mask(ap, PIRQ_MASK_OTHER);

	/* Issuing a command to a not-yet-initialized port locks up the
	 * controller.  Most of the time this happens with the first
	 * command after reset, which is an ATA or ATAPI IDENTIFY.
	 * Fast-fail if stat is 0x7f or 0xff for those commands.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_ID_ATA ||
		     qc->tf.command == ATA_CMD_ID_ATAPI)) {
		u8 stat = ata_chk_status(ap);
		if (stat == 0x7f || stat == 0xff)
			return AC_ERR_HSM;
	}

	return ata_qc_issue_prot(qc);
}
Example 2
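/*
 * qc_issue hook for pata_acpi.  Flag 0x10 in the cached _GTM flags
 * presumably marks independent per-drive timings, in which case no
 * reprogramming is needed; otherwise PIO (and, if set, DMA) timings
 * are lazily reloaded whenever the selected device changes.
 */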
static unsigned int pacpi_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;
	struct pata_acpi *acpi = ap->private_data;

	if (acpi->gtm.flags & 0x10)
		return ata_qc_issue_prot(qc);

	if (adev != acpi->last) {
		pacpi_set_piomode(ap, adev);
		if (adev->dma_mode)
			pacpi_set_dmamode(ap, adev);
		acpi->last = adev;
	}
	return ata_qc_issue_prot(qc);
}
Example 3
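/*
 * qc_issue hook for pata_cmd640.  The secondary channel apparently
 * shares one timing register (DRWTIM23) between its two drives, so the
 * cached per-drive value in reg58[] is reloaded on each device switch.
 */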
static unsigned int cmd640_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct cmd640_reg *timing = ap->private_data;

	if (ap->port_no != 0 && adev->devno != timing->last) {
		pci_write_config_byte(pdev, DRWTIM23, timing->reg58[adev->devno]);
		timing->last = adev->devno;
	}
	return ata_qc_issue_prot(qc);
}
Example 4
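/*
 * qc_issue hook for pata_ns87410: reload the PIO timings when the
 * device about to be used is not the one last programmed (tracked in
 * ap->private_data).
 */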
static unsigned int ns87410_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;

	/* If modes have been configured and the channel data is not
	   loaded, then load it.  We must check whether pio_mode is set,
	   because the core code does not set adev->pio_mode to XFER_PIO_0
	   while probing, as one might expect. */

	if (adev->pio_mode && adev != ap->private_data)
		ns87410_set_piomode(ap, adev);

	return ata_qc_issue_prot(qc);
}
Example 5
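/*
 * qc_issue hook for pata_radisys.  Non-UDMA timings are apparently
 * shared between the devices on a channel (only UDMA timing is
 * per-device), so MWDMA or PIO settings are reapplied on a device
 * switch.
 */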
static unsigned int radisys_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;

	if (adev != ap->private_data) {
		/* UDMA timing is not shared */
		if (adev->dma_mode < XFER_UDMA_0) {
			if (adev->dma_mode)
				radisys_set_dmamode(ap, adev);
			else if (adev->pio_mode)
				radisys_set_piomode(ap, adev);
		}
	}
	return ata_qc_issue_prot(qc);
}
Example 6
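/*
 * qc_issue hook for pata_cs5530.  The controller apparently has a
 * single MWDMA/UDMA selection bit per channel, so the mode bits must
 * be switched when consecutive commands target devices using different
 * DMA classes.
 */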
static unsigned int cs5530_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;
	struct ata_device *prev = ap->private_data;

	/* See if the DMA settings could be wrong */
	if (adev->dma_mode != 0 && adev != prev && prev != NULL) {
		/* Maybe, but do the two devices' DMA classes (MWDMA/UDMA) match? */
		if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) ||
		    (adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0))
			/* Switch the mode bits */
			cs5530_set_dmamode(ap, adev);
	}

	return ata_qc_issue_prot(qc);
}