Example #1
0
/*
 * Add a command to the queue and start controller.
 *
 * MUST BE CALLED AT splbio()!
 */
void
ata_exec_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{

	ATADEBUG_PRINT(("ata_exec_xfer %p channel %d drive %d\n", xfer,
	    chp->ch_channel, xfer->c_drive), DEBUG_XFERS);

	/* complete xfer setup */
	xfer->c_chp = chp;

	/* insert at the end of command list */
	TAILQ_INSERT_TAIL(&chp->ch_queue->queue_xfer, xfer, c_xferchain);
	ATADEBUG_PRINT(("atastart from ata_exec_xfer, flags 0x%x\n",
	    chp->ch_flags), DEBUG_XFERS);
	/*
	 * if polling and can sleep, wait for the xfer to be at head of queue
	 */
	if ((xfer->c_flags & (C_POLL | C_WAIT)) == (C_POLL | C_WAIT)) {
		while (chp->ch_queue->active_xfer != NULL ||
		    TAILQ_FIRST(&chp->ch_queue->queue_xfer) != xfer) {
			xfer->c_flags |= C_WAITACT;
			tsleep(xfer, PRIBIO, "ataact", 0);
			xfer->c_flags &= ~C_WAITACT;
			if (xfer->c_flags & C_FREE) {
				ata_free_xfer(chp, xfer);
				return;
			}
		}
	}
	atastart(chp);
}
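
The header comment above requires splbio(); as a hedged usage sketch (hypothetical helper, not from the source), a caller would bracket the call with the spl(9) primitives:

/* Hypothetical caller sketch: satisfy the splbio() requirement noted above. */
static void
example_submit_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	int s;

	s = splbio();		/* ata_exec_xfer() must run at splbio() */
	ata_exec_xfer(chp, xfer);
	splx(s);
}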
Example #2
0
static void
wdc_atapi_done(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct atac_softc *atac = chp->ch_atac;
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
	int drive = xfer->c_drive;

	ATADEBUG_PRINT(("wdc_atapi_done %s:%d:%d: flags 0x%x\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
	    (u_int)xfer->c_flags), DEBUG_XFERS);
	callout_stop(&chp->ch_callout);
	/* mark controller inactive and free the command */
	chp->ch_queue->active_xfer = NULL;
	ata_free_xfer(chp, xfer);

	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
		sc_xfer->error = XS_DRIVER_STUFFUP;
		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
		wakeup(&chp->ch_queue->active_xfer);
	}

	ATADEBUG_PRINT(("wdc_atapi_done: scsipi_done\n"), DEBUG_XFERS);
	scsipi_done(sc_xfer);
	ATADEBUG_PRINT(("atastart from wdc_atapi_done, flags 0x%x\n",
	    chp->ch_flags), DEBUG_XFERS);
	atastart(chp);
}
static void
via_sata_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa,
    int satareg_shift)
{
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	struct wdc_regs *wdr;
	pcireg_t interface;
	int channel;

	interface = PCI_INTERFACE(pa->pa_class);

	if (via_sata_chip_map_common(sc, pa) == 0)
		return;

	if (interface == 0) {
		ATADEBUG_PRINT(("via_sata_chip_map interface == 0\n"),
		    DEBUG_PROBE);
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	}

	sc->sc_wdcdev.sc_atac.atac_probe = wdc_sataprobe;
	sc->sc_wdcdev.wdc_maxdrives = 1;
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		wdc_cp = &cp->ata_channel;
		wdr = CHAN_TO_WDC_REGS(wdc_cp);
		wdr->sata_iot = sc->sc_ba5_st;
		wdr->sata_baseioh = sc->sc_ba5_sh;
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << satareg_shift) + 0x0, 4,
		    &wdr->sata_status) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_status regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << satareg_shift) + 0x4, 4,
		    &wdr->sata_error) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_error regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		if (bus_space_subregion(wdr->sata_iot, wdr->sata_baseioh,
		    (wdc_cp->ch_channel << satareg_shift) + 0x8, 4,
		    &wdr->sata_control) != 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "couldn't map channel %d sata_control regs\n",
			    wdc_cp->ch_channel);
			continue;
		}
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
Example #4
0
/*
 * Fabricate a default disk label, and try to read the correct one.
 */
static void
edgetdisklabel(dev_t dev, struct ed_softc *ed)
{
    struct disklabel *lp = ed->sc_dk.dk_label;
    const char *errstring;

    ATADEBUG_PRINT(("edgetdisklabel\n"), DEBUG_FUNCS);

    memset(ed->sc_dk.dk_cpulabel, 0, sizeof(struct cpu_disklabel));

    edgetdefaultlabel(ed, lp);

    errstring = readdisklabel(
                    EDLABELDEV(dev), edmcastrategy, lp, ed->sc_dk.dk_cpulabel);
    if (errstring) {
        /*
         * This probably happened because the drive's default
         * geometry doesn't match the DOS geometry.  We
         * assume the DOS geometry is now in the label and try
         * again.  XXX This is a kluge.
         */
#if 0
        if (wd->drvp->state > RECAL)
            wd->drvp->drive_flags |= ATA_DRIVE_RESET;
#endif
        errstring = readdisklabel(EDLABELDEV(dev),
                                  edmcastrategy, lp, ed->sc_dk.dk_cpulabel);
    }
    if (errstring) {
        printf("%s: %s\n", device_xname(ed->sc_dev), errstring);
        return;
    }
}
Example #5
0
int
edmcaclose(dev_t dev, int flag, int fmt, struct lwp *l)
{
    struct ed_softc *wd = device_lookup_private(&ed_cd, DISKUNIT(dev));
    int part = DISKPART(dev);

    ATADEBUG_PRINT(("edmcaclose\n"), DEBUG_FUNCS);

    mutex_enter(&wd->sc_dk.dk_openlock);

    switch (fmt) {
    case S_IFCHR:
        wd->sc_dk.dk_copenmask &= ~(1 << part);
        break;
    case S_IFBLK:
        wd->sc_dk.dk_bopenmask &= ~(1 << part);
        break;
    }
    wd->sc_dk.dk_openmask =
        wd->sc_dk.dk_copenmask | wd->sc_dk.dk_bopenmask;

    if (wd->sc_dk.dk_openmask == 0) {
#if 0
        wd_flushcache(wd, AT_WAIT);
#endif
        /* XXXX Must wait for I/O to complete! */

        if (! (wd->sc_flags & WDF_KLABEL))
            wd->sc_flags &= ~WDF_LOADED;
    }

    mutex_exit(&wd->sc_dk.dk_openlock);

    return 0;
}
Example #6
0
int
edmcasize(dev_t dev)
{
    struct ed_softc *wd;
    int part, omask;
    int size;

    ATADEBUG_PRINT(("edsize\n"), DEBUG_FUNCS);

    wd = device_lookup_private(&ed_cd, DISKUNIT(dev));
    if (wd == NULL)
        return (-1);

    part = DISKPART(dev);
    omask = wd->sc_dk.dk_openmask & (1 << part);

    if (omask == 0 && edmcaopen(dev, 0, S_IFBLK, NULL) != 0)
        return (-1);
    if (wd->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
        size = -1;
    else
        size = wd->sc_dk.dk_label->d_partitions[part].p_size *
               (wd->sc_dk.dk_label->d_secsize / DEV_BSIZE);
    if (omask == 0 && edmcaclose(dev, 0, S_IFBLK, NULL) != 0)
        return (-1);
    return (size);
}
Example #7
0
int
ata_set_mode(struct ata_drive_datas *drvp, uint8_t mode, uint8_t flags)
{
	struct ata_command ata_c;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;

	ATADEBUG_PRINT(("ata_set_mode=0x%x\n", mode), DEBUG_FUNCS);
	memset(&ata_c, 0, sizeof(struct ata_command));

	ata_c.r_command = SET_FEATURES;
	ata_c.r_st_bmask = 0;
	ata_c.r_st_pmask = 0;
	ata_c.r_features = WDSF_SET_MODE;
	ata_c.r_count = mode;
	ata_c.flags = flags;
	ata_c.timeout = 1000; /* 1s */
	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
						&ata_c) != ATACMD_COMPLETE)
		return CMD_AGAIN;
	if (ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		return CMD_ERR;
	}
	return CMD_OK;
}
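
A usage sketch (hypothetical caller, assuming the AT_WAIT flag used elsewhere in this code): since ata_set_mode() distinguishes CMD_AGAIN from CMD_ERR, only the former is worth retrying.

/* Hypothetical caller: retry only the transient CMD_AGAIN result. */
static int
example_set_mode_retry(struct ata_drive_datas *drvp, uint8_t mode)
{
	int attempt, rv = CMD_ERR;

	for (attempt = 0; attempt < 3; attempt++) {
		rv = ata_set_mode(drvp, mode, AT_WAIT);
		if (rv != CMD_AGAIN)
			break;		/* CMD_OK or CMD_ERR: stop here */
	}
	return rv;
}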
Example #8
0
static void
edgetdefaultlabel(struct ed_softc *ed, struct disklabel *lp)
{
    ATADEBUG_PRINT(("edgetdefaultlabel\n"), DEBUG_FUNCS);
    memset(lp, 0, sizeof(struct disklabel));

    lp->d_secsize = DEV_BSIZE;
    lp->d_ntracks = ed->heads;
    lp->d_nsectors = ed->sectors;
    lp->d_ncylinders = ed->cyl;
    lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

    lp->d_type = DKTYPE_ESDI;

    strncpy(lp->d_typename, "ESDI", 16);
    strncpy(lp->d_packname, "fictitious", 16);
    lp->d_secperunit = ed->sc_capacity;
    lp->d_rpm = 3600;
    lp->d_interleave = 1;
    lp->d_flags = 0;

    lp->d_partitions[RAW_PART].p_offset = 0;
    lp->d_partitions[RAW_PART].p_size =
        lp->d_secperunit * (lp->d_secsize / DEV_BSIZE);
    lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
    lp->d_npartitions = RAW_PART + 1;

    lp->d_magic = DISKMAGIC;
    lp->d_magic2 = DISKMAGIC;
    lp->d_checksum = dkcksum(lp);
}
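
As a sanity-check sketch (hedged, assuming the standard disklabel conventions): a label built this way carries both magic numbers, and because d_checksum was computed while the checksum field was still zero, dkcksum() over the finished label XORs to zero.

/* Hypothetical validation helper for a label produced as above. */
static int
example_label_is_valid(struct disklabel *lp)
{
    return lp->d_magic == DISKMAGIC && lp->d_magic2 == DISKMAGIC &&
           dkcksum(lp) == 0;
}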
Example #9
0
static int
wdc_atapi_get_params(struct scsipi_channel *chan, int drive,
    struct ataparams *id)
{
	struct wdc_softc *wdc = device_private(chan->chan_adapter->adapt_dev);
	struct atac_softc *atac = &wdc->sc_atac;
	struct wdc_regs *wdr = &wdc->regs[chan->chan_channel];
	struct ata_channel *chp = atac->atac_channels[chan->chan_channel];
	struct ata_command ata_c;

	memset(&ata_c, 0, sizeof(struct ata_command));
	ata_c.r_command = ATAPI_SOFT_RESET;
	ata_c.r_st_bmask = 0;
	ata_c.r_st_pmask = 0;
	ata_c.flags = AT_WAIT | AT_POLL;
	ata_c.timeout = WDC_RESET_WAIT;
	if (wdc_exec_command(&chp->ch_drive[drive], &ata_c) != ATACMD_COMPLETE) {
		printf("wdc_atapi_get_params: ATAPI_SOFT_RESET failed for"
		    " drive %s:%d:%d: driver failed\n",
		    device_xname(atac->atac_dev), chp->ch_channel, drive);
		panic("wdc_atapi_get_params");
	}
	if (ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		ATADEBUG_PRINT(("wdc_atapi_get_params: ATAPI_SOFT_RESET "
		    "failed for drive %s:%d:%d: error 0x%x\n",
		    device_xname(atac->atac_dev), chp->ch_channel, drive,
		    ata_c.r_error), DEBUG_PROBE);
		return -1;
	}
	chp->ch_drive[drive].state = 0;

	(void)bus_space_read_1(wdr->cmd_iot, wdr->cmd_iohs[wd_status], 0);

	/* Some ATAPI devices need a bit more time after software reset. */
	delay(5000);
	if (ata_get_params(&chp->ch_drive[drive], AT_WAIT, id) != 0) {
		ATADEBUG_PRINT(("wdc_atapi_get_params: ATAPI_IDENTIFY_DEVICE "
		    "failed for drive %s:%d:%d: error 0x%x\n",
		    device_xname(atac->atac_dev), chp->ch_channel, drive,
		    ata_c.r_error), DEBUG_PROBE);
		return -1;
	}
	return 0;
}
Example #10
0
/*
 * ata_reset_channel:
 *
 *	Reset an ATA channel.
 *
 *	MUST BE CALLED AT splbio()!
 */
void
ata_reset_channel(struct ata_channel *chp, int flags)
{
	struct atac_softc *atac = chp->ch_atac;
	int drive;

#ifdef ATA_DEBUG
	int spl1, spl2;

	spl1 = splbio();
	spl2 = splbio();
	if (spl2 != spl1) {
		printf("ata_reset_channel: not at splbio()\n");
		panic("ata_reset_channel");
	}
	splx(spl2);
	splx(spl1);
#endif /* ATA_DEBUG */

	chp->ch_queue->queue_freeze++;

	/*
	 * If we can poll or wait it's OK, otherwise wake up the
	 * kernel thread to do it for us.
	 */
	ATADEBUG_PRINT(("ata_reset_channel flags 0x%x ch_flags 0x%x\n",
	    flags, chp->ch_flags), DEBUG_FUNCS | DEBUG_XFERS);
	if ((flags & (AT_POLL | AT_WAIT)) == 0) {
		if (chp->ch_flags & ATACH_TH_RESET) {
			/* No need to schedule a reset more than one time. */
			chp->ch_queue->queue_freeze--;
			return;
		}
		chp->ch_flags |= ATACH_TH_RESET;
		chp->ch_reset_flags = flags & (AT_RST_EMERG | AT_RST_NOCMD);
		wakeup(&chp->ch_thread);
		return;
	}

	(*atac->atac_bustype_ata->ata_reset_channel)(chp, flags);

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (drive = 0; drive < chp->ch_ndrives; drive++)
		chp->ch_drive[drive].state = 0;

	chp->ch_flags &= ~ATACH_TH_RESET;
	if ((flags & AT_RST_EMERG) == 0)  {
		chp->ch_queue->queue_freeze--;
		atastart(chp);
	} else {
		/* make sure that we can use polled commands */
		TAILQ_INIT(&chp->ch_queue->queue_xfer);
		chp->ch_queue->queue_freeze = 0;
		chp->ch_queue->active_xfer = NULL;
	}
}
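
As a hedged caller sketch (hypothetical, based on the flag handling above): with neither AT_POLL nor AT_WAIT set, the function only marks the channel and wakes its kernel thread, so a reset request can be queued cheaply.

/* Hypothetical caller: defer the actual reset to the channel's thread. */
static void
example_request_reset(struct ata_channel *chp)
{
	int s;

	s = splbio();			/* required by ata_reset_channel() */
	ata_reset_channel(chp, 0);	/* no AT_POLL/AT_WAIT: thread resets */
	splx(s);
}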
Example #11
0
static void
sis_sata_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	if (interface == 0) {
		ATADEBUG_PRINT(("sis_sata_chip_map interface == 0\n"),
		    DEBUG_PROBE);
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	}

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "Silicon Integrated Systems 180/96X SATA controller "
	    "(rev. 0x%02x)\n", PCI_REVISION(pa->pa_class));

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA | ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	sc->sc_wdcdev.sc_atac.atac_set_modes = sata_setup_channel;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
static void
acer_setup_channel(struct ata_channel *chp)
{
	struct ata_drive_datas *drvp;
	int drive, s;
	u_int32_t acer_fifo_udma;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	idedma_ctl = 0;
	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
	ATADEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
	    ATA_DRIVE_UDMA) { /* check 80 pins cable */
		if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
		    ACER_0x4A_80PIN(chp->ch_channel)) {
			if (chp->ch_drive[0].UDMA_mode > 2)
				chp->ch_drive[0].UDMA_mode = 2;
			if (chp->ch_drive[1].UDMA_mode > 2)
				chp->ch_drive[1].UDMA_mode = 2;
		}
	}

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
		ATADEBUG_PRINT(("acer_setup_channel: old timings reg for "
		    "channel %d drive %d 0x%x\n", chp->ch_channel, drive,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->ch_channel, drive))), DEBUG_PROBE);
		/* clear FIFO/DMA mode */
		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->ch_channel, drive, 0x3) |
		    ACER_UDMA_EN(chp->ch_channel, drive) |
		    ACER_UDMA_TIM(chp->ch_channel, drive, 0x7));

		/* add timing values, setup DMA if needed */
		if ((drvp->drive_flags & ATA_DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & ATA_DRIVE_UDMA) == 0) {
			acer_fifo_udma |=
			    ACER_FTH_OPL(chp->ch_channel, drive, 0x1);
			goto pio;
		}

		acer_fifo_udma |= ACER_FTH_OPL(chp->ch_channel, drive, 0x2);
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			/* use Ultra/DMA */
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
			splx(s);
			acer_fifo_udma |= ACER_UDMA_EN(chp->ch_channel, drive);
			acer_fifo_udma |=
			    ACER_UDMA_TIM(chp->ch_channel, drive,
				acer_udma[drvp->UDMA_mode]);
			/* XXX disable if one drive < UDMA3 ? */
			if (drvp->UDMA_mode >= 3) {
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    ACER_0x4B,
				    pciide_pci_read(sc->sc_pc, sc->sc_tag,
					ACER_0x4B) | ACER_0x4B_UDMA66);
			}
		} else {
			/*
			 * use Multiword DMA
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->ch_channel, drive),
		    acer_pio[drvp->PIO_mode]);
	}
	ATADEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
}
Example #13
0
static void
piix_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	u_int32_t idetim;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		/* Do all revisions require DMA alignment workaround? */
		sc->sc_wdcdev.dma_init = piix_dma_init;
		switch(sc->sc_pp->ide_product) {
		case PCI_PRODUCT_INTEL_82371AB_IDE:
		case PCI_PRODUCT_INTEL_82440MX_IDE:
		case PCI_PRODUCT_INTEL_82801AA_IDE:
		case PCI_PRODUCT_INTEL_82801AB_IDE:
		case PCI_PRODUCT_INTEL_82801BA_IDE:
		case PCI_PRODUCT_INTEL_82801BAM_IDE:
		case PCI_PRODUCT_INTEL_82801CA_IDE_1:
		case PCI_PRODUCT_INTEL_82801CA_IDE_2:
		case PCI_PRODUCT_INTEL_82801DB_IDE:
		case PCI_PRODUCT_INTEL_82801DBM_IDE:
		case PCI_PRODUCT_INTEL_82801EB_IDE:
		case PCI_PRODUCT_INTEL_6300ESB_IDE:
		case PCI_PRODUCT_INTEL_82801FB_IDE:
		case PCI_PRODUCT_INTEL_82801G_IDE:
		case PCI_PRODUCT_INTEL_82801HBM_IDE:
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
		}
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	switch(sc->sc_pp->ide_product) {
	case PCI_PRODUCT_INTEL_82801AA_IDE:
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
		break;
	case PCI_PRODUCT_INTEL_82801BA_IDE:
	case PCI_PRODUCT_INTEL_82801BAM_IDE:
	case PCI_PRODUCT_INTEL_82801CA_IDE_1:
	case PCI_PRODUCT_INTEL_82801CA_IDE_2:
	case PCI_PRODUCT_INTEL_82801DB_IDE:
	case PCI_PRODUCT_INTEL_82801DBM_IDE:
	case PCI_PRODUCT_INTEL_82801EB_IDE:
	case PCI_PRODUCT_INTEL_6300ESB_IDE:
	case PCI_PRODUCT_INTEL_82801FB_IDE:
	case PCI_PRODUCT_INTEL_82801G_IDE:
	case PCI_PRODUCT_INTEL_82801HBM_IDE:
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
		break;
	default:
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
	}
	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
		sc->sc_wdcdev.sc_atac.atac_set_modes = piix_setup_channel;
	else
		sc->sc_wdcdev.sc_atac.atac_set_modes = piix3_4_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	ATADEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		ATADEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.sc_atac.atac_cap & ATAC_CAP_UDMA) {
			ATADEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801G_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) {
			ATADEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}

	}
	ATADEBUG_PRINT(("\n"), DEBUG_PROBE);

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
		if ((PIIX_IDETIM_READ(idetim, channel) &
		    PIIX_IDETIM_IDE) == 0) {
#if 1
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (disabled)\n", cp->name);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
#else
			pcireg_t interface;

			idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
			    channel);
			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
			    idetim);
			interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc,
			    sc->sc_tag, PCI_CLASS_REG));
			aprint_normal("channel %d idetim=%08x interface=%02x\n",
			    channel, idetim, interface);
#endif
		}
		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}

	ATADEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
	    DEBUG_PROBE);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
		ATADEBUG_PRINT((", sidetim=0x%x",
		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
		    DEBUG_PROBE);
		if (sc->sc_wdcdev.sc_atac.atac_cap & ATAC_CAP_UDMA) {
			ATADEBUG_PRINT((", udmareg 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
			    DEBUG_PROBE);
		}
		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_1 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE_2 ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801G_IDE ||
		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) {
			ATADEBUG_PRINT((", IDE_CONTROL 0x%x",
			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
			    DEBUG_PROBE);
		}
	}
	ATADEBUG_PRINT(("\n"), DEBUG_PROBE);
}
Example #14
0
static void
wdc_atapi_phase_complete(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	struct atac_softc *atac = chp->ch_atac;
#if NATA_DMA || NATA_PIOBM
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
#endif
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];

	/* wait for DSC if needed */
	if (drvp->drive_flags & ATA_DRIVE_ATAPIDSCW) {
		ATADEBUG_PRINT(("wdc_atapi_phase_complete(%s:%d:%d) "
		    "polldsc %d\n", device_xname(atac->atac_dev),
		    chp->ch_channel,
		    xfer->c_drive, xfer->c_dscpoll), DEBUG_XFERS);
#if 1
		if (cold)
			panic("wdc_atapi_phase_complete: cold");
#endif
		if (wdcwait(chp, WDCS_DSC, WDCS_DSC, 10,
		    AT_POLL) == WDCWAIT_TOUT) {
			/* 10ms not enough, try again in 1 tick */
			if (xfer->c_dscpoll++ >
			    mstohz(sc_xfer->timeout)) {
				printf("%s:%d:%d: wait_for_dsc "
				    "failed\n",
				    device_xname(atac->atac_dev),
				    chp->ch_channel, xfer->c_drive);
				sc_xfer->error = XS_TIMEOUT;
				wdc_atapi_reset(chp, xfer);
				return;
			} else
				callout_reset(&chp->ch_callout, 1,
				    wdc_atapi_polldsc, xfer);
			return;
		}
	}

	/*
	 * Some drives occasionally set WDCS_ERR with
	 * "ATA illegal length indication" in the error
	 * register. If we read some data the sense is valid
	 * anyway, so don't report the error.
	 */
	if (chp->ch_status & WDCS_ERR &&
	    ((sc_xfer->xs_control & XS_CTL_REQSENSE) == 0 ||
	    sc_xfer->resid == sc_xfer->datalen)) {
		/* save the short sense */
		sc_xfer->error = XS_SHORTSENSE;
		sc_xfer->sense.atapi_sense = chp->ch_error;
		if ((sc_xfer->xs_periph->periph_quirks &
		    PQUIRK_NOSENSE) == 0) {
			/* ask scsipi to send a REQUEST_SENSE */
			sc_xfer->error = XS_BUSY;
			sc_xfer->status = SCSI_CHECK;
		}
#if NATA_DMA || NATA_PIOBM
		else if (wdc->dma_status &
		    (WDC_DMAST_NOIRQ | WDC_DMAST_ERR)) {
#if NATA_DMA
			ata_dmaerr(drvp,
			    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
#endif
			sc_xfer->error = XS_RESET;
			wdc_atapi_reset(chp, xfer);
			return;
		}
#endif
	}
	if (xfer->c_bcount != 0) {
		ATADEBUG_PRINT(("wdc_atapi_intr: bcount value is "
		    "%d after io\n", xfer->c_bcount), DEBUG_XFERS);
	}
#ifdef DIAGNOSTIC
	if (xfer->c_bcount < 0) {
		printf("wdc_atapi_intr warning: bcount value "
		    "is %d after io\n", xfer->c_bcount);
	}
#endif
	ATADEBUG_PRINT(("wdc_atapi_phase_complete: wdc_atapi_done(), "
	    "error 0x%x sense 0x%x\n", sc_xfer->error,
	    sc_xfer->sense.atapi_sense), DEBUG_INTR);
	wdc_atapi_done(chp, xfer);
}
Example #15
0
/*
 * Start I/O on a controller, for the given channel.
 * The first xfer may be not for our channel if the channel queues
 * are shared.
 *
 * MUST BE CALLED AT splbio()!
 */
void
atastart(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct ata_xfer *xfer;

#ifdef ATA_DEBUG
	int spl1, spl2;

	spl1 = splbio();
	spl2 = splbio();
	if (spl2 != spl1) {
		printf("atastart: not at splbio()\n");
		panic("atastart");
	}
	splx(spl2);
	splx(spl1);
#endif /* ATA_DEBUG */

	/* is there a xfer ? */
	if ((xfer = TAILQ_FIRST(&chp->ch_queue->queue_xfer)) == NULL)
		return;

	/* adjust chp, in case we have a shared queue */
	chp = xfer->c_chp;

	if (chp->ch_queue->active_xfer != NULL) {
		return; /* channel already active */
	}
	if (__predict_false(chp->ch_queue->queue_freeze > 0)) {
		if (chp->ch_queue->queue_flags & QF_IDLE_WAIT) {
			chp->ch_queue->queue_flags &= ~QF_IDLE_WAIT;
			wakeup(&chp->ch_queue->queue_flags);
		}
		return; /* queue frozen */
	}
	/*
	 * if someone is waiting for the command to be active, wake it up
	 * and let it process the command
	 */
	if (xfer->c_flags & C_WAITACT) {
		ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d "
		    "wait active\n", xfer, chp->ch_channel, xfer->c_drive),
		    DEBUG_XFERS);
		wakeup(xfer);
		return;
	}
#ifdef DIAGNOSTIC
	if ((chp->ch_flags & ATACH_IRQ_WAIT) != 0)
		panic("atastart: channel waiting for irq");
#endif
	if (atac->atac_claim_hw)
		if (!(*atac->atac_claim_hw)(chp, 0))
			return;

	ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d\n", xfer,
	    chp->ch_channel, xfer->c_drive), DEBUG_XFERS);
	if (chp->ch_drive[xfer->c_drive].drive_flags & ATA_DRIVE_RESET) {
		chp->ch_drive[xfer->c_drive].drive_flags &= ~ATA_DRIVE_RESET;
		chp->ch_drive[xfer->c_drive].state = 0;
	}
	chp->ch_queue->active_xfer = xfer;
	TAILQ_REMOVE(&chp->ch_queue->queue_xfer, xfer, c_xferchain);

	if (atac->atac_cap & ATAC_CAP_NOIRQ)
		KASSERT(xfer->c_flags & C_POLL);

	xfer->c_start(chp, xfer);
}
Example #16
0
/* Get the disk's parameters */
int
ata_get_params(struct ata_drive_datas *drvp, uint8_t flags,
    struct ataparams *prms)
{
	struct ata_command ata_c;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	char *tb;
	int i, rv;
	uint16_t *p;

	ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS);

	tb = kmem_zalloc(DEV_BSIZE, KM_SLEEP);
	memset(prms, 0, sizeof(struct ataparams));
	memset(&ata_c, 0, sizeof(struct ata_command));

	if (drvp->drive_type == ATA_DRIVET_ATA) {
		ata_c.r_command = WDCC_IDENTIFY;
		ata_c.r_st_bmask = WDCS_DRDY;
		ata_c.r_st_pmask = WDCS_DRQ;
		ata_c.timeout = 3000; /* 3s */
	} else if (drvp->drive_type == ATA_DRIVET_ATAPI) {
		ata_c.r_command = ATAPI_IDENTIFY_DEVICE;
		ata_c.r_st_bmask = 0;
		ata_c.r_st_pmask = WDCS_DRQ;
		ata_c.timeout = 10000; /* 10s */
	} else {
		ATADEBUG_PRINT(("ata_get_params: no disks\n"),
		    DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_ERR;
		goto out;
	}
	ata_c.flags = AT_READ | flags;
	ata_c.data = tb;
	ata_c.bcount = DEV_BSIZE;
	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
						&ata_c) != ATACMD_COMPLETE) {
		ATADEBUG_PRINT(("ata_get_params: wdc_exec_command failed\n"),
		    DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_AGAIN;
		goto out;
	}
	if (ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		ATADEBUG_PRINT(("ata_get_params: ata_c.flags=0x%x\n",
		    ata_c.flags), DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_ERR;
		goto out;
	}
	/* if we didn't read any data something is wrong */
	if ((ata_c.flags & AT_XFDONE) == 0) {
		rv = CMD_ERR;
		goto out;
	}

	/* Read in parameter block. */
	memcpy(prms, tb, sizeof(struct ataparams));

	/*
	 * Shuffle string byte order.
	 * ATAPI NEC, Mitsumi and Pioneer drives and
	 * old ATA TDK CompactFlash cards
	 * have different byte order.
	 */
#if BYTE_ORDER == BIG_ENDIAN
# define M(n)	prms->atap_model[(n) ^ 1]
#else
# define M(n)	prms->atap_model[n]
#endif
	if (
#if BYTE_ORDER == BIG_ENDIAN
	    !
#endif
	    ((drvp->drive_type == ATA_DRIVET_ATAPI) ?
	     ((M(0) == 'N' && M(1) == 'E') ||
	      (M(0) == 'F' && M(1) == 'X') ||
	      (M(0) == 'P' && M(1) == 'i')) :
	     ((M(0) == 'T' && M(1) == 'D' && M(2) == 'K')))) {
		rv = CMD_OK;
		goto out;
	}
#undef M
	for (i = 0; i < sizeof(prms->atap_model); i += 2) {
		p = (uint16_t *)(prms->atap_model + i);
		*p = bswap16(*p);
	}
	for (i = 0; i < sizeof(prms->atap_serial); i += 2) {
		p = (uint16_t *)(prms->atap_serial + i);
		*p = bswap16(*p);
	}
	for (i = 0; i < sizeof(prms->atap_revision); i += 2) {
		p = (uint16_t *)(prms->atap_revision + i);
		*p = bswap16(*p);
	}

	rv = CMD_OK;
 out:
	kmem_free(tb, DEV_BSIZE);
	return rv;
}
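
A usage sketch (hypothetical caller): after a successful call the identify strings have already been byte-swapped above, so the model name can be copied out and printed directly.

/* Hypothetical caller: fetch IDENTIFY data and log the model string. */
static void
example_print_model(struct ata_drive_datas *drvp)
{
	struct ataparams params;
	char model[sizeof(params.atap_model) + 1];

	if (ata_get_params(drvp, AT_WAIT, &params) != CMD_OK)
		return;
	memcpy(model, params.atap_model, sizeof(params.atap_model));
	model[sizeof(params.atap_model)] = '\0';
	printf("drive model: %s\n", model);
}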
Example #17
0
/*
 * Read/write routine for a buffer.  Validates the arguments and schedules the
 * transfer.  Does not wait for the transfer to complete.
 */
void
edmcastrategy(struct buf *bp)
{
    struct ed_softc *ed;
    struct disklabel *lp;
    daddr_t blkno;

    ed = device_lookup_private(&ed_cd, DISKUNIT(bp->b_dev));
    lp = ed->sc_dk.dk_label;

    ATADEBUG_PRINT(("edmcastrategy (%s)\n", device_xname(ed->sc_dev)),
                   DEBUG_XFERS);

    /* Valid request?  */
    if (bp->b_blkno < 0 ||
            (bp->b_bcount % lp->d_secsize) != 0 ||
            (bp->b_bcount / lp->d_secsize) >= (1 << NBBY)) {
        bp->b_error = EINVAL;
        goto done;
    }

    /* If device invalidated (e.g. media change, door open), error. */
    if ((ed->sc_flags & WDF_LOADED) == 0) {
        bp->b_error = EIO;
        goto done;
    }

    /* If it's a null transfer, return immediately. */
    if (bp->b_bcount == 0)
        goto done;

    /*
     * Do bounds checking and adjust the transfer if necessary.  If there
     * is an error, or we are past the end of the partition, just finish
     * the request here.
     */
    if (DISKPART(bp->b_dev) != RAW_PART &&
            bounds_check_with_label(&ed->sc_dk, bp,
                                    (ed->sc_flags & (WDF_WLABEL|WDF_LABELLING)) != 0) <= 0)
        goto done;

    /*
     * Now convert the block number to absolute and put it in
     * terms of the device's logical block size.
     */
    if (lp->d_secsize >= DEV_BSIZE)
        blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
    else
        blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize);

    if (DISKPART(bp->b_dev) != RAW_PART)
        blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;

    bp->b_rawblkno = blkno;

    /* Queue transfer on drive, activate drive and controller if idle. */
    mutex_enter(&ed->sc_q_lock);
    bufq_put(ed->sc_q, bp);
    mutex_exit(&ed->sc_q_lock);

    /* Ring the worker thread */
    wakeup(ed->edc_softc);

    return;
done:
    /* Toss transfer; we're done early. */
    bp->b_resid = bp->b_bcount;
    biodone(bp);
}
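
The block-number conversion above is plain integer arithmetic; factored into a helper (hypothetical sketch, same DEV_BSIZE conventions) it reads:

/* Hypothetical helper: convert a DEV_BSIZE block number to device blocks. */
static daddr_t
example_to_device_blkno(daddr_t b_blkno, uint32_t secsize)
{
    if (secsize >= DEV_BSIZE)
        return b_blkno / (secsize / DEV_BSIZE);
    return b_blkno * (DEV_BSIZE / secsize);
}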
Example #18
0
static void
atabusconfig(struct atabus_softc *atabus_sc)
{
	struct ata_channel *chp = atabus_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct atabus_initq *atabus_initq = NULL;
	int i, s, error;

	/* we are in the atabus's thread context */
	s = splbio();
	chp->ch_flags |= ATACH_TH_RUN;
	splx(s);

	/*
	 * Probe for the drives attached to the controller, unless a PMP
	 * is already known.
	 */
	/* XXX for SATA devices we will power up all drives at once */
	if (chp->ch_satapmp_nports == 0)
		(*atac->atac_probe)(chp);

	if (chp->ch_ndrives >= 2) {
		ATADEBUG_PRINT(("atabusattach: ch_drive_type 0x%x 0x%x\n",
		    chp->ch_drive[0].drive_type, chp->ch_drive[1].drive_type),
		    DEBUG_PROBE);
	}

	/* next operations will occur in a separate thread */
	s = splbio();
	chp->ch_flags &= ~ATACH_TH_RUN;
	splx(s);

	/* Make sure the devices probe in atabus order to avoid jitter. */
	mutex_enter(&atabus_qlock);
	for (;;) {
		atabus_initq = TAILQ_FIRST(&atabus_initq_head);
		if (atabus_initq->atabus_sc == atabus_sc)
			break;
		cv_wait(&atabus_qcv, &atabus_qlock);
	}
	mutex_exit(&atabus_qlock);

	/* If no drives, abort here */
	if (chp->ch_drive == NULL)
		goto out;
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++)
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE)
			break;
	if (i == chp->ch_ndrives)
		goto out;

	/* Shortcut in case we've been shutdown */
	if (chp->ch_flags & ATACH_SHUTDOWN)
		goto out;

	if ((error = kthread_create(PRI_NONE, 0, NULL, atabusconfig_thread,
	    atabus_sc, &atabus_cfg_lwp,
	    "%scnf", device_xname(atac->atac_dev))) != 0)
		aprint_error_dev(atac->atac_dev,
		    "unable to create config thread: error %d\n", error);
	return;

 out:
	mutex_enter(&atabus_qlock);
	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
	cv_broadcast(&atabus_qcv);
	mutex_exit(&atabus_qlock);

	free(atabus_initq, M_DEVBUF);

	ata_delref(chp);

	config_pending_decr(atac->atac_dev);
}
Example #19
0
int
edmcaioctl(dev_t dev, u_long xfer, void *addr, int flag, struct lwp *l)
{
    struct ed_softc *ed = device_lookup_private(&ed_cd, DISKUNIT(dev));
    int error;

    ATADEBUG_PRINT(("edioctl\n"), DEBUG_FUNCS);

    if ((ed->sc_flags & WDF_LOADED) == 0)
        return EIO;

    error = disk_ioctl(&ed->sc_dk, dev, xfer, addr, flag, l);
    if (error != EPASSTHROUGH)
        return error;

    switch (xfer) {
    case DIOCWDINFO:
    case DIOCSDINFO:
    {
        struct disklabel *lp;

        lp = (struct disklabel *)addr;

        if ((flag & FWRITE) == 0)
            return EBADF;

        mutex_enter(&ed->sc_dk.dk_openlock);
        ed->sc_flags |= WDF_LABELLING;

        error = setdisklabel(ed->sc_dk.dk_label,
                             lp, /*wd->sc_dk.dk_openmask : */0,
                             ed->sc_dk.dk_cpulabel);
        if (error == 0) {
#if 0
            if (wd->drvp->state > RECAL)
                wd->drvp->drive_flags |= ATA_DRIVE_RESET;
#endif
            if (xfer == DIOCWDINFO)
                error = writedisklabel(EDLABELDEV(dev),
                                       edmcastrategy, ed->sc_dk.dk_label,
                                       ed->sc_dk.dk_cpulabel);
        }

        ed->sc_flags &= ~WDF_LABELLING;
        mutex_exit(&ed->sc_dk.dk_openlock);
        return (error);
    }

    case DIOCKLABEL:
        if (*(int *)addr)
            ed->sc_flags |= WDF_KLABEL;
        else
            ed->sc_flags &= ~WDF_KLABEL;
        return 0;

    case DIOCWLABEL:
        if ((flag & FWRITE) == 0)
            return EBADF;
        if (*(int *)addr)
            ed->sc_flags |= WDF_WLABEL;
        else
            ed->sc_flags &= ~WDF_WLABEL;
        return 0;

    case DIOCGDEFLABEL:
        edgetdefaultlabel(ed, (struct disklabel *)addr);
        return 0;

#if 0
    case DIOCWFORMAT:
        if ((flag & FWRITE) == 0)
            return EBADF;
        {
            register struct format_op *fop;
            struct iovec aiov;
            struct uio auio;

            fop = (struct format_op *)addr;
            aiov.iov_base = fop->df_buf;
            aiov.iov_len = fop->df_count;
            auio.uio_iov = &aiov;
            auio.uio_iovcnt = 1;
            auio.uio_resid = fop->df_count;
            auio.uio_segflg = 0;
            auio.uio_offset =
                fop->df_startblk * wd->sc_dk.dk_label->d_secsize;
            auio.uio_lwp = l;
            error = physio(wdformat, NULL, dev, B_WRITE, minphys,
                           &auio);
            fop->df_count -= auio.uio_resid;
            fop->df_reg[0] = wdc->sc_status;
            fop->df_reg[1] = wdc->sc_error;
            return error;
        }
#endif

    default:
        return ENOTTY;
    }

#ifdef DIAGNOSTIC
    panic("edioctl: impossible");
#endif
}
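
For reference, a userland sketch (assumptions: the standard dkio(4) ioctls and a caller-supplied device path) that exercises the DIOCGDEFLABEL case handled above:

#include <sys/ioctl.h>
#include <sys/dkio.h>
#include <sys/disklabel.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical userland helper: fetch the default label via DIOCGDEFLABEL. */
int
example_get_default_label(const char *devpath)
{
    struct disklabel lp;
    int fd;

    if ((fd = open(devpath, O_RDONLY)) == -1)
        return -1;
    if (ioctl(fd, DIOCGDEFLABEL, &lp) == -1) {
        close(fd);
        return -1;
    }
    printf("%u sectors/track, %u tracks/cylinder\n",
           lp.d_nsectors, lp.d_ntracks);
    close(fd);
    return 0;
}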
static void
ite_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t interface;
	pcireg_t cfg, modectl;

	/* fake interface since IT8212 claims to be a RAID device */
	interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
	    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);

	cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG);
	modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE);
	ATADEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n",
	    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cfg & IT_CFG_MASK,
	    modectl & IT_MODE_MASK), DEBUG_PROBE);

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");

	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA | ATAC_CAP_UDMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;

	sc->sc_wdcdev.sc_atac.atac_set_modes = ite_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	/* Disable RAID */
	modectl &= ~IT_MODE_RAID1;
	/* Disable CPU firmware mode */
	modectl &= ~IT_MODE_CPU;

	pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels; channel++) {
		cp = &sc->pciide_channels[channel];

		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		pciide_mapchan(pa, cp, interface, pciide_pci_intr);
	}
	/* Re-read configuration registers after channels setup */
	cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG);
	modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE);
	ATADEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n",
	    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), cfg & IT_CFG_MASK,
	    modectl & IT_MODE_MASK), DEBUG_PROBE);
}
Example #21
0
int
edmcaread(dev_t dev, struct uio *uio, int flags)
{
    ATADEBUG_PRINT(("edread\n"), DEBUG_XFERS);
    return (physio(edmcastrategy, NULL, dev, B_READ, minphys, uio));
}
static void
via_setup_channel(struct ata_channel *chp)
{
	u_int32_t udmatim_reg, datatim_reg;
	u_int8_t idedma_ctl;
	int mode, drive, s;
	struct ata_drive_datas *drvp;
	struct atac_softc *atac = chp->ch_atac;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);
#ifndef PCIIDE_AMD756_ENABLEDMA
	int rev = PCI_REVISION(
	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
#endif

	idedma_ctl = 0;
	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM(sc));
	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA(sc));
	datatim_reg &= ~APO_DATATIM_MASK(chp->ch_channel);
	udmatim_reg &= ~APO_UDMA_MASK(chp->ch_channel);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
		/* add timing values, setup DMA if needed */
		if (((drvp->drive_flags & ATA_DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & ATA_DRIVE_UDMA) == 0)) {
			mode = drvp->PIO_mode;
			goto pio;
		}
		if ((atac->atac_cap & ATAC_CAP_UDMA) &&
		    (drvp->drive_flags & ATA_DRIVE_UDMA)) {
			/* use Ultra/DMA */
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
			splx(s);
			udmatim_reg |= APO_UDMA_EN(chp->ch_channel, drive) |
			    APO_UDMA_EN_MTH(chp->ch_channel, drive);
			switch (PCI_VENDOR(sc->sc_pci_id)) {
			case PCI_VENDOR_VIATECH:
				if (sc->sc_wdcdev.sc_atac.atac_udma_cap == 6) {
					/* 8233a */
					udmatim_reg |= APO_UDMA_TIME(
					    chp->ch_channel,
					    drive,
					    via_udma133_tim[drvp->UDMA_mode]);
				} else if (sc->sc_wdcdev.sc_atac.atac_udma_cap == 5) {
					/* 686b */
					udmatim_reg |= APO_UDMA_TIME(
					    chp->ch_channel,
					    drive,
					    via_udma100_tim[drvp->UDMA_mode]);
				} else if (sc->sc_wdcdev.sc_atac.atac_udma_cap == 4) {
					/* 596b or 686a */
					udmatim_reg |= APO_UDMA_CLK66(
					    chp->ch_channel);
					udmatim_reg |= APO_UDMA_TIME(
					    chp->ch_channel,
					    drive,
					    via_udma66_tim[drvp->UDMA_mode]);
				} else {
					/* 596a or 586b */
					udmatim_reg |= APO_UDMA_TIME(
					    chp->ch_channel,
					    drive,
					    via_udma33_tim[drvp->UDMA_mode]);
				}
				break;
			case PCI_VENDOR_AMD:
			case PCI_VENDOR_NVIDIA:
				udmatim_reg |= APO_UDMA_TIME(chp->ch_channel,
				    drive, amd7x6_udma_tim[drvp->UDMA_mode]);
				break;
			}
			/* can use PIO timings, MW DMA unused */
			mode = drvp->PIO_mode;
		} else {
			/* use Multiword DMA, but only if revision is OK */
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_UDMA;
			splx(s);
#ifndef PCIIDE_AMD756_ENABLEDMA
			/*
			 * The workaround doesn't seem to be necessary
			 * with all drives, so it can be disabled by
			 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if
			 * triggered.
			 */
			if (PCI_VENDOR(sc->sc_pci_id) == PCI_VENDOR_AMD &&
			    sc->sc_pp->ide_product ==
			    PCI_PRODUCT_AMD_PBC756_IDE &&
			    AMD756_CHIPREV_DISABLEDMA(rev)) {
				aprint_normal(
				    "%s:%d:%d: multi-word DMA disabled due "
				    "to chip revision\n",
				    device_xname(
				      sc->sc_wdcdev.sc_atac.atac_dev),
				    chp->ch_channel, drive);
				mode = drvp->PIO_mode;
				s = splbio();
				drvp->drive_flags &= ~ATA_DRIVE_DMA;
				splx(s);
				goto pio;
			}
#endif
			/* mode = min(pio, dma+2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
				mode = drvp->PIO_mode;
			else
				mode = drvp->DMA_mode + 2;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:		/* setup PIO mode */
		if (mode <= 2) {
			drvp->DMA_mode = 0;
			drvp->PIO_mode = 0;
			mode = 0;
		} else {
			drvp->PIO_mode = mode;
			drvp->DMA_mode = mode - 2;
		}
		datatim_reg |=
		    APO_DATATIM_PULSE(chp->ch_channel, drive,
			apollo_pio_set[mode]) |
		    APO_DATATIM_RECOV(chp->ch_channel, drive,
			apollo_pio_rec[mode]);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM(sc), datatim_reg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA(sc), udmatim_reg);
	ATADEBUG_PRINT(("via_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM(sc)),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA(sc))), DEBUG_PROBE);
}
Example #23
0
static void
sis96x_setup_channel(struct ata_channel *chp)
{
	struct ata_drive_datas *drvp;
	int drive, s;
	u_int32_t sis_tim;
	u_int32_t idedma_ctl;
	int regtim;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	sis_tim = 0;
	idedma_ctl = 0;
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		regtim = SIS_TIM133(
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57),
		    chp->ch_channel, drive);
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
		/* add timing values, setup DMA if needed */
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			/* use Ultra/DMA */
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
			splx(s);
			if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
			    SIS96x_REG_CBL(chp->ch_channel)) & SIS96x_REG_CBL_33) {
				if (drvp->UDMA_mode > 2)
					drvp->UDMA_mode = 2;
			}
			sis_tim |= sis_udma133new_tim[drvp->UDMA_mode];
			sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if (drvp->drive_flags & ATA_DRIVE_DMA) {
			/*
			 * use Multiword DMA
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			sis_tim |= sis_dma133new_tim[drvp->DMA_mode];
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			sis_tim |= sis_pio133new_tim[drvp->PIO_mode];
		}
		ATADEBUG_PRINT(("sis96x_setup_channel: new timings reg for "
		    "channel %d drive %d: 0x%x (reg 0x%x)\n",
		    chp->ch_channel, drive, sis_tim, regtim), DEBUG_PROBE);
		pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
}
Example #24
0
int
edmcawrite(dev_t dev, struct uio *uio, int flags)
{
    ATADEBUG_PRINT(("edwrite\n"), DEBUG_XFERS);
    return (physio(edmcastrategy, NULL, dev, B_WRITE, minphys, uio));
}
Example #25
0
static void
cy693_setup_channel(struct ata_channel *chp)
{
    struct ata_drive_datas *drvp;
    int drive;
    u_int32_t cy_cmd_ctrl;
    u_int32_t idedma_ctl;
    struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
    struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);
    int dma_mode = -1;

    ATADEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
                    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);

    cy_cmd_ctrl = idedma_ctl = 0;

    /* setup DMA if needed */
    pciide_channel_dma_setup(cp);

    for (drive = 0; drive < 2; drive++) {
        drvp = &chp->ch_drive[drive];
        /* If no drive, skip */
        if (drvp->drive_type == ATA_DRIVET_NONE)
            continue;
        /* add timing values, setup DMA if needed */
        if (drvp->drive_flags & ATA_DRIVE_DMA) {
            idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
            /* use Multiword DMA */
            if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
                dma_mode = drvp->DMA_mode;
        }
        cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
                        CY_CMD_CTRL_IOW_PULSE_OFF(drive));
        cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
                        CY_CMD_CTRL_IOW_REC_OFF(drive));
        cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
                        CY_CMD_CTRL_IOR_PULSE_OFF(drive));
        cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
                        CY_CMD_CTRL_IOR_REC_OFF(drive));
    }
    pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
    chp->ch_drive[0].DMA_mode = dma_mode;
    chp->ch_drive[1].DMA_mode = dma_mode;

    if (dma_mode == -1)
        dma_mode = 0;

    if (sc->sc_cy_handle != NULL) {
        /* Note: `multiple' is implied. */
        cy82c693_write(sc->sc_cy_handle,
                       (sc->sc_cy_compatchan == 0) ?
                       CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
    }

    if (idedma_ctl != 0) {
        /* Add software bits in status register */
        bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
                          idedma_ctl);
    }
    ATADEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
                    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
}
Example #26
0
/*
 * atabus_detach:
 *
 *	Autoconfiguration detach routine.
 */
static int
atabus_detach(device_t self, int flags)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	device_t dev = NULL;
	int s, i, error = 0;

	/* Shutdown the channel. */
	s = splbio();		/* XXX ALSO NEED AN INTERLOCK HERE. */
	chp->ch_flags |= ATACH_SHUTDOWN;
	splx(s);

	wakeup(&chp->ch_thread);

	while (chp->ch_thread != NULL)
		(void) tsleep(&chp->ch_flags, PRIBIO, "atadown", 0);


	/*
	 * Detach atapibus and its children.
	 */
	if ((dev = chp->atapibus) != NULL) {
		ATADEBUG_PRINT(("atabus_detach: %s: detaching %s\n",
		    device_xname(self), device_xname(dev)), DEBUG_DETACH);

		error = config_detach(dev, flags);
		if (error)
			goto out;
		KASSERT(chp->atapibus == NULL);
	}

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);

	/*
	 * Detach our other children.
	 */
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
			continue;
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		if ((dev = chp->ch_drive[i].drv_softc) != NULL) {
			ATADEBUG_PRINT(("%s.%d: %s: detaching %s\n", __func__,
			    __LINE__, device_xname(self), device_xname(dev)),
			    DEBUG_DETACH);
			error = config_detach(dev, flags);
			if (error)
				goto out;
			KASSERT(chp->ch_drive[i].drv_softc == NULL);
			KASSERT(chp->ch_drive[i].drive_type == 0);
		}
	}
	atabus_free_drives(chp);

 out:
#ifdef ATADEBUG
	if (dev != NULL && error != 0)
		ATADEBUG_PRINT(("%s: %s: error %d detaching %s\n", __func__,
		    device_xname(self), error, device_xname(dev)),
		    DEBUG_DETACH);
#endif /* ATADEBUG */

	return (error);
}
static void
via_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	pcireg_t vendor = PCI_VENDOR(pa->pa_id);
	int channel;
	u_int32_t ideconf;
	pcireg_t pcib_id, pcib_class;
	struct pci_attach_args pcib_pa;

	if (pciide_chipen(sc, pa) == 0)
		return;

	switch (vendor) {
	case PCI_VENDOR_VIATECH:
		switch (PCI_PRODUCT(pa->pa_id)) {
		case PCI_PRODUCT_VIATECH_VT6410_RAID:
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "VIA Technologies VT6410 IDE controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
			    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
			break;
		case PCI_PRODUCT_VIATECH_VX900_IDE:
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "VIA Technologies VX900 ATA133 controller\n");
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		default:
			/*
			 * get a PCI tag for the ISA bridge.
			 */
			if (pci_find_device(&pcib_pa, via_pcib_match) == 0)
				goto unknown;
			pcib_id = pcib_pa.pa_id;
			pcib_class = pcib_pa.pa_class;
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "VIA Technologies ");
			switch (PCI_PRODUCT(pcib_id)) {
			case PCI_PRODUCT_VIATECH_VT82C586_ISA:
				aprint_normal("VT82C586 (Apollo VP) ");
				if(PCI_REVISION(pcib_class) >= 0x02) {
					aprint_normal("ATA33 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
				} else {
					aprint_normal("controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
				}
				break;
			case PCI_PRODUCT_VIATECH_VT82C596A:
				aprint_normal("VT82C596A (Apollo Pro) ");
				if (PCI_REVISION(pcib_class) >= 0x12) {
					aprint_normal("ATA66 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
				} else {
					aprint_normal("ATA33 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
				}
				break;
			case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
				aprint_normal("VT82C686A (Apollo KX133) ");
				if (PCI_REVISION(pcib_class) >= 0x40) {
					aprint_normal("ATA100 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
				} else {
					aprint_normal("ATA66 controller\n");
					sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
				}
				break;
			case PCI_PRODUCT_VIATECH_VT8231:
				aprint_normal("VT8231 ATA100 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
				break;
			case PCI_PRODUCT_VIATECH_VT8233:
				aprint_normal("VT8233 ATA100 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
				break;
			case PCI_PRODUCT_VIATECH_VT8233A:
				aprint_normal("VT8233A ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_VT8235:
				aprint_normal("VT8235 ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_VT8237:
				aprint_normal("VT8237 ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_VT8237A_ISA:
				aprint_normal("VT8237A ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_CX700:
				aprint_normal("CX700 ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			case PCI_PRODUCT_VIATECH_VT8251:
				aprint_normal("VT8251 ATA133 controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
				break;
			default:
		unknown:
				aprint_normal("unknown VIA ATA controller\n");
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
			}
			break;
		}
		sc->sc_apo_regbase = APO_VIA_REGBASE;
		break;
	case PCI_VENDOR_AMD:
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_AMD_PBC8111_IDE:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		case PCI_PRODUCT_AMD_CS5536_IDE:
		case PCI_PRODUCT_AMD_PBC766_IDE:
		case PCI_PRODUCT_AMD_PBC768_IDE:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			break;
		default:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
		}
		sc->sc_apo_regbase = APO_AMD_REGBASE;
		break;
	case PCI_VENDOR_NVIDIA:
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_NVIDIA_NFORCE_ATA100:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			break;
		case PCI_PRODUCT_NVIDIA_NFORCE2_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE2_400_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE3_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE3_250_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE4_ATA133:
		case PCI_PRODUCT_NVIDIA_NFORCE430_ATA133:
		case PCI_PRODUCT_NVIDIA_MCP04_IDE:
		case PCI_PRODUCT_NVIDIA_MCP55_IDE:
		case PCI_PRODUCT_NVIDIA_MCP61_IDE:
		case PCI_PRODUCT_NVIDIA_MCP65_IDE:
		case PCI_PRODUCT_NVIDIA_MCP67_IDE:
		case PCI_PRODUCT_NVIDIA_MCP73_IDE:
		case PCI_PRODUCT_NVIDIA_MCP77_IDE:
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			break;
		}
		sc->sc_apo_regbase = APO_NVIDIA_REGBASE;
		break;
	default:
		panic("via_chip_map: unknown vendor");
	}

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.irqack = pciide_irqack;
		if (sc->sc_wdcdev.sc_atac.atac_udma_cap > 0)
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
	}
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = via_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_RAID)
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_RAID;

	wdc_allocate_regs(&sc->sc_wdcdev);

	ATADEBUG_PRINT(("via_chip_map: old APO_IDECONF=0x%x, "
	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF(sc)),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC(sc)),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM(sc)),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA(sc))),
	    DEBUG_PROBE);

	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF(sc));
	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (disabled)\n", cp->name);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
		}
		via_mapchan(pa, cp, interface, pciide_pci_intr);
	}
}
Example #28
0
int
edmcaopen(dev_t dev, int flag, int fmt, struct lwp *l)
{
    struct ed_softc *wd;
    int part, error;

    ATADEBUG_PRINT(("edopen\n"), DEBUG_FUNCS);
    wd = device_lookup_private(&ed_cd, DISKUNIT(dev));
    if (wd == NULL || (wd->sc_flags & EDF_INIT) == 0)
        return (ENXIO);

    part = DISKPART(dev);

    mutex_enter(&wd->sc_dk.dk_openlock);

    /*
     * If there are wedges, and this is not RAW_PART, then we
     * need to fail.
     */
    if (wd->sc_dk.dk_nwedges != 0 && part != RAW_PART) {
        error = EBUSY;
        goto bad1;
    }

    if (wd->sc_dk.dk_openmask != 0) {
        /*
         * If any partition is open, but the disk has been invalidated,
         * disallow further opens.
         */
        if ((wd->sc_flags & WDF_LOADED) == 0) {
            error = EIO;
            goto bad1;
        }
    } else {
        if ((wd->sc_flags & WDF_LOADED) == 0) {
            int s;

            wd->sc_flags |= WDF_LOADED;

            /* Load the physical device parameters. */
            s = splbio();
            ed_get_params(wd, NULL);
            splx(s);

            /* Load the partition info if not already loaded. */
            edgetdisklabel(dev, wd);
        }
    }

    /* Check that the partition exists. */
    if (part != RAW_PART &&
            (part >= wd->sc_dk.dk_label->d_npartitions ||
             wd->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
        error = ENXIO;
        goto bad1;
    }

    /* Ensure only one open at a time. */
    switch (fmt) {
    case S_IFCHR:
        wd->sc_dk.dk_copenmask |= (1 << part);
        break;
    case S_IFBLK:
        wd->sc_dk.dk_bopenmask |= (1 << part);
        break;
    }
    wd->sc_dk.dk_openmask =
        wd->sc_dk.dk_copenmask | wd->sc_dk.dk_bopenmask;

    error = 0;
bad1:
    mutex_exit(&wd->sc_dk.dk_openlock);
    return (error);
}
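
A small sketch (hypothetical helper) of how the open masks maintained above are queried, mirroring the test in edmcasize():

/* Hypothetical helper: is this partition open via either device node? */
static int
example_part_is_open(struct ed_softc *wd, int part)
{
    return (wd->sc_dk.dk_openmask & (1 << part)) != 0;
}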
Example #29
0
static void
sis_setup_channel(struct ata_channel *chp)
{
	struct ata_drive_datas *drvp;
	int drive, s;
	u_int32_t sis_tim;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	ATADEBUG_PRINT(("sis_setup_channel: old timings reg for "
	    "channel %d 0x%x\n", chp->ch_channel,
	    pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->ch_channel))),
	    DEBUG_PROBE);
	sis_tim = 0;
	idedma_ctl = 0;
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
		/* add timing values, setup DMA if needed */
		if ((drvp->drive_flags & ATA_DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & ATA_DRIVE_UDMA) == 0)
			goto pio;

		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			/* use Ultra/DMA */
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
			splx(s);
			if (pciide_pci_read(sc->sc_pc, sc->sc_tag,
			    SIS_REG_CBL) & SIS_REG_CBL_33(chp->ch_channel)) {
				if (drvp->UDMA_mode > 2)
					drvp->UDMA_mode = 2;
			}
			switch (sc->sis_type) {
			case SIS_TYPE_66:
			case SIS_TYPE_100OLD:
				sis_tim |= sis_udma66_tim[drvp->UDMA_mode] <<
				    SIS_TIM66_UDMA_TIME_OFF(drive);
				break;
			case SIS_TYPE_100NEW:
				sis_tim |=
				    sis_udma100new_tim[drvp->UDMA_mode] <<
				    SIS_TIM100_UDMA_TIME_OFF(drive);
				break;	/* don't fall through to 133OLD */
			case SIS_TYPE_133OLD:
				sis_tim |=
				    sis_udma133old_tim[drvp->UDMA_mode] <<
				    SIS_TIM100_UDMA_TIME_OFF(drive);
				break;
			default:
				aprint_error("unknown SiS IDE type %d\n",
				    sc->sis_type);
			}
		} else {
			/*
			 * use Multiword DMA
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
pio:		switch (sc->sis_type) {
		case SIS_TYPE_NOUDMA:
		case SIS_TYPE_66:
		case SIS_TYPE_100OLD:
			sis_tim |= sis_pio_act[drvp->PIO_mode] <<
			    SIS_TIM66_ACT_OFF(drive);
			sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
			    SIS_TIM66_REC_OFF(drive);
			break;
		case SIS_TYPE_100NEW:
		case SIS_TYPE_133OLD:
			sis_tim |= sis_pio_act[drvp->PIO_mode] <<
			    SIS_TIM100_ACT_OFF(drive);
			sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
			    SIS_TIM100_REC_OFF(drive);
			break;
		default:
			aprint_error("unknown SiS IDE type %d\n",
			    sc->sis_type);
		}
	}
	ATADEBUG_PRINT(("sis_setup_channel: new timings reg for "
	    "channel %d 0x%x\n", chp->ch_channel, sis_tim), DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->ch_channel),
		       sis_tim);
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
}
static void
ite_setup_channel(struct ata_channel *chp)
{
	struct ata_drive_datas *drvp;
	int drive, mode = 0;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);
	int channel = chp->ch_channel;
	pcireg_t cfg, modectl;
	pcireg_t tim;

	cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG);
	modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE);
	tim = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_TIM(channel));
	ATADEBUG_PRINT(("%s:%d: tim=0x%x\n",
	    device_xname(sc->sc_wdcdev.sc_atac.atac_dev),
	    channel, tim), DEBUG_PROBE);

	/* Setup DMA if needed */
	pciide_channel_dma_setup(cp);

	/* Clear all bits for this channel */
	idedma_ctl = 0;

	/* Per channel settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];

		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;

		if ((chp->ch_atac->atac_cap & ATAC_CAP_UDMA) != 0 &&
		    (drvp->drive_flags & ATA_DRIVE_UDMA) != 0) {
			/* Setup UltraDMA mode */
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
			modectl &= ~IT_MODE_DMA(channel, drive);

#if 0
			/* Check cable, only works in CPU firmware mode */
			if (drvp->UDMA_mode > 2 &&
			    (cfg & IT_CFG_CABLE(channel, drive)) == 0) {
				ATADEBUG_PRINT(("(%s:%d:%d): "
				    "80-wire cable not detected\n",
				    device_xname(sc->sc_wdcdev.sc_atac.atac_dev),
				    channel, drive), DEBUG_PROBE);
				drvp->UDMA_mode = 2;
			}
#endif

			if (drvp->UDMA_mode >= 5)
				tim |= IT_TIM_UDMA5(drive);
			else
				tim &= ~IT_TIM_UDMA5(drive);

			mode = drvp->PIO_mode;
		} else if ((chp->ch_atac->atac_cap & ATAC_CAP_DMA) != 0 &&
		    (drvp->drive_flags & ATA_DRIVE_DMA) != 0) {
			/* Setup multiword DMA mode */
			drvp->drive_flags &= ~ATA_DRIVE_UDMA;
			modectl |= IT_MODE_DMA(channel, drive);

			/* mode = min(pio, dma + 2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
				mode = drvp->PIO_mode;
			else
				mode = drvp->DMA_mode + 2;
		} else {
			goto pio;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:
		/* Setup PIO mode */
		if (mode <= 2) {
			drvp->DMA_mode = 0;
			drvp->PIO_mode = 0;
			mode = 0;
		} else {
			drvp->PIO_mode = mode;
			drvp->DMA_mode = mode - 2;
		}

		/* Enable IORDY if PIO mode >= 3 */
		if (drvp->PIO_mode >= 3)
			cfg |= IT_CFG_IORDY(channel);
	}

	ATADEBUG_PRINT(("%s: tim=0x%x\n",
	    device_xname(sc->sc_wdcdev.sc_atac.atac_dev), tim), DEBUG_PROBE);

	pci_conf_write(sc->sc_pc, sc->sc_tag, IT_CFG, cfg);
	pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl);
	pci_conf_write(sc->sc_pc, sc->sc_tag, IT_TIM(channel), tim);

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot,
		    cp->dma_iohs[IDEDMA_CTL], 0, idedma_ctl);
	}
}