Example #1
void
pcib_write_config(device_t dev, int b, int s, int f, int reg, u_int32_t val, int width)
{
    PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f, reg, val, width);
}
Example #2
static int
pci_iov_delete(struct cdev *cdev)
{
	device_t bus, dev, vf, *devlist;
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	int i, error, devcount;
	uint32_t iov_ctl;

	mtx_lock(&Giant);
	dinfo = cdev->si_drv1;
	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	devlist = NULL;

	if (iov->iov_flags & IOV_BUSY) {
		mtx_unlock(&Giant);
		return (EBUSY);
	}

	if (iov->iov_num_vfs == 0) {
		mtx_unlock(&Giant);
		return (ECHILD);
	}

	iov->iov_flags |= IOV_BUSY;

	error = device_get_children(bus, &devlist, &devcount);

	if (error != 0)
		goto out;

	for (i = 0; i < devcount; i++) {
		vf = devlist[i];

		if (!pci_iov_is_child_vf(iov, vf))
			continue;

		error = device_detach(vf);
		if (error != 0) {
			device_printf(dev,
			   "Could not disable SR-IOV: failed to detach VF %s\n",
			    device_get_nameunit(vf));
			goto out;
		}
	}

	for (i = 0; i < devcount; i++) {
		vf = devlist[i];

		if (pci_iov_is_child_vf(iov, vf))
			device_delete_child(bus, vf);
	}
	PCI_IOV_UNINIT(dev);

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE);
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);
	IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, 0, 2);

	iov->iov_num_vfs = 0;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		if (iov->iov_bar[i].res != NULL) {
			pci_release_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    iov->iov_bar[i].res);
			pci_delete_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i));
			iov->iov_bar[i].res = NULL;
		}
	}

	if (iov->iov_flags & IOV_RMAN_INITED) {
		rman_fini(&iov->rman);
		iov->iov_flags &= ~IOV_RMAN_INITED;
	}

	error = 0;
out:
	free(devlist, M_TEMP);
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);
	return (error);
}
Example #3
static int
sata_channel_attach(device_t dev)
{
	struct sata_softc *sc;
	struct ata_channel *ch;
	uint64_t work;
	int error, i;

	sc = device_get_softc(device_get_parent(dev));
	ch = device_get_softc(dev);

	if (ch->attached)
		return (0);

	ch->dev = dev;
	ch->unit = device_get_unit(dev);
	ch->flags |= ATA_USE_16BIT | ATA_NO_SLAVE | ATA_SATA;

	/* Set legacy ATA resources. */
	for (i = ATA_DATA; i <= ATA_COMMAND; i++) {
		ch->r_io[i].res = sc->sc_mem_res;
		ch->r_io[i].offset = SATA_SHADOWR_BASE(ch->unit) + (i << 2);
	}

	ch->r_io[ATA_CONTROL].res = sc->sc_mem_res;
	ch->r_io[ATA_CONTROL].offset = SATA_SHADOWR_CONTROL(ch->unit);

	ch->r_io[ATA_IDX_ADDR].res = sc->sc_mem_res;
	ata_default_registers(dev);

	/* Set SATA resources. */
	ch->r_io[ATA_SSTATUS].res = sc->sc_mem_res;
	ch->r_io[ATA_SSTATUS].offset = SATA_SATA_SSTATUS(ch->unit);
	ch->r_io[ATA_SERROR].res = sc->sc_mem_res;
	ch->r_io[ATA_SERROR].offset = SATA_SATA_SERROR(ch->unit);
	ch->r_io[ATA_SCONTROL].res = sc->sc_mem_res;
	ch->r_io[ATA_SCONTROL].offset = SATA_SATA_SCONTROL(ch->unit);
	ata_generic_hw(dev);

	ch->hw.begin_transaction = sata_channel_begin_transaction;
	ch->hw.end_transaction = sata_channel_end_transaction;
	ch->hw.status = sata_channel_status;

	/* Set DMA resources */
	ata_dmainit(dev);
	ch->dma.setprd = sata_channel_dmasetprd;

	/* Clear work area */
	KASSERT(sc->sc_edma_qlen * (sizeof(struct sata_crqb) +
	    sizeof(struct sata_crpb)) <= ch->dma.max_iosize,
	    ("insufficient DMA memory for request/response queues.\n"));
	bzero(ch->dma.work, sc->sc_edma_qlen * (sizeof(struct sata_crqb) +
	    sizeof(struct sata_crpb)));
	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Turn off EDMA engine */
	error = sata_edma_ctrl(dev, 0);
	if (error) {
		ata_dmafini(dev);
		return (error);
	}

	/*
	 * Initialize EDMA engine:
	 *	- Native Command Queuing off,
	 *	- Non-Queued operation,
	 *	- Host Queue Cache enabled.
	 */
	SATA_OUTL(sc, SATA_EDMA_CFG(ch->unit), SATA_EDMA_CFG_HQCACHE |
	    ((sc->sc_version == 1) ? SATA_EDMA_CFG_QL128 : 0));

	/* Set request queue pointers */
	work = ch->dma.work_bus;
	SATA_OUTL(sc, SATA_EDMA_REQBAHR(ch->unit), work >> 32);
	SATA_OUTL(sc, SATA_EDMA_REQIPR(ch->unit), work & 0xFFFFFFFF);
	SATA_OUTL(sc, SATA_EDMA_REQOPR(ch->unit), work & 0xFFFFFFFF);

	/* Set response queue pointers */
	work += sc->sc_edma_qlen * sizeof(struct sata_crqb);
	SATA_OUTL(sc, SATA_EDMA_RESBAHR(ch->unit), work >> 32);
	SATA_OUTL(sc, SATA_EDMA_RESIPR(ch->unit), work & 0xFFFFFFFF);
	SATA_OUTL(sc, SATA_EDMA_RESOPR(ch->unit), work & 0xFFFFFFFF);

	/* Clear any outstanding interrupts */
	ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));
	SATA_OUTL(sc, SATA_SATA_FISICR(ch->unit), 0);
	SATA_OUTL(sc, SATA_EDMA_IECR(ch->unit), 0);
	SATA_OUTL(sc, SATA_ICR,
	    ~(SATA_ICR_DEV(ch->unit) | SATA_ICR_DMADONE(ch->unit)));

	/* Unmask channel interrupts */
	SATA_OUTL(sc, SATA_EDMA_IEMR(ch->unit), 0xFFFFFFFF);
	SATA_OUTL(sc, SATA_MIMR, SATA_INL(sc, SATA_MIMR) |
	    SATA_MICR_DONE(ch->unit) | SATA_MICR_DMADONE(ch->unit) |
	    SATA_MICR_ERR(ch->unit));

	ch->attached = 1;

	return (ata_attach(dev));
}
Example #4
static int
ata_getparam(struct ata_device *atadev, int init)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
    struct ata_request *request;
    u_int8_t command = 0;
    int error = ENOMEM, retries = 2;

    if (ch->devices &
	(atadev->unit == ATA_MASTER ? ATA_ATA_MASTER : ATA_ATA_SLAVE))
	command = ATA_ATA_IDENTIFY;
    if (ch->devices &
	(atadev->unit == ATA_MASTER ? ATA_ATAPI_MASTER : ATA_ATAPI_SLAVE))
	command = ATA_ATAPI_IDENTIFY;
    if (!command)
	return ENXIO;

    while (retries-- > 0 && error) {
	if (!(request = ata_alloc_request()))
	    break;
	request->dev = atadev->dev;
	request->timeout = 1;
	request->retries = 0;
	request->u.ata.command = command;
	request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT|ATA_R_QUIET);
	request->data = (void *)&atadev->param;
	request->bytecount = sizeof(struct ata_params);
	request->donecount = 0;
	request->transfersize = DEV_BSIZE;
	ata_queue_request(request);
	error = request->result;
	ata_free_request(request);
    }

    if (!error && (isprint(atadev->param.model[0]) ||
		   isprint(atadev->param.model[1]))) {
	struct ata_params *atacap = &atadev->param;
	char buffer[64];
	int16_t *ptr;

	for (ptr = (int16_t *)atacap;
	     ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) {
	    *ptr = le16toh(*ptr);
	}
	if (!(!strncmp(atacap->model, "FX", 2) ||
	      !strncmp(atacap->model, "NEC", 3) ||
	      !strncmp(atacap->model, "Pioneer", 7) ||
	      !strncmp(atacap->model, "SHARP", 5))) {
	    bswap(atacap->model, sizeof(atacap->model));
	    bswap(atacap->revision, sizeof(atacap->revision));
	    bswap(atacap->serial, sizeof(atacap->serial));
	}
	btrim(atacap->model, sizeof(atacap->model));
	bpack(atacap->model, atacap->model, sizeof(atacap->model));
	btrim(atacap->revision, sizeof(atacap->revision));
	bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
	btrim(atacap->serial, sizeof(atacap->serial));
	bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));

	if (bootverbose)
	    kprintf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
		   device_get_unit(ch->dev),
		   atadev->unit == ATA_MASTER ? "master" : "slave",
		   ata_mode2str(ata_pmode(atacap)),
		   ata_mode2str(ata_wmode(atacap)),
		   ata_mode2str(ata_umode(atacap)),
		   (atacap->hwres & ATA_CABLE_ID) ? "80":"40");

	if (init) {
	    ksprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
	    device_set_desc_copy(atadev->dev, buffer);
	    if ((atadev->param.config & ATA_PROTO_ATAPI) &&
		(atadev->param.config != ATA_CFA_MAGIC1) &&
		(atadev->param.config != ATA_CFA_MAGIC2)) {
		if (atapi_dma && ch->dma &&
		    (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
		    ata_umode(&atadev->param) >= ATA_UDMA2)
		    atadev->mode = ATA_DMA_MAX;
	    }
	    else {
		if (ata_dma && ch->dma &&
		    (ata_umode(&atadev->param) > 0 ||
		     ata_wmode(&atadev->param) > 0))
		    atadev->mode = ATA_DMA_MAX;
	    }
	}
    }
    else {
	if (!error)
	    error = ENXIO;
    }
    return error;
}
Example #5
int
fdt_get_phyaddr(phandle_t node, device_t dev, int *phy_addr, void **phy_sc)
{
	phandle_t phy_node;
	pcell_t phy_handle, phy_reg;
	uint32_t i;
	device_t parent, child;

	if (OF_getencprop(node, "phy-handle", (void *)&phy_handle,
	    sizeof(phy_handle)) <= 0)
		return (ENXIO);

	phy_node = OF_node_from_xref(phy_handle);

	if (OF_getencprop(phy_node, "reg", (void *)&phy_reg,
	    sizeof(phy_reg)) <= 0)
		return (ENXIO);

	*phy_addr = phy_reg;

	/*
	 * Search for softc used to communicate with phy.
	 */

	/*
	 * Step 1: Search for ancestor of the phy-node with a "phy-handle"
	 * property set.
	 */
	phy_node = OF_parent(phy_node);
	while (phy_node != 0) {
		if (OF_getprop(phy_node, "phy-handle", (void *)&phy_handle,
		    sizeof(phy_handle)) > 0)
			break;
		phy_node = OF_parent(phy_node);
	}
	if (phy_node == 0)
		return (ENXIO);

	/*
	 * Step 2: For each device with the same parent and name as ours
	 * compare its node with the one found in step 1, ancestor of phy
	 * node (stored in phy_node).
	 */
	parent = device_get_parent(dev);
	i = 0;
	child = device_find_child(parent, device_get_name(dev), i);
	while (child != NULL) {
		if (ofw_bus_get_node(child) == phy_node)
			break;
		i++;
		child = device_find_child(parent, device_get_name(dev), i);
	}
	if (child == NULL)
		return (ENXIO);

	/*
	 * Use softc of the device found.
	 */
	*phy_sc = (void *)device_get_softc(child);

	return (0);
}
Example #6
static int
atkbdc_isa_probe(device_t dev)
{
	struct resource	*port0;
	struct resource	*port1;
	u_long		start;
	u_long		count;
	int		error;
	int		rid;
#if defined(__i386__) || defined(__amd64__)
	bus_space_tag_t	tag;
	bus_space_handle_t ioh1;
	volatile int	i;
	register_t	flags;
#endif

	/* check PnP IDs */
	if (ISA_PNP_PROBE(device_get_parent(dev), dev, atkbdc_ids) == ENXIO)
		return ENXIO;

	device_set_desc(dev, "Keyboard controller (i8042)");

	/*
	 * Adjust I/O port resources.
	 * The AT keyboard controller uses two ports (a command/data port
	 * 0x60 and a status port 0x64), which may be given to us in 
	 * one resource (0x60 through 0x64) or as two separate resources
	 * (0x60 and 0x64). Some brain-damaged ACPI BIOSes have the
	 * command/data and status ports reversed. Furthermore,
	 * /boot/device.hints may contain just one port, 0x60. We shall
	 * adjust the resource settings so that these two ports are
	 * available as two separate resources in the correct order.
	 */
	device_quiet(dev);
	rid = 0;
	if (bus_get_resource(dev, SYS_RES_IOPORT, rid, &start, &count) != 0)
		return ENXIO;
	if (start == IO_KBD + KBD_STATUS_PORT) {
		start = IO_KBD;
		count++;
	}
	if (count > 1)	/* adjust the count and/or start port */
		bus_set_resource(dev, SYS_RES_IOPORT, rid, start, 1);
	port0 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE);
	if (port0 == NULL)
		return ENXIO;
	rid = 1;
	if (bus_get_resource(dev, SYS_RES_IOPORT, rid, NULL, NULL) != 0)
		bus_set_resource(dev, SYS_RES_IOPORT, 1,
				 start + KBD_STATUS_PORT, 1);
	port1 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE);
	if (port1 == NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT, 0, port0);
		return ENXIO;
	}

#if defined(__i386__) || defined(__amd64__)
	/*
	 * Check if we really have an AT keyboard controller. Poll the
	 * status register until we get an "all clear" indication. If no
	 * such indication comes, it probably means that there is no AT
	 * keyboard controller present. Give up in that case. The check
	 * relies on the fact that reading from a non-existent I/O port
	 * returns 0xff on i386. This may or may not be true on other
	 * platforms.
	 */
	tag = rman_get_bustag(port0);
	ioh1 = rman_get_bushandle(port1);
	flags = intr_disable();
	for (i = 0; i != 65535; i++) {
		if ((bus_space_read_1(tag, ioh1, 0) & 0x2) == 0)
			break;
	}
	intr_restore(flags);
	if (i == 65535) {
		bus_release_resource(dev, SYS_RES_IOPORT, 0, port0);
		bus_release_resource(dev, SYS_RES_IOPORT, 1, port1);
		if (bootverbose)
			device_printf(dev, "AT keyboard controller not found\n");
		return ENXIO;
	}
#endif

	device_verbose(dev);

	error = atkbdc_probe_unit(device_get_unit(dev), port0, port1);

	bus_release_resource(dev, SYS_RES_IOPORT, 0, port0);
	bus_release_resource(dev, SYS_RES_IOPORT, 1, port1);

	return error;
}
Example #7
static int
iicbb_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr)
{
	return (IICBB_RESET(device_get_parent(dev), speed, addr, oldaddr));
}
Example #8
/* must be called with ATA channel locked and state_mtx held */
int
ata_end_transaction(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
    struct ata_device *atadev = device_get_softc(request->dev);
    int length;

    ATA_DEBUG_RQ(request, "end transaction");

    /* clear interrupt and get status */
    request->status = ATA_IDX_INB(ch, ATA_STATUS);

    switch (request->flags & (ATA_R_ATAPI | ATA_R_DMA | ATA_R_CONTROL)) {

    /* ATA PIO data transfer and control commands */
    default:

	/* on timeouts we have no data or anything so just return */
	if (request->flags & ATA_R_TIMEOUT)
	    goto end_finished;

	/* on control commands read back registers to the request struct */
	if (request->flags & ATA_R_CONTROL) {
	    ch->hw.tf_read(request);
	}

	/* if we got an error we are done with the HW */
	if (request->status & ATA_S_ERROR) {
	    request->error = ATA_IDX_INB(ch, ATA_ERROR);
	    goto end_finished;
	}
	
	/* are we moving data ? */
	if (request->flags & (ATA_R_READ | ATA_R_WRITE)) {

	    /* if read data get it */
	    if (request->flags & ATA_R_READ) {
		int flags = ATA_S_DRQ;

		if (request->u.ata.command != ATA_ATAPI_IDENTIFY)
		    flags |= ATA_S_READY;
		if (ata_wait(ch, atadev, flags) < 0) {
		    device_printf(request->dev,
				  "timeout waiting for read DRQ\n");
		    request->result = EIO;
		    goto end_finished;
		}
		ata_pio_read(request, request->transfersize);
	    }

	    /* update how far we've gotten */
	    request->donecount += request->transfersize;

	    /* do we need a scoop more ? */
	    if (request->bytecount > request->donecount) {

		/* set this transfer size according to HW capabilities */
		request->transfersize = 
		    min((request->bytecount - request->donecount),
			request->transfersize);

		/* if data write command, output the data */
		if (request->flags & ATA_R_WRITE) {

		    /* if we get an error here we are done with the HW */
		    if (ata_wait(ch, atadev, (ATA_S_READY | ATA_S_DRQ)) < 0) {
			device_printf(request->dev,
				      "timeout waiting for write DRQ\n");
			request->status = ATA_IDX_INB(ch, ATA_STATUS);
			goto end_finished;
		    }

		    /* output data and return waiting for new interrupt */
		    ata_pio_write(request, request->transfersize);
		    goto end_continue;
		}

		/* if data read command, return & wait for interrupt */
		if (request->flags & ATA_R_READ)
		    goto end_continue;
	    }
	}
	/* done with HW */
	goto end_finished;

    /* ATA DMA data transfer commands */
    case ATA_R_DMA:

	/* stop DMA engine and get status */
	if (ch->dma->stop)
	    request->dmastat = ch->dma->stop(request->dev);

	/* did we get error or data */
	if (request->status & ATA_S_ERROR)
	    request->error = ATA_IDX_INB(ch, ATA_ERROR);
	else if (request->dmastat & ATA_BMSTAT_ERROR)
	    request->status |= ATA_S_ERROR;
	else if (!(request->flags & ATA_R_TIMEOUT))
	    request->donecount = request->bytecount;

	/* release SG list etc */
	ch->dma->unload(ch->dev);

	/* done with HW */
	goto end_finished;

    /* ATAPI PIO commands */
    case ATA_R_ATAPI:
	length = ATA_IDX_INB(ch, ATA_CYL_LSB)|(ATA_IDX_INB(ch, ATA_CYL_MSB)<<8);

	/* on timeouts we have no data or anything so just return */
	if (request->flags & ATA_R_TIMEOUT)
	    goto end_finished;

	switch ((ATA_IDX_INB(ch, ATA_IREASON) & (ATA_I_CMD | ATA_I_IN)) |
		(request->status & ATA_S_DRQ)) {

	case ATAPI_P_CMDOUT:
	    /* this seems to be needed for some (slow) devices */
	    DELAY(10);

	    if (!(request->status & ATA_S_DRQ)) {
		device_printf(request->dev, "command interrupt without DRQ\n");
		request->status = ATA_S_ERROR;
		goto end_finished;
	    }
	    ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (int16_t *)request->u.atapi.ccb,
			       (atadev->param.config &
				ATA_PROTO_MASK)== ATA_PROTO_ATAPI_12 ? 6 : 8);
	    /* return wait for interrupt */
	    goto end_continue;

	case ATAPI_P_WRITE:
	    if (request->flags & ATA_R_READ) {
		request->status = ATA_S_ERROR;
		device_printf(request->dev,
			      "%s trying to write on read buffer\n",
			   ata_cmd2str(request));
		goto end_finished;
	    }
	    ata_pio_write(request, length);
	    request->donecount += length;

	    /* set next transfer size according to HW capabilities */
	    request->transfersize = min((request->bytecount-request->donecount),
					request->transfersize);
	    /* return wait for interrupt */
	    goto end_continue;

	case ATAPI_P_READ:
	    if (request->flags & ATA_R_WRITE) {
		request->status = ATA_S_ERROR;
		device_printf(request->dev,
			      "%s trying to read on write buffer\n",
			   ata_cmd2str(request));
		goto end_finished;
	    }
	    ata_pio_read(request, length);
	    request->donecount += length;

	    /* set next transfer size according to HW capabilities */
	    request->transfersize = min((request->bytecount-request->donecount),
					request->transfersize);
	    /* return wait for interrupt */
	    goto end_continue;

	case ATAPI_P_DONEDRQ:
	    device_printf(request->dev,
			  "WARNING - %s DONEDRQ non conformant device\n",
			  ata_cmd2str(request));
	    if (request->flags & ATA_R_READ) {
		ata_pio_read(request, length);
		request->donecount += length;
	    }
	    else if (request->flags & ATA_R_WRITE) {
		ata_pio_write(request, length);
		request->donecount += length;
	    }
	    else
		request->status = ATA_S_ERROR;
	    /* FALLTHROUGH */

	case ATAPI_P_ABORT:
	case ATAPI_P_DONE:
	    if (request->status & (ATA_S_ERROR | ATA_S_DWF))
		request->error = ATA_IDX_INB(ch, ATA_ERROR);
	    goto end_finished;

	default:
	    device_printf(request->dev, "unknown transfer phase\n");
	    request->status = ATA_S_ERROR;
	}

	/* done with HW */
	goto end_finished;

    /* ATAPI DMA commands */
    case ATA_R_ATAPI|ATA_R_DMA:

	/* stop DMA engine and get status */
	if (ch->dma->stop)
	    request->dmastat = ch->dma->stop(request->dev);

	/* did we get error or data */
	if (request->status & (ATA_S_ERROR | ATA_S_DWF))
	    request->error = ATA_IDX_INB(ch, ATA_ERROR);
	else if (request->dmastat & ATA_BMSTAT_ERROR)
	    request->status |= ATA_S_ERROR;
	else if (!(request->flags & ATA_R_TIMEOUT))
	    request->donecount = request->bytecount;
 
	/* release SG list etc */
	ch->dma->unload(ch->dev);

	/* done with HW */
	goto end_finished;
    }
    /* NOT REACHED */
    printf("ata_end_transaction OOPS!!\n");

end_finished:
    callout_stop(&request->callout);
    return ATA_OP_FINISHED;

end_continue:
    return ATA_OP_CONTINUES;
}
Example #9
int
ata_generic_command(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
    struct ata_device *atadev = device_get_softc(request->dev);

    /* select device */
    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | atadev->unit);

    /* ready to issue command ? */
    if (ata_wait(ch, atadev, 0) < 0) { 
	device_printf(request->dev, "timeout waiting to issue command\n");
	return -1;
    }

    /* enable interrupt */
    ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT);

    if (request->flags & ATA_R_ATAPI) {
	int timeout = 5000;

	/* issue packet command to controller */
	if (request->flags & ATA_R_DMA) {
	    ATA_IDX_OUTB(ch, ATA_FEATURE, ATA_F_DMA);
	    ATA_IDX_OUTB(ch, ATA_CYL_LSB, 0);
	    ATA_IDX_OUTB(ch, ATA_CYL_MSB, 0);
	}
	else {
	    ATA_IDX_OUTB(ch, ATA_FEATURE, 0);
	    ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->transfersize);
	    ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->transfersize >> 8);
	}
	ATA_IDX_OUTB(ch, ATA_COMMAND, ATA_PACKET_CMD);

	/* command interrupt device ? just return and wait for interrupt */
	if ((atadev->param.config & ATA_DRQ_MASK) == ATA_DRQ_INTR)
	    return 0;

	/* wait for ready to write ATAPI command block */
	while (timeout--) {
	    int reason = ATA_IDX_INB(ch, ATA_IREASON);
	    int status = ATA_IDX_INB(ch, ATA_STATUS);

	    if (((reason & (ATA_I_CMD | ATA_I_IN)) |
		 (status & (ATA_S_DRQ | ATA_S_BUSY))) == ATAPI_P_CMDOUT)
		break;
	    DELAY(20);
	}
	if (timeout <= 0) {
	    device_printf(request->dev, "timeout waiting for ATAPI ready\n");
	    request->result = EIO;
	    return -1;
	}

	/* this seems to be needed for some (slow) devices */
	DELAY(10);
		    
	/* output command block */
	ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (int16_t *)request->u.atapi.ccb,
			   (atadev->param.config & ATA_PROTO_MASK) ==
			   ATA_PROTO_ATAPI_12 ? 6 : 8);
    }
    else {
	ch->hw.tf_write(request);

	/* issue command to controller */
	ATA_IDX_OUTB(ch, ATA_COMMAND, request->u.ata.command);
    }

    return 0;
}
Example #10
static void
ciphy_fixup(struct mii_softc *sc)
{
    uint16_t		model;
    uint16_t		status, speed;
    uint16_t		val;

    model = MII_MODEL(PHY_READ(sc, CIPHY_MII_PHYIDR2));
    status = PHY_READ(sc, CIPHY_MII_AUXCSR);
    speed = status & CIPHY_AUXCSR_SPEED;

    if (strcmp(device_get_name(device_get_parent(sc->mii_dev)),
               "nfe") == 0) {
        /* need to set for 2.5V RGMII for NVIDIA adapters */
        val = PHY_READ(sc, CIPHY_MII_ECTL1);
        val &= ~(CIPHY_ECTL1_IOVOL | CIPHY_ECTL1_INTSEL);
        val |= (CIPHY_IOVOL_2500MV | CIPHY_INTSEL_RGMII);
        PHY_WRITE(sc, CIPHY_MII_ECTL1, val);
        /* From Linux. */
        val = PHY_READ(sc, CIPHY_MII_AUXCSR);
        val |= CIPHY_AUXCSR_MDPPS;
        PHY_WRITE(sc, CIPHY_MII_AUXCSR, val);
        val = PHY_READ(sc, CIPHY_MII_10BTCSR);
        val |= CIPHY_10BTCSR_ECHO;
        PHY_WRITE(sc, CIPHY_MII_10BTCSR, val);
    }

    switch (model) {
    case MII_MODEL_CICADA_CS8204:
    case MII_MODEL_CICADA_CS8201:

        /* Turn off "aux mode" (whatever that means) */
        PHY_SETBIT(sc, CIPHY_MII_AUXCSR, CIPHY_AUXCSR_MDPPS);

        /*
         * Work around speed polling bug in VT3119/VT3216
         * when using MII in full duplex mode.
         */
        if ((speed == CIPHY_SPEED10 || speed == CIPHY_SPEED100) &&
                (status & CIPHY_AUXCSR_FDX)) {
            PHY_SETBIT(sc, CIPHY_MII_10BTCSR, CIPHY_10BTCSR_ECHO);
        } else {
            PHY_CLRBIT(sc, CIPHY_MII_10BTCSR, CIPHY_10BTCSR_ECHO);
        }

        /* Enable link/activity LED blink. */
        PHY_SETBIT(sc, CIPHY_MII_LED, CIPHY_LED_LINKACTBLINK);

        break;

    case MII_MODEL_CICADA_CS8201A:
    case MII_MODEL_CICADA_CS8201B:

        /*
         * Work around speed polling bug in VT3119/VT3216
         * when using MII in full duplex mode.
         */
        if ((speed == CIPHY_SPEED10 || speed == CIPHY_SPEED100) &&
                (status & CIPHY_AUXCSR_FDX)) {
            PHY_SETBIT(sc, CIPHY_MII_10BTCSR, CIPHY_10BTCSR_ECHO);
        } else {
            PHY_CLRBIT(sc, CIPHY_MII_10BTCSR, CIPHY_10BTCSR_ECHO);
        }

        break;
    case MII_MODEL_CICADA_VSC8211:
    case MII_MODEL_CICADA_CS8244:
    case MII_MODEL_VITESSE_VSC8601:
        break;
    default:
        device_printf(sc->mii_dev, "unknown CICADA PHY model %x\n",
                      model);
        break;
    }
}
Example #11
static int
ipmi_acpi_attach(device_t dev)
{
	ACPI_HANDLE devh;
	const char *mode;
	struct ipmi_get_info info;
	struct ipmi_softc *sc = device_get_softc(dev);
	int count, error, flags, i, type;
	int interface_type = 0, interface_version = 0;

	error = 0;
	devh = acpi_get_handle(dev);
	if (ACPI_FAILURE(acpi_GetInteger(devh, "_IFT", &interface_type)))
		return (ENXIO);

	if (ACPI_FAILURE(acpi_GetInteger(devh, "_SRV", &interface_version)))
		return (ENXIO);

	switch (interface_type) {
	case KCS_MODE:
		count = 2;
		mode = "KCS";
		break;
	case SMIC_MODE:
		count = 3;
		mode = "SMIC";
		break;
	case BT_MODE:
		device_printf(dev, "BT interface not supported\n");
		return (ENXIO);
	case SSIF_MODE:
		if (ACPI_FAILURE(acpi_GetInteger(devh, "_ADR", &flags)))
			return (ENXIO);
		info.address = flags;
		device_printf(dev, "SSIF interface not supported on ACPI\n");
		return (0);
	default:
		return (ENXIO);
	}

	if (bus_get_resource(dev, SYS_RES_IOPORT, 0, NULL, NULL) == 0)
		type = SYS_RES_IOPORT;
	else if (bus_get_resource(dev, SYS_RES_MEMORY, 0, NULL, NULL) == 0)
		type = SYS_RES_MEMORY;
	else {
		device_printf(dev, "unknown resource type\n");
		return (ENXIO);
	}

	sc->ipmi_io_rid = 0;
	sc->ipmi_io_res[0] = bus_alloc_resource_any(dev, type,
	    &sc->ipmi_io_rid, RF_ACTIVE);
	sc->ipmi_io_type = type;
	sc->ipmi_io_spacing = 1;
	if (sc->ipmi_io_res[0] == NULL) {
		device_printf(dev, "couldn't configure I/O resource\n");
		return (ENXIO);
	}

	/* If we have multiple resources, allocate up to MAX_RES. */
	for (i = 1; i < MAX_RES; i++) {
		sc->ipmi_io_rid = i;
		sc->ipmi_io_res[i] = bus_alloc_resource_any(dev, type,
		    &sc->ipmi_io_rid, RF_ACTIVE);
		if (sc->ipmi_io_res[i] == NULL)
			break;
	}
	sc->ipmi_io_rid = 0;

	/* If we have multiple resources, make sure we have enough of them. */
	if (sc->ipmi_io_res[1] != NULL && sc->ipmi_io_res[count - 1] == NULL) {
		device_printf(dev, "too few I/O resources\n");
		error = ENXIO;
		goto bad;
	}

	device_printf(dev, "%s mode found at %s 0x%jx on %s\n",
	    mode, type == SYS_RES_IOPORT ? "io" : "mem",
	    (uintmax_t)rman_get_start(sc->ipmi_io_res[0]),
	    device_get_name(device_get_parent(dev)));

	sc->ipmi_dev = dev;

	/*
	 * Setup an interrupt if we have an interrupt resource.  We
	 * don't support GPE interrupts via _GPE yet.
	 */
	sc->ipmi_irq_rid = 0;
	sc->ipmi_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->ipmi_irq_rid, RF_SHAREABLE | RF_ACTIVE);

	/* Warn if _GPE exists. */
	if (ACPI_SUCCESS(AcpiEvaluateObject(devh, "_GPE", NULL, NULL)))
		device_printf(dev, "_GPE support not implemented\n");

	/*
	 * We assume an alignment of 1 byte as currently the IPMI spec
	 * doesn't provide any way to determine the alignment via ACPI.
	 */
	switch (interface_type) {
	case KCS_MODE:
		error = ipmi_kcs_attach(sc);
		if (error)
			goto bad;
		break;
	case SMIC_MODE:
		error = ipmi_smic_attach(sc);
		if (error)
			goto bad;
		break;
	}
	error = ipmi_attach(dev);
	if (error)
		goto bad;

	return (0);
bad:
	ipmi_release_resources(dev);
	return (error);
}
Example #12
static struct resource *
obio_alloc_resource(device_t bus, device_t child, int type, int *rid,
    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct obio_softc		*sc = device_get_softc(bus);
	struct obio_ivar		*ivar = device_get_ivars(child);
	struct resource			*rv;
	struct resource_list_entry	*rle;
	struct rman			*rm;
	int				 isdefault, needactivate, passthrough;

	isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1);
	needactivate = flags & RF_ACTIVE;
	passthrough = (device_get_parent(child) != bus);
	rle = NULL;

	if (passthrough)
		return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type,
		    rid, start, end, count, flags));

	/*
	 * If this is an allocation of the "default" range for a given RID,
	 * and we know what the resources for this device are (ie. they aren't
	 * maintained by a child bus), then work out the start/end values.
	 */
	if (isdefault) {
		rle = resource_list_find(&ivar->resources, type, *rid);
		if (rle == NULL)
			return (NULL);
		if (rle->res != NULL) {
			panic("%s: resource entry is busy", __func__);
		}
		start = rle->start;
		end = rle->end;
		count = rle->count;
	}

	switch (type) {
	case SYS_RES_IRQ:
		rm = &sc->oba_irq_rman;
		break;
	case SYS_RES_MEMORY:
		rm = &sc->oba_mem_rman;
		break;
	default:
		printf("%s: unknown resource type %d\n", __func__, type);
		return (NULL);
	}

	rv = rman_reserve_resource(rm, start, end, count, flags, child);
	if (rv == NULL) {
		printf("%s: could not reserve resource\n", __func__);
		return (NULL);
	}

	rman_set_rid(rv, *rid);

	if (needactivate) {
		if (bus_activate_resource(child, type, *rid, rv)) {
			printf("%s: could not activate resource\n", __func__);
			rman_release_resource(rv);
			return (NULL);
		}
	}

	return (rv);
}
Example #13
/*
 * icoutput()
 */
static int
icoutput(struct ifnet *ifp, struct mbuf *m,
	struct sockaddr *dst, struct rtentry *rt)
{
	device_t icdev = devclass_get_device(ic_devclass, ifp->if_unit);
	device_t parent = device_get_parent(icdev);
	struct ic_softc *sc = (struct ic_softc *)device_get_softc(icdev);

	int s, len, sent;
	struct mbuf *mm;
	u_char *cp;
	u_int hdr = dst->sa_family;

	ifp->if_flags |= IFF_RUNNING;

	s = splhigh();

	/* already sending? */
	if (sc->ic_sending) {
		ifp->if_oerrors ++;
		goto error;
	}
		
	/* insert header */
	bcopy ((char *)&hdr, sc->ic_obuf, ICHDRLEN);

	cp = sc->ic_obuf + ICHDRLEN;
	len = 0;
	mm = m;
	do {
		if (len + mm->m_len > sc->ic_if.if_mtu) {
			/* packet too large */
			ifp->if_oerrors ++;
			goto error;
		}
			
		bcopy(mtod(mm,char *), cp, mm->m_len);
		cp += mm->m_len;
		len += mm->m_len;

	} while ((mm = mm->m_next));

#if NBPFILTER > 0
	if (ifp->if_bpf) {
		struct mbuf m0, *n = m;

		/*
		 * We need to prepend the address family as
		 * a four byte field.  Cons up a dummy header
		 * to pacify bpf.  This is safe because bpf
		 * will only read from the mbuf (i.e., it won't
		 * try to free it or keep a pointer to it).
		 */
		m0.m_next = m;
		m0.m_len = sizeof(u_int);
		m0.m_data = (char *)&hdr;
		n = &m0;

		bpf_mtap(ifp, n);
	}
#endif

	sc->ic_sending = 1;

	m_freem(m);
	splx(s);

	/* send the packet */
	if (iicbus_block_write(parent, sc->ic_addr, sc->ic_obuf,
				len + ICHDRLEN, &sent))

		ifp->if_oerrors ++;
	else {
		ifp->if_opackets ++;
		ifp->if_obytes += len;
	}

	sc->ic_sending = 0;

	return (0);

error:
	m_freem(m);
	splx(s);

	return(0);
}
Example #14
/*
 * icioctl()
 */
static int
icioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    device_t icdev = devclass_get_device(ic_devclass, ifp->if_unit);
    device_t parent = device_get_parent(icdev);
    struct ic_softc *sc = (struct ic_softc *)device_get_softc(icdev);

    struct ifaddr *ifa = (struct ifaddr *)data;
    struct ifreq *ifr = (struct ifreq *)data;

    u_char *iptr, *optr;
    int error;

    switch (cmd) {

    case SIOCSIFDSTADDR:
    case SIOCAIFADDR:
    case SIOCSIFADDR:
	if (ifa->ifa_addr->sa_family != AF_INET)
	    return EAFNOSUPPORT;
	ifp->if_flags |= IFF_UP;
	/* FALLTHROUGH */
    case SIOCSIFFLAGS:
	if ((!(ifp->if_flags & IFF_UP)) && (ifp->if_flags & IFF_RUNNING)) {

	    /* XXX disable PCF */
	    ifp->if_flags &= ~IFF_RUNNING;

	    /* IFF_UP is not set, try to release the bus anyway */
	    iicbus_release_bus(parent, icdev);
	    break;
	}
	if (((ifp->if_flags & IFF_UP)) && (!(ifp->if_flags & IFF_RUNNING))) {

	    if ((error = iicbus_request_bus(parent, icdev, IIC_WAIT|IIC_INTR)))
		return (error);

	    sc->ic_obuf = malloc(sc->ic_if.if_mtu + ICHDRLEN,
				  M_DEVBUF, M_WAITOK);
	    if (!sc->ic_obuf) {
		iicbus_release_bus(parent, icdev);
		return ENOBUFS;
	    }

	    sc->ic_ifbuf = malloc(sc->ic_if.if_mtu + ICHDRLEN,
				  M_DEVBUF, M_WAITOK);
	    if (!sc->ic_ifbuf) {
		iicbus_release_bus(parent, icdev);
		return ENOBUFS;
	    }

	    iicbus_reset(parent, IIC_FASTEST, 0, NULL);

	    ifp->if_flags |= IFF_RUNNING;
	}
	break;

    case SIOCSIFMTU:
	/* save previous buffers */
	iptr = sc->ic_ifbuf;
	optr = sc->ic_obuf;

	/* allocate input buffer */
	sc->ic_ifbuf = malloc(ifr->ifr_mtu+ICHDRLEN, M_DEVBUF, M_NOWAIT);
	if (!sc->ic_ifbuf) {

	    sc->ic_ifbuf = iptr;
	    sc->ic_obuf = optr;

	    return ENOBUFS;
	}

	/* allocate output buffer */
	sc->ic_obuf = malloc(ifr->ifr_mtu+ICHDRLEN, M_DEVBUF, M_NOWAIT);
	if (!sc->ic_obuf) {

	    free(sc->ic_ifbuf,M_DEVBUF);

	    sc->ic_ifbuf = iptr;
	    sc->ic_obuf = optr;

	    return ENOBUFS;
	}

	if (iptr)
	    free(iptr,M_DEVBUF);

	if (optr)
	    free(optr,M_DEVBUF);

	sc->ic_if.if_mtu = ifr->ifr_mtu;
	break;

    case SIOCGIFMTU:
	ifr->ifr_mtu = sc->ic_if.if_mtu;
	break;

    case SIOCADDMULTI:
    case SIOCDELMULTI:
	if (ifr == 0) {
	    return EAFNOSUPPORT;		/* XXX */
	}
	switch (ifr->ifr_addr.sa_family) {

	case AF_INET:
	    break;

	default:
	    return EAFNOSUPPORT;
	}
	break;

    default:
	return EINVAL;
    }
    return 0;
}
Example #15
void
nandbus_read_buffer(device_t dev, void *buf, uint32_t len)
{

	NFC_READ_BUF(device_get_parent(dev), buf, len);
}
Example #16
/* must be called with ATA channel locked and state_mtx held */
int
ata_begin_transaction(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
    struct ata_device *atadev = device_get_softc(request->dev);
    int dummy, error;

    ATA_DEBUG_RQ(request, "begin transaction");

    /* disable ATAPI DMA writes if HW doesn't support it */
    if ((ch->flags & ATA_ATAPI_DMA_RO) &&
	((request->flags & (ATA_R_ATAPI | ATA_R_DMA | ATA_R_WRITE)) ==
	 (ATA_R_ATAPI | ATA_R_DMA | ATA_R_WRITE)))
	request->flags &= ~ATA_R_DMA;

    /* check for 48 bit access and convert if needed */
    ata_modify_if_48bit(request);

    switch (request->flags & (ATA_R_ATAPI | ATA_R_DMA)) {

    /* ATA PIO data transfer and control commands */
    default:
	{
	/* record command direction here as our request might be gone later */
	int write = (request->flags & ATA_R_WRITE);

	    /* issue command */
	    if (ch->hw.command(request)) {
		device_printf(request->dev, "error issuing %s command\n",
			   ata_cmd2str(request));
		request->result = EIO;
		goto begin_finished;
	    }

	    /* device reset doesn't interrupt */
	    if (request->u.ata.command == ATA_DEVICE_RESET) {

		int timeout = 1000000;
		do {
		    DELAY(10);
		    request->status = ATA_IDX_INB(ch, ATA_STATUS);
		} while (request->status & ATA_S_BUSY && timeout--);
		if (request->status & ATA_S_ERROR)
		    request->error = ATA_IDX_INB(ch, ATA_ERROR);
		goto begin_finished;
	    }

	    /* if write command output the data */
	    if (write) {
		if (ata_wait(ch, atadev, (ATA_S_READY | ATA_S_DRQ)) < 0) {
		    device_printf(request->dev,
				  "timeout waiting for write DRQ\n");
		    request->result = EIO;
		    goto begin_finished;
		}
		ata_pio_write(request, request->transfersize);
	    }
	}
	goto begin_continue;

    /* ATA DMA data transfer commands */
    case ATA_R_DMA:
	/* check sanity, setup SG list and DMA engine */
	if ((error = ch->dma->load(ch->dev, request->data, request->bytecount,
				   request->flags & ATA_R_READ, ch->dma->sg, 
				   &dummy))) {
	    device_printf(request->dev, "setting up DMA failed\n");
	    request->result = error;
	    goto begin_finished;
	}

	/* issue command */
	if (ch->hw.command(request)) {
	    device_printf(request->dev, "error issuing %s command\n",
		       ata_cmd2str(request));
	    request->result = EIO;
	    goto begin_finished;
	}

	/* start DMA engine */
	if (ch->dma->start && ch->dma->start(request->dev)) {
	    device_printf(request->dev, "error starting DMA\n");
	    request->result = EIO;
	    goto begin_finished;
	}
	goto begin_continue;

    /* ATAPI PIO commands */
    case ATA_R_ATAPI:
	/* is this just a POLL DSC command ? */
	if (request->u.atapi.ccb[0] == ATAPI_POLL_DSC) {
	    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | atadev->unit);
	    DELAY(10);
	    if (!(ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_DSC))
		request->result = EBUSY;
	    goto begin_finished;
	}

	/* start ATAPI operation */
	if (ch->hw.command(request)) {
	    device_printf(request->dev, "error issuing ATA PACKET command\n");
	    request->result = EIO;
	    goto begin_finished;
	}
	goto begin_continue;

   /* ATAPI DMA commands */
    case ATA_R_ATAPI|ATA_R_DMA:
	/* is this just a POLL DSC command ? */
	if (request->u.atapi.ccb[0] == ATAPI_POLL_DSC) {
	    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | atadev->unit);
	    DELAY(10);
	    if (!(ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_DSC))
		request->result = EBUSY;
	    goto begin_finished;
	}

	/* check sanity, setup SG list and DMA engine */
	if ((error = ch->dma->load(ch->dev, request->data, request->bytecount,
				   request->flags & ATA_R_READ, ch->dma->sg,
				   &dummy))) {
	    device_printf(request->dev, "setting up DMA failed\n");
	    request->result = error;
	    goto begin_finished;
	}

	/* start ATAPI operation */
	if (ch->hw.command(request)) {
	    device_printf(request->dev, "error issuing ATA PACKET command\n");
	    request->result = EIO;
	    goto begin_finished;
	}

	/* start DMA engine */
	if (ch->dma->start && ch->dma->start(request->dev)) {
	    request->result = EIO;
	    goto begin_finished;
	}
	goto begin_continue;
    }
    /* NOT REACHED */
    printf("ata_begin_transaction OOPS!!!\n");

begin_finished:
    if (ch->dma && ch->dma->flags & ATA_DMA_LOADED)
	ch->dma->unload(ch->dev);
    return ATA_OP_FINISHED;

begin_continue:
    callout_reset(&request->callout, request->timeout * hz,
		  (timeout_t*)ata_timeout, request);
    return ATA_OP_CONTINUES;
}
Example #17
void
nandbus_write_buffer(device_t dev, void *buf, uint32_t len)
{

	NFC_WRITE_BUF(device_get_parent(dev), buf, len);
}
Example #18
/*
 * ppb_MS_microseq()
 *
 * Interpret a microsequence. Some microinstructions are executed at the
 * adapter level to avoid function-call overhead between ppbus and the adapter.
 */
int
ppb_MS_microseq(device_t bus, device_t dev, struct ppb_microseq *msq, int *ret)
{
    struct ppb_data *ppb = (struct ppb_data *)device_get_softc(bus);
    struct ppb_device *ppbdev = (struct ppb_device *)device_get_ivars(dev);

    struct ppb_microseq *mi;		/* current microinstruction */
    int error;

    struct ppb_xfer *xfer;

    /* microsequence executed to initialize the transfer */
    struct ppb_microseq initxfer[] = {
        MS_PTR(MS_UNKNOWN), 	/* set ptr to buffer */
        MS_SET(MS_UNKNOWN),	/* set transfer size */
        MS_RET(0)
    };

    if (ppb->ppb_owner != dev)
        return (EACCES);

#define INCR_PC (mi ++)

    mi = msq;
    for (;;) {
        switch (mi->opcode) {
        case MS_OP_PUT:
        case MS_OP_GET:

            /* attempt to choose the best mode for the device */
            xfer = mode2xfer(bus, ppbdev, mi->opcode);

            /* figure out if we should use ieee1284 code */
            if (!xfer->loop) {
                if (mi->opcode == MS_OP_PUT) {
                    if ((error = PPBUS_WRITE(
                                     device_get_parent(bus),
                                     (char *)mi->arg[0].p,
                                     mi->arg[1].i, 0)))
                        goto error;

                    INCR_PC;
                    goto next;
                } else
                    panic("%s: IEEE1284 read not supported", __func__);
            }

            /* XXX should use ppb_MS_init_msq() */
            initxfer[0].arg[0].p = mi->arg[0].p;
            initxfer[1].arg[0].i = mi->arg[1].i;

            /* initialize transfer */
            ppb_MS_microseq(bus, dev, initxfer, &error);

            if (error)
                goto error;

            /* the xfer microsequence should not contain any
             * MS_OP_PUT or MS_OP_GET!
             */
            ppb_MS_microseq(bus, dev, xfer->loop, &error);

            if (error)
                goto error;

            INCR_PC;
            break;

        case MS_OP_RET:
            if (ret)
                *ret = mi->arg[0].i;	/* return code */
            return (0);
            break;

        default:
            /* executing microinstructions at ppc level is
             * faster. This is the default if the microinstr
             * is unknown here
             */
            if ((error = PPBUS_EXEC_MICROSEQ(
                             device_get_parent(bus), &mi)))
                goto error;
            break;
        }
next:
        continue;
    }
error:
    return (error);
}
Example #19
static int
iicbb_callback(device_t dev, int index, caddr_t data)
{
	return (IICBB_CALLBACK(device_get_parent(dev), index, data));
}
Example #20
static int
vga_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{

	return (BUS_READ_IVAR(device_get_parent(dev), dev, which, result));
}
Example #21
int
ata_device_ioctl(device_t dev, u_long cmd, caddr_t data)
{
    struct ata_device *atadev = device_get_softc(dev);
    struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data;
    struct ata_params *params = (struct ata_params *)data;
    int *mode = (int *)data;
    struct ata_request *request;
    caddr_t buf;
    int error;

    switch (cmd) {
    case IOCATAREQUEST:
	if (!(buf = kmalloc(ioc_request->count, M_ATA, M_WAITOK | M_NULLOK))) {
	    return ENOMEM;
	}
	if (!(request = ata_alloc_request())) {
	    kfree(buf, M_ATA);
	    return  ENOMEM;
	}
	if (ioc_request->flags & ATA_CMD_WRITE) {
	    error = copyin(ioc_request->data, buf, ioc_request->count);
	    if (error) {
		kfree(buf, M_ATA);
		ata_free_request(request);
		return error;
	    }
	}
	request->dev = dev;
	if (ioc_request->flags & ATA_CMD_ATAPI) {
	    request->flags = ATA_R_ATAPI;
	    bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
	}
	else {
	    request->u.ata.command = ioc_request->u.ata.command;
	    request->u.ata.feature = ioc_request->u.ata.feature;
	    request->u.ata.lba = ioc_request->u.ata.lba;
	    request->u.ata.count = ioc_request->u.ata.count;
	}
	request->timeout = ioc_request->timeout;
	request->data = buf;
	request->bytecount = ioc_request->count;
	request->transfersize = request->bytecount;
	if (ioc_request->flags & ATA_CMD_CONTROL)
	    request->flags |= ATA_R_CONTROL;
	if (ioc_request->flags & ATA_CMD_READ)
	    request->flags |= ATA_R_READ;
	if (ioc_request->flags & ATA_CMD_WRITE)
	    request->flags |= ATA_R_WRITE;
	ata_queue_request(request);
	if (request->flags & ATA_R_ATAPI) {
	    bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
		  sizeof(struct atapi_sense));
	}
	else {
	    ioc_request->u.ata.command = request->u.ata.command;
	    ioc_request->u.ata.feature = request->u.ata.feature;
	    ioc_request->u.ata.lba = request->u.ata.lba;
	    ioc_request->u.ata.count = request->u.ata.count;
	}
	ioc_request->error = request->result;
	if (ioc_request->flags & ATA_CMD_READ)
	    error = copyout(buf, ioc_request->data, ioc_request->count);
	else
	    error = 0;
	kfree(buf, M_ATA);
	ata_free_request(request);
	return error;
   
    case IOCATAGPARM:
	ata_getparam(atadev, 0);
	bcopy(&atadev->param, params, sizeof(struct ata_params));
	return 0;
	
    case IOCATASMODE:
	atadev->mode = *mode;
	ATA_SETMODE(device_get_parent(dev), dev);
	return 0;

    case IOCATAGMODE:
	*mode = atadev->mode;
	return 0;
    default:
	return ENOTTY;
    }
}
Example #22
static int
vga_pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie)
{
	return (BUS_TEARDOWN_INTR(device_get_parent(dev), dev, irq, cookie));
}
Example #23
void
ata_modify_if_48bit(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
    struct ata_device *atadev = device_get_softc(request->dev);

    atadev->flags &= ~ATA_D_48BIT_ACTIVE;

    if ((request->u.ata.lba + request->u.ata.count >= ATA_MAX_28BIT_LBA ||
	 request->u.ata.count > 256) &&
	atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {

	/* translate command into 48bit version */
	switch (request->u.ata.command) {
	case ATA_READ:
	    request->u.ata.command = ATA_READ48;
	    break;
	case ATA_READ_MUL:
	    request->u.ata.command = ATA_READ_MUL48;
	    break;
	case ATA_READ_DMA:
	    if (ch->flags & ATA_NO_48BIT_DMA) {
		if (request->transfersize > DEV_BSIZE)
		    request->u.ata.command = ATA_READ_MUL48;
		else
		    request->u.ata.command = ATA_READ48;
		request->flags &= ~ATA_R_DMA;
	    }
	    else
		request->u.ata.command = ATA_READ_DMA48;
	    break;
	case ATA_READ_DMA_QUEUED:
	    if (ch->flags & ATA_NO_48BIT_DMA) {
		if (request->transfersize > DEV_BSIZE)
		    request->u.ata.command = ATA_READ_MUL48;
		else
		    request->u.ata.command = ATA_READ48;
		request->flags &= ~ATA_R_DMA;
	    }
	    else
		request->u.ata.command = ATA_READ_DMA_QUEUED48;
	    break;
	case ATA_WRITE:
	    request->u.ata.command = ATA_WRITE48;
	    break;
	case ATA_WRITE_MUL:
	    request->u.ata.command = ATA_WRITE_MUL48;
	    break;
	case ATA_WRITE_DMA:
	    if (ch->flags & ATA_NO_48BIT_DMA) {
		if (request->transfersize > DEV_BSIZE)
		    request->u.ata.command = ATA_WRITE_MUL48;
		else
		    request->u.ata.command = ATA_WRITE48;
		request->flags &= ~ATA_R_DMA;
	    }
	    else
		request->u.ata.command = ATA_WRITE_DMA48;
	    break;
	case ATA_WRITE_DMA_QUEUED:
	    if (ch->flags & ATA_NO_48BIT_DMA) {
		if (request->transfersize > DEV_BSIZE)
		    request->u.ata.command = ATA_WRITE_MUL48;
		else
		    request->u.ata.command = ATA_WRITE48;
		request->flags &= ~ATA_R_DMA;
	    }
	    else
		request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
	    break;
	case ATA_FLUSHCACHE:
	    request->u.ata.command = ATA_FLUSHCACHE48;
	    break;
	case ATA_READ_NATIVE_MAX_ADDDRESS:
	    request->u.ata.command = ATA_READ_NATIVE_MAX_ADDDRESS48;
	    break;
	case ATA_SET_MAX_ADDRESS:
	    request->u.ata.command = ATA_SET_MAX_ADDRESS48;
	    break;
	default:
	    return;
	}
	atadev->flags |= ATA_D_48BIT_ACTIVE;
    }
}
Example #24
static void
vpo_action(struct cam_sim *sim, union ccb *ccb)
{
	struct vpo_data *vpo = (struct vpo_data *)sim->softc;
#ifdef INVARIANTS
	device_t ppbus = device_get_parent(vpo->vpo_dev);

	ppb_assert_locked(ppbus);
#endif
	switch (ccb->ccb_h.func_code) {
	case XPT_SCSI_IO:
	{
		struct ccb_scsiio *csio;

		csio = &ccb->csio;

#ifdef VP0_DEBUG
		device_printf(vpo->vpo_dev, "XPT_SCSI_IO (0x%x) request\n",
			csio->cdb_io.cdb_bytes[0]);
#endif

		vpo_intr(vpo, csio);

		xpt_done(ccb);

		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;

		ccg = &ccb->ccg;

#ifdef VP0_DEBUG
		device_printf(vpo->vpo_dev, "XPT_CALC_GEOMETRY (bs=%d,vs=%jd,c=%d,h=%d,spt=%d) request\n",
			ccg->block_size,
			(intmax_t)ccg->volume_size,
			ccg->cylinders,
			ccg->heads,
			ccg->secs_per_track);
#endif

		ccg->heads = 64;
		ccg->secs_per_track = 32;
		ccg->cylinders = ccg->volume_size /
				 (ccg->heads * ccg->secs_per_track);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{

#ifdef VP0_DEBUG
		device_printf(vpo->vpo_dev, "XPT_RESET_BUS request\n");
#endif

		if (vpo->vpo_isplus) {
			if (imm_reset_bus(&vpo->vpo_io)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				xpt_done(ccb);
				return;
			}
		} else {
			if (vpoio_reset_bus(&vpo->vpo_io)) {
				ccb->ccb_h.status = CAM_REQ_CMP_ERR;
				xpt_done(ccb);
				return;
			}
		}

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

#ifdef VP0_DEBUG
		device_printf(vpo->vpo_dev, "XPT_PATH_INQ request\n");
#endif
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = 0;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = 0;
		cpi->initiator_id = VP0_INITIATOR;
		cpi->bus_id = sim->bus_id;
		cpi->base_transfer_speed = 93;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Iomega", HBA_IDLEN);
		strncpy(cpi->dev_name, sim->sim_name, DEV_IDLEN);
		cpi->unit_number = sim->unit_number;
		cpi->transport = XPORT_PPB;
		cpi->transport_version = 0;

		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}

	return;
}
Example #25
static int
pci_iov_config(struct cdev *cdev, struct pci_iov_arg *arg)
{
	device_t bus, dev;
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	nvlist_t *config;
	int i, error;
	uint16_t rid_off, rid_stride;
	uint16_t first_rid, last_rid;
	uint16_t iov_ctl;
	uint16_t num_vfs, total_vfs;
	int iov_inited;

	mtx_lock(&Giant);
	dinfo = cdev->si_drv1;
	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	iov_inited = 0;
	config = NULL;

	if ((iov->iov_flags & IOV_BUSY) || iov->iov_num_vfs != 0) {
		mtx_unlock(&Giant);
		return (EBUSY);
	}
	iov->iov_flags |= IOV_BUSY;

	error = pci_iov_parse_config(iov, arg, &config);
	if (error != 0)
		goto out;

	num_vfs = pci_iov_config_get_num_vfs(config);
	total_vfs = IOV_READ(dinfo, PCIR_SRIOV_TOTAL_VFS, 2);
	if (num_vfs > total_vfs) {
		error = EINVAL;
		goto out;
	}

	error = pci_iov_config_page_size(dinfo);
	if (error != 0)
		goto out;

	error = pci_iov_set_ari(bus);
	if (error != 0)
		goto out;

	error = pci_iov_init(dev, num_vfs, config);
	if (error != 0)
		goto out;
	iov_inited = 1;

	IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, num_vfs, 2);

	rid_off = IOV_READ(dinfo, PCIR_SRIOV_VF_OFF, 2);
	rid_stride = IOV_READ(dinfo, PCIR_SRIOV_VF_STRIDE, 2);

	first_rid = pci_get_rid(dev) + rid_off;
	last_rid = first_rid + (num_vfs - 1) * rid_stride;

	/* We don't yet support allocating extra bus numbers for VFs. */
	if (pci_get_bus(dev) != PCI_RID2BUS(last_rid)) {
		error = ENOSPC;
		goto out;
	}

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE);
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);

	error = pci_iov_init_rman(dev, iov);
	if (error != 0)
		goto out;

	iov->iov_num_vfs = num_vfs;

	error = pci_iov_setup_bars(dinfo);
	if (error != 0)
		goto out;

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl |= PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE;
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);

	/* Per specification, we must wait 100ms before accessing VFs. */
	pause("iov", roundup(hz, 10));
	pci_iov_enumerate_vfs(dinfo, config, first_rid, rid_stride);

	nvlist_destroy(config);
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);

	return (0);
out:
	if (iov_inited)
		PCI_IOV_UNINIT(dev);

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		if (iov->iov_bar[i].res != NULL) {
			pci_release_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    iov->iov_bar[i].res);
			pci_delete_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i));
			iov->iov_bar[i].res = NULL;
		}
	}

	if (iov->iov_flags & IOV_RMAN_INITED) {
		rman_fini(&iov->rman);
		iov->iov_flags &= ~IOV_RMAN_INITED;
	}

	nvlist_destroy(config);
	iov->iov_num_vfs = 0;
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);
	return (error);
}
Example #26
static int
nandbus_attach(device_t dev)
{
	device_t child, nfc;
	struct nand_id chip_id;
	struct nandbus_softc *sc;
	struct nandbus_ivar *ivar;
	struct nand_softc *nfc_sc;
	struct nand_params *chip_params;
	uint8_t cs, onfi;

	sc = device_get_softc(dev);
	sc->dev = dev;

	nfc = device_get_parent(dev);
	nfc_sc = device_get_softc(nfc);

	mtx_init(&sc->nandbus_mtx, "nandbus lock", NULL, MTX_DEF);
	cv_init(&sc->nandbus_cv, "nandbus cv");

	/* Check each possible CS for existing nand devices */
	for (cs = 0; cs < NAND_NCS; cs++) {
		nand_debug(NDBG_BUS,"probe chip select %x", cs);

		/* Select & reset chip */
		if (nandbus_select_cs(dev, cs))
			break;

		if (nand_reset(dev))
			continue;

		/* Read manufacturer and device id */
		if (nand_readid(dev, &chip_id.man_id, &chip_id.dev_id))
			continue;

		if (chip_id.man_id == 0xff)
			continue;

		/*
		 * First try to get info from the table.  If that fails, see if
		 * the chip can provide ONFI info.  We check the table first to
		 * allow table entries to override info from chips that are
		 * known to provide bad ONFI data.
		 */
		onfi = 0;
		chip_params = nand_get_params(&chip_id);
		if (chip_params == NULL) {
			nand_probe_onfi(dev, &onfi);
		}

		/*
		 * At this point it appears there is a chip at this chipselect,
		 * so if we can't work with it, whine about it.
		 */
		if (chip_params == NULL && onfi == 0) {
			if (bootverbose || (nand_debug_flag & NDBG_BUS))
				printf("Chip params not found, chipsel: %d "
				    "(manuf: 0x%0x, chipid: 0x%0x, onfi: %d)\n",
				    cs, chip_id.man_id, chip_id.dev_id, onfi);
			continue;
		}

		ivar = malloc(sizeof(struct nandbus_ivar),
		    M_NAND, M_WAITOK);

		if (onfi == 1) {
			ivar->cs = cs;
			ivar->cols = 0;
			ivar->rows = 0;
			ivar->params = NULL;
			ivar->man_id = chip_id.man_id;
			ivar->dev_id = chip_id.dev_id;
			ivar->is_onfi = onfi;
			ivar->chip_cdev_name = nfc_sc->chip_cdev_name;

			child = device_add_child(dev, NULL, -1);
			device_set_ivars(child, ivar);
			continue;
		}

		ivar->cs = cs;
		ivar->cols = 1;
		ivar->rows = 2;
		ivar->params = chip_params;
		ivar->man_id = chip_id.man_id;
		ivar->dev_id = chip_id.dev_id;
		ivar->is_onfi = onfi;
		ivar->chip_cdev_name = nfc_sc->chip_cdev_name;

		/*
		 * Check what type of device we have.
		 * Devices bigger than 32MiB have one more row (3).
		 */
		if (chip_params->chip_size > 32)
			ivar->rows++;
		/* Large page devices have one more col (2) */
		if (chip_params->chip_size >= 128 &&
		    chip_params->page_size > 512)
			ivar->cols++;

		child = device_add_child(dev, NULL, -1);
		device_set_ivars(child, ivar);
	}

	bus_generic_attach(dev);
	return (0);
}
Example #27
int
pci_iov_attach_method(device_t bus, device_t dev, nvlist_t *pf_schema,
    nvlist_t *vf_schema)
{
	device_t pcib;
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	nvlist_t *schema;
	uint32_t version;
	int error;
	int iov_pos;

	dinfo = device_get_ivars(dev);
	pcib = device_get_parent(bus);
	schema = NULL;
	
	error = pci_find_extcap(dev, PCIZ_SRIOV, &iov_pos);

	if (error != 0)
		return (error);

	version = pci_read_config(dev, iov_pos, 4); 
	if (PCI_EXTCAP_VER(version) != 1) {
		if (bootverbose)
			device_printf(dev, 
			    "Unsupported version of SR-IOV (%d) detected\n",
			    PCI_EXTCAP_VER(version));

		return (ENXIO);
	}

	iov = malloc(sizeof(*dinfo->cfg.iov), M_SRIOV, M_WAITOK | M_ZERO);

	mtx_lock(&Giant);
	if (dinfo->cfg.iov != NULL) {
		error = EBUSY;
		goto cleanup;
	}
	iov->iov_pos = iov_pos;

	schema = pci_iov_build_schema(&pf_schema, &vf_schema);
	if (schema == NULL) {
		error = ENOMEM;
		goto cleanup;
	}

	error = pci_iov_validate_schema(schema);
	if (error != 0)
		goto cleanup;
	iov->iov_schema = schema;

	iov->iov_cdev = make_dev(&iov_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_WHEEL, 0600, "iov/%s", device_get_nameunit(dev));

	if (iov->iov_cdev == NULL) {
		error = ENOMEM;
		goto cleanup;
	}
	
	dinfo->cfg.iov = iov;
	iov->iov_cdev->si_drv1 = dinfo;
	mtx_unlock(&Giant);

	return (0);

cleanup:
	nvlist_destroy(schema);
	nvlist_destroy(pf_schema);
	nvlist_destroy(vf_schema);
	free(iov, M_SRIOV);
	mtx_unlock(&Giant);
	return (error);
}
Example #28
int
nandbus_select_cs(device_t dev, uint8_t cs)
{

	return (NFC_SELECT_CS(device_get_parent(dev), cs));
}
Example #29
static int
sata_channel_begin_transaction(struct ata_request *request)
{
	struct sata_softc *sc;
	struct ata_channel *ch;
	struct sata_crqb *crqb;
	uint32_t req_in;
	int error, slot;

	sc = device_get_softc(device_get_parent(request->parent));
	ch = device_get_softc(request->parent);

	mtx_assert(&ch->state_mtx, MA_OWNED);

	/* Only DMA R/W goes through the EDMA machine. */
	if (request->u.ata.command != ATA_READ_DMA &&
	    request->u.ata.command != ATA_WRITE_DMA &&
	    request->u.ata.command != ATA_READ_DMA48 &&
	    request->u.ata.command != ATA_WRITE_DMA48) {

		/* Disable EDMA before accessing legacy registers */
		if (sata_edma_is_running(request->parent)) {
			error = sata_edma_ctrl(request->parent, 0);
			if (error) {
				request->result = error;
				return (ATA_OP_FINISHED);
			}
		}

		return (ata_begin_transaction(request));
	}

	/* Prepare data for DMA */
	if ((error = ch->dma.load(request, NULL, NULL))) {
		device_printf(request->parent, "setting up DMA failed!\n");
		request->result = error;
		return ATA_OP_FINISHED;
	}

	/* Get next free queue slot */
	req_in = SATA_INL(sc, SATA_EDMA_REQIPR(ch->unit));
	slot = (req_in & sc->sc_edma_reqis_mask) >> SATA_EDMA_REQIS_OFS;
	crqb = (struct sata_crqb *)(ch->dma.work +
	    (slot << SATA_EDMA_REQIS_OFS));

	/* Fill in request */
	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	crqb->crqb_prdlo = htole32((uint64_t)request->dma->sg_bus & 0xFFFFFFFF);
	crqb->crqb_prdhi = htole32((uint64_t)request->dma->sg_bus >> 32);
	crqb->crqb_flags = htole32((request->flags & ATA_R_READ ? 0x01 : 0x00) |
	    (request->tag << 1));

	crqb->crqb_ata_command = request->u.ata.command;
	crqb->crqb_ata_feature = request->u.ata.feature;
	crqb->crqb_ata_lba_low = request->u.ata.lba;
	crqb->crqb_ata_lba_mid = request->u.ata.lba >> 8;
	crqb->crqb_ata_lba_high = request->u.ata.lba >> 16;
	crqb->crqb_ata_device = ((request->u.ata.lba >> 24) & 0x0F) | (1 << 6);
	crqb->crqb_ata_lba_low_p = request->u.ata.lba >> 24;
	crqb->crqb_ata_lba_mid_p = request->u.ata.lba >> 32;
	crqb->crqb_ata_lba_high_p = request->u.ata.lba >> 40;
	crqb->crqb_ata_feature_p = request->u.ata.feature >> 8;
	crqb->crqb_ata_count = request->u.ata.count;
	crqb->crqb_ata_count_p = request->u.ata.count >> 8;

	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Enable EDMA if disabled */
	if (!sata_edma_is_running(request->parent)) {
		error = sata_edma_ctrl(request->parent, 1);
		if (error) {
			ch->dma.unload(request);
			request->result = error;
			return (ATA_OP_FINISHED);
		}
	}

	/* Tell EDMA about new request */
	req_in = (req_in & ~sc->sc_edma_reqis_mask) | (((slot + 1) <<
	    SATA_EDMA_REQIS_OFS) & sc->sc_edma_reqis_mask);

	SATA_OUTL(sc, SATA_EDMA_REQIPR(ch->unit), req_in);

	return (ATA_OP_CONTINUES);
}
Example #30
/*
 * Since we are a child of a PCI bus, its parent must support the pcib interface.
 */
u_int32_t
pcib_read_config(device_t dev, int b, int s, int f, int reg, int width)
{
    return(PCIB_READ_CONFIG(device_get_parent(device_get_parent(dev)), b, s, f, reg, width));
}