/*
 * Drain every pending character from the RX FIFO into the TTY layer.
 *
 * NOTE(review): the unlock/lock pair around tty_flip_buffer_push()
 * shows this is called with port->lock held; the lock is dropped for
 * the push (which may re-enter the driver) and re-acquired before
 * returning.
 */
static void timbuart_rx_chars(struct uart_port *port)
{
	struct tty_struct *tty = port->state->port.tty;

	/* RXDP set in the ISR means at least one byte is in the RX FIFO */
	while (ioread32(port->membase + TIMBUART_ISR) & RXDP) {
		u8 ch = ioread8(port->membase + TIMBUART_RXFIFO);
		port->icount.rx++;
		tty_insert_flip_char(tty, ch, TTY_NORMAL);
	}

	spin_unlock(&port->lock);
	tty_flip_buffer_push(port->state->port.tty);
	spin_lock(&port->lock);

	dev_dbg(port->dev, "%s - total read %d bytes\n",
		__func__, port->icount.rx);
}
static irqreturn_t timbuart_handleinterrupt(int irq, void *devid) { struct timbuart_port *uart = (struct timbuart_port *)devid; if (ioread8(uart->port.membase + TIMBUART_IPR)) { uart->last_ier = ioread32(uart->port.membase + TIMBUART_IER); /* disable interrupts, the tasklet enables them again */ iowrite32(0, uart->port.membase + TIMBUART_IER); /* fire off bottom half */ tasklet_schedule(&uart->tasklet); return IRQ_HANDLED; } else return IRQ_NONE; }
/**
 * Handle a 16 bit I/O request.
 *
 * @dev: Device to access
 * @buffer: Data buffer
 * @buflen: Length of the buffer.
 * @rw: True to write.
 *
 * Returns @buflen, the number of bytes transferred.
 */
static unsigned int octeon_cf_data_xfer16(struct ata_device *dev,
					  unsigned char *buffer,
					  unsigned int buflen,
					  int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned long words;
	int count;

	words = buflen / 2;
	if (rw) {
		count = 16;
		while (words--) {
			iowrite16(*(uint16_t *)buffer, data_addr);
			buffer += sizeof(uint16_t);
			/*
			 * Every 16 writes do a read so the bootbus
			 * FIFO doesn't fill up.
			 */
			if (--count == 0) {
				ioread8(ap->ioaddr.altstatus_addr);
				count = 16;
			}
		}
	} else {
		while (words--) {
			*(uint16_t *)buffer = ioread16(data_addr);
			buffer += sizeof(uint16_t);
		}
	}
	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		__le16 align_buf[1] = { 0 };

		if (rw == READ) {
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(buffer, align_buf, 1);
		} else {
			memcpy(align_buf, buffer, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
		/* NOTE(review): words is dead after this point */
		words++;
	}
	return buflen;
}
/*
 * Read up to @count bytes from the TPM data FIFO into @buf, waiting
 * for data-available status between bursts.  Returns the number of
 * bytes actually read (may be short on status timeout).
 */
static int recv_data(struct tpm_chip *tpm, uint8_t *buf, size_t count)
{
	int size = 0;

	while (size < count) {
		int burstcnt;

		if (wait_for_stat(tpm, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
				  tpm->timeout_c, &tpm->read_queue) != 0)
			break;

		for (burstcnt = get_burstcount(tpm);
		     burstcnt > 0 && size < count; burstcnt--)
			buf[size++] = ioread8(TPM_DATA_FIFO(tpm, tpm->locality));
	}

	return size;
}
/*
 * Enable the I2C channel and program its SCL timing registers
 * (ICCL/ICCH) from the current peripheral clock rate.
 */
static void activate_ch(struct sh_mobile_i2c_data *pd)
{
	unsigned long i2c_clk;
	u_int32_t num;
	u_int32_t denom;
	u_int32_t tmp;

	/* Make sure the clock is enabled */
	clk_enable(pd->clk);

	/* Get clock rate after clock is enabled */
	i2c_clk = clk_get_rate(pd->clk);

	/* Calculate the value for iccl. From the data sheet:
	 * iccl = (p clock / transfer rate) * (L / (L + H))
	 * where L and H are the SCL low/high ratio (5/4 in this case).
	 * We also round off the result.
	 */
	num = i2c_clk * 5;
	denom = NORMAL_SPEED * 9;
	/* one extra decimal digit decides round-half-up */
	tmp = num * 10 / denom;
	if (tmp % 10 >= 5)
		pd->iccl = (u_int8_t)((num/denom) + 1);
	else
		pd->iccl = (u_int8_t)(num/denom);

	/* Calculate the value for icch. From the data sheet:
	   icch = (p clock / transfer rate) * (H / (L + H)) */
	num = i2c_clk * 4;
	tmp = num * 10 / denom;
	if (tmp % 10 >= 5)
		pd->icch = (u_int8_t)((num/denom) + 1);
	else
		pd->icch = (u_int8_t)(num/denom);

	/* Enable channel and configure rx ack */
	iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd));

	/* Mask all interrupts */
	iowrite8(0, ICIC(pd));

	/* Set the clock */
	iowrite8(pd->iccl, ICCL(pd));
	iowrite8(pd->icch, ICCH(pd));
}
static int generic_set_mode(struct ata_link *link, struct ata_device **unused) { struct ata_port *ap = link->ap; const struct pci_device_id *id = ap->host->private_data; int dma_enabled = 0; struct ata_device *dev; if (id->driver_data & ATA_GEN_FORCE_DMA) { dma_enabled = 0xff; } else if (ap->ioaddr.bmdma_addr) { /* Bits 5 and 6 indicate if DMA is active on master/slave */ dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); } ata_for_each_dev(dev, link, ENABLED) { /* We don't really care */ dev->pio_mode = XFER_PIO_0; dev->dma_mode = XFER_MW_DMA_0; /* We do need the right mode information for DMA or PIO and this comes from the current configuration flags */ if (dma_enabled & (1 << (5 + dev->devno))) { unsigned int xfer_mask = ata_id_xfermask(dev->id); const char *name; if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) name = ata_mode_string(xfer_mask); else { /* SWDMA perhaps? */ name = "DMA"; xfer_mask |= ata_xfer_mode2mask(XFER_MW_DMA_0); } // ata_dev_printk(dev, KERN_INFO, "configured for %s\n", ; dev->xfer_mode = ata_xfer_mask2mode(xfer_mask); dev->xfer_shift = ata_xfer_mode2shift(dev->xfer_mode); dev->flags &= ~ATA_DFLAG_PIO; } else { ; dev->xfer_mode = XFER_PIO_0; dev->xfer_shift = ATA_SHIFT_PIO; dev->flags |= ATA_DFLAG_PIO; } }
static void remove(struct pci_dev *pdev) { struct uio_info *info = pci_get_drvdata(pdev); iowrite8(INT_DISABLE, info->priv + INT_MASK_ADDR); iowrite32(INT_DISABLE, info->priv + INT_ENABLE_ADDR); ioread8(info->priv + MAILBOX); uio_unregister_device(info); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); iounmap(info->priv); kfree(info); }
/*
 * Handle a host-initiated config-change notification for a MIC
 * virtio device.  The device-control block lives immediately after
 * the (aligned) device descriptor @d.
 *
 * NOTE(review): dc->vdev holds a 64-bit value cast back to a
 * struct mic_vdev pointer — presumably stored by the card-side
 * probe path; verify against the writer.  @offset is unused here.
 */
static void mic_handle_config_change(struct mic_device_desc __iomem *d,
	unsigned int offset, struct mic_driver *mdrv)
{
	struct mic_device_ctrl __iomem *dc
		= (void __iomem *)d + mic_aligned_desc_size(d);
	struct mic_vdev *mvdev = (struct mic_vdev *)ioread64(&dc->vdev);
	struct virtio_driver *drv;

	if (ioread8(&dc->config_change) != MIC_VIRTIO_PARAM_CONFIG_CHANGED)
		return;

	dev_dbg(mdrv->dev, "%s %d\n", __func__, __LINE__);
	drv = container_of(mvdev->vdev.dev.driver,
			struct virtio_driver, driver);
	if (drv->config_changed)
		drv->config_changed(&mvdev->vdev);
	/* ack so the host may proceed */
	iowrite8(1, &dc->guest_ack);
}
/*
 * Probe how many SJA1000 controllers are present behind @base_addr.
 * A channel exists if its MOD register latches the reset-mode bit.
 * Returns the number of consecutive responding channels.
 */
static int number_of_sja1000_chip(void __iomem *base_addr)
{
	int chan;

	for (chan = 0; chan < MAX_NO_OF_CHANNELS; chan++) {
		void __iomem *mod_reg = base_addr +
			(chan * KVASER_PCI_PORT_BYTES) + REG_MOD;

		/* reset chip */
		iowrite8(MOD_RM, mod_reg);
		/* check reset bit: if it did not stick, no chip here */
		if (!(ioread8(mod_reg) & MOD_RM))
			break;
	}

	return chan;
}
static int get_bcu_config(struct ocd_bcove_config_data *ocd_smip_data) { int i; void __iomem *bcu_smip_sram_addr; u8 *plat_smip_data; if (!ocd_smip_data) return -ENXIO; plat_smip_data = (u8 *)ocd_smip_data; bcu_smip_sram_addr = ioremap_nocache(MRFL_SMIP_SRAM_ADDR + BCU_SMIP_OFFSET, NUM_SMIP_BYTES); for (i = 0; i < NUM_SMIP_BYTES ; i++) *(plat_smip_data + i) = ioread8(bcu_smip_sram_addr + i); return 0; }
static int generic_set_mode(struct ata_link *link, struct ata_device **unused) { struct ata_port *ap = link->ap; int dma_enabled = 0; struct ata_device *dev; struct pci_dev *pdev = to_pci_dev(ap->host->dev); if (ap->ioaddr.bmdma_addr) dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); if (pdev->vendor == PCI_VENDOR_ID_CENATEK) dma_enabled = 0xFF; ata_for_each_dev(dev, link, ENABLED) { dev->pio_mode = XFER_PIO_0; dev->dma_mode = XFER_MW_DMA_0; if (dma_enabled & (1 << (5 + dev->devno))) { unsigned int xfer_mask = ata_id_xfermask(dev->id); const char *name; if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) name = ata_mode_string(xfer_mask); else { name = "DMA"; xfer_mask |= ata_xfer_mode2mask(XFER_MW_DMA_0); } ata_dev_printk(dev, KERN_INFO, "configured for %s\n", name); dev->xfer_mode = ata_xfer_mask2mode(xfer_mask); dev->xfer_shift = ata_xfer_mode2shift(dev->xfer_mode); dev->flags &= ~ATA_DFLAG_PIO; } else { ata_dev_printk(dev, KERN_INFO, "configured for PIO\n"); dev->xfer_mode = XFER_PIO_0; dev->xfer_shift = ATA_SHIFT_PIO; dev->flags |= ATA_DFLAG_PIO; } }
static irqreturn_t vmeio_irq(void *arg) { struct vmeio_device *dev = arg; long data; if (dev->isr_source_address) { unsigned long data_width = dev->maps[0].data_width; if (data_width == 4) data = ioread32be(dev->isr_source_address); else if (data_width == 2) data = ioread16be(dev->isr_source_address); else data = ioread8(dev->isr_source_address); dev->isr_source_mask = data; } dev->icnt++; wake_up(&dev->queue); return IRQ_HANDLED; }
/*
 * Tear down a legacy virtio-pci virtqueue.  Register ordering is the
 * contract: select the queue first, detach its MSI-X vector (flushed
 * by an ISR read), then clear the queue PFN to deactivate it, and
 * finally free the vring.
 */
static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);

	iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	if (vp_dev->msix_enabled) {
		iowrite16(VIRTIO_MSI_NO_VECTOR,
			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		/* Flush the write out to device */
		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
	}

	/* Select and deactivate the queue */
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	vring_del_virtqueue(vq);
}
/*
 * Description:
 *      Turn Off MAC Tx
 *
 * Parameters:
 *  In:
 *      io_base    - Base Address for MAC
 *  Out:
 *      none
 *
 * Return Value: true if success; otherwise false
 *
 */
bool MACbSafeTxOff(struct vnt_private *priv)
{
	void __iomem *io_base = priv->PortOffset;
	unsigned short ww;

	/* Clear TX DMA */
	/* Tx0 */
	iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_TXDMACTL0);
	/* AC0 */
	iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_AC0DMACTL);

	/* Poll until the TX0 DMA engine reports stopped, or time out */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		if (!(ioread32(io_base + MAC_REG_TXDMACTL0) & DMACTL_RUN))
			break;
	}
	if (ww == W_MAX_TIMEOUT) {
		pr_debug(" DBG_PORT80(0x20)\n");
		return false;
	}

	/* Same for the AC0 DMA engine */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		if (!(ioread32(io_base + MAC_REG_AC0DMACTL) & DMACTL_RUN))
			break;
	}
	if (ww == W_MAX_TIMEOUT) {
		pr_debug(" DBG_PORT80(0x21)\n");
		return false;
	}

	/* try to safe shutdown TX */
	MACvRegBitsOff(io_base, MAC_REG_HOSTCR, HOSTCR_TXON);

	/* W_MAX_TIMEOUT is the timeout period */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		if (!(ioread8(io_base + MAC_REG_HOSTCR) & HOSTCR_TXONST))
			break;
	}
	if (ww == W_MAX_TIMEOUT) {
		pr_debug(" DBG_PORT80(0x24)\n");
		return false;
	}
	return true;
}
/*
 * IRQ handler for the PLX9080 DMA channel 1 on timing_card[12].
 * On DMA-done it logs the transfer time, clears the interrupt,
 * computes the delay budget before the next transfer and either
 * wakes the DMA kthread or flags a transfer as pending.
 */
irqreturn_t timing_interrupt_handler(int irq, void *dev_id)
{
	u8 tmp8;
	u32 tmp32;

	tmp8 = ioread8( timing_card[12].base + PLX9080_DMACSR1 );
	tmp32 = ioread32(timing_card[12].base + PLX9080_INTCSR );

	/* if interrupt occurred from DMA and DMA is done (sanity check) */
	if ( (tmp8 & (0x1 << 4 )) && (tmp32 & (0x1 << 22)) ) {
		end_ns = ktime_to_ns(ktime_get());
		printk(KERN_DEBUG "%u bytes moved in %lld microseconds\n",
			(unsigned) dma_size, end_ns - start_ns );

		if ( !dma_configured )
			configure_for_dma();

		/* clear interrupt status */
		iowrite8( tmp8 | (0x1 << 3),
			timing_card[12].base + PLX9080_DMACSR1 );

		/* calculate the delay */
		dma_delay = ns_clock_period * ALMOST_EMPTY * 1024;

		/* adjust delay as appropriate */
		if ( dma_delay < (end_ns - start_ns) )
			dma_delay = 0;
		else
			dma_delay -= (end_ns - start_ns);

		/* begin new transfer or set flag */
		if ( output_enabled )
			wake_up_process(dma_kthread);
		else
			dma_waiting = 1;
	}
	return IRQ_HANDLED;
}
static int generic_set_mode(struct ata_link *link, struct ata_device **unused) { struct ata_port *ap = link->ap; const struct pci_device_id *id = ap->host->private_data; int dma_enabled = 0; struct ata_device *dev; if (id->driver_data & ATA_GEN_FORCE_DMA) { dma_enabled = 0xff; } else if (ap->ioaddr.bmdma_addr) { dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); } ata_for_each_dev(dev, link, ENABLED) { dev->pio_mode = XFER_PIO_0; dev->dma_mode = XFER_MW_DMA_0; if (dma_enabled & (1 << (5 + dev->devno))) { unsigned int xfer_mask = ata_id_xfermask(dev->id); const char *name; if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) name = ata_mode_string(xfer_mask); else { name = "DMA"; xfer_mask |= ata_xfer_mode2mask(XFER_MW_DMA_0); } ata_dev_info(dev, "configured for %s\n", name); dev->xfer_mode = ata_xfer_mask2mode(xfer_mask); dev->xfer_shift = ata_xfer_mode2shift(dev->xfer_mode); dev->flags &= ~ATA_DFLAG_PIO; } else { ata_dev_info(dev, "configured for PIO\n"); dev->xfer_mode = XFER_PIO_0; dev->xfer_shift = ATA_SHIFT_PIO; dev->flags |= ATA_DFLAG_PIO; } }
/*
 * Switch the LCD enable bit (SLCDEN) in the FER register on or off,
 * under the chip spinlock.  Always returns 0.
 */
int tc6393xb_lcd_set_power(struct platform_device *fb, bool on)
{
	struct platform_device *dev = to_platform_device(fb->dev.parent);
	struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
	unsigned long flags;
	u8 fer;

	spin_lock_irqsave(&tc6393xb->lock, flags);

	fer = ioread8(tc6393xb->scr + SCR_FER);
	fer &= ~SCR_FER_SLCDEN;
	if (on)
		fer |= SCR_FER_SLCDEN;
	iowrite8(fer, tc6393xb->scr + SCR_FER);

	spin_unlock_irqrestore(&tc6393xb->lock, flags);

	return 0;
}
static int raw_read(struct vmeio_device *dev, struct vmeio_riob *riob) { struct vme_mapping *mapx = &dev->maps[riob->mapnum-1]; int dwidth = riob->data_width ? riob->data_width : mapx->data_width; int byte_dwidth = dwidth/8; int bsize = riob->wsize * byte_dwidth; int i, j, cc; char *map, *iob; if (bsize > vmeioMAX_BUF) return -E2BIG; iob = kmalloc(bsize, GFP_KERNEL); if (!iob) return -ENOMEM; if ((map = mapx->kernel_va) == NULL) { kfree(iob); return -ENODEV; } if (dev->debug > 1) { printk("RAW:READ:win:%d map:0x%p offs:0x%X amd:0x%2x dwd:%d words:%d\n", riob->mapnum, mapx->kernel_va, riob->offset, mapx->am, dwidth, riob->wsize); } for (i = 0, j = riob->offset; i < bsize; i += byte_dwidth, j += byte_dwidth) { union vmeio_word *dst = (void *)&iob[i]; if (dwidth == VME_D32) dst->width4 = ioread32be(&map[j]); else if (dwidth == VME_D16) dst->width2 = ioread16be(&map[j]); else if (dwidth == VME_D8) dst->width1 = ioread8(&map[j]); else printk(KERN_ERR PFX "invalid data width %d\n", dwidth); } cc = copy_to_user(riob->buffer, iob, bsize); kfree(iob); if (cc) return -EACCES; return 0; }
static int atp867x_set_priv(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); struct atp867x_priv *dp; int port = ap->port_no; dp = ap->private_data = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL); if (dp == NULL) return -ENOMEM; dp->dma_mode = ATP867X_IO_DMAMODE(ap, port); dp->mstr_piospd = ATP867X_IO_MSTRPIOSPD(ap, port); dp->slave_piospd = ATP867X_IO_SLAVPIOSPD(ap, port); dp->eightb_piospd = ATP867X_IO_8BPIOSPD(ap, port); dp->pci66mhz = ioread8(ATP867X_SYS_INFO(ap)) & ATP867X_IO_SYS_INFO_66MHZ; return 0; }
int get_smip_plat_config(int offset) { unsigned long sram_addr; if (INTEL_MID_BOARD(1, PHONE, MRFL) || INTEL_MID_BOARD(1, TABLET, MRFL)) { sram_addr = MRFL_SMIP_SRAM_ADDR; } else if (INTEL_MID_BOARD(1, PHONE, MOFD) || INTEL_MID_BOARD(1, TABLET, MOFD)) { sram_addr = MOFD_SMIP_SRAM_ADDR; } else return -EINVAL; if (!is_mapped) { smip = ioremap_nocache(sram_addr + MRFL_PLATFORM_CONFIG_OFFSET, 8); is_mapped = true; } return ioread8(smip + offset); }
/*
 * Program the BMDMA engine for @qc: load the PRD table address, set
 * the transfer direction with the start bit clear, then issue the
 * taskfile command.
 */
static void ns87415_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* barrier so the PRD table is visible before the address write */
	mb();
	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	dmactl |= ATA_DMA_INTR | ATA_DMA_ERR;
	/* ATA_DMA_WR = bus master writes to memory, so set it for reads */
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	ap->ops->sff_exec_command(ap, &qc->tf);
}
/*
 * Character-device read: copy up to @len bytes of VRAM starting at
 * *@off to userspace, one byte at a time via ioread8().  Returns the
 * number of bytes copied, 0 at end of VRAM, or -EFAULT.
 *
 * Fix vs. original: the loop index was a signed int compared against
 * the size_t @len (signed/unsigned mismatch); use size_t throughout.
 */
static ssize_t my_read(struct file *f, char __user *buf, size_t len,
		       loff_t *off)
{
	size_t i;
	u8 byte;

	if (*off >= VRAM_SIZE)
		return 0;

	/* clamp the request to the end of VRAM */
	if (*off + len > VRAM_SIZE)
		len = VRAM_SIZE - *off;

	for (i = 0; i < len; i++) {
		byte = ioread8((u8 *)vram + *off + i);
		if (copy_to_user(buf + i, &byte, 1))
			return -EFAULT;
	}

	*off += len;
	return len;
}
/*
 * Service RX interrupt status for the port.  @isr is the latched
 * interrupt status; *@ier accumulates the sources to re-enable.
 */
void timbuart_handle_rx_port(struct uart_port *port, u32 isr, u32 *ier)
{
	if (isr & RXFLAGS) {
		/* Some RX status is set */
		if (isr & RXBF) {
			/* RX buffer full: flush the FIFO, count an overrun */
			u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
				TIMBUART_CTRL_FLSHRX;
			iowrite8(ctl, port->membase + TIMBUART_CTRL);
			port->icount.overrun++;
		} else if (isr & (RXDP))
			timbuart_rx_chars(port);

		/* ack all RX interrupts */
		iowrite32(RXFLAGS, port->membase + TIMBUART_ISR);
	}

	/* always have the RX interrupts enabled */
	*ier |= RXBAF | RXBF | RXTT;

	dev_dbg(port->dev, "%s - leaving\n", __func__);
}
/*
 * Send one command byte to the DSP, busy-waiting (bounded) for the
 * status register to signal readiness.  Returns 0 on success, -EIO
 * if the DSP never becomes ready.
 */
static int sc6000_write(char __iomem *vport, int cmd)
{
	int attempts;

	for (attempts = 500000; attempts >= 0; attempts--) {
		/* bit 7 clear in DSP_STATUS: ready to accept a command */
		if (!(ioread8(vport + DSP_STATUS) & 0x80)) {
			iowrite8(cmd, vport + DSP_COMMAND);
			return 0;
		}
		cpu_relax();
	}

	snd_printk(KERN_ERR "DSP Command (0x%x) timeout.\n", cmd);
	return -EIO;
}
/*
 * Service RX interrupt status bits.  @isr is the latched interrupt
 * status; *@ier accumulates the interrupt sources to re-enable on
 * return.
 */
void timbuart_handle_rx_port(struct uart_port *port, u32 isr, u32 *ier)
{
	if (isr & RXFLAGS) {
		if (isr & RXBF) {
			/* RX buffer full: flush the RX FIFO and record
			 * an overrun */
			u8 ctl = ioread8(port->membase + TIMBUART_CTRL) |
				TIMBUART_CTRL_FLSHRX;
			iowrite8(ctl, port->membase + TIMBUART_CTRL);
			port->icount.overrun++;
		} else if (isr & (RXDP))
			/* data present: drain it into the TTY layer */
			timbuart_rx_chars(port);

		/* acknowledge all RX interrupt sources */
		iowrite32(RXFLAGS, port->membase + TIMBUART_ISR);
	}

	/* keep the RX interrupts enabled at all times */
	*ier |= RXBAF | RXBF | RXTT;

	dev_dbg(port->dev, "%s - leaving\n", __func__);
}
/*
 * Description:
 *      Micro Second Delay via MAC
 *
 * Parameters:
 *  In:
 *      io_base     - Base Address for MAC
 *      uDelay      - Delay time (timer resolution is 4 us)
 *  Out:
 *      none
 *
 * Return Value: none
 *
 */
void MACvTimer0MicroSDelay(struct vnt_private *priv, unsigned int uDelay)
{
	void __iomem *io_base = priv->PortOffset;
	unsigned char byValue;
	unsigned int uu, ii;

	/* stop timer 0, load the delay, start it in timed-delay mode */
	iowrite8(0, io_base + MAC_REG_TMCTL0);
	iowrite32(uDelay, io_base + MAC_REG_TMDATA0);
	iowrite8((TMCTL_TMD | TMCTL_TE), io_base + MAC_REG_TMCTL0);
	/* poll until the timer expires or is suspended, with a bound */
	for (ii = 0; ii < 66; ii++) {  /* assume max PCI clock is 66Mhz */
		for (uu = 0; uu < uDelay; uu++) {
			byValue = ioread8(io_base + MAC_REG_TMCTL0);
			if ((byValue == 0) ||
			    (byValue & TMCTL_TSUSP)) {
				iowrite8(0, io_base + MAC_REG_TMCTL0);
				return;
			}
		}
	}
	/* timed out polling: stop the timer anyway */
	iowrite8(0, io_base + MAC_REG_TMCTL0);
}
/*
 * Description:
 *      Turn Off MAC Rx
 *
 * Parameters:
 *  In:
 *      io_base    - Base Address for MAC
 *  Out:
 *      none
 *
 * Return Value: true if success; otherwise false
 *
 */
bool MACbSafeRxOff(struct vnt_private *priv)
{
	void __iomem *io_base = priv->PortOffset;
	unsigned short ww;

	/* turn off wow temp for turn off Rx safely */

	/* Clear RX DMA0,1 */
	iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_RXDMACTL0);
	iowrite32(DMACTL_CLRRUN, io_base + MAC_REG_RXDMACTL1);

	/* Poll until RX DMA0 reports stopped, or time out */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		if (!(ioread32(io_base + MAC_REG_RXDMACTL0) & DMACTL_RUN))
			break;
	}
	if (ww == W_MAX_TIMEOUT) {
		pr_debug(" DBG_PORT80(0x10)\n");
		return false;
	}

	/* Same for RX DMA1 */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		if (!(ioread32(io_base + MAC_REG_RXDMACTL1) & DMACTL_RUN))
			break;
	}
	if (ww == W_MAX_TIMEOUT) {
		pr_debug(" DBG_PORT80(0x11)\n");
		return false;
	}

	/* try to safe shutdown RX */
	MACvRegBitsOff(io_base, MAC_REG_HOSTCR, HOSTCR_RXON);

	/* W_MAX_TIMEOUT is the timeout period */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		if (!(ioread8(io_base + MAC_REG_HOSTCR) & HOSTCR_RXONST))
			break;
	}
	if (ww == W_MAX_TIMEOUT) {
		pr_debug(" DBG_PORT80(0x12)\n");
		return false;
	}
	return true;
}
/*
 * Inform host on a virtio device reset and wait for ack from host.
 *
 * Fix vs. original: removed the stray empty statement ("};") after
 * the retry loop.  Polls host_ack for up to ~10s (100 x 100ms); on
 * timeout the status byte is still cleared so the device reads as
 * reset.
 */
static void mic_reset_inform_host(struct virtio_device *vdev)
{
	struct mic_vdev *mvdev = to_micvdev(vdev);
	struct mic_device_ctrl __iomem *dc = mvdev->dc;
	int retry;

	iowrite8(0, &dc->host_ack);
	iowrite8(1, &dc->vdev_reset);
	mic_send_intr(mvdev->mdev, mvdev->c2h_vdev_db);

	/* Wait till host completes all card accesses and acks the reset */
	for (retry = 100; retry--;) {
		if (ioread8(&dc->host_ack))
			break;
		msleep(100);
	}

	dev_dbg(mic_dev(mvdev), "%s: retry: %d\n", __func__, retry);

	/* Reset status to 0 in case we timed out */
	iowrite8(0, &mvdev->desc->status);
}
/*
 * Program the BMDMA engine for @qc on the NS87415: load the PRD
 * table address, set the transfer direction with the start bit
 * clear, then issue the taskfile command.
 */
static void ns87415_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	/* Due to an erratum we need to write these bits to the wrong
	   place - which does save us an I/O bizarrely */
	dmactl |= ATA_DMA_INTR | ATA_DMA_ERR;
	/* ATA_DMA_WR = bus master writes to memory, so set it for reads */
	if (!rw)
		dmactl |= ATA_DMA_WR;
	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}
/* Read back the 8/16/32-bit test patterns and report mismatch counts. */
static void do_read_test(void __iomem *p)
{
	unsigned int i;
	unsigned bad8 = 0, bad16 = 0, bad32 = 0;

	pr_info("read test.\n");
	mmiotrace_printk("Read test.\n");

	for (i = 0; i < 256; i++) {
		if (ioread8(p + i) != i)
			bad8++;
	}

	for (i = 1024; i < (5 * 1024); i += 2) {
		if (ioread16(p + i) != v16(i))
			bad16++;
	}

	for (i = (5 * 1024); i < (16 * 1024); i += 4) {
		if (ioread32(p + i) != v32(i))
			bad32++;
	}

	mmiotrace_printk("Read errors: 8-bit %d, 16-bit %d, 32-bit %d.\n",
			 bad8, bad16, bad32);
}