static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg) { return ioread32(i2c->base + reg); }
static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs) { return ioread32(priv->base + reg_offs); }
static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs) { return ioread32(p->base + offs); }
/*
 * Read a 32-bit value from @core at @offset, after making sure the PCI
 * window currently provides access to that core.
 */
static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
{
	u16 off = offset + bcma_host_pci_provide_access_to_core(core);

	return ioread32(core->bus->mmio + off);
}
/*
 * mrf_probe() - probe an MRF event-receiver PCI card and expose it via UIO.
 *
 * Maps BAR0 (PLX bridge) and BAR2 (EVR register block) for userspace
 * access, registers the UIO device, and on PLX 9030 based cards
 * configures GPIO bits 0-3 for JTAG use.
 *
 * Return: 0 on success or a negative errno on failure.
 */
static int __devinit mrf_probe(struct pci_dev *dev,
			       const struct pci_device_id *id)
{
	int ret = -ENODEV;
	struct mrf_priv *priv;
	struct uio_info *info;

	priv = kzalloc(sizeof(struct mrf_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	info = &priv->uio;
	priv->pdev = dev;

	ret = pci_enable_device(dev);
	if (ret) {
		dev_err(&dev->dev, "pci_enable_device failed with %d\n", ret);
		goto err_free;
	}
	if (!dev->irq) {
		dev_warn(&dev->dev, "Device not configured with IRQ!\n");
		ret = -ENODEV;
		goto err_disable;
	}
	/* Bug fix: propagate the pci_request_regions() error code.  The
	 * original jumped to err_disable with ret still 0, so the probe
	 * reported success even though the device had been torn down. */
	ret = pci_request_regions(dev, DRV_NAME);
	if (ret)
		goto err_disable;

	/* BAR 0 is the PLX bridge */
	info->mem[0].addr = pci_resource_start(dev, 0);
	info->mem[0].size = pci_resource_len(dev, 0);
	info->mem[0].internal_addr = pci_ioremap_bar(dev, 0);
	info->mem[0].memtype = UIO_MEM_PHYS;
	/* Not used */
	info->mem[1].memtype = UIO_MEM_NONE;
	info->mem[1].size = 1; /* Otherwise UIO will stop searching... */
	/* BAR 2 is the EVR */
	info->mem[2].addr = pci_resource_start(dev, 2);
	info->mem[2].size = pci_resource_len(dev, 2);
	info->mem[2].internal_addr = pci_ioremap_bar(dev, 2);
	info->mem[2].memtype = UIO_MEM_PHYS;

	if (!info->mem[0].internal_addr || !info->mem[0].addr ||
	    !info->mem[2].internal_addr || !info->mem[2].addr) {
		dev_err(&dev->dev, "Failed to map BARS!\n");
		ret = -ENODEV;
		/* Bug fix: go through err_unmap so a partially successful
		 * mapping (one BAR mapped, the other not) is not leaked. */
		goto err_unmap;
	}

	info->irq = dev->irq;
	info->irq_flags = IRQF_SHARED;
	info->handler = mrf_handler;
	info->irqcontrol = mrf_irqcontrol;

	info->name = DRV_NAME;
	info->version = DRV_VERSION;
	info->priv = dev;
	pci_set_drvdata(dev, info);

	ret = uio_register_device(&dev->dev, info);
	if (ret)
		goto err_unmap;

#if defined(CONFIG_GENERIC_GPIO) || defined(CONFIG_PARPORT_NOT_PC)
	spin_lock_init(&priv->lock);
	if (dev->device == PCI_DEVICE_ID_PLX_9030) {
		u32 val;
		void __iomem *plx = info->mem[0].internal_addr;

		/* GPIO Bits 0-3 are used as GPIO for JTAG.
		 * Bits 4-7 must be left to their normal functions.
		 * The device is expected to configure bits 4-7
		 * itself when initialized, and not change them
		 * afterward.  So we just avoid changes.
		 *
		 * Power up value observed in a PMC-EVR-230
		 * GPIOC = 0x00249924
		 * is consistent with the default given in the
		 * PLX 9030 data book.
		 */
		val = ioread32(plx + GPIOC);
		/* clear everything for GPIO 0-3 (aka first 12 bits).
		 * Preserve current settings for GPIO 4-7.
		 * This will setup these as inputs (which float high)
		 *
		 * Each GPIO bit has 3 register bits (function, direction,
		 * and value)
		 */
		val &= 0xfffff000;
		/* Enable output drivers for TCLK, TMS, and TDI */
		val |= GPIOC_pin_dir(0);
		val |= GPIOC_pin_dir(1);
		val |= GPIOC_pin_dir(3);
		dev_info(&dev->dev, "GPIOC %08x\n", val);
		iowrite32(val, plx + GPIOC);

#ifdef CONFIG_GENERIC_GPIO
		mrf_gpio_setup(priv);
#endif
#ifdef CONFIG_PARPORT_NOT_PC
		mrf_pp_setup(priv);
#endif
	}
#else
	if (dev->device == PCI_DEVICE_ID_PLX_9030)
		dev_info(&dev->dev,
			 "GPIO support not built, JTAG unavailable\n");
#endif /* defined(CONFIG_GENERIC_GPIO) || defined(CONFIG_PARPORT_NOT_PC) */

	dev_info(&dev->dev, "MRF Setup complete\n");
	return 0;

err_unmap:
	/* Guarded: on the partial-mapping failure path one of these may be
	 * NULL, and iounmap(NULL) is not safe on every architecture. */
	if (info->mem[0].internal_addr)
		iounmap(info->mem[0].internal_addr);
	if (info->mem[2].internal_addr)
		iounmap(info->mem[2].internal_addr);
	pci_release_regions(dev);
err_disable:
	pci_disable_device(dev);
err_free:
	kzfree(priv);
	return ret;
}
/*
 * Walk the resource directory in BAR0 and record the count and mapped
 * address of each recognised vNIC resource in vdev->res[].
 *
 * Returns 0 on success, -EINVAL if the header is missing/invalid or a
 * fixed-stride resource lies outside the BAR.
 */
static int vnic_dev_discover_res(struct vnic_dev *vdev, struct vnic_dev_bar *bar)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Validate the magic/version stamped by the device before trusting
	 * anything else in the header. */
	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		printk(KERN_ERR "vNIC BAR0 res magic/version error "
			"exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));
		return -EINVAL;
	}

	/* Resource entries follow immediately after the header. */
	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		/* Only BAR0-resident resources are handled here. */
		if (bar_num != 0)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* Fixed-stride register blocks: bounds-check the
			 * whole block against the BAR length. */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar->len) {
				printk(KERN_ERR "vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset, len, bar->len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			/* Unknown resource types are skipped, not fatal. */
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	return 0;
}
unsigned int vnic_rq_error_status(struct vnic_rq *rq) { return ioread32(&rq->ctrl->error_status); }
/* Read a 32-bit TCSR register at byte offset @offset. */
static u32 ath10k_ahb_tcsr_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
	void __iomem *addr = ar_ahb->tcsr_mem + offset;

	return ioread32(addr);
}
/*
 * Per-frame DMA completion handler for a cobalt stream.  Hands the just
 * filled buffer back to vb2 and, for video capture streams, walks the
 * lock/freewheel state machine that decides whether the incoming signal
 * is stable enough for the frame to be marked DONE rather than QUEUED.
 */
static void cobalt_dma_stream_queue_handler(struct cobalt_stream *s)
{
	struct cobalt *cobalt = s->cobalt;
	int rx = s->video_channel;
	struct m00473_freewheel_regmap __iomem *fw =
		COBALT_CVI_FREEWHEEL(s->cobalt, rx);
	struct m00233_video_measure_regmap __iomem *vmr =
		COBALT_CVI_VMR(s->cobalt, rx);
	struct m00389_cvi_regmap __iomem *cvi =
		COBALT_CVI(s->cobalt, rx);
	struct m00479_clk_loss_detector_regmap __iomem *clkloss =
		COBALT_CVI_CLK_LOSS(s->cobalt, rx);
	struct cobalt_buffer *cb;
	bool skip = false;

	spin_lock(&s->irqlock);
	if (list_empty(&s->bufs)) {
		pr_err("no buffers!\n");
		spin_unlock(&s->irqlock);
		return;
	}
	/* Give the fresh filled up buffer to the user.
	 * Note that the interrupt is only sent if the DMA can continue
	 * with a new buffer, so it is always safe to return this buffer
	 * to userspace. */
	cb = list_first_entry(&s->bufs, struct cobalt_buffer, list);
	list_del(&cb->list);
	spin_unlock(&s->irqlock);

	/* Audio and output streams have no input-lock state machine. */
	if (s->is_audio || s->is_output)
		goto done;

	if (s->unstable_frame) {
		/* Currently unlocked: check whether the input has become
		 * stable (init done, clock present, measured geometry
		 * matching the configured timings). */
		uint32_t stat = ioread32(&vmr->irq_status);

		/* Ack the measurement interrupts we just read. */
		iowrite32(stat, &vmr->irq_status);
		if (!(ioread32(&vmr->status) &
		      M00233_STATUS_BITMAP_INIT_DONE_MSK)) {
			cobalt_dbg(1, "!init_done\n");
			if (s->enable_freewheel)
				goto restart_fw;
			goto done;
		}

		if (ioread32(&clkloss->status) &
		    M00479_STATUS_BITMAP_CLOCK_MISSING_MSK) {
			/* Re-arm the clock-loss detector and stay unlocked. */
			iowrite32(0, &clkloss->ctrl);
			iowrite32(M00479_CTRL_BITMAP_ENABLE_MSK,
				  &clkloss->ctrl);
			cobalt_dbg(1, "no clock\n");
			if (s->enable_freewheel)
				goto restart_fw;
			goto done;
		}
		if ((stat & (M00233_IRQ_STATUS_BITMAP_VACTIVE_AREA_MSK |
			     M00233_IRQ_STATUS_BITMAP_HACTIVE_AREA_MSK)) ||
		    ioread32(&vmr->vactive_area) != s->timings.bt.height ||
		    ioread32(&vmr->hactive_area) != s->timings.bt.width) {
			cobalt_dbg(1, "unstable\n");
			if (s->enable_freewheel)
				goto restart_fw;
			goto done;
		}
		if (!s->enable_cvi) {
			/* First stable frame: enable the CVI block and wait
			 * for it to report lock. */
			s->enable_cvi = true;
			iowrite32(M00389_CONTROL_BITMAP_ENABLE_MSK,
				  &cvi->control);
			goto done;
		}
		if (!(ioread32(&cvi->status) & M00389_STATUS_BITMAP_LOCK_MSK)) {
			cobalt_dbg(1, "cvi no lock\n");
			if (s->enable_freewheel)
				goto restart_fw;
			goto done;
		}
		if (!s->enable_freewheel) {
			/* CVI locked: next step is disabling freewheel. */
			cobalt_dbg(1, "stable\n");
			s->enable_freewheel = true;
			iowrite32(0, &fw->ctrl);
			goto done;
		}
		/* Fully stable again: re-enable measurement interrupts and
		 * the freewheel block, then skip the first two frames. */
		cobalt_dbg(1, "enabled fw\n");
		iowrite32(M00233_CONTROL_BITMAP_ENABLE_MEASURE_MSK |
			  M00233_CONTROL_BITMAP_ENABLE_INTERRUPT_MSK,
			  &vmr->control);
		iowrite32(M00473_CTRL_BITMAP_ENABLE_MSK, &fw->ctrl);
		s->enable_freewheel = false;
		s->unstable_frame = false;
		s->skip_first_frames = 2;
		skip = true;
		goto done;
	}
	if (ioread32(&fw->status) & M00473_STATUS_BITMAP_FREEWHEEL_MODE_MSK) {
		/* Lock was lost: force freewheel mode so DMA keeps running
		 * while we wait for the signal to stabilize again. */
restart_fw:
		cobalt_dbg(1, "lost lock\n");
		iowrite32(M00233_CONTROL_BITMAP_ENABLE_MEASURE_MSK,
			  &vmr->control);
		iowrite32(M00473_CTRL_BITMAP_ENABLE_MSK |
			  M00473_CTRL_BITMAP_FORCE_FREEWHEEL_MODE_MSK,
			  &fw->ctrl);
		iowrite32(0, &cvi->control);
		s->unstable_frame = true;
		s->enable_freewheel = false;
		s->enable_cvi = false;
	}
done:
	if (s->skip_first_frames) {
		skip = true;
		s->skip_first_frames--;
	}
	v4l2_get_timestamp(&cb->vb.v4l2_buf.timestamp);
	/* TODO: the sequence number should be read from the FPGA so we
	   also know about dropped frames. */
	cb->vb.v4l2_buf.sequence = s->sequence++;
	vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
			VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE);
}
unsigned int vnic_wq_error_status(struct vnic_wq *wq) { return ioread32(&wq->ctrl->error_status); }
static inline u32 tb10x_reg_read(struct tb10x_gpio *gpio, unsigned int offs) { return ioread32(gpio->base + offs); }
/* Little-endian 32-bit register read accessor for the UART-Lite core. */
static u32 uartlite_inle32(void __iomem *addr)
{
	u32 val = ioread32(addr);

	return val;
}
/*
 * Media-monitoring timer for Lite-On PNIC chips.
 * @data: the struct net_device being monitored, cast to unsigned long.
 *
 * Checks link/negotiation status and falls back between the two media
 * modes when the link beat fails; also services the software-timer
 * polling mode used while interrupts are masked (CSR7 == 0).
 */
void pnic_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int next_tick = 60*HZ;

	/* CSR7 == 0: interrupts masked, nothing to monitor right now. */
	if(!ioread32(ioaddr + CSR7)) {
		goto too_good_connection;
	}
	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
		spin_lock_irq(&tp->lock);
		if (tulip_check_duplex(dev) > 0)
			next_tick = 3*HZ;
		spin_unlock_irq(&tp->lock);
	} else {
		int csr12 = ioread32(ioaddr + CSR12);
		int new_csr6 = tp->csr6 & ~0x40C40200;
		int phy_reg = ioread32(ioaddr + 0xB8);
		int csr5 = ioread32(ioaddr + CSR5);

		if (tulip_debug > 1)
			printk(KERN_DEBUG "%s: PNIC timer PHY status %8.8x, %s "
			       "CSR5 %8.8x.\n",
			       dev->name, phy_reg, medianame[dev->if_port], csr5);
		if (phy_reg & 0x04000000) {
			/* NOTE(review): magic PHY status bit — writing
			 * 0x0201F078 presumably restarts negotiation;
			 * confirm against the PNIC datasheet. */
			iowrite32(0x0201F078, ioaddr + 0xB8);
			next_tick = 1*HZ;
			tp->nwayset = 0;
		} else if (phy_reg & 0x78000000) {
			pnic_do_nway(dev);
			next_tick = 60*HZ;
		} else if (csr5 & TPLnkFail) {
			/* Link beat lost: try the other media. */
			if (tulip_debug > 1)
				printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %4.4x, "
				       "CSR5 %8.8x, PHY %3.3x.\n",
				       dev->name, medianame[dev->if_port], csr12,
				       ioread32(ioaddr + CSR5),
				       ioread32(ioaddr + 0xB8));
			next_tick = 3*HZ;
			if (tp->medialock) {
				/* User forced the media: leave it alone. */
			} else if (tp->nwayset && (dev->if_port & 1)) {
				next_tick = 1*HZ;
			} else if (dev->if_port == 0) {
				/* Switch port 0 -> port 3. */
				dev->if_port = 3;
				iowrite32(0x33, ioaddr + CSR12);
				new_csr6 = 0x01860000;
				iowrite32(0x1F868, ioaddr + 0xB8);
			} else {
				/* Switch back to port 0. */
				dev->if_port = 0;
				iowrite32(0x32, ioaddr + CSR12);
				new_csr6 = 0x00420000;
				iowrite32(0x1F078, ioaddr + 0xB8);
			}
			if (tp->csr6 != new_csr6) {
				tp->csr6 = new_csr6;
				/* Restart Rx/Tx with the new configuration. */
				tulip_restart_rxtx(tp);
				dev->trans_start = jiffies;
				if (tulip_debug > 1)
					printk(KERN_INFO "%s: Changing PNIC configuration to %s "
					       "%s-duplex, CSR6 %8.8x.\n",
					       dev->name, medianame[dev->if_port],
					       tp->full_duplex ? "full" : "half",
					       new_csr6);
			}
		}
	}
too_good_connection:
	mod_timer(&tp->timer, RUN_AT(next_tick));
	/* Software-timer polling mode: refill the Rx ring and re-enable
	 * the interrupt mask. */
	if(!ioread32(ioaddr + CSR7)) {
		if (tulip_debug > 1)
			printk(KERN_INFO "%s: sw timer wakeup.\n", dev->name);
		disable_irq(dev->irq);
		tulip_refill_rx(dev);
		enable_irq(dev->irq);
		iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
	}
}
/*
 * Read a 32-bit DMA register at byte offset @offset of the mapped window.
 * ToDo: Handle endianness if required. Intel/Linux = Little Endian
 */
u32 dma_rd_reg(u32 offset)
{
	return ioread32(dsi_aes_dma.map + offset);
}
/* Read a 32-bit MT7621 watchdog register at byte offset @reg. */
static inline u32 rt_wdt_r32(unsigned reg)
{
	return ioread32(mt7621_wdt_base + reg);
}
/*
 * Probe PLX90xx based device for the SJA1000 chips and register each
 * available CAN channel to SJA1000 Socket-CAN subsystem.
 */
static int __devinit plx_pci_add_card(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct sja1000_priv *priv;
	struct net_device *dev;
	struct plx_pci_card *card;
	struct plx_pci_card_info *ci;
	int err, i;
	u32 val;
	void __iomem *addr;

	/* Card-specific description: BAR layout, clock, OCR/CDR values
	 * and reset hook, attached via the PCI device-ID table. */
	ci = (struct plx_pci_card_info *)ent->driver_data;

	if (pci_enable_device(pdev) < 0) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		return -ENODEV;
	}

	dev_info(&pdev->dev, "Detected \"%s\" card at slot #%i\n",
		 ci->name, PCI_SLOT(pdev->devfn));

	/* Allocate card structures to hold addresses, ... */
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card) {
		dev_err(&pdev->dev, "Unable to allocate memory\n");
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, card);

	card->channels = 0;

	/* Remap PLX90xx configuration space */
	addr = pci_iomap(pdev, ci->conf_map.bar, ci->conf_map.size);
	if (!addr) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Failed to remap configuration space "
			"(BAR%d)\n", ci->conf_map.bar);
		goto failure_cleanup;
	}
	card->conf_addr = addr + ci->conf_map.offset;

	ci->reset_func(pdev);
	card->reset_func = ci->reset_func;

	/* Detect available channels */
	for (i = 0; i < ci->channel_count; i++) {
		struct plx_pci_channel_map *cm = &ci->chan_map_tbl[i];

		dev = alloc_sja1000dev(0);
		if (!dev) {
			err = -ENOMEM;
			goto failure_cleanup;
		}

		card->net_dev[i] = dev;
		priv = netdev_priv(dev);
		priv->priv = card;
		priv->irq_flags = IRQF_SHARED;

		dev->irq = pdev->irq;

		/*
		 * Remap IO space of the SJA1000 chips
		 * This is device-dependent mapping
		 */
		addr = pci_iomap(pdev, cm->bar, cm->size);
		if (!addr) {
			err = -ENOMEM;
			dev_err(&pdev->dev, "Failed to remap BAR%d\n",
				cm->bar);
			goto failure_cleanup;
		}

		priv->reg_base = addr + cm->offset;
		priv->read_reg = plx_pci_read_reg;
		priv->write_reg = plx_pci_write_reg;

		/* Check if channel is present */
		if (plx_pci_check_sja1000(priv)) {
			priv->can.clock.freq = ci->can_clock;
			priv->ocr = ci->ocr;
			priv->cdr = ci->cdr;

			SET_NETDEV_DEV(dev, &pdev->dev);

			/* Register SJA1000 device */
			err = register_sja1000dev(dev);
			if (err) {
				dev_err(&pdev->dev,
					"Registering device failed "
					"(err=%d)\n", err);
				goto failure_cleanup;
			}

			card->channels++;

			dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d "
				 "registered as %s\n", i + 1, priv->reg_base,
				 dev->irq, dev->name);
		} else {
			/* Missing channel is not fatal; just skip it. */
			dev_err(&pdev->dev, "Channel #%d not detected\n",
				i + 1);
			free_sja1000dev(dev);
			card->net_dev[i] = NULL;
		}
	}

	if (!card->channels) {
		err = -ENODEV;
		goto failure_cleanup;
	}

	/*
	 * Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
	 * Local_2 interrupts from the SJA1000 chips
	 */
	if (pdev->device != PCI_DEVICE_ID_PLX_9056) {
		val = ioread32(card->conf_addr + PLX_INTCSR);
		if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH)
			val |= PLX_LINT1_EN | PLX_PCI_INT_EN;
		else
			val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
		iowrite32(val, card->conf_addr + PLX_INTCSR);
	} else {
		iowrite32(PLX9056_LINTI | PLX9056_PCI_INT_EN,
			  card->conf_addr + PLX9056_INTCSR);
	}

	return 0;

failure_cleanup:
	dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);

	/* Tears down everything set up so far, including drvdata. */
	plx_pci_del_card(pdev);

	return err;
}
/*
 * crb_map_res() - map a sub-region (control area or buffer) of the CRB.
 *
 * Re-uses the already-mapped ACPI IO region when the requested range lies
 * inside it; otherwise maps the range separately.  Returns an ERR_PTR on
 * failure.
 */
static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
				 struct resource *io_res, u64 start, u32 size)
{
	struct resource new_res = {
		.start	= start,
		.end	= start + size - 1,
		.flags	= IORESOURCE_MEM,
	};

	/* Detect a 64 bit address on a 32 bit system */
	if (start != new_res.start)
		return (void __iomem *) ERR_PTR(-EINVAL);

	if (!resource_contains(io_res, &new_res))
		return devm_ioremap_resource(dev, &new_res);

	return priv->iobase + (new_res.start - io_res->start);
}

/*
 * Work around broken BIOSs that return inconsistent values from the ACPI
 * region vs the registers. Trust the ACPI region. Such broken systems
 * probably cannot send large TPM commands since the buffer will be truncated.
 */
static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
			      u64 start, u64 size)
{
	if (io_res->start > start || io_res->end < start)
		return size;

	if (start + size - 1 <= io_res->end)
		return size;

	dev_err(dev,
		FW_BUG "ACPI region does not cover the entire command/response buffer. %pr vs %llx %llx\n",
		io_res, start, size);

	return io_res->end - start + 1;
}

/*
 * crb_map_io() - map the CRB control area and the command/response
 * buffers described by the TPM2 ACPI table.
 *
 * Returns 0 on success or a negative errno.
 */
static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
		      struct acpi_table_tpm2 *buf)
{
	struct list_head resources;
	struct resource io_res;
	struct device *dev = &device->dev;
	u32 pa_high, pa_low;
	u64 cmd_pa;
	u32 cmd_size;
	__le64 __rsp_pa;
	u64 rsp_pa;
	u32 rsp_size;
	int ret;

	INIT_LIST_HEAD(&resources);
	ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
				     &io_res);
	if (ret < 0)
		return ret;
	acpi_dev_free_resource_list(&resources);

	if (resource_type(&io_res) != IORESOURCE_MEM) {
		dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
		return -EINVAL;
	}

	priv->iobase = devm_ioremap_resource(dev, &io_res);
	if (IS_ERR(priv->iobase))
		return PTR_ERR(priv->iobase);

	/* The ACPI IO region starts at the head area and continues to include
	 * the control area, as one nice sane region except for some older
	 * stuff that puts the control area outside the ACPI IO region.
	 */
	if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
	    (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
		if (buf->control_address == io_res.start +
		    sizeof(*priv->regs_h))
			priv->regs_h = priv->iobase;
		else
			dev_warn(dev, FW_BUG "Bad ACPI memory layout");
	}

	/* Locality must be held while poking the control registers. */
	ret = __crb_request_locality(dev, priv, 0);
	if (ret)
		return ret;

	priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
				   sizeof(struct crb_regs_tail));
	if (IS_ERR(priv->regs_t))
		return PTR_ERR(priv->regs_t);

	/*
	 * PTT HW bug w/a: wake up the device to access
	 * possibly not retained registers.
	 */
	ret = crb_cmd_ready(dev, priv);
	if (ret)
		return ret;

	/* Command buffer address is split across two 32-bit registers. */
	pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
	pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
	cmd_pa = ((u64)pa_high << 32) | pa_low;
	cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa,
				      ioread32(&priv->regs_t->ctrl_cmd_size));

	dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
		pa_high, pa_low, cmd_size);

	priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
	if (IS_ERR(priv->cmd)) {
		ret = PTR_ERR(priv->cmd);
		goto out;
	}

	/* Response address is one little-endian 64-bit register. */
	memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
	rsp_pa = le64_to_cpu(__rsp_pa);
	rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa,
				      ioread32(&priv->regs_t->ctrl_rsp_size));

	if (cmd_pa != rsp_pa) {
		priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
		ret = PTR_ERR_OR_ZERO(priv->rsp);
		goto out;
	}

	/* According to the PTP specification, overlapping command and response
	 * buffer sizes must be identical.
	 */
	if (cmd_size != rsp_size) {
		dev_err(dev, FW_BUG "overlapping command and response buffer sizes are not identical");
		ret = -EINVAL;
		goto out;
	}

	priv->rsp = priv->cmd;

out:
	if (!ret)
		priv->cmd_size = cmd_size;

	/* Let the device idle again and drop the locality taken above. */
	crb_go_idle(dev, priv);

	__crb_relinquish_locality(dev, priv, 0);

	return ret;
}

/*
 * crb_acpi_add() - ACPI bus probe: parse the TPM2 table, map the CRB
 * and register the TPM chip (with runtime PM enabled).
 */
static int crb_acpi_add(struct acpi_device *device)
{
	struct acpi_table_tpm2 *buf;
	struct crb_priv *priv;
	struct tpm_chip *chip;
	struct device *dev = &device->dev;
	struct tpm2_crb_smc *crb_smc;
	acpi_status status;
	u32 sm;
	int rc;

	status = acpi_get_table(ACPI_SIG_TPM2, 1,
				(struct acpi_table_header **) &buf);
	if (ACPI_FAILURE(status) || buf->header.length < sizeof(*buf)) {
		dev_err(dev, FW_BUG "failed to get TPM2 ACPI table\n");
		return -EINVAL;
	}

	/* Should the FIFO driver handle this? */
	sm = buf->start_method;
	if (sm == ACPI_TPM2_MEMORY_MAPPED)
		return -ENODEV;

	priv = devm_kzalloc(dev, sizeof(struct crb_priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) {
		/* The SMC function id lives in a trailer after the table. */
		if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) {
			dev_err(dev,
				FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
				buf->header.length,
				ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
			return -EINVAL;
		}
		crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf));
		priv->smc_func_id = crb_smc->smc_func_id;
	}

	priv->sm = sm;
	priv->hid = acpi_device_hid(device);

	rc = crb_map_io(device, priv, buf);
	if (rc)
		return rc;

	chip = tpmm_chip_alloc(dev, &tpm_crb);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	dev_set_drvdata(&chip->dev, priv);
	chip->acpi_dev_handle = device->handle;
	chip->flags = TPM_CHIP_FLAG_TPM2;

	rc = __crb_request_locality(dev, priv, 0);
	if (rc)
		return rc;

	rc = crb_cmd_ready(dev, priv);
	if (rc)
		goto out;

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	rc = tpm_chip_register(chip);
	if (rc) {
		/* Undo the runtime-PM bring-up done just above. */
		crb_go_idle(dev, priv);
		pm_runtime_put_noidle(dev);
		pm_runtime_disable(dev);
		goto out;
	}

	pm_runtime_put_sync(dev);

out:
	__crb_relinquish_locality(dev, priv, 0);

	return rc;
}

/* ACPI bus remove: unregister the chip and disable runtime PM. */
static int crb_acpi_remove(struct acpi_device *device)
{
	struct device *dev = &device->dev;
	struct tpm_chip *chip = dev_get_drvdata(dev);

	tpm_chip_unregister(chip);

	pm_runtime_disable(dev);

	return 0;
}

/* Runtime suspend: ask the TPM to go idle. */
static int __maybe_unused crb_pm_runtime_suspend(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct crb_priv *priv = dev_get_drvdata(&chip->dev);

	return crb_go_idle(dev, priv);
}

/* Runtime resume: ask the TPM to become ready for commands. */
static int __maybe_unused crb_pm_runtime_resume(struct device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(dev);
	struct crb_priv *priv = dev_get_drvdata(&chip->dev);

	return crb_cmd_ready(dev, priv);
}

/* System suspend: generic TPM suspend first, then go idle. */
static int __maybe_unused crb_pm_suspend(struct device *dev)
{
	int ret;

	ret = tpm_pm_suspend(dev);
	if (ret)
		return ret;

	return crb_pm_runtime_suspend(dev);
}
/* Read the device's outbound doorbell register. */
static inline int get_device_outbound(struct ilo_hwinfo *hw)
{
	return ioread32(hw->mmio_vaddr + DB_OUT);
}
/* Read a 32-bit RT288x watchdog register at byte offset @reg. */
static inline u32 rt_wdt_r32(unsigned reg)
{
	return ioread32(rt288x_wdt_base + reg);
}
/**
 * i2c_pnx_master_xmit - transmit data to slave
 * @adap: pointer to I2C adapter structure
 *
 * Sends one byte of data to the slave
 */
static int i2c_pnx_master_xmit(struct i2c_adapter *adap)
{
	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
	u32 val;

	dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
		__func__, ioread32(I2C_REG_STS(alg_data)));

	if (alg_data->mif.len > 0) {
		/* We still have something to talk about... */
		val = *alg_data->mif.buf++;

		/* Last byte: tag it with the stop bit (and a start bit
		 * when another message follows this one). */
		if (alg_data->mif.len == 1) {
			val |= stop_bit;
			if (!alg_data->last)
				val |= start_bit;
		}

		alg_data->mif.len--;
		iowrite32(val, I2C_REG_TX(alg_data));

		dev_dbg(&adap->dev, "%s(): xmit %#x [%d]\n", __func__,
			val, alg_data->mif.len + 1);

		if (alg_data->mif.len == 0) {
			if (alg_data->last) {
				/* Wait until the STOP is seen. */
				if (wait_timeout(I2C_PNX_TIMEOUT, alg_data))
					dev_err(&adap->dev, "The bus is still "
						"active after timeout\n");
			}
			/* Disable master interrupts */
			iowrite32(ioread32(I2C_REG_CTL(alg_data)) &
				~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
				I2C_REG_CTL(alg_data));

			/* Transfer is done; stop the timeout timer. */
			del_timer_sync(&alg_data->mif.timer);

			dev_dbg(&adap->dev, "%s(): Waking up xfer routine.\n",
				__func__);

			complete(&alg_data->mif.complete);
		}
	} else if (alg_data->mif.len == 0) {
		/* zero-sized transfer */
		i2c_pnx_stop(adap);

		/* Disable master interrupts. */
		iowrite32(ioread32(I2C_REG_CTL(alg_data)) &
			~(mcntrl_afie | mcntrl_naie | mcntrl_drmie),
			I2C_REG_CTL(alg_data));

		/* Stop timer. */
		del_timer_sync(&alg_data->mif.timer);
		dev_dbg(&adap->dev, "%s(): Waking up xfer routine after "
			"zero-xfer.\n", __func__);

		complete(&alg_data->mif.complete);
	}

	dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
		__func__, ioread32(I2C_REG_STS(alg_data)));

	return 0;
}
/*
 * Read a 32-bit value from the agent/second core window of @core,
 * switching the PCI window to this core first if necessary.
 */
static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
{
	void __iomem *window;

	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);

	window = core->bus->mmio + (1 * BCMA_CORE_SIZE);
	return ioread32(window + offset);
}
/**
 * i2c_pnx_master_rcv - receive data from slave
 * @adap: pointer to I2C adapter structure
 *
 * Reads one byte data from the slave
 */
static int i2c_pnx_master_rcv(struct i2c_adapter *adap)
{
	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
	unsigned int val = 0;
	u32 ctl = 0;

	dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
		__func__, ioread32(I2C_REG_STS(alg_data)));

	/* Check, whether there is already data,
	 * or we didn't 'ask' for it yet.
	 */
	if (ioread32(I2C_REG_STS(alg_data)) & mstatus_rfe) {
		dev_dbg(&adap->dev, "%s(): Write dummy data to fill "
			"Rx-fifo...\n", __func__);

		if (alg_data->mif.len == 1) {
			/* Last byte, do not acknowledge next rcv. */
			val |= stop_bit;
			if (!alg_data->last)
				val |= start_bit;

			/*
			 * Enable interrupt RFDAIE (data in Rx fifo),
			 * and disable DRMIE (need data for Tx)
			 */
			ctl = ioread32(I2C_REG_CTL(alg_data));
			ctl |= mcntrl_rffie | mcntrl_daie;
			ctl &= ~mcntrl_drmie;
			iowrite32(ctl, I2C_REG_CTL(alg_data));
		}

		/*
		 * Now we'll 'ask' for data:
		 * For each byte we want to receive, we must
		 * write a (dummy) byte to the Tx-FIFO.
		 */
		iowrite32(val, I2C_REG_TX(alg_data));

		return 0;
	}

	/* Handle data. */
	if (alg_data->mif.len > 0) {
		val = ioread32(I2C_REG_RX(alg_data));
		*alg_data->mif.buf++ = (u8) (val & 0xff);
		dev_dbg(&adap->dev, "%s(): rcv 0x%x [%d]\n", __func__, val,
			alg_data->mif.len);

		alg_data->mif.len--;
		if (alg_data->mif.len == 0) {
			if (alg_data->last)
				/* Wait until the STOP is seen. */
				if (wait_timeout(I2C_PNX_TIMEOUT, alg_data))
					dev_err(&adap->dev, "The bus is still "
						"active after timeout\n");

			/* Disable master interrupts */
			ctl = ioread32(I2C_REG_CTL(alg_data));
			ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie |
				 mcntrl_drmie | mcntrl_daie);
			iowrite32(ctl, I2C_REG_CTL(alg_data));

			/* Kill timer. */
			del_timer_sync(&alg_data->mif.timer);
			complete(&alg_data->mif.complete);
		}
	}

	dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
		__func__, ioread32(I2C_REG_STS(alg_data)));

	return 0;
}
/* Read a 32-bit value @offset bytes past the mapped I/O address @addr. */
inline u32 _mali_osk_mem_ioread32(volatile mali_io_address addr, u32 offset)
{
	u8 *base = (u8 *)addr;

	return ioread32(base + offset);
}
/*
 * i2c_pnx_interrupt - master-mode interrupt service routine
 * @irq: interrupt number
 * @dev_id: the i2c_adapter this interrupt belongs to
 *
 * Handles arbitration loss, missing slave ACK, and the Tx-needs-data /
 * Rx-fifo-has-data events, completing the in-flight transfer on fatal
 * conditions.
 */
static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
{
	u32 stat, ctl;
	struct i2c_adapter *adap = dev_id;
	struct i2c_pnx_algo_data *alg_data = adap->algo_data;

	dev_dbg(&adap->dev, "%s(): mstat = %x mctrl = %x, mode = %d\n",
		__func__,
		ioread32(I2C_REG_STS(alg_data)),
		ioread32(I2C_REG_CTL(alg_data)),
		alg_data->mif.mode);
	stat = ioread32(I2C_REG_STS(alg_data));

	/* let's see what kind of event this is */
	if (stat & mstatus_afi) {
		/* We lost arbitration in the midst of a transfer */
		alg_data->mif.ret = -EIO;

		/* Disable master interrupts. */
		ctl = ioread32(I2C_REG_CTL(alg_data));
		ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie |
			 mcntrl_drmie);
		iowrite32(ctl, I2C_REG_CTL(alg_data));

		/* Stop timer, to prevent timeout. */
		del_timer_sync(&alg_data->mif.timer);
		complete(&alg_data->mif.complete);
	} else if (stat & mstatus_nai) {
		/* Slave did not acknowledge, generate a STOP */
		dev_dbg(&adap->dev, "%s(): "
			"Slave did not acknowledge, generating a STOP.\n",
			__func__);
		i2c_pnx_stop(adap);

		/* Disable master interrupts. */
		ctl = ioread32(I2C_REG_CTL(alg_data));
		ctl &= ~(mcntrl_afie | mcntrl_naie | mcntrl_rffie |
			 mcntrl_drmie);
		iowrite32(ctl, I2C_REG_CTL(alg_data));

		/* Our return value. */
		alg_data->mif.ret = -EIO;

		/* Stop timer, to prevent timeout. */
		del_timer_sync(&alg_data->mif.timer);
		complete(&alg_data->mif.complete);
	} else {
		/*
		 * Two options:
		 * - Master Tx needs data.
		 * - There is data in the Rx-fifo
		 * The latter is only the case if we have requested for data,
		 * via a dummy write. (See 'i2c_pnx_master_rcv'.)
		 * We therefore check, as a sanity check, whether that interrupt
		 * has been enabled.
		 */
		if ((stat & mstatus_drmi) || !(stat & mstatus_rfe)) {
			if (alg_data->mif.mode == I2C_SMBUS_WRITE) {
				i2c_pnx_master_xmit(adap);
			} else if (alg_data->mif.mode == I2C_SMBUS_READ) {
				i2c_pnx_master_rcv(adap);
			}
		}
	}

	/* Clear TDI and AFI bits */
	stat = ioread32(I2C_REG_STS(alg_data));
	iowrite32(stat | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data));

	dev_dbg(&adap->dev, "%s(): exiting, stat = %x ctrl = %x.\n",
		__func__, ioread32(I2C_REG_STS(alg_data)),
		ioread32(I2C_REG_CTL(alg_data)));

	return IRQ_HANDLED;
}
static inline u32 gpio_rcar_read(struct gpio_rcar_priv *p, int offs) { return ioread32(p->base + offs); }
/**
 * i2c_pnx_xfer - generic transfer entry point
 * @adap: pointer to I2C adapter structure
 * @msgs: array of messages
 * @num: number of messages
 *
 * Initiates the transfer
 */
static int i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
			int num)
{
	struct i2c_msg *pmsg;
	int rc = 0, completed = 0, i;
	struct i2c_pnx_algo_data *alg_data = adap->algo_data;
	u32 stat = ioread32(I2C_REG_STS(alg_data));

	dev_dbg(&adap->dev, "%s(): entering: %d messages, stat = %04x.\n",
		__func__, num, ioread32(I2C_REG_STS(alg_data)));

	bus_reset_if_active(adap);

	/* Process transactions in a loop. */
	for (i = 0; rc >= 0 && i < num; i++) {
		u8 addr;

		pmsg = &msgs[i];
		addr = pmsg->addr;

		if (pmsg->flags & I2C_M_TEN) {
			dev_err(&adap->dev,
				"%s: 10 bits addr not supported!\n",
				adap->name);
			rc = -EINVAL;
			break;
		}

		/* Stash the message into the per-transfer state the
		 * interrupt handler consumes. */
		alg_data->mif.buf = pmsg->buf;
		alg_data->mif.len = pmsg->len;
		alg_data->mif.mode = (pmsg->flags & I2C_M_RD) ?
			I2C_SMBUS_READ : I2C_SMBUS_WRITE;
		alg_data->mif.ret = 0;
		alg_data->last = (i == num - 1);

		dev_dbg(&adap->dev, "%s(): mode %d, %d bytes\n", __func__,
			alg_data->mif.mode, alg_data->mif.len);

		/* Arm the timeout timer for this message. */
		i2c_pnx_arm_timer(adap);

		/* initialize the completion var */
		init_completion(&alg_data->mif.complete);

		/* Enable master interrupt */
		iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_afie |
			  mcntrl_naie | mcntrl_drmie,
			  I2C_REG_CTL(alg_data));

		/* Put start-code and slave-address on the bus. */
		rc = i2c_pnx_start(addr, adap);
		if (rc < 0)
			break;

		/* Wait for completion */
		wait_for_completion(&alg_data->mif.complete);

		if (!(rc = alg_data->mif.ret))
			completed++;
		dev_dbg(&adap->dev, "%s(): Complete, return code = %d.\n",
			__func__, rc);

		/* Clear TDI and AFI bits in case they are set. */
		if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_tdi) {
			dev_dbg(&adap->dev,
				"%s: TDI still set... clearing now.\n",
				adap->name);
			iowrite32(stat, I2C_REG_STS(alg_data));
		}
		if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_afi) {
			dev_dbg(&adap->dev,
				"%s: AFI still set... clearing now.\n",
				adap->name);
			iowrite32(stat, I2C_REG_STS(alg_data));
		}
	}

	bus_reset_if_active(adap);

	/* Cleanup to be sure... */
	alg_data->mif.buf = NULL;
	alg_data->mif.len = 0;

	dev_dbg(&adap->dev, "%s(): exiting, stat = %x\n",
		__func__, ioread32(I2C_REG_STS(alg_data)));

	if (completed != num)
		return ((rc < 0) ? rc : -EREMOTEIO);

	return num;
}
/* Return the remaining-time field of the PM watchdog register. */
static uint32_t wdog_get_remaining(void)
{
	return ioread32(__io_address(PM_WDOG)) & PM_WDOG_TIME_SET;
}
/*
 * Top-half IRQ handler shared by all ExMIMO cards.  Identifies the card
 * from the request_irq() cookie, reads the command word pushed through
 * the PCIe control register, and defers the real work to openair_tasklet.
 * Returns IRQ_NONE when the interrupt was not raised by an ExMIMO card.
 */
irqreturn_t openair_irq_handler(int irq, void *cookie)
{
	unsigned int irqval;
	unsigned int irqcmd = EXMIMO_NOP;
	unsigned long card_id; // = (unsigned long) cookie;
	unsigned int pcie_control = PCIE_CONTROL2;

	// check interrupt status register
	//pci_read_config_word(pdev[0],6 , &irqval);

	// find card_id. cookie is set by request_irq and static, so we will always find it
	for (card_id=0; card_id<MAX_CARDS; card_id++)
		if ( pdev[card_id] == cookie )
			break;

	/* NOTE(review): if the cookie is never matched, card_id == MAX_CARDS
	 * here and the exmimo_pci_kvirt[] access below reads out of bounds —
	 * confirm the cookie is always one of pdev[]. */
	if (exmimo_pci_kvirt[card_id].exmimo_id_ptr->board_swrev == BOARD_SWREV_LEGACY)
		pcie_control = PCIE_CONTROL1;
	else
		pcie_control = PCIE_CONTROL2;

	//printk("irq hndl called: card_id=%i, irqval=%i\n", card_id, irqval);

	// get AHBPCIE interrupt line (bit 7) to determine if IRQ was for us from ExMIMO card, or from a different device
	// reading CONTROL0 will also clear this bit and the LEON-to-PC IRQ line
	irqval = ioread32(bar[card_id]+PCIE_CONTROL0);
	irqcmd = ioread32(bar[card_id]+pcie_control);

	//printk("IRQ handler: ctrl0: %08x, ctrl1: %08x, ctrl2: %08x, status: %08x\n", irqval, ioread32(bar[card_id]+PCIE_CONTROL1), ioread32(bar[card_id]+PCIE_CONTROL2), ioread32(bar[card_id]+PCIE_STATUS));

	if ( (irqval & 0x80) == 0 ) {
		// CTRL0.bit7 is no set -> IRQ is not from ExMIMO i.e. not for us
		if (exmimo_pci_kvirt[card_id].exmimo_id_ptr->board_swrev == BOARD_SWREV_CMDREGISTERS) {
			/* Newer firmware: a non-NOP/non-cookie command word
			 * still means there is work to do. */
			if (irqcmd != EXMIMO_NOP && irqcmd != EXMIMO_CONTROL2_COOKIE) {
				if (irqcmd == GET_FRAME_DONE) {
					get_frame_done = 1;
				}
				openair_tasklet.data = card_id;
				tasklet_schedule(&openair_tasklet);
				openair_bh_cnt++;
				return IRQ_HANDLED;
			} else {
				return IRQ_NONE;
			}
		} else
			return IRQ_NONE;
	} else {
		if (exmimo_pci_kvirt[card_id].exmimo_id_ptr->board_swrev == BOARD_SWREV_LEGACY) {
			// clear PCIE interrupt (bit 7 of register 0x0)
			iowrite32(irqval&0xffffff7f,bar[card_id]+PCIE_CONTROL0);
		}
		if (irqcmd == GET_FRAME_DONE) {
			get_frame_done = 1;
		}
		openair_tasklet.data = card_id;
		tasklet_schedule(&openair_tasklet);
		openair_bh_cnt++;
		return IRQ_HANDLED;
	}
}
/* debugfs getter: fetch a 32-bit MMIO register value into *val. */
static int wil_debugfs_iomem_x32_get(void *data, u64 *val)
{
	void __iomem *reg = (void __iomem *)data;

	*val = ioread32(reg);
	return 0;
}
/*
 * stx_mpe41_suspend_core - save/park or restore the MPE41 clock generators.
 * @state: target suspend state (unused in this function)
 * @wkd: wakeup-device bookkeeping (unused in this function)
 * @suspending: non-zero to save state and park the clocks, zero to restore
 *
 * On suspend the current clock-tree parent selections are saved in the
 * function-static @switch_cfg array (GFP_ATOMIC allocation), the clocks
 * are moved under the external oscillator and the PLLs turned off.  On
 * resume the PLLs are re-enabled, polled for lock, and the saved parents
 * written back.  Returns 0 on success or -ENOMEM.
 */
static int stx_mpe41_suspend_core(suspend_state_t state,
	struct stm_wakeup_devices *wkd,
	int suspending)
{
	/* Persists across the suspend call and the matching resume call. */
	static long *switch_cfg;
	unsigned long cfg_0_0, cfg_0_1;
	unsigned long cfg_1_0, cfg_1_1;
	unsigned long cfg_2_0, cfg_2_1;
	int i, j;
	unsigned long tmp;

	if (suspending)
		goto on_suspending;

	/* --- Resume path --- */
	tmp = ioread32(A9_CLK_CONFIG_IOMEM);
	iowrite32(tmp & ~0x1, A9_CLK_CONFIG_IOMEM);	/* Reenabling PLL */
	/* NOTE(review): unbounded busy-wait — hangs if the PLL never
	 * reports lock. */
	while (!(ioread32(A9_CLK_STATUS_IOMEM) & 0x1))
		cpu_relax();
	iowrite32(tmp & ~(0x4 | 0x1), A9_CLK_CONFIG_IOMEM);/* Disabling PLL */

	/* turn-on PLLs */
	for (i = 0; i < ARRAY_SIZE(clks_base); ++i)
		iowrite32(0, clks_base[i] + MPE_POWER_CFG);

	/* wait for stable PLLs */
	/* for each Clock tree */
	for (i = 0; i < ARRAY_SIZE(clks_base); ++i)
		/* for each PLL in the tree */
		for (j = 0; j < 2; ++j)
			/* wait the PLL is locked */
			while (!(ioread32(clks_base[i] + MPE_PLL_LOCK_REG(j))))
				;

	/* apply the original parents */
	for (i = 0; i < ARRAY_SIZE(clks_base); ++i) {
		iowrite32(switch_cfg[i * 2],
			clks_base[i] + MPE_SWITCH_CFG(0));
		iowrite32(switch_cfg[i * 2 + 1],
			clks_base[i] + MPE_SWITCH_CFG(1));
	}

	kfree(switch_cfg);
	switch_cfg = NULL;

	pr_debug("[STM][PM] MPE41: ClockGens A: restored\n");

	return 0;

on_suspending:
	/* --- Suspend path --- */
	cfg_0_0 = 0xffc3fcff;
	cfg_0_1 = 0xf;
	cfg_1_0 = 0xf3ffffff;
	cfg_1_1 = 0xf;
	cfg_2_0 = 0xf3ffff0f;	/* Validation suggests 0xf3ffffff but
				 * in this manner the:
				 * CLKM_STAC_PHY and CLKM_STAC_SYS
				 * are turned-off... not clear
				 * how the MPE should communicate with SASG...
				 */
	cfg_2_1 = 0xf;

	switch_cfg = kmalloc(sizeof(long) * 2 * ARRAY_SIZE(clks_base),
		GFP_ATOMIC);
	if (!switch_cfg)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(clks_base); ++i) {
		/* Save the original parents */
		switch_cfg[i * 2] =
			ioread32(clks_base[i] + MPE_SWITCH_CFG(0));
		switch_cfg[i * 2 + 1] =
			ioread32(clks_base[i] + MPE_SWITCH_CFG(1));
		/* and move the clock under the extern oscillator (30 MHz) */
		iowrite32(0, clks_base[i] + MPE_SWITCH_CFG(0));
		iowrite32(0, clks_base[i] + MPE_SWITCH_CFG(1));
	}

	/* turn-off PLLs */
	for (i = 0; i < ARRAY_SIZE(clks_base); ++i)
		iowrite32(3, clks_base[i] + MPE_POWER_CFG);

	iowrite32(cfg_0_0, clks_base[0] + MPE_SWITCH_CFG(0));
	/*
	 * The switch config cfg_0_1 the validation suggested
	 * currently doesn't work
	 */
	/* iowrite32(cfg_0_1, clks_base[0] + MPE_SWITCH_CFG(1)); */
	iowrite32(cfg_1_0, clks_base[1] + MPE_SWITCH_CFG(0));
	iowrite32(cfg_1_1, clks_base[1] + MPE_SWITCH_CFG(1));
	iowrite32(cfg_2_0, clks_base[2] + MPE_SWITCH_CFG(0));
	iowrite32(cfg_2_1, clks_base[2] + MPE_SWITCH_CFG(1));

	tmp = ioread32(A9_CLK_CONFIG_IOMEM);
	iowrite32(tmp | 0x4, A9_CLK_CONFIG_IOMEM);	/* Bypassing PLL */
	iowrite32(tmp | 0x4 | 0x1, A9_CLK_CONFIG_IOMEM);/* Disabling PLL */

	pr_debug("[STM][PM] MPE41: ClockGens A: saved\n");

	return 0;
}