/**
 * usb_hcd_ppc_soc_probe - initialize On-Chip HCDs
 * Context: !in_interrupt()
 *
 * Allocates basic resources for this USB host controller: the IRQ and
 * MMIO register window described by the platform device, the HCD object
 * itself, and its DMA buffer pools.  Registers the bus and starts the
 * controller.
 *
 * Store this function in the HCD's struct pci_driver as probe().
 *
 * Returns 0 on success and stores the new hcd in *@hcd_out; returns a
 * negative errno on failure.
 *
 * NOTE(review): the hcd returned by driver->hcd_alloc() is never freed on
 * the err1..err4 unwind paths — looks like a leak; confirm against the
 * hcd core of this kernel version.
 * NOTE(review): label err2 is reached when ioremap() fails, yet it only
 * clears drvdata (which this function never set); verify the label
 * chain is intentional.
 */
static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
				 struct usb_hcd **hcd_out,
				 struct platform_device *pdev)
{
	int retval;
	struct usb_hcd *hcd = 0;	/* 0 used as NULL (pre-NULL-idiom style) */
	struct ohci_hcd *ohci;
	struct resource *res;
	int irq;

	pr_debug("initializing PPC-SOC USB Controller\n");

	/* The platform device must supply an IRQ resource... */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_debug(__FILE__ ": no irq\n");
		return -ENODEV;
	}
	irq = res->start;

	/* ...and an MMIO register window. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		pr_debug(__FILE__ ": no reg addr\n");
		return -ENODEV;
	}
	if (!request_mem_region(res->start, res->end - res->start + 1,
			hcd_name)) {
		pr_debug(__FILE__ ": request_mem_region failed\n");
		return -EBUSY;
	}

	hcd = driver->hcd_alloc ();
	if (!hcd){
		pr_debug(__FILE__ ": hcd_alloc failed\n");
		retval = -ENOMEM;
		goto err1;
	}
	ohci = hcd_to_ohci(hcd);

	/* This SoC's OHCI registers are big-endian. */
	ohci->flags |= OHCI_BIG_ENDIAN;

	hcd->driver = (struct hc_driver *) driver;
	hcd->description = driver->description;
	hcd->irq = irq;
	hcd->regs = (struct ohci_regs *) ioremap(res->start,
			res->end - res->start + 1);
	if (!hcd->regs) {
		pr_debug(__FILE__ ": ioremap failed\n");
		retval = -ENOMEM;
		goto err2;
	}
	hcd->self.controller = &pdev->dev;

	/* DMA-consistent buffer pools for this controller. */
	retval = hcd_buffer_create(hcd);
	if (retval) {
		pr_debug(__FILE__ ": pool alloc fail\n");
		goto err3;
	}

	retval = request_irq(hcd->irq, usb_hcd_irq, SA_INTERRUPT,
			hcd_name, hcd);
	if (retval) {
		pr_debug(__FILE__ ": request_irq failed, returned %d\n",
			retval);
		retval = -EBUSY;	/* normalize to -EBUSY for callers */
		goto err4;
	}

	info("%s (PPC-SOC) at 0x%p, irq %d\n", hcd_name, hcd->regs, hcd->irq);

	/* Wire up the generic USB bus glue and register the bus. */
	usb_bus_init(&hcd->self);
	hcd->self.op = &usb_hcd_operations;
	hcd->self.release = & usb_hcd_release;
	hcd->self.hcpriv = (void *) hcd;
	hcd->self.bus_name = "PPC-SOC USB";
	hcd->product_desc = "PPC-SOC OHCI";

	INIT_LIST_HEAD(&hcd->dev_list);

	usb_register_bus(&hcd->self);

	/* Start the controller; on failure the remove routine undoes
	 * everything done above (including the bus registration). */
	if ((retval = driver->start(hcd)) < 0) {
		usb_hcd_ppc_soc_remove(hcd, pdev);
		return retval;
	}

	*hcd_out = hcd;
	return 0;

	/* Unwind in reverse order of acquisition. */
 err4:
	hcd_buffer_destroy(hcd);
 err3:
	iounmap(hcd->regs);
 err2:
	dev_set_drvdata(&pdev->dev, NULL);
 err1:
	pr_debug("Removing PPC-SOC USB Controller\n");
	release_mem_region(res->start, res->end - res->start + 1);
	return retval;
}
/*
 * davinci_vc_probe - probe the DaVinci Voice Codec MFD device.
 *
 * Acquires the functional clock, maps the VCIF register window, reads the
 * TX/RX DMA channel resources, and registers the two MFD client cells
 * (voice codec interface + CQ93VC codec).
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources are released via the fail1..fail4 goto chain (reverse order
 * of acquisition).
 */
static int __init davinci_vc_probe(struct platform_device *pdev)
{
	struct davinci_vc *davinci_vc;
	struct resource *res, *mem;
	struct mfd_cell *cell = NULL;
	int ret;

	davinci_vc = kzalloc(sizeof(struct davinci_vc), GFP_KERNEL);
	if (!davinci_vc) {
		dev_dbg(&pdev->dev,
			    "could not allocate memory for private data\n");
		return -ENOMEM;
	}

	/* NULL con_id: the device has a single (default) clock. */
	davinci_vc->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(davinci_vc->clk)) {
		dev_dbg(&pdev->dev,
			    "could not get the clock for voice codec\n");
		ret = -ENODEV;
		goto fail1;
	}
	clk_enable(davinci_vc->clk);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no mem resource\n");
		ret = -ENODEV;
		goto fail2;
	}

	/* Remember physical base/size for the error path and removal. */
	davinci_vc->pbase = res->start;
	davinci_vc->base_size = resource_size(res);

	mem = request_mem_region(davinci_vc->pbase, davinci_vc->base_size,
				 pdev->name);
	if (!mem) {
		dev_err(&pdev->dev, "VCIF region already claimed\n");
		ret = -EBUSY;
		goto fail2;
	}

	davinci_vc->base = ioremap(davinci_vc->pbase, davinci_vc->base_size);
	if (!davinci_vc->base) {
		dev_err(&pdev->dev, "can't ioremap mem resource.\n");
		ret = -ENOMEM;
		goto fail3;
	}

	/* DMA resource 0: write (TX) channel feeding the write FIFO. */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!res) {
		dev_err(&pdev->dev, "no DMA resource\n");
		ret = -ENXIO;
		goto fail4;
	}

	davinci_vc->davinci_vcif.dma_tx_channel = res->start;
	davinci_vc->davinci_vcif.dma_tx_addr =
		(dma_addr_t)(io_v2p(davinci_vc->base) + DAVINCI_VC_WFIFO);

	/* DMA resource 1: read (RX) channel fed by the read FIFO. */
	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!res) {
		dev_err(&pdev->dev, "no DMA resource\n");
		ret = -ENXIO;
		goto fail4;
	}

	davinci_vc->davinci_vcif.dma_rx_channel = res->start;
	davinci_vc->davinci_vcif.dma_rx_addr =
		(dma_addr_t)(io_v2p(davinci_vc->base) + DAVINCI_VC_RFIFO);

	davinci_vc->dev = &pdev->dev;
	davinci_vc->pdev = pdev;

	/* Voice codec interface client */
	cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
	cell->name = "davinci-vcif";
	cell->mfd_data = davinci_vc;

	/* Voice codec CQ93VC client */
	cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
	cell->name = "cq93vc-codec";
	cell->mfd_data = davinci_vc;

	ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
			      DAVINCI_VC_CELLS, NULL, 0);
	if (ret != 0) {
		dev_err(&pdev->dev, "fail to register client devices\n");
		goto fail4;
	}

	return 0;

	/* Unwind in reverse order of acquisition. */
fail4:
	iounmap(davinci_vc->base);
fail3:
	release_mem_region(davinci_vc->pbase, davinci_vc->base_size);
fail2:
	clk_disable(davinci_vc->clk);
	clk_put(davinci_vc->clk);
	davinci_vc->clk = NULL;
fail1:
	kfree(davinci_vc);
	return ret;
}
static int jpeg_probe(struct platform_device *pdev) { struct resource *res; int ret; /* global structure */ jpeg_ctrl = kzalloc(sizeof(*jpeg_ctrl), GFP_KERNEL); if (!jpeg_ctrl) { dev_err(&pdev->dev, "%s: not enough memory\n", __func__); ret = -ENOMEM; goto err_alloc; } /* setup jpeg control */ ret = jpeg_setup_controller(jpeg_ctrl); if (ret) { jpeg_err("failed to setup controller\n"); goto err_setup; } /* memory region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { jpeg_err("failed to get jpeg memory region resource\n"); ret = -ENOENT; goto err_res; } res = request_mem_region(res->start, res->end - res->start + 1, pdev->name); if (!res) { jpeg_err("failed to request jpeg io memory region\n"); ret = -ENOMEM; goto err_region; } /* ioremap */ jpeg_ctrl->reg_base = ioremap(res->start, res->end - res->start + 1); if (!jpeg_ctrl->reg_base) { jpeg_err("failed to remap jpeg io region\n"); ret = -ENOENT; goto err_map; } /* irq */ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { jpeg_err("failed to request jpeg irq resource\n"); ret = -ENOENT; goto err_irq; } jpeg_ctrl->irq_no = res->start; ret = request_irq(jpeg_ctrl->irq_no, (void *)jpeg_irq, IRQF_DISABLED, pdev->name, jpeg_ctrl); if (ret != 0) { jpeg_err("failed to jpeg request irq\n"); ret = -ENOENT; goto err_irq; } /* clock */ jpeg_ctrl->clk = clk_get(&pdev->dev, "jpeg"); if (IS_ERR(jpeg_ctrl->clk)) { jpeg_err("failed to find jpeg clock source\n"); ret = -ENOENT; goto err_clk; } ret = jpeg_init_mem(&pdev->dev, &jpeg_ctrl->mem.base); if (ret != 0) { jpeg_err("failed to init. 
jpeg mem"); ret = -ENOMEM; goto err_mem; } ret = misc_register(&jpeg_miscdev); if (ret) { jpeg_err("failed to register misc driver\n"); goto err_reg; } jpeg_pm = &pdev->dev; #ifdef CONFIG_PM_RUNTIME pm_runtime_enable(jpeg_pm); #endif return 0; err_reg: clk_put(jpeg_ctrl->clk); err_mem: err_clk: free_irq(jpeg_ctrl->irq_no, NULL); err_irq: iounmap(jpeg_ctrl->reg_base); err_map: err_region: kfree(res); err_res: mutex_destroy(&jpeg_ctrl->lock); err_setup: kfree(jpeg_ctrl); err_alloc: return ret; }
/*
 * n2_run - bring up one RISCom/N2 card.
 *
 * Validates the user-supplied I/O port, IRQ and RAM window parameters,
 * allocates the card and its two HDLC devices, claims the I/O/IRQ/memory
 * resources, sizes the TX/RX packet rings from the detected RAM, and
 * registers an hdlc device for each valid channel.
 *
 * @valid0/@valid1: non-zero if the corresponding physical port is in use.
 *
 * Returns 0 on success, negative errno on failure; n2_destroy_card()
 * performs all cleanup on the failure paths.
 *
 * NOTE(review): if both valid0 and valid1 are zero, the ring-sizing
 * division below divides by zero — presumably the caller guarantees at
 * least one valid port; confirm at the call site.
 * NOTE(review): `new_card` is not declared here — assumed to be a
 * file-scope list-tail pointer; verify in the full file.
 */
static int __init n2_run(unsigned long io, unsigned long irq,
			 unsigned long winbase, long valid0, long valid1)
{
	card_t *card;
	u8 cnt, pcr;
	int i;

	/* Parameter sanity: ISA I/O port, IRQ line and RAM window base. */
	if (io < 0x200 || io > 0x3FF || (io % N2_IOPORTS) != 0) {
		printk(KERN_ERR "n2: invalid I/O port value\n");
		return -ENODEV;
	}

	if (irq < 3 || irq > 15 || irq == 6) /* FIXME */ {
		printk(KERN_ERR "n2: invalid IRQ value\n");
		return -ENODEV;
	}

	if (winbase < 0xA0000 || winbase > 0xFFFFF || (winbase & 0xFFF) != 0) {
		printk(KERN_ERR "n2: invalid RAM value\n");
		return -ENODEV;
	}

	card = kzalloc(sizeof(card_t), GFP_KERNEL);
	if (card == NULL) {
		printk(KERN_ERR "n2: unable to allocate memory\n");
		return -ENOBUFS;
	}

	/* Both net devices are allocated up front; n2_destroy_card()
	 * frees whichever were created. */
	card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
	card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
	if (!card->ports[0].dev || !card->ports[1].dev) {
		printk(KERN_ERR "n2: unable to allocate memory\n");
		n2_destroy_card(card);
		return -ENOMEM;
	}

	if (!request_region(io, N2_IOPORTS, devname)) {
		printk(KERN_ERR "n2: I/O port region in use\n");
		n2_destroy_card(card);
		return -EBUSY;
	}
	card->io = io;

	if (request_irq(irq, &sca_intr, 0, devname, card)) {
		printk(KERN_ERR "n2: could not allocate IRQ\n");
		n2_destroy_card(card);
		return(-EBUSY);
	}
	card->irq = irq;

	if (!request_mem_region(winbase, USE_WINDOWSIZE, devname)) {
		printk(KERN_ERR "n2: could not request RAM window\n");
		n2_destroy_card(card);
		return(-EBUSY);
	}
	card->phy_winbase = winbase;
	card->winbase = ioremap(winbase, USE_WINDOWSIZE);
	if (!card->winbase) {
		printk(KERN_ERR "n2: ioremap() failed\n");
		n2_destroy_card(card);
		return -EFAULT;
	}

	/* Program the card: disable window, set window base page... */
	outb(0, io + N2_PCR);
	outb(winbase >> 12, io + N2_BAR);

	/* ...and the window size select. */
	switch (USE_WINDOWSIZE) {
	case 16384:
		outb(WIN16K, io + N2_PSR);
		break;

	case 32768:
		outb(WIN32K, io + N2_PSR);
		break;

	case 65536:
		outb(WIN64K, io + N2_PSR);
		break;

	default:
		printk(KERN_ERR "n2: invalid window size\n");
		n2_destroy_card(card);
		return -ENODEV;
	}

	pcr = PCR_ENWIN | PCR_VPM | (USE_BUS16BITS ? PCR_BUS16 : 0);
	outb(pcr, io + N2_PCR);

	card->ram_size = sca_detect_ram(card, card->winbase, MAX_RAM_SIZE);

	/* number of TX + RX buffers for one port */
	i = card->ram_size / ((valid0 + valid1) *
			      (sizeof(pkt_desc) + HDLC_MAX_MRU));

	card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
	card->rx_ring_buffers = i - card->tx_ring_buffers;

	/* Packet data starts after all descriptor rings. */
	card->buff_offset = (valid0 + valid1) * sizeof(pkt_desc) *
		(card->tx_ring_buffers + card->rx_ring_buffers);

	printk(KERN_INFO "n2: RISCom/N2 %u KB RAM, IRQ%u, "
	       "using %u TX + %u RX packets rings\n", card->ram_size / 1024,
	       card->irq, card->tx_ring_buffers, card->rx_ring_buffers);

	/* Zero usable rings means the RAM probe found nothing workable. */
	if (card->tx_ring_buffers < 1) {
		printk(KERN_ERR "n2: RAM test failed\n");
		n2_destroy_card(card);
		return -EIO;
	}

	pcr |= PCR_RUNSCA;		/* run SCA */
	outb(pcr, io + N2_PCR);
	outb(0, io + N2_MCR);

	sca_init(card, 0);
	for (cnt = 0; cnt < 2; cnt++) {
		port_t *port = &card->ports[cnt];
		struct net_device *dev = port_to_dev(port);
		hdlc_device *hdlc = dev_to_hdlc(dev);

		/* Skip channels the caller did not enable. */
		if ((cnt == 0 && !valid0) || (cnt == 1 && !valid1))
			continue;

		port->phy_node = cnt;
		port->valid = 1;

		/* Second channel gets logical node 1 only when both exist. */
		if ((cnt == 1) && valid0)
			port->log_node = 1;

		spin_lock_init(&port->lock);
		dev->irq = irq;
		dev->mem_start = winbase;
		dev->mem_end = winbase + USE_WINDOWSIZE - 1;
		dev->tx_queue_len = 50;
		dev->do_ioctl = n2_ioctl;
		dev->open = n2_open;
		dev->stop = n2_close;
		hdlc->attach = sca_attach;
		hdlc->xmit = sca_xmit;
		port->settings.clock_type = CLOCK_EXT;
		port->card = card;

		if (register_hdlc_device(dev)) {
			printk(KERN_WARNING "n2: unable to register hdlc "
			       "device\n");
			port->card = NULL;
			n2_destroy_card(card);
			return -ENOBUFS;
		}

		sca_init_port(port); /* Set up SCA memory */

		printk(KERN_INFO "%s: RISCom/N2 node %d\n",
		       dev->name, port->phy_node);
	}

	/* Link the card onto the driver's card list. */
	*new_card = card;
	new_card = &card->next_card;

	return 0;
}
/**
   @brief wpalDeviceInit provides a mechanism to initialize the DXE
          platform adaptation

   Looks up the WCNSS memory map and the DXE TX/RX IRQs from the supplied
   device, then populates the single global environment (gEnv/gpEnv) and
   maps the WCNSS register space.

   @param  devHandle : Implementation-specific device control block
                       (actually a struct device *)

   @see    wpalDeviceClose

   @return eWLAN_PAL_STATUS_SUCCESS if the DXE abstraction was opened,
           an eWLAN_PAL_STATUS_E_* code otherwise
*/
wpt_status wpalDeviceInit ( void * devHandle )
{
   struct device *wcnss_device = (struct device *)devHandle;
   struct resource *wcnss_memory;
   int tx_irq;
   int rx_irq;

   /* Guard against double initialization. */
   if (NULL != gpEnv) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: invoked after subsystem initialized",
                 __func__);
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   if (NULL == wcnss_device) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: invalid device", __func__);
      return eWLAN_PAL_STATUS_E_INVAL;
   }

   wcnss_memory = wcnss_wlan_get_memory_map(wcnss_device);
   if (NULL == wcnss_memory) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: WCNSS memory map unavailable", __func__);
      return eWLAN_PAL_STATUS_E_FAILURE;
   }

   tx_irq = wcnss_wlan_get_dxe_tx_irq(wcnss_device);
   if (0 > tx_irq) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: WCNSS TX IRQ unavailable", __func__);
      return eWLAN_PAL_STATUS_E_FAILURE;
   }

   rx_irq = wcnss_wlan_get_dxe_rx_irq(wcnss_device);
   if (0 > rx_irq) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: WCNSS RX IRQ unavailable", __func__);
      return eWLAN_PAL_STATUS_E_FAILURE;
   }

   /* The environment lives in the static gEnv; no heap allocation. */
   gpEnv = &gEnv;

   /* NOTE(review): &gEnv can never be NULL — this check is dead code,
      presumably left over from a kmalloc-based version. */
   if (NULL == gpEnv) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: memory allocation failure", __func__);
      return eWLAN_PAL_STATUS_E_NOMEM;
   }

   memset(gpEnv, 0, sizeof(*gpEnv));

   gpEnv->wcnss_memory = wcnss_memory;
   gpEnv->tx_irq = tx_irq;
   gpEnv->rx_irq = rx_irq;

   /* note the we don't invoke request_mem_region().
      the memory described by wcnss_memory encompases the entire
      register space (including BT and FM) and we do not want
      exclusive access to that memory */
   gpEnv->mmio = ioremap(wcnss_memory->start, resource_size(wcnss_memory));

   if (NULL == gpEnv->mmio) {
      WPAL_TRACE(eWLAN_MODULE_DAL_DATA, eWLAN_PAL_TRACE_LEVEL_ERROR,
                 "%s: memory remap failure", __func__);
      goto err_ioremap;
   }

   gpEnv->tx_registered = 0;
   gpEnv->rx_registered = 0;

   /* successfully allocated environment, memory and IRQs */
   return eWLAN_PAL_STATUS_SUCCESS;

 err_ioremap:
   /* Undo initialization so a later retry starts clean. */
   gpEnv = NULL;

   return eWLAN_PAL_STATUS_E_FAILURE;
}
/*
 * pci_vtd_quirk - apply Intel platform quirks needed for safe VT-d use.
 *
 * Matched purely on Intel device IDs; no-op for other vendors.  Three
 * classes of quirk are applied, keyed on the PCI device ID:
 *  1. chipsets where VT-d faults must be masked off from IOH core logic
 *     (a malicious guest could otherwise escalate non-fatal VT-d faults
 *     into platform errors);
 *  2. host bridges / root ports where "Unsupported Request" must be
 *     masked in the AER (or vendor-specific equivalent) capability;
 *  3. client CPUs where the same UR masking is done through a bit in the
 *     memory-mapped region addressed by the DMIBAR register pair.
 */
void pci_vtd_quirk(const struct pci_dev *pdev)
{
    int seg = pdev->seg;
    int bus = pdev->bus;
    int dev = PCI_SLOT(pdev->devfn);
    int func = PCI_FUNC(pdev->devfn);
    int pos;
    bool_t ff;
    u32 val, val2;
    u64 bar;
    paddr_t pa;
    const char *action;

    if ( pci_conf_read16(seg, bus, dev, func, PCI_VENDOR_ID) !=
         PCI_VENDOR_ID_INTEL )
        return;

    switch ( pci_conf_read16(seg, bus, dev, func, PCI_DEVICE_ID) )
    {
    /*
     * Mask reporting Intel VT-d faults to IOH core logic:
     * - Some platform escalates VT-d faults to platform errors.
     * - This can cause system failure upon non-fatal VT-d faults.
     * - Potential security issue if malicious guest trigger VT-d faults.
     */
    case 0x0e28: /* Xeon-E5v2 (IvyBridge) */
    case 0x342e: /* Tylersburg chipset (Nehalem / Westmere systems) */
    case 0x3728: /* Xeon C5500/C3500 (JasperForest) */
    case 0x3c28: /* Sandybridge */
        /* Undocumented register 0x1AC, bit 31: fault-reporting mask. */
        val = pci_conf_read32(seg, bus, dev, func, 0x1AC);
        pci_conf_write32(seg, bus, dev, func, 0x1AC, val | (1 << 31));
        printk(XENLOG_INFO "Masked VT-d error signaling on %04x:%02x:%02x.%u\n",
               seg, bus, dev, func);
        break;

    /* Tylersburg (EP)/Boxboro (MP) chipsets (NHM-EP/EX, WSM-EP/EX) */
    case 0x3400 ... 0x3407: /* host bridges */
    case 0x3408 ... 0x3411: case 0x3420 ... 0x3421: /* root ports */
    /* JasperForest (Intel Xeon Processor C5500/C3500 */
    case 0x3700 ... 0x370f: /* host bridges */
    case 0x3720 ... 0x3724: /* root ports */
    /* Sandybridge-EP (Romley) */
    case 0x3c00: /* host bridge */
    case 0x3c01 ... 0x3c0b: /* root ports */
        pos = pci_find_ext_capability(seg, bus, pdev->devfn,
                                      PCI_EXT_CAP_ID_ERR);
        if ( !pos )
        {
            /* No AER capability: fall back to the vendor-specific
             * capability with header ID 4, revision 1, whose layout
             * mirrors AER on these parts. */
            pos = pci_find_ext_capability(seg, bus, pdev->devfn,
                                          PCI_EXT_CAP_ID_VNDR);
            while ( pos )
            {
                val = pci_conf_read32(seg, bus, dev, func,
                                      pos + PCI_VNDR_HEADER);
                if ( PCI_VNDR_HEADER_ID(val) == 4 &&
                     PCI_VNDR_HEADER_REV(val) == 1 )
                {
                    pos += PCI_VNDR_HEADER;
                    break;
                }
                pos = pci_find_next_ext_capability(seg, bus, pdev->devfn, pos,
                                                   PCI_EXT_CAP_ID_VNDR);
            }
            ff = 0;
        }
        else
            /* With real AER, firmware-first handling forbids us from
             * touching the masks. */
            ff = pcie_aer_get_firmware_first(pdev);
        if ( !pos )
        {
            printk(XENLOG_WARNING "%04x:%02x:%02x.%u without AER capability?\n",
                   seg, bus, dev, func);
            break;
        }

        val = pci_conf_read32(seg, bus, dev, func, pos + PCI_ERR_UNCOR_MASK);
        val2 = pci_conf_read32(seg, bus, dev, func, pos + PCI_ERR_COR_MASK);
        if ( (val & PCI_ERR_UNC_UNSUP) && (val2 & PCI_ERR_COR_ADV_NFAT) )
            action = "Found masked";
        else if ( !ff )
        {
            pci_conf_write32(seg, bus, dev, func, pos + PCI_ERR_UNCOR_MASK,
                             val | PCI_ERR_UNC_UNSUP);
            pci_conf_write32(seg, bus, dev, func, pos + PCI_ERR_COR_MASK,
                             val2 | PCI_ERR_COR_ADV_NFAT);
            action = "Masked";
        }
        else
            action = "Must not mask";

        /* XPUNCERRMSK Send Completion with Unsupported Request */
        val = pci_conf_read32(seg, bus, dev, func, 0x20c);
        pci_conf_write32(seg, bus, dev, func, 0x20c, val | (1 << 4));

        printk(XENLOG_INFO "%s UR signaling on %04x:%02x:%02x.%u\n",
               action, seg, bus, dev, func);
        break;

    case 0x0040: case 0x0044: case 0x0048: /* Nehalem/Westmere */
    case 0x0100: case 0x0104: case 0x0108: /* Sandybridge */
    case 0x0150: case 0x0154: case 0x0158: /* Ivybridge */
    case 0x0a00: case 0x0a04: case 0x0a08: case 0x0a0f: /* Haswell ULT */
    case 0x0c00: case 0x0c04: case 0x0c08: case 0x0c0f: /* Haswell */
    case 0x0d00: case 0x0d04: case 0x0d08: case 0x0d0f: /* Haswell */
    case 0x1600: case 0x1604: case 0x1608: case 0x160f: /* Broadwell */
    case 0x1610: case 0x1614: case 0x1618: /* Broadwell */
    case 0x1900: case 0x1904: case 0x1908: case 0x190c: case 0x190f: /* Skylake */
    case 0x1910: case 0x1918: case 0x191f: /* Skylake */
        /* DMIBAR is a 64-bit register pair at config offsets 0x68/0x6c;
         * bit 0 is the enable bit. */
        bar = pci_conf_read32(seg, bus, dev, func, 0x6c);
        bar = (bar << 32) | pci_conf_read32(seg, bus, dev, func, 0x68);
        pa = bar & 0x7ffffff000UL; /* bits 12...38 */
        if ( (bar & 1) && pa &&
             page_is_ram_type(paddr_to_pfn(pa), RAM_TYPE_RESERVED) )
        {
            u32 __iomem *va = ioremap(pa, PAGE_SIZE);

            if ( va )
            {
                /* Set the UR-mask bit (bit 20 of the dword at 0x1c8)
                 * inside the DMIBAR window. */
                __set_bit(0x1c8 * 8 + 20, va);
                iounmap(va);
                printk(XENLOG_INFO "Masked UR signaling on %04x:%02x:%02x.%u\n",
                       seg, bus, dev, func);
            }
            else
                printk(XENLOG_ERR "Could not map %"PRIpaddr" for %04x:%02x:%02x.%u\n",
                       pa, seg, bus, dev, func);
        }
        else
            printk(XENLOG_WARNING "Bogus DMIBAR %#"PRIx64" on %04x:%02x:%02x.%u\n",
                   bar, seg, bus, dev, func);
        break;
    }
}
/*
 * usb_hcd_omap_probe - initialize the OMAP on-chip OHCI controller.
 *
 * Expects exactly two platform resources: [0] the MMIO register window
 * and [1] the IRQ.  Acquires the host and (non-15xx) DC clocks, creates
 * the HCD, maps the registers and hands off to usb_add_hcd().
 *
 * Returns 0 on success or a negative errno; the err0..err3 chain unwinds
 * in reverse order of acquisition.
 */
static int usb_hcd_omap_probe (const struct hc_driver *driver,
			  struct platform_device *pdev)
{
	int retval, irq;
	struct usb_hcd *hcd = 0;	/* 0 used as NULL */
	struct ohci_hcd *ohci;

	/* This probe insists on exactly MEM + IRQ, in that order. */
	if (pdev->num_resources != 2) {
		printk(KERN_ERR "hcd probe: invalid num_resources: %i\n",
		       pdev->num_resources);
		return -ENODEV;
	}

	if (pdev->resource[0].flags != IORESOURCE_MEM
			|| pdev->resource[1].flags != IORESOURCE_IRQ) {
		printk(KERN_ERR "hcd probe: invalid resource type\n");
		return -ENODEV;
	}

	usb_host_ck = clk_get(&pdev->dev, "usb_hhc_ck");
	if (IS_ERR(usb_host_ck))
		return PTR_ERR(usb_host_ck);

	/* OMAP15xx uses the LB clock in place of a dedicated DC clock. */
	if (!cpu_is_omap15xx())
		usb_dc_ck = clk_get(&pdev->dev, "usb_dc_ck");
	else
		usb_dc_ck = clk_get(&pdev->dev, "lb_ck");

	if (IS_ERR(usb_dc_ck)) {
		clk_put(usb_host_ck);
		return PTR_ERR(usb_dc_ck);
	}

	hcd = usb_create_hcd (driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		retval = -ENOMEM;
		goto err0;
	}
	hcd->rsrc_start = pdev->resource[0].start;
	hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
		dev_dbg(&pdev->dev, "request_mem_region failed\n");
		retval = -EBUSY;
		goto err1;
	}

	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_err(&pdev->dev, "can't ioremap OHCI HCD\n");
		retval = -ENOMEM;
		goto err2;
	}

	ohci = hcd_to_ohci(hcd);
	ohci_hcd_init(ohci);

	/* Flags consulted by the start/power code elsewhere in this file. */
	host_initialized = 0;
	host_enabled = 1;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		retval = -ENXIO;
		goto err3;
	}
	retval = usb_add_hcd(hcd, irq, 0);
	if (retval)
		goto err3;

	host_initialized = 1;

	/* If start-up disabled the host, drop the clock again. */
	if (!host_enabled)
		omap_ohci_clock_power(0);

	return 0;
err3:
	iounmap(hcd->regs);
err2:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
	usb_put_hcd(hcd);
err0:
	clk_put(usb_dc_ck);
	clk_put(usb_host_ck);
	return retval;
}
/*
 * atmel_tsadcc_probe - probe the Atmel TSADCC touchscreen controller.
 *
 * Maps the controller registers, hooks the IRQ, acquires the "tsc_clk"
 * clock, programs the ADC mode/prescaler from platform data, and
 * registers an input device reporting ABS_X/ABS_Y/BTN_TOUCH.
 *
 * Returns 0 on success or a negative errno; the err_* chain unwinds all
 * acquired resources.
 *
 * Fixes versus the previous version:
 *  - missing platform data jumped to err_fail with `err` still 0, so the
 *    probe tore everything down yet reported success; it now returns
 *    -EINVAL.
 *  - input_allocate_device() failure returned -EBUSY; it now returns
 *    -ENOMEM, the conventional code for an allocation failure.
 */
static int __devinit atmel_tsadcc_probe(struct platform_device *pdev)
{
	struct atmel_tsadcc *ts_dev;
	struct input_dev *input_dev;
	struct resource *res;
	struct at91_tsadcc_data *pdata = pdev->dev.platform_data;
	int err = 0;
	unsigned int prsc;
	unsigned int reg;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "no mmio resource defined.\n");
		return -ENXIO;
	}

	/* Allocate memory for device */
	ts_dev = kzalloc(sizeof(struct atmel_tsadcc), GFP_KERNEL);
	if (!ts_dev) {
		dev_err(&pdev->dev, "failed to allocate memory.\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, ts_dev);

	input_dev = input_allocate_device();
	if (!input_dev) {
		dev_err(&pdev->dev, "failed to allocate input device.\n");
		err = -ENOMEM;
		goto err_free_mem;
	}

	ts_dev->irq = platform_get_irq(pdev, 0);
	if (ts_dev->irq < 0) {
		dev_err(&pdev->dev, "no irq ID is designated.\n");
		err = -ENODEV;
		goto err_free_dev;
	}

	if (!request_mem_region(res->start, resource_size(res),
				"atmel tsadcc regs")) {
		dev_err(&pdev->dev, "resources is unavailable.\n");
		err = -EBUSY;
		goto err_free_dev;
	}

	tsc_base = ioremap(res->start, resource_size(res));
	if (!tsc_base) {
		dev_err(&pdev->dev, "failed to map registers.\n");
		err = -ENOMEM;
		goto err_release_mem;
	}

	err = request_irq(ts_dev->irq, atmel_tsadcc_interrupt, IRQF_DISABLED,
			pdev->dev.driver->name, ts_dev);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate irq.\n");
		goto err_unmap_regs;
	}

	ts_dev->clk = clk_get(&pdev->dev, "tsc_clk");
	if (IS_ERR(ts_dev->clk)) {
		dev_err(&pdev->dev, "failed to get ts_clk\n");
		err = PTR_ERR(ts_dev->clk);
		goto err_free_irq;
	}

	ts_dev->input = input_dev;
	ts_dev->bufferedmeasure = 0;

	snprintf(ts_dev->phys, sizeof(ts_dev->phys),
		 "%s/input0", dev_name(&pdev->dev));

	input_dev->name = "atmel touch screen controller";
	input_dev->phys = ts_dev->phys;
	input_dev->dev.parent = &pdev->dev;

	__set_bit(EV_ABS, input_dev->evbit);
	input_set_abs_params(input_dev, ABS_X, 0, 0x3FF, 0, 0);
	input_set_abs_params(input_dev, ABS_Y, 0, 0x3FF, 0, 0);
	input_set_capability(input_dev, EV_KEY, BTN_TOUCH);

	/* clk_enable() always returns 0, no need to check it */
	clk_enable(ts_dev->clk);

	prsc = clk_get_rate(ts_dev->clk);
	dev_info(&pdev->dev, "Master clock is set at: %d Hz\n", prsc);

	if (!pdata) {
		/* Without platform data the ADC cannot be configured. */
		err = -EINVAL;
		goto err_fail;
	}

	if (!pdata->adc_clock)
		pdata->adc_clock = ADC_DEFAULT_CLOCK;

	prsc = (prsc / (2 * pdata->adc_clock)) - 1;

	/* saturate if this value is too high */
	if (cpu_is_at91sam9rl()) {
		if (prsc > PRESCALER_VAL(ATMEL_TSADCC_PRESCAL))
			prsc = PRESCALER_VAL(ATMEL_TSADCC_PRESCAL);
	} else {
		if (prsc > PRESCALER_VAL(ATMEL_TSADCC_EPRESCAL))
			prsc = PRESCALER_VAL(ATMEL_TSADCC_EPRESCAL);
	}

	dev_info(&pdev->dev, "Prescaler is set at: %d\n", prsc);

	reg = ATMEL_TSADCC_TSAMOD_TS_ONLY_MODE |
		((0x00 << 5) & ATMEL_TSADCC_SLEEP)	|	/* Normal Mode */
		((0x01 << 6) & ATMEL_TSADCC_PENDET)	|	/* Enable Pen Detect */
		(prsc << 8)				|
		((0x26 << 16) & ATMEL_TSADCC_STARTUP)	|
		((pdata->pendet_debounce << 28) & ATMEL_TSADCC_PENDBC);

	/* Reset the controller, then program mode, trigger and timing. */
	atmel_tsadcc_write(ATMEL_TSADCC_CR, ATMEL_TSADCC_SWRST);
	atmel_tsadcc_write(ATMEL_TSADCC_MR, reg);
	atmel_tsadcc_write(ATMEL_TSADCC_TRGR, ATMEL_TSADCC_TRGMOD_NONE);
	atmel_tsadcc_write(ATMEL_TSADCC_TSR,
		(pdata->ts_sample_hold_time << 24) & ATMEL_TSADCC_TSSHTIM);

	/* Clear pending status, then enable the pen-contact interrupt. */
	atmel_tsadcc_read(ATMEL_TSADCC_SR);
	atmel_tsadcc_write(ATMEL_TSADCC_IER, ATMEL_TSADCC_PENCNT);

	/* All went ok, so register to the input system */
	err = input_register_device(input_dev);
	if (err)
		goto err_fail;

	return 0;

err_fail:
	clk_disable(ts_dev->clk);
	clk_put(ts_dev->clk);
err_free_irq:
	free_irq(ts_dev->irq, ts_dev);
err_unmap_regs:
	iounmap(tsc_base);
err_release_mem:
	release_mem_region(res->start, resource_size(res));
err_free_dev:
	input_free_device(input_dev);
err_free_mem:
	kfree(ts_dev);
	return err;
}
/*
 * s3c2410wdt_probe - probe the S3C2410 watchdog timer device.
 *
 * Maps the watchdog registers, hooks the IRQ, enables the "watchdog"
 * clock, registers with cpufreq, programs the heartbeat and registers the
 * watchdog misc device.  Depending on the tmr_atboot module parameter the
 * watchdog is either started immediately or explicitly stopped (in case
 * the bootloader left it running).
 *
 * Returns 0 on success or a negative errno.
 *
 * Fix versus the previous version: a s3c2410wdt_cpufreq_register()
 * failure jumped to err_clk without assigning `ret`, so the probe
 * returned the stale 0 from request_irq() — i.e. reported success after
 * tearing everything down.  The error code is now captured and
 * propagated.
 */
static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
{
	struct device *dev;
	unsigned int wtcon;
	int started = 0;
	int ret;
	int size;

	DBG("%s: probe=%p\n", __func__, pdev);

	dev = &pdev->dev;
	wdt_dev = &pdev->dev;

	/* get the memory region for the watchdog timer */
	wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (wdt_mem == NULL) {
		dev_err(dev, "no memory resource specified\n");
		return -ENOENT;
	}

	size = resource_size(wdt_mem);
	if (!request_mem_region(wdt_mem->start, size, pdev->name)) {
		dev_err(dev, "failed to get memory region\n");
		return -EBUSY;
	}

	wdt_base = ioremap(wdt_mem->start, size);
	if (wdt_base == NULL) {
		dev_err(dev, "failed to ioremap() region\n");
		ret = -EINVAL;
		goto err_req;
	}

	DBG("probe: mapped wdt_base=%p\n", wdt_base);

	wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (wdt_irq == NULL) {
		dev_err(dev, "no irq resource specified\n");
		ret = -ENOENT;
		goto err_map;
	}

	ret = request_irq(wdt_irq->start, s3c2410wdt_irq, 0, pdev->name, pdev);
	if (ret != 0) {
		dev_err(dev, "failed to install irq (%d)\n", ret);
		goto err_map;
	}

	wdt_clock = clk_get(&pdev->dev, "watchdog");
	if (IS_ERR(wdt_clock)) {
		dev_err(dev, "failed to find watchdog clock source\n");
		ret = PTR_ERR(wdt_clock);
		goto err_irq;
	}

	clk_enable(wdt_clock);

	/* Capture the error so a failure here is actually reported
	 * (previously ret was left at 0 and the probe "succeeded"). */
	ret = s3c2410wdt_cpufreq_register();
	if (ret < 0) {
		printk(KERN_ERR PFX "failed to register cpufreq\n");
		goto err_clk;
	}

	/* see if we can actually set the requested timer margin, and if
	 * not, try the default value */

	if (s3c2410wdt_set_heartbeat(tmr_margin)) {
		started = s3c2410wdt_set_heartbeat(
					CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);

		if (started == 0)
			dev_info(dev,
			   "tmr_margin value out of range, default %d used\n",
			       CONFIG_S3C2410_WATCHDOG_DEFAULT_TIME);
		else
			dev_info(dev, "default timer value is out of range, "
							"cannot start\n");
	}

	ret = misc_register(&s3c2410wdt_miscdev);
	if (ret) {
		dev_err(dev, "cannot register miscdev on minor=%d (%d)\n",
			WATCHDOG_MINOR, ret);
		goto err_cpufreq;
	}

	if (tmr_atboot && started == 0) {
		dev_info(dev, "starting watchdog timer\n");
		s3c2410wdt_start();
	} else if (!tmr_atboot) {
		/* if we're not enabling the watchdog, then ensure it is
		 * disabled if it has been left running from the bootloader
		 * or other source */

		s3c2410wdt_stop();
	}

	/* print out a statement of readiness */

	wtcon = readl(wdt_base + S3C2410_WTCON);

	dev_info(dev, "watchdog %sactive, reset %sabled, irq %sabled\n",
		 (wtcon & S3C2410_WTCON_ENABLE) ?  "" : "in",
		 (wtcon & S3C2410_WTCON_RSTEN) ? "" : "dis",
		 (wtcon & S3C2410_WTCON_INTEN) ? "" : "en");

	return 0;

	/* Unwind in reverse order of acquisition. */
 err_cpufreq:
	s3c2410wdt_cpufreq_deregister();

 err_clk:
	clk_disable(wdt_clock);
	clk_put(wdt_clock);

 err_irq:
	free_irq(wdt_irq->start, pdev);

 err_map:
	iounmap(wdt_base);

 err_req:
	release_mem_region(wdt_mem->start, size);
	wdt_mem = NULL;

	return ret;
}
static int ehci_msm_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct resource *res; int ret; dev_dbg(&pdev->dev, "ehci_msm proble\n"); hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { dev_err(&pdev->dev, "Unable to create HCD\n"); return -ENOMEM; } hcd_to_bus(hcd)->skip_resume = true; hcd->irq = platform_get_irq(pdev, 0); if (hcd->irq < 0) { dev_err(&pdev->dev, "Unable to get IRQ resource\n"); ret = hcd->irq; goto put_hcd; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Unable to get memory resource\n"); ret = -ENODEV; goto put_hcd; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto put_hcd; } /* * OTG driver takes care of PHY initialization, clock management, * powering up VBUS, mapping of registers address space and power * management. */ phy = usb_get_transceiver(); if (!phy) { dev_err(&pdev->dev, "unable to find transceiver\n"); ret = -ENODEV; goto unmap; } ret = otg_set_host(phy->otg, &hcd->self); if (ret < 0) { dev_err(&pdev->dev, "unable to register with transceiver\n"); goto put_transceiver; } hcd_to_ehci(hcd)->transceiver = phy; device_init_wakeup(&pdev->dev, 1); pm_runtime_enable(&pdev->dev); return 0; put_transceiver: usb_put_transceiver(phy); unmap: iounmap(hcd->regs); put_hcd: usb_put_hcd(hcd); return ret; }
/**
 * amba_device_register - register an AMBA device
 * @dev: AMBA device to register
 * @parent: parent memory resource
 *
 * Setup the AMBA device, reading the cell ID if present.
 * Claim the resource, and register the AMBA device with
 * the Linux device manager.
 *
 * Returns 0 on success or a negative errno.  On failure the claimed
 * resource is released again.
 */
int amba_device_register(struct amba_device *dev, struct resource *parent)
{
	u32 size;
	void __iomem *tmp;
	int i, ret;

	device_initialize(&dev->dev);

	/*
	 * Copy from device_add
	 */
	if (dev->dev.init_name) {
		dev_set_name(&dev->dev, "%s", dev->dev.init_name);
		dev->dev.init_name = NULL;
	}

	dev->dev.release = amba_device_release;
	dev->dev.bus = &amba_bustype;
	dev->dev.dma_mask = &dev->dma_mask;
	dev->res.name = dev_name(&dev->dev);

	if (!dev->dev.coherent_dma_mask && dev->dma_mask)
		dev_warn(&dev->dev, "coherent dma mask is unset\n");

	ret = request_resource(parent, &dev->res);
	if (ret)
		goto err_out;

	/*
	 * Dynamically calculate the size of the resource
	 * and use this for iomap
	 */
	size = resource_size(&dev->res);
	tmp = ioremap(dev->res.start, size);
	if (!tmp) {
		ret = -ENOMEM;
		goto err_release;
	}

	/* Identification only works with the peripheral clock running. */
	ret = amba_get_enable_pclk(dev);
	if (ret == 0) {
		u32 pid, cid;

		/*
		 * Read pid and cid based on size of resource
		 * they are located at end of region
		 */
		for (pid = 0, i = 0; i < 4; i++)
			pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) << (i * 8);
		for (cid = 0, i = 0; i < 4; i++)
			cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << (i * 8);

		amba_put_disable_pclk(dev);

		/* 0xb105f00d is the fixed AMBA cell identification value. */
		if (cid == 0xb105f00d)
			dev->periphid = pid;

		if (!dev->periphid)
			ret = -ENODEV;
	}

	iounmap(tmp);

	if (ret)
		goto err_release;

	ret = device_add(&dev->dev);
	if (ret)
		goto err_release;

	/* Expose sysfs attributes for whichever IRQs are wired up. */
	if (dev->irq[0] != NO_IRQ)
		ret = device_create_file(&dev->dev, &dev_attr_irq0);
	if (ret == 0 && dev->irq[1] != NO_IRQ)
		ret = device_create_file(&dev->dev, &dev_attr_irq1);
	if (ret == 0)
		return ret;

	/* NOTE(review): after a device_create_file failure the device is
	 * unregistered AND the resource released below — verify the
	 * release callback does not also release it (double release). */
	device_unregister(&dev->dev);

 err_release:
	release_resource(&dev->res);
 err_out:
	return ret;
}
static int sossi_init(struct omapfb_device *fbdev) { u32 l, k; struct clk *fck; struct clk *dpll1out_ck; int r; sossi.base = ioremap(OMAP_SOSSI_BASE, SZ_1K); if (!sossi.base) { dev_err(fbdev->dev, "can't ioremap SoSSI\n"); return -ENOMEM; } sossi.fbdev = fbdev; spin_lock_init(&sossi.lock); dpll1out_ck = clk_get(fbdev->dev, "ck_dpll1out"); if (IS_ERR(dpll1out_ck)) { dev_err(fbdev->dev, "can't get DPLL1OUT clock\n"); return PTR_ERR(dpll1out_ck); } /* * We need the parent clock rate, which we might divide further * depending on the timing requirements of the controller. See * _set_timings. */ sossi.fck_hz = clk_get_rate(dpll1out_ck); clk_put(dpll1out_ck); fck = clk_get(fbdev->dev, "ck_sossi"); if (IS_ERR(fck)) { dev_err(fbdev->dev, "can't get SoSSI functional clock\n"); return PTR_ERR(fck); } sossi.fck = fck; /* Reset and enable the SoSSI module */ l = omap_readl(MOD_CONF_CTRL_1); l |= CONF_SOSSI_RESET_R; omap_writel(l, MOD_CONF_CTRL_1); l &= ~CONF_SOSSI_RESET_R; omap_writel(l, MOD_CONF_CTRL_1); clk_enable(sossi.fck); l = omap_readl(ARM_IDLECT2); l &= ~(1 << 8); /* DMACK_REQ */ omap_writel(l, ARM_IDLECT2); l = sossi_read_reg(SOSSI_INIT2_REG); /* Enable and reset the SoSSI block */ l |= (1 << 0) | (1 << 1); sossi_write_reg(SOSSI_INIT2_REG, l); /* Take SoSSI out of reset */ l &= ~(1 << 1); sossi_write_reg(SOSSI_INIT2_REG, l); sossi_write_reg(SOSSI_ID_REG, 0); l = sossi_read_reg(SOSSI_ID_REG); k = sossi_read_reg(SOSSI_ID_REG); if (l != 0x55555555 || k != 0xaaaaaaaa) { dev_err(fbdev->dev, "invalid SoSSI sync pattern: %08x, %08x\n", l, k); r = -ENODEV; goto err; } if ((r = omap_lcdc_set_dma_callback(sossi_dma_callback, NULL)) < 0) { dev_err(fbdev->dev, "can't get LCDC IRQ\n"); r = -ENODEV; goto err; } l = sossi_read_reg(SOSSI_ID_REG); /* Component code */ l = sossi_read_reg(SOSSI_ID_REG); dev_info(fbdev->dev, "SoSSI version %d.%d initialized\n", l >> 16, l & 0xffff); l = sossi_read_reg(SOSSI_INIT1_REG); l |= (1 << 19); /* DMA_MODE */ l &= ~(1 << 31); /* REORDERING */ 
sossi_write_reg(SOSSI_INIT1_REG, l); if ((r = request_irq(INT_1610_SoSSI_MATCH, sossi_match_irq, IRQ_TYPE_EDGE_FALLING, "sossi_match", sossi.fbdev->dev)) < 0) { dev_err(sossi.fbdev->dev, "can't get SoSSI match IRQ\n"); goto err; } clk_disable(sossi.fck); return 0; err: clk_disable(sossi.fck); clk_put(sossi.fck); return r; }
static int ixxat_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int ret, channel, conf_addr; unsigned long addr; void __iomem *base_addr; struct rtcan_device *master_dev = NULL; if ((ret = pci_enable_device (pdev))) goto failure; if ((ret = pci_request_regions(pdev, RTCAN_DRV_NAME))) goto failure; RTCAN_DBG("%s: Initializing device %04x:%04x:%04x\n", RTCAN_DRV_NAME, pdev->vendor, pdev->device, pdev->subsystem_device); /* Enable memory and I/O space */ if ((ret = pci_write_config_word(pdev, 0x04, 0x3))) goto failure_release_pci; conf_addr = pci_resource_start(pdev, 1); addr = pci_resource_start(pdev, 2); base_addr = ioremap(addr, IXXAT_BASE_PORT_SIZE); if (base_addr == 0) { ret = -ENODEV; goto failure_release_pci; } /* Check if second channel is available after reset */ writeb(0x1, base_addr + CHANNEL_MASTER_RESET); writeb(0x1, base_addr + CHANNEL_SLAVE_RESET); udelay(100); if ( (readb(base_addr + CHANNEL_OFFSET + SJA_MOD) & IXXAT_SJA_MOD_MASK ) != 0x21 || readb(base_addr + CHANNEL_OFFSET + SJA_SR ) != 0x0c || readb(base_addr + CHANNEL_OFFSET + SJA_IR ) != 0xe0) channel = CHANNEL_SINGLE; else channel = CHANNEL_MASTER; if ((ret = rtcan_ixxat_pci_add_chan(pdev, channel, &master_dev, conf_addr, base_addr))) goto failure_iounmap; if (channel != CHANNEL_SINGLE) { channel = CHANNEL_SLAVE; if ((ret = rtcan_ixxat_pci_add_chan(pdev, channel, &master_dev, conf_addr, base_addr + CHANNEL_OFFSET))) goto failure_iounmap; } pci_set_drvdata(pdev, master_dev); return 0; failure_iounmap: if (master_dev) rtcan_ixxat_pci_del_chan(master_dev); iounmap(base_addr); failure_release_pci: pci_release_regions(pdev); failure: return ret; }
//第二步 开始写入口函数 static int s3c_ts_init(void) { struct clk* clk; //第五步 细化,把要做的东西写下来,以下框架为所有触摸屏程序的框架 // 1.分配一个input_dev结构体 s3c_ts_dev = input_allocate_device(); // 2.设置,分两大类 参考8th Driver buttons.c // 2.1 能产生哪类事件 set_bit(EV_KEY, s3c_ts_dev->evbit); //EV_KEY表示会产生按键类事件,buttons_dev->evbit表示设置数组总的某一位可以产生案件类事件。 set_bit(EV_ABS, s3c_ts_dev->evbit); //绝对位移 // 2.2 能产生这类操作中的哪些事件 set_bit(BTN_TOUCH, s3c_ts_dev->keybit); //能够产生按键类事件中的触摸屏事件 input_set_abs_params(s3c_ts_dev, ABS_X, 0, 0x3FF, 0, 0); //最大尺寸为0x3ff. input_set_abs_params(s3c_ts_dev, ABS_Y, 0, 0x3FF, 0, 0); input_set_abs_params(s3c_ts_dev, ABS_PRESSURE, 0, 1, 0, 0);// 笔压力仅设置为两种 0和1. // 3.注册 input_register_device (s3c_ts_dev); // 4. 硬件相关的操作 硬件手册16章 // 4.1 使能时钟(也就是设置CLKCON的bit[15]) clk = clk_get(NULL, "abc"); clk_enable(clk); // 4.2 设置s3c2440的ADC (TS寄存器) // 操作TS寄存器,需要进行ioremap. 这样一个个的进行ioremap多麻烦, // 写一个结构体就比较简单struct s3c_ts_regs. s3c_ts_regs = ioremap(0x58000000, sizeof(struct s3c_ts_regs)); //物理地址 // 以下参数设置参照手册 444页 // bit[14] 1-A/D converter prescaler enable 欲分频使能 // bit[13:6] 1-A/D converter prescaler value 欲分频系数 // 设为49 ADCCLK = PCLK/(49+1) = 50MHz/(49+1)= 1MHz.0 // bit[5:3] 模拟输入频道选择,暂时不设置 // bit[2] STDBM // bit[0] A/D conversion starts by enable 先设为0; s3c_ts_regs->adccon = (1<<14) | (49<<6); request_irq(IRQ_TC, pen_down_up_irq, IRQF_SAMPLE_RANDOM, "ts_pen", NULL); //先设置一个中断,看触摸屏是否有动作,参考笔记 // 3th增加的自动分离测量模式,需要注册启动后的中断 request_irq(IRQ_ADC, adc_irq, IRQF_SAMPLE_RANDOM, "adc", NULL); //&&&&&&&&&&%%%%%%% 优化措施1 // 手册446 触摸屏有一个ADCDLY寄存器,设置为最大值,这使得电压稳定后再发出中断 s3c_ts_regs->adcdly = 0xffff; enter_wait_pen_down_mode(); //增加该函数别忘了在前面声明,等待触摸笔按下模式 return 0; }
/*
 * msm_csid_init - power up one MSM CSID (CSI decoder) core.
 * @csid_dev:     per-instance CSID driver state.
 * @csid_version: out parameter; receives the hardware version register.
 *
 * Maps the register block, configures and enables the regulators (the
 * 8960-class table for silicon older than V22, the generic table
 * otherwise), enables the clocks, reads the HW version, enables the IRQ
 * and resets the core.  Returns 0 on success or a negative errno; on
 * failure every step taken so far is undone via the fall-through labels
 * at the bottom.
 */
static int msm_csid_init(struct csid_device *csid_dev, uint32_t *csid_version)
{
	int rc = 0;

	if (!csid_version) {
		pr_err("%s:%d csid_version NULL\n", __func__, __LINE__);
		rc = -EINVAL;
		return rc;
	}

	/* Refuse re-initialisation while already powered up. */
	if (csid_dev->csid_state == CSID_POWER_UP) {
		pr_err("%s: csid invalid state %d\n", __func__,
			csid_dev->csid_state);
		rc = -EINVAL;
		return rc;
	}

	csid_dev->base = ioremap(csid_dev->mem->start,
		resource_size(csid_dev->mem));
	if (!csid_dev->base) {
		pr_err("%s csid_dev->base NULL\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	pr_info("%s: CSID_VERSION = 0x%x\n", __func__,
		csid_dev->ctrl_reg->csid_reg.csid_version);
	/* power up */
	/* Pre-V22 (8960-class) targets use a different regulator table. */
	if (csid_dev->ctrl_reg->csid_reg.csid_version < CSID_VERSION_V22) {
		rc = msm_camera_config_vreg(&csid_dev->pdev->dev,
			csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
			NULL, 0, &csid_dev->csi_vdd, 1);
	} else {
		rc = msm_camera_config_vreg(&csid_dev->pdev->dev,
			csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
			NULL, 0, &csid_dev->csi_vdd, 1);
	}
	if (rc < 0) {
		pr_err("%s: regulator on failed\n", __func__);
		goto vreg_config_failed;
	}

	if (csid_dev->ctrl_reg->csid_reg.csid_version < CSID_VERSION_V22) {
		rc = msm_camera_enable_vreg(&csid_dev->pdev->dev,
			csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
			NULL, 0, &csid_dev->csi_vdd, 1);
	} else {
		rc = msm_camera_enable_vreg(&csid_dev->pdev->dev,
			csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
			NULL, 0, &csid_dev->csi_vdd, 1);
	}
	if (rc < 0) {
		pr_err("%s: regulator enable failed\n", __func__);
		goto vreg_enable_failed;
	}

	/* V22 parts select a clock source before enabling the clocks. */
	if (csid_dev->ctrl_reg->csid_reg.csid_version == CSID_VERSION_V22)
		msm_cam_clk_sel_src(&csid_dev->pdev->dev,
			&csid_clk_info[3], csid_clk_src_info,
			csid_dev->num_clk_src_info);

	rc = msm_cam_clk_enable(&csid_dev->pdev->dev, csid_clk_info,
		csid_dev->csid_clk, csid_dev->num_clk, 1);
	if (rc < 0) {
		pr_err("%s:%d clock enable failed\n", __func__, __LINE__);
		goto clk_enable_failed;
	}
	CDBG("%s:%d called\n", __func__, __LINE__);

	csid_dev->hw_version =
		msm_camera_io_r(csid_dev->base +
			csid_dev->ctrl_reg->csid_reg.csid_hw_version_addr);
	CDBG("%s:%d called csid_dev->hw_version %x\n", __func__, __LINE__,
		csid_dev->hw_version);
	*csid_version = csid_dev->hw_version;

	init_completion(&csid_dev->reset_complete);

	enable_irq(csid_dev->irq->start);

	msm_csid_reset(csid_dev);
	csid_dev->csid_state = CSID_POWER_UP;
	return rc;

	/* Error unwinding: each label falls through, undoing the earlier
	 * steps in reverse order. */
clk_enable_failed:
	if (csid_dev->ctrl_reg->csid_reg.csid_version < CSID_VERSION_V22) {
		msm_camera_enable_vreg(&csid_dev->pdev->dev,
			csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
			NULL, 0, &csid_dev->csi_vdd, 0);
	} else {
		msm_camera_enable_vreg(&csid_dev->pdev->dev,
			csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
			NULL, 0, &csid_dev->csi_vdd, 0);
	}
vreg_enable_failed:
	if (csid_dev->ctrl_reg->csid_reg.csid_version < CSID_VERSION_V22) {
		msm_camera_config_vreg(&csid_dev->pdev->dev,
			csid_8960_vreg_info, ARRAY_SIZE(csid_8960_vreg_info),
			NULL, 0, &csid_dev->csi_vdd, 0);
	} else {
		msm_camera_config_vreg(&csid_dev->pdev->dev,
			csid_vreg_info, ARRAY_SIZE(csid_vreg_info),
			NULL, 0, &csid_dev->csi_vdd, 0);
	}
vreg_config_failed:
	iounmap(csid_dev->base);
	csid_dev->base = NULL;
	return rc;
}
/*
 * soc_common_drv_pcmcia_probe - register every socket described by @sinfo.
 * @dev:   parent device for the sockets.
 * @ops:   low-level socket operations supplied by the SoC driver.
 * @sinfo: array of sinfo->nskt sockets to bring up.
 *
 * For each socket: claim its resource tree, map its I/O window, apply
 * timings, run the hardware init hook, register with the PCMCIA core,
 * start the poll timer and create the status sysfs file.  Returns 0 on
 * success or a negative errno, with all sockets torn down on failure.
 */
int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
				struct skt_dev_info *sinfo)
{
	struct soc_pcmcia_socket *skt;
	int ret, i;

	mutex_lock(&soc_pcmcia_sockets_lock);

	for (i = 0; i < sinfo->nskt; i++) {
		skt = &sinfo->skt[i];

		skt->socket.ops = &soc_common_pcmcia_operations;
		skt->socket.owner = ops->owner;
		skt->socket.dev.parent = dev;

		init_timer(&skt->poll_timer);
		skt->poll_timer.function = soc_common_pcmcia_poll_event;
		skt->poll_timer.data = (unsigned long)skt;
		skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD;

		skt->dev = dev;
		skt->ops = ops;

		/* Claim the socket's resource tree: parent window first,
		 * then the io/mem/attr children inside it. */
		ret = request_resource(&iomem_resource, &skt->res_skt);
		if (ret)
			goto out_err_1;

		ret = request_resource(&skt->res_skt, &skt->res_io);
		if (ret)
			goto out_err_2;

		ret = request_resource(&skt->res_skt, &skt->res_mem);
		if (ret)
			goto out_err_3;

		ret = request_resource(&skt->res_skt, &skt->res_attr);
		if (ret)
			goto out_err_4;

		skt->virt_io = ioremap(skt->res_io.start, 0x10000);
		if (skt->virt_io == NULL) {
			ret = -ENOMEM;
			goto out_err_5;
		}

		/* First socket ever registered: hook up cpufreq events. */
		if (list_empty(&soc_pcmcia_sockets))
			soc_pcmcia_cpufreq_register();

		list_add(&skt->node, &soc_pcmcia_sockets);

		ops->set_timing(skt);

		ret = ops->hw_init(skt);
		if (ret)
			goto out_err_6;

		skt->socket.features = SS_CAP_STATIC_MAP|SS_CAP_PCCARD;
		skt->socket.resource_ops = &pccard_static_ops;
		skt->socket.irq_mask = 0;
		skt->socket.map_size = PAGE_SIZE;
		skt->socket.pci_irq = skt->irq;
		skt->socket.io_offset = (unsigned long)skt->virt_io;

		skt->status = soc_common_pcmcia_skt_state(skt);

		ret = pcmcia_register_socket(&skt->socket);
		if (ret)
			goto out_err_7;

		WARN_ON(skt->socket.sock != i);

		add_timer(&skt->poll_timer);

		ret = device_create_file(&skt->socket.dev, &dev_attr_status);
		if (ret)
			goto out_err_8;
	}

	dev_set_drvdata(dev, sinfo);
	ret = 0;
	goto out;

	/*
	 * Error unwinding.  The goto targets sit inside this do/while:
	 * the failing socket @i is unwound from its point of failure,
	 * then each earlier socket is unwound completely from the top of
	 * the loop body.
	 * NOTE(review): with "while (i > 0)", a failure on socket >= 1
	 * appears to leave socket 0 fully initialised but never unwound
	 * — verify against upstream before relying on this path.
	 */
	do {
		skt = &sinfo->skt[i];

		device_remove_file(&skt->socket.dev, &dev_attr_status);
 out_err_8:
		del_timer_sync(&skt->poll_timer);
		pcmcia_unregister_socket(&skt->socket);
 out_err_7:
		flush_scheduled_work();

		ops->hw_shutdown(skt);
 out_err_6:
		list_del(&skt->node);
		iounmap(skt->virt_io);
 out_err_5:
		release_resource(&skt->res_attr);
 out_err_4:
		release_resource(&skt->res_mem);
 out_err_3:
		release_resource(&skt->res_io);
 out_err_2:
		release_resource(&skt->res_skt);
 out_err_1:
		i--;
	} while (i > 0);

	kfree(sinfo);

 out:
	mutex_unlock(&soc_pcmcia_sockets_lock);
	return ret;
}
/*
 * at91_cf_probe - probe the AT91 CompactFlash socket.
 *
 * Requires platform data with valid card-detect and reset GPIOs plus a
 * memory resource for the CF window.  Claims the GPIOs (VCC and IRQ
 * pins are optional), maps the I/O window and registers the PCMCIA
 * socket.  Returns 0 on success or a negative errno; the labelled
 * error ladder (including labels nested inside the conditional blocks)
 * releases exactly what had been acquired.
 */
static int __init at91_cf_probe(struct platform_device *pdev)
{
	struct at91_cf_socket *cf;
	struct at91_cf_data *board = pdev->dev.platform_data;
	struct resource *io;
	int status;

	/* Card-detect and reset pins are mandatory. */
	if (!board || !gpio_is_valid(board->det_pin) ||
	    !gpio_is_valid(board->rst_pin))
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -ENODEV;

	cf = kzalloc(sizeof *cf, GFP_KERNEL);
	if (!cf)
		return -ENOMEM;

	cf->board = board;
	cf->pdev = pdev;
	cf->phys_baseaddr = io->start;
	platform_set_drvdata(pdev, cf);

	/* Card-detect GPIO + interrupt; also used as a wakeup source. */
	status = gpio_request(board->det_pin, "cf_det");
	if (status < 0)
		goto fail0;
	status = request_irq(gpio_to_irq(board->det_pin), at91_cf_irq, 0,
			     driver_name, cf);
	if (status < 0)
		goto fail00;
	device_init_wakeup(&pdev->dev, 1);

	status = gpio_request(board->rst_pin, "cf_rst");
	if (status < 0)
		goto fail0a;

	/* Optional power-control pin. */
	if (gpio_is_valid(board->vcc_pin)) {
		status = gpio_request(board->vcc_pin, "cf_vcc");
		if (status < 0)
			goto fail0b;
	}

	/* Optional card IRQ pin; without one, report an unusable IRQ
	 * number (nr_irqs + 1) to the PCMCIA core. */
	if (gpio_is_valid(board->irq_pin)) {
		status = gpio_request(board->irq_pin, "cf_irq");
		if (status < 0)
			goto fail0c;
		status = request_irq(gpio_to_irq(board->irq_pin), at91_cf_irq,
				     IRQF_SHARED, driver_name, cf);
		if (status < 0)
			goto fail0d;
		cf->socket.pci_irq = gpio_to_irq(board->irq_pin);
	} else
		cf->socket.pci_irq = nr_irqs + 1;

	cf->socket.io_offset = (unsigned long)
		ioremap(cf->phys_baseaddr + CF_IO_PHYS, SZ_2K);
	if (!cf->socket.io_offset) {
		status = -ENXIO;
		goto fail1;
	}

	if (!request_mem_region(io->start, resource_size(io), driver_name)) {
		status = -ENXIO;
		goto fail1;
	}

	pr_info("%s: irqs det #%d, io #%d\n", driver_name,
		gpio_to_irq(board->det_pin), gpio_to_irq(board->irq_pin));

	cf->socket.owner = THIS_MODULE;
	cf->socket.dev.parent = &pdev->dev;
	cf->socket.ops = &at91_cf_ops;
	cf->socket.resource_ops = &pccard_static_ops;
	cf->socket.features = SS_CAP_PCCARD | SS_CAP_STATIC_MAP |
		SS_CAP_MEM_ALIGN;
	cf->socket.map_size = SZ_2K;
	cf->socket.io[0].res = io;

	status = pcmcia_register_socket(&cf->socket);
	if (status < 0)
		goto fail2;

	return 0;

	/* Unwind ladder; note fail0d sits inside the irq_pin block so the
	 * irq-pin GPIO is freed only when it was actually requested. */
fail2:
	release_mem_region(io->start, resource_size(io));
fail1:
	if (cf->socket.io_offset)
		iounmap((void __iomem *) cf->socket.io_offset);
	if (gpio_is_valid(board->irq_pin)) {
		free_irq(gpio_to_irq(board->irq_pin), cf);
fail0d:
		gpio_free(board->irq_pin);
	}
fail0c:
	if (gpio_is_valid(board->vcc_pin))
		gpio_free(board->vcc_pin);
fail0b:
	gpio_free(board->rst_pin);
fail0a:
	device_init_wakeup(&pdev->dev, 0);
	free_irq(gpio_to_irq(board->det_pin), cf);
fail00:
	gpio_free(board->det_pin);
fail0:
	kfree(cf);
	return status;
}
static int uec_mdio_probe(struct of_device *ofdev, const struct of_device_id *match) { struct device *device = &ofdev->dev; struct device_node *np = ofdev->node, *tempnp = NULL; struct device_node *child = NULL; struct ucc_mii_mng __iomem *regs; struct mii_bus *new_bus; struct resource res; int k, err = 0; new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); if (NULL == new_bus) return -ENOMEM; new_bus->name = "UCC Ethernet Controller MII Bus"; new_bus->read = &uec_mdio_read; new_bus->write = &uec_mdio_write; new_bus->reset = &uec_mdio_reset; memset(&res, 0, sizeof(res)); err = of_address_to_resource(np, 0, &res); if (err) goto reg_map_fail; snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL); if (NULL == new_bus->irq) { err = -ENOMEM; goto reg_map_fail; } for (k = 0; k < 32; k++) new_bus->irq[k] = PHY_POLL; while ((child = of_get_next_child(np, child)) != NULL) { int irq = irq_of_parse_and_map(child, 0); if (irq != NO_IRQ) { const u32 *id = of_get_property(child, "reg", NULL); new_bus->irq[*id] = irq; } } /* Set the base address */ regs = ioremap(res.start, sizeof(struct ucc_mii_mng)); if (NULL == regs) { err = -ENOMEM; goto ioremap_fail; } new_bus->priv = (void __force *)regs; new_bus->dev = device; dev_set_drvdata(device, new_bus); /* Read MII management master from device tree */ while ((tempnp = of_find_compatible_node(tempnp, "network", "ucc_geth")) != NULL) { struct resource tempres; err = of_address_to_resource(tempnp, 0, &tempres); if (err) goto bus_register_fail; /* if our mdio regs fall within this UCC regs range */ if ((res.start >= tempres.start) && (res.end <= tempres.end)) { /* set this UCC to be the MII master */ const u32 *id; id = of_get_property(tempnp, "cell-index", NULL); if (!id) { id = of_get_property(tempnp, "device-id", NULL); if (!id) goto bus_register_fail; } ucc_set_qe_mux_mii_mng(*id - 1); /* assign the TBI an address which won't * conflict with the PHYs */ 
out_be32(®s->utbipar, UTBIPAR_INIT_TBIPA); break; } } err = mdiobus_register(new_bus); if (0 != err) { printk(KERN_ERR "%s: Cannot register as MDIO bus\n", new_bus->name); goto bus_register_fail; } return 0; bus_register_fail: iounmap(regs); ioremap_fail: kfree(new_bus->irq); reg_map_fail: kfree(new_bus); return err; }
/*
 * msm_csid_init - power up a CSID core and report its hardware version.
 * @csid_dev:     per-device CSID state.
 * @csid_version: out parameter for the HW version register value.
 *
 * Maps the register block, turns on regulators and clocks (clock setup
 * varies by CSID_VERSION: 8960, 8610 or 8974 tables), reads the HW
 * version, enables the IRQ and resets the core.  Returns 0 or a
 * negative errno with full rollback.
 *
 * Fixes over the previous version:
 *  - removed the dead local `cam_vreg`: it selected the 8960 vs generic
 *    regulator table but was never used — every call passed
 *    csid_vreg_info directly.  That existing behaviour is preserved
 *    (NOTE(review): if the 8960 table was intended on V20, that is a
 *    separate behavioural change to be confirmed against hardware).
 *  - the regulator config/enable calls and the error-path branches,
 *    which were textually identical in every version arm, are written
 *    once instead of duplicated.
 */
static int msm_csid_init(struct csid_device *csid_dev, uint32_t *csid_version)
{
	int rc = 0;

	if (!csid_version) {
		pr_err("%s:%d csid_version NULL\n", __func__, __LINE__);
		rc = -EINVAL;
		return rc;
	}

	if (csid_dev->csid_state == CSID_POWER_UP) {
		pr_err("%s: csid invalid state %d\n", __func__,
			csid_dev->csid_state);
		rc = -EINVAL;
		return rc;
	}

	csid_dev->base = ioremap(csid_dev->mem->start,
		resource_size(csid_dev->mem));
	if (!csid_dev->base) {
		pr_err("%s csid_dev->base NULL\n", __func__);
		rc = -ENOMEM;
		return rc;
	}

	/* Regulator handling is identical for every version; only the
	 * clock setup below differs. */
	rc = msm_camera_config_vreg(&csid_dev->pdev->dev, csid_vreg_info,
		ARRAY_SIZE(csid_vreg_info), NULL, 0, &csid_dev->csi_vdd, 1);
	if (rc < 0) {
		pr_err("%s: regulator on failed\n", __func__);
		goto vreg_config_failed;
	}

	rc = msm_camera_enable_vreg(&csid_dev->pdev->dev, csid_vreg_info,
		ARRAY_SIZE(csid_vreg_info), NULL, 0, &csid_dev->csi_vdd, 1);
	if (rc < 0) {
		pr_err("%s: regulator enable failed\n", __func__);
		goto vreg_enable_failed;
	}

	if (CSID_VERSION < CSID_VERSION_V30) {
		if (CSID_VERSION == CSID_VERSION_V20) {
			rc = msm_cam_clk_enable(&csid_dev->pdev->dev,
				csid_8960_clk_info, csid_dev->csid_clk,
				ARRAY_SIZE(csid_8960_clk_info), 1);
			if (rc < 0) {
				pr_err("%s: 8960: clock enable failed\n",
					__func__);
				goto clk_enable_failed;
			}
		} else {
			/* 8610 parts select a clock source first. */
			msm_cam_clk_sel_src(&csid_dev->pdev->dev,
				&csid_8610_clk_info[3], csid_8610_clk_src_info,
				ARRAY_SIZE(csid_8610_clk_src_info));
			rc = msm_cam_clk_enable(&csid_dev->pdev->dev,
				csid_8610_clk_info, csid_dev->csid_clk,
				ARRAY_SIZE(csid_8610_clk_info), 1);
			if (rc < 0) {
				pr_err("%s: 8610: clock enable failed\n",
					__func__);
				goto clk_enable_failed;
			}
		}
	} else {
		rc = msm_cam_clk_enable(&csid_dev->pdev->dev,
			csid_8974_clk_info, csid_dev->csid_clk,
			ARRAY_SIZE(csid_8974_clk_info), 1);
		if (rc < 0) {
			pr_err("%s: clock enable failed\n", __func__);
			goto clk_enable_failed;
		}
	}
	CDBG("%s:%d called\n", __func__, __LINE__);

	csid_dev->hw_version =
		msm_camera_io_r(csid_dev->base + CSID_HW_VERSION_ADDR);
	CDBG("%s:%d called csid_dev->hw_version %x\n", __func__, __LINE__,
		csid_dev->hw_version);
	*csid_version = csid_dev->hw_version;

	init_completion(&csid_dev->reset_complete);

	enable_irq(csid_dev->irq->start);

	msm_csid_reset(csid_dev);
	csid_dev->csid_state = CSID_POWER_UP;
	return rc;

	/* Rollback: labels fall through, undoing steps in reverse. */
clk_enable_failed:
	msm_camera_enable_vreg(&csid_dev->pdev->dev, csid_vreg_info,
		ARRAY_SIZE(csid_vreg_info), NULL, 0, &csid_dev->csi_vdd, 0);
vreg_enable_failed:
	msm_camera_config_vreg(&csid_dev->pdev->dev, csid_vreg_info,
		ARRAY_SIZE(csid_vreg_info), NULL, 0, &csid_dev->csi_vdd, 0);
vreg_config_failed:
	iounmap(csid_dev->base);
	csid_dev->base = NULL;
	return rc;
}
int gfar_mdio_probe(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct gianfar_mdio_data *pdata; struct gfar_mii __iomem *regs; struct mii_bus *new_bus; struct resource *r; int err = 0; if (NULL == dev) return -EINVAL; new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL); if (NULL == new_bus) return -ENOMEM; new_bus->name = "Gianfar MII Bus", new_bus->read = &gfar_mdio_read, new_bus->write = &gfar_mdio_write, new_bus->reset = &gfar_mdio_reset, new_bus->id = pdev->id; pdata = (struct gianfar_mdio_data *)pdev->dev.platform_data; if (NULL == pdata) { printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id); return -ENODEV; } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); /* Set the PHY base address */ regs = ioremap(r->start, sizeof (struct gfar_mii)); if (NULL == regs) { err = -ENOMEM; goto reg_map_fail; } new_bus->priv = (void __force *)regs; new_bus->irq = pdata->irq; new_bus->dev = dev; dev_set_drvdata(dev, new_bus); err = mdiobus_register(new_bus); if (0 != err) { printk (KERN_ERR "%s: Cannot register as MDIO bus\n", new_bus->name); goto bus_register_fail; } return 0; bus_register_fail: iounmap(regs); reg_map_fail: kfree(new_bus); return err; }
/*
 * init_ddr_settings - prepare i.MX50 DDR frequency-change support.
 *
 * Detects the memory type (LPDDR2 vs mDDR) from the DataBahn
 * controller, snapshots the controller settings, copies the frequency
 * change and WFI routines into executable iRAM, enables the QoSC and
 * programs automatic low-power mode 4.  Runs at init time; on any
 * failure it logs and returns early, leaving the feature disabled.
 */
void init_ddr_settings(void)
{
	unsigned long iram_paddr;
	unsigned int reg;
	int i;
	struct clk *ddr_clk = clk_get(NULL, "ddr_clk");

	databahn_base = ioremap(MX50_DATABAHN_BASE_ADDR, SZ_16K);

	/* Find the memory type, LPDDR2 or mddr. */
	mx50_ddr_type = __raw_readl(databahn_base) & 0xF00;
	if (mx50_ddr_type == MX50_LPDDR2) {
		normal_databahn_settings = lpddr2_databhan_regs_offsets;
		ddr_settings_size = ARRAY_SIZE(lpddr2_databhan_regs_offsets);
	} else if (mx50_ddr_type == MX50_MDDR) {
		normal_databahn_settings = mddr_databhan_regs_offsets;
		ddr_settings_size = ARRAY_SIZE(mddr_databhan_regs_offsets);
	} else {
		printk(KERN_DEBUG "%s: Unsupported memory type\n", __func__);
		return;
	}

	/* Copy the databhan settings into the iram location. */
	/* Snapshot current register values next to their offsets so the
	 * iRAM code can restore them after a frequency change. */
	for (i = 0; i < ddr_settings_size; i++) {
		normal_databahn_settings[i][1] =
			__raw_readl(databahn_base
			+ normal_databahn_settings[i][0]);
	}
	/* Store the size of the array in iRAM also,
	 * increase the size by 8 bytes. */
	/* NOTE(review): ddr_settings_size is an entry count, not bytes;
	 * confirm iram_alloc() expects this unit. */
	iram_ddr_settings = iram_alloc(ddr_settings_size + 8, &iram_paddr);
	if (iram_ddr_settings == NULL) {
		printk(KERN_DEBUG
			"%s: failed to allocate iRAM memory for ddr settings\n",
			__func__);
		return;
	}

	/* Allocate IRAM for the DDR freq change code. */
	iram_alloc(SZ_8K, &iram_paddr);
	/* Need to remap the area here since we want the memory region
	   to be executable. */
	ddr_freq_change_iram_base = __arm_ioremap(iram_paddr,
						SZ_8K, MT_HIGH_VECTORS);
	memcpy(ddr_freq_change_iram_base, mx50_ddr_freq_change, SZ_8K);
	change_ddr_freq = (void *)ddr_freq_change_iram_base;

	qosc_base = ioremap(QOSC_BASE_ADDR, SZ_4K);
	/* Enable the QoSC */
	reg = __raw_readl(qosc_base);
	reg &= ~0xC0000000;
	__raw_writel(reg, qosc_base);

	/* Allocate IRAM to run the WFI code from iram, since
	 * we can turn off the DDR clocks when ARM is in WFI.
	 */
	iram_alloc(SZ_4K, &iram_paddr);
	/* Need to remap the area here since we want the memory region
	   to be executable. */
	wait_in_iram_base = __arm_ioremap(iram_paddr,
					SZ_4K, MT_HIGH_VECTORS);
	memcpy(wait_in_iram_base, mx50_wait, SZ_4K);
	wait_in_iram = (void *)wait_in_iram_base;

	clk_enable(ddr_clk);

	/* Set the DDR to enter automatic self-refresh. */
	/* Set the DDR to automatically enter lower power mode 4. */
	reg = __raw_readl(databahn_base + DATABAHN_CTL_REG22);
	reg &= ~LOWPOWER_AUTOENABLE_MASK;
	reg |= 1 << 1;
	__raw_writel(reg, databahn_base + DATABAHN_CTL_REG22);

	/* set the counter for entering mode 4. */
	/* NOTE(review): "reg =" below discards the value just read and
	 * masked from REG21; "reg |=" may have been intended — confirm
	 * against the DataBahn reference manual. */
	reg = __raw_readl(databahn_base + DATABAHN_CTL_REG21);
	reg &= ~LOWPOWER_EXTERNAL_CNT_MASK;
	reg = 128 << LOWPOWER_EXTERNAL_CNT_OFFSET;
	__raw_writel(reg, databahn_base + DATABAHN_CTL_REG21);

	/* Enable low power mode 4 */
	reg = __raw_readl(databahn_base + DATABAHN_CTL_REG20);
	reg &= ~LOWPOWER_CONTROL_MASK;
	reg |= 1 << 1;
	__raw_writel(reg, databahn_base + DATABAHN_CTL_REG20);
	clk_disable(ddr_clk);

	epdc_clk = clk_get(NULL, "epdc_axi");
	if (IS_ERR(epdc_clk)) {
		printk(KERN_DEBUG "%s: failed to get epdc_axi_clk\n",
			__func__);
		return;
	}
}
/*
 * init_autcpu12_sram - map and register the AUTCPU12 NV-RAM as MTD.
 *
 * Maps 128K of NV-RAM space, then probes whether the part is 32K or
 * 128K by writing the complement of offset 0's value at offset 0x10000:
 * on a 32K part that aliases back onto offset 0.  Registers the result
 * as a "map_ram" MTD device.  Returns 0 on success or a negative errno.
 */
static int __init init_autcpu12_sram (void)
{
	int err, save0, save1;

	autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K);
	if (!autcpu12_sram_map.virt) {
		printk("Failed to ioremap autcpu12 NV-RAM space\n");
		err = -EIO;
		goto out;
	}
	/* NOTE(review): "autcpu_sram_map" here differs from the
	 * "autcpu12_sram_map" used everywhere else — verify this is a
	 * distinct map and not a typo. */
	simple_map_init(&autcpu_sram_map);

	/*
	 * Check for 32K/128K
	 * read ofs 0
	 * read ofs 0x10000
	 * Write complement to ofs 0x10000
	 * Read and check result on ofs 0x0
	 * Restore contents
	 */
	save0 = map_read32(&autcpu12_sram_map,0);
	save1 = map_read32(&autcpu12_sram_map,0x10000);
	map_write32(&autcpu12_sram_map,~save0,0x10000);
	/* if we find this pattern on 0x0, we have 32K size
	 * restore contents and exit
	 */
	if ( map_read32(&autcpu12_sram_map,0) != save0) {
		map_write32(&autcpu12_sram_map,save0,0x0);
		goto map;
	}
	/* We have a 128K found, restore 0x10000 and set size
	 * to 128K
	 */
	map_write32(&autcpu12_sram_map,save1,0x10000);
	autcpu12_sram_map.size = SZ_128K;

map:
	sram_mtd = do_map_probe("map_ram", &autcpu12_sram_map);
	if (!sram_mtd) {
		printk("NV-RAM probe failed\n");
		err = -ENXIO;
		goto out_ioremap;
	}

	sram_mtd->owner = THIS_MODULE;
	sram_mtd->erasesize = 16;

	if (add_mtd_device(sram_mtd)) {
		printk("NV-RAM device addition failed\n");
		err = -ENOMEM;
		goto out_probe;
	}

	printk("NV-RAM device size %ldKiB registered on AUTCPU12\n",autcpu12_sram_map.size/SZ_1K);

	return 0;

out_probe:
	map_destroy(sram_mtd);
	/* NOTE(review): NULL would be clearer than 0 for a pointer. */
	sram_mtd = 0;

out_ioremap:
	iounmap((void *)autcpu12_sram_map.virt);
out:
	return err;
}
/*
 * ps3_ehci_probe - bring up the PS3 EHCI host controller.
 *
 * Opens the hypervisor device, creates its DMA and MMIO regions, sets
 * up the I/O interrupt, maps the controller registers and registers
 * the HCD with the USB core.  Returns 0 on success or a negative
 * errno, unwinding through the fail_* ladder.
 */
static int __devinit ps3_ehci_probe(struct ps3_system_bus_device *dev)
{
	int result;
	struct usb_hcd *hcd;
	unsigned int virq;
	static u64 dummy_mask = DMA_BIT_MASK(32);

	if (usb_disabled()) {
		result = -ENODEV;
		goto fail_start;
	}

	result = ps3_open_hv_device(dev);

	if (result) {
		dev_dbg(&dev->core, "%s:%d: ps3_open_hv_device failed\n",
			__func__, __LINE__);
		goto fail_open;
	}

	result = ps3_dma_region_create(dev->d_region);

	if (result) {
		dev_dbg(&dev->core, "%s:%d: ps3_dma_region_create failed: "
			"(%d)\n", __func__, __LINE__, result);
		/* NOTE(review): BUG_ON on a string literal is always
		 * true, so this BUG()s whenever region creation fails —
		 * confirm whether a WARN or plain cleanup was intended. */
		BUG_ON("check region type");
		goto fail_dma_region;
	}

	result = ps3_mmio_region_create(dev->m_region);

	if (result) {
		dev_dbg(&dev->core, "%s:%d: ps3_map_mmio_region failed\n",
			__func__, __LINE__);
		result = -EPERM;
		goto fail_mmio_region;
	}

	dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__,
		__LINE__, dev->m_region->lpar_addr);

	result = ps3_io_irq_setup(PS3_BINDING_CPU_ANY, dev->interrupt_id, &virq);

	if (result) {
		dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n",
			__func__, __LINE__, virq);
		result = -EPERM;
		goto fail_irq;
	}

	dev->core.dma_mask = &dummy_mask; /* FIXME: for improper usb code */

	hcd = usb_create_hcd(&ps3_ehci_hc_driver, &dev->core, dev_name(&dev->core));

	if (!hcd) {
		dev_dbg(&dev->core, "%s:%d: usb_create_hcd failed\n", __func__,
			__LINE__);
		result = -ENOMEM;
		goto fail_create_hcd;
	}

	hcd->rsrc_start = dev->m_region->lpar_addr;
	hcd->rsrc_len = dev->m_region->len;

	/* Failure here is only logged, not fatal — the LPAR address is
	 * presumably outside the claimed iomem ranges; confirm intent. */
	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name))
		dev_dbg(&dev->core, "%s:%d: request_mem_region failed\n",
			__func__, __LINE__);

	hcd->regs = ioremap(dev->m_region->lpar_addr, dev->m_region->len);

	if (!hcd->regs) {
		dev_dbg(&dev->core, "%s:%d: ioremap failed\n", __func__,
			__LINE__);
		result = -EPERM;
		goto fail_ioremap;
	}

	dev_dbg(&dev->core, "%s:%d: hcd->rsrc_start %lxh\n", __func__, __LINE__,
		(unsigned long)hcd->rsrc_start);
	dev_dbg(&dev->core, "%s:%d: hcd->rsrc_len %lxh\n", __func__, __LINE__,
		(unsigned long)hcd->rsrc_len);
	dev_dbg(&dev->core, "%s:%d: hcd->regs %lxh\n", __func__, __LINE__,
		(unsigned long)hcd->regs);
	dev_dbg(&dev->core, "%s:%d: virq %lu\n", __func__, __LINE__,
		(unsigned long)virq);

	ps3_system_bus_set_drvdata(dev, hcd);

	result = usb_add_hcd(hcd, virq, 0);

	if (result) {
		dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n",
			__func__, __LINE__, result);
		goto fail_add_hcd;
	}

	return result;

	/* Unwind ladder: releases in reverse order of acquisition. */
fail_add_hcd:
	iounmap(hcd->regs);
fail_ioremap:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
	usb_put_hcd(hcd);
fail_create_hcd:
	ps3_io_irq_destroy(virq);
fail_irq:
	ps3_free_mmio_region(dev->m_region);
fail_mmio_region:
	ps3_dma_region_free(dev->d_region);
fail_dma_region:
	ps3_close_hv_device(dev);
fail_open:
fail_start:
	return result;
}
/*
 * aac_init_adapter - negotiate communication parameters with an
 * aacraid adapter.
 * @dev: partially-initialised adapter state (regs already mapped).
 *
 * Queries GET_ADAPTER_PROPERTIES to enable 64-bit raw I/O and the new
 * comm interface (re-mapping a larger register window if the adapter
 * asks for one), then GET_COMM_PREFERRED_SETTINGS to size FIBs, SG
 * tables and queue depth.  The module parameter `acbsize` can override
 * those preferred settings.  Returns NULL only if remapping the
 * registers fails.
 */
struct aac_dev *aac_init_adapter(struct aac_dev *dev)
{
	u32 status[5];
	struct Scsi_Host * host = dev->scsi_host_ptr;

	/*
	 * Check the preferred comm settings, defaults from template.
	 */
	dev->max_fib_size = sizeof(struct hw_fib);
	dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
		- sizeof(struct aac_fibhdr)
		- sizeof(struct aac_write) + sizeof(struct sgentry))
			/ sizeof(struct sgentry);
	dev->new_comm_interface = 0;
	dev->raw_io_64 = 0;
	/* status[0] == 1 indicates the sync command itself succeeded. */
	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
		0, 0, 0, 0, 0, 0, status+0, status+1, status+2, NULL, NULL)) &&
			(status[0] == 0x00000001)) {
		if (status[1] & AAC_OPT_NEW_COMM_64)
			dev->raw_io_64 = 1;
		/* New comm also requires an adapter_send op to exist. */
		if (status[1] & AAC_OPT_NEW_COMM)
			dev->new_comm_interface = dev->a_ops.adapter_send != 0;
		/* Adapter wants a larger register window: remap it. */
		if (dev->new_comm_interface && (status[2] > dev->base_size)) {
			iounmap(dev->regs.sa);
			dev->base_size = status[2];
			dprintk((KERN_DEBUG "ioremap(%lx,%d)\n",
				host->base, status[2]));
			dev->regs.sa = ioremap(host->base, status[2]);
			if (dev->regs.sa == NULL) {
				/* remap failed, go back ... */
				dev->new_comm_interface = 0;
				dev->regs.sa = ioremap(host->base,
					AAC_MIN_FOOTPRINT_SIZE);
				if (dev->regs.sa == NULL) {
					printk(KERN_WARNING
					  "aacraid: unable to map adapter.\n");
					return NULL;
				}
			}
		}
	}
	if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
	  0, 0, 0, 0, 0, 0,
	  status+0, status+1, status+2, status+3, status+4))
	 && (status[0] == 0x00000001)) {
		/*
		 *	status[1] >> 16		maximum command size in KB
		 *	status[1] & 0xFFFF	maximum FIB size
		 *	status[2] >> 16		maximum SG elements to driver
		 *	status[2] & 0xFFFF	maximum SG elements from driver
		 *	status[3] & 0xFFFF	maximum number FIBs outstanding
		 */
		host->max_sectors = (status[1] >> 16) << 1;
		dev->max_fib_size = status[1] & 0xFFFF;
		host->sg_tablesize = status[2] >> 16;
		dev->sg_tablesize = status[2] & 0xFFFF;
		host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
		/*
		 *	NOTE:
		 *	All these overrides are based on a fixed internal
		 *	knowledge and understanding of existing adapters,
		 *	acbsize should be set with caution.
		 */
		if (acbsize == 512) {
			host->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
			dev->max_fib_size = 512;
			dev->sg_tablesize = host->sg_tablesize
			  = (512 - sizeof(struct aac_fibhdr)
			    - sizeof(struct aac_write) + sizeof(struct sgentry))
			      / sizeof(struct sgentry);
			host->can_queue = AAC_NUM_IO_FIB;
		} else if (acbsize == 2048) {
			host->max_sectors = 512;
			dev->max_fib_size = 2048;
			host->sg_tablesize = 65;
			dev->sg_tablesize = 81;
			host->can_queue = 512 - AAC_NUM_MGT_FIB;
		} else if (acbsize == 4096) {
			host->max_sectors = 1024;
			dev->max_fib_size = 4096;
			host->sg_tablesize = 129;
			dev->sg_tablesize = 166;
			host->can_queue = 256 - AAC_NUM_MGT_FIB;
		} else if (acbsize == 8192) {
			host->max_sectors = 2048;
			dev->max_fib_size = 8192;
			host->sg_tablesize = 257;
			dev->sg_tablesize = 337;
			host->can_queue = 128 - AAC_NUM_MGT_FIB;
		} else if (acbsize > 0) {
			printk("Illegal acbsize=%d ignored\n", acbsize);
		}
	}
/*
 * msm_rng_probe - probe the MSM PRNG hardware.
 *
 * Maps the PRNG registers, acquires its clock, enables the hardware,
 * registers with the hwrng framework and exposes a /dev/msm-rng char
 * device.  Returns 0 on success or a negative errno.
 *
 * Fixes over the previous version:
 *  - register_chrdev() result was ignored; it is now checked.
 *  - class_create() failure returned immediately, leaking the hwrng
 *    registration, chardev, clock, mapping and allocations; it now
 *    unwinds through the full ladder.
 *  - device_create() failure left the class and the hwrng registered;
 *    class_destroy() and hwrng_unregister() are now on the path.
 */
static int __devinit msm_rng_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct msm_rng_device *msm_rng_dev = NULL;
	void __iomem *base = NULL;
	int error = 0;
	int ret = 0;
	struct device *dev;
	struct msm_bus_scale_pdata *qrng_platform_support = NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "invalid address\n");
		error = -EFAULT;
		goto err_exit;
	}

	msm_rng_dev = kzalloc(sizeof(struct msm_rng_device), GFP_KERNEL);
	if (!msm_rng_dev) {
		dev_err(&pdev->dev, "cannot allocate memory\n");
		error = -ENOMEM;
		goto err_exit;
	}

	base = ioremap(res->start, resource_size(res));
	if (!base) {
		dev_err(&pdev->dev, "ioremap failed\n");
		error = -ENOMEM;
		goto err_iomap;
	}
	msm_rng_dev->base = base;

	msm_rng_dev->drbg_ctx = kzalloc(sizeof(struct fips_drbg_ctx_s),
					GFP_KERNEL);
	if (!msm_rng_dev->drbg_ctx) {
		dev_err(&pdev->dev, "cannot allocate memory\n");
		error = -ENOMEM;
		goto err_clk_get;
	}

	/* create a handle for clock control */
	if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
					"qcom,msm-rng-iface-clk")))
		msm_rng_dev->prng_clk = clk_get(&pdev->dev, "iface_clk");
	else
		msm_rng_dev->prng_clk = clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(msm_rng_dev->prng_clk)) {
		dev_err(&pdev->dev, "failed to register clock source\n");
		error = -EPERM;
		goto err_clk_get;
	}

	/* save away pdev and register driver data */
	msm_rng_dev->pdev = pdev;
	platform_set_drvdata(pdev, msm_rng_dev);

	if (pdev->dev.of_node) {
		/* Register bus client; failure is non-fatal by design. */
		qrng_platform_support = msm_bus_cl_get_pdata(pdev);
		msm_rng_dev->qrng_perf_client = msm_bus_scale_register_client(
						qrng_platform_support);
		msm_rng_device_info.qrng_perf_client =
					msm_rng_dev->qrng_perf_client;
		if (!msm_rng_dev->qrng_perf_client)
			pr_err("Unable to register bus client\n");
	}

	/* Enable rng h/w */
	error = msm_rng_enable_hw(msm_rng_dev);
	if (error)
		goto rollback_clk;

	/* register with hwrng framework */
	msm_rng.priv = (unsigned long) msm_rng_dev;
	error = hwrng_register(&msm_rng);
	if (error) {
		dev_err(&pdev->dev, "failed to register hwrng\n");
		error = -EPERM;
		goto rollback_clk;
	}

	/* FIX: result was previously ignored */
	ret = register_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME, &msm_rng_fops);
	if (ret < 0) {
		error = ret;
		goto unregister_hwrng;
	}

	msm_rng_class = class_create(THIS_MODULE, "msm-rng");
	if (IS_ERR(msm_rng_class)) {
		pr_err("class_create failed\n");
		error = PTR_ERR(msm_rng_class);
		goto unregister_chrdev;	/* FIX: returned without cleanup */
	}

	dev = device_create(msm_rng_class, NULL, MKDEV(QRNG_IOC_MAGIC, 0),
			    NULL, "msm-rng");
	if (IS_ERR(dev)) {
		pr_err("Device create failed\n");
		error = PTR_ERR(dev);
		goto destroy_class;	/* FIX: class was left behind */
	}

	cdev_init(&msm_rng_cdev, &msm_rng_fops);
	sema_init(&msm_rng_dev->drbg_sem, 1);
	_first_msm_drbg_init(msm_rng_dev);

	return error;

destroy_class:
	class_destroy(msm_rng_class);
unregister_chrdev:
	unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
unregister_hwrng:
	hwrng_unregister(&msm_rng);	/* FIX: was left registered */
rollback_clk:
	clk_put(msm_rng_dev->prng_clk);
err_clk_get:
	iounmap(msm_rng_dev->base);
err_iomap:
	kzfree(msm_rng_dev->drbg_ctx);
	kzfree(msm_rng_dev);
err_exit:
	return error;
}
static int mpc83xx_wdt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; static struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING, .firmware_version = 1, .identity = "MPC83xx", }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_KEEPALIVE: mpc83xx_wdt_keepalive(); return 0; case WDIOC_GETTIMEOUT: return put_user(timeout_sec, p); default: return -ENOTTY; } } static const struct file_operations mpc83xx_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = mpc83xx_wdt_write, .ioctl = mpc83xx_wdt_ioctl, .open = mpc83xx_wdt_open, .release = mpc83xx_wdt_release, }; static struct miscdevice mpc83xx_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &mpc83xx_wdt_fops, }; static int __devinit mpc83xx_wdt_probe(struct platform_device *dev) { struct resource *r; int ret; unsigned int *freq = dev->dev.platform_data; /* get a pointer to the register memory */ r = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!r) { ret = -ENODEV; goto err_out; } wd_base = ioremap(r->start, sizeof (struct mpc83xx_wdt)); if (wd_base == NULL) { ret = -ENOMEM; goto err_out; } ret = misc_register(&mpc83xx_wdt_miscdev); if (ret) { printk(KERN_ERR "cannot register miscdev on minor=%d " "(err=%d)\n", WATCHDOG_MINOR, ret); goto err_unmap; } /* Calculate the timeout in seconds */ if (prescale) timeout_sec = (timeout * 0x10000) / (*freq); else timeout_sec = timeout / (*freq); printk(KERN_INFO "WDT driver for MPC83xx initialized. " "mode:%s timeout=%d (%d seconds)\n", reset ? 
"reset":"interrupt", timeout, timeout_sec); spin_lock_init(&wdt_spinlock); return 0; err_unmap: iounmap(wd_base); err_out: return ret; } static int __devexit mpc83xx_wdt_remove(struct platform_device *dev) { misc_deregister(&mpc83xx_wdt_miscdev); iounmap(wd_base); return 0; } static struct platform_driver mpc83xx_wdt_driver = { .probe = mpc83xx_wdt_probe, .remove = __devexit_p(mpc83xx_wdt_remove), .driver = { .name = "mpc83xx_wdt", }, }; static int __init mpc83xx_wdt_init(void) { return platform_driver_register(&mpc83xx_wdt_driver); } static void __exit mpc83xx_wdt_exit(void) { platform_driver_unregister(&mpc83xx_wdt_driver); }
/*
 * fsg_init - board initialization for the Freecom FSG-3
 *
 * Configures the expansion-bus chip selects, registers the on-board
 * platform devices, hooks the reset and power button GPIO interrupts,
 * and reads the two Ethernet MAC addresses out of the RedBoot config
 * area in flash (stored big-endian).
 */
static void __init fsg_init(void)
{
	uint8_t __iomem *f;

	ixp4xx_sys_init();

	fsg_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
	fsg_flash_resource.end =
		IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;

	*IXP4XX_EXP_CS0 |= IXP4XX_FLASH_WRITABLE;
	*IXP4XX_EXP_CS1 = *IXP4XX_EXP_CS0;

	/* Configure CS2 for operation, 8bit and writable */
	*IXP4XX_EXP_CS2 = 0xbfff0002;

	i2c_register_board_info(0, fsg_i2c_board_info,
				ARRAY_SIZE(fsg_i2c_board_info));

	/* This is only useful on a modified machine, but it is valuable
	 * to have it first in order to see debug messages, and so that
	 * it does *not* get removed if platform_add_devices fails!
	 */
	(void)platform_device_register(&fsg_uart);

	platform_add_devices(fsg_devices, ARRAY_SIZE(fsg_devices));

	if (request_irq(gpio_to_irq(FSG_RB_GPIO), &fsg_reset_handler,
			IRQF_DISABLED | IRQF_TRIGGER_LOW,
			"FSG reset button", NULL) < 0) {
		printk(KERN_DEBUG "Reset Button IRQ %d not available\n",
			gpio_to_irq(FSG_RB_GPIO));
	}

	if (request_irq(gpio_to_irq(FSG_SB_GPIO), &fsg_power_handler,
			IRQF_DISABLED | IRQF_TRIGGER_LOW,
			"FSG power button", NULL) < 0) {
		printk(KERN_DEBUG "Power Button IRQ %d not available\n",
			gpio_to_irq(FSG_SB_GPIO));
	}

	/*
	 * Map in a portion of the flash and read the MAC addresses.
	 * Since it is stored in BE in the flash itself, we need to
	 * byteswap it if we're in LE mode.
	 */
	f = ioremap(IXP4XX_EXP_BUS_BASE(0), 0x400000);
	if (f) {
#ifdef __ARMEB__
		int i;
		for (i = 0; i < 6; i++) {
			fsg_plat_eth[0].hwaddr[i] = readb(f + 0x3C0422 + i);
			fsg_plat_eth[1].hwaddr[i] = readb(f + 0x3C043B + i);
		}
#else
		/*
		 * In LE mode bytes are swapped within each 32-bit word of
		 * the mapping, so each big-endian byte address must be
		 * XORed with 3.  Port 0 corresponds to BE 0x3C0422-0x3C0427,
		 * port 1 to BE 0x3C043B-0x3C0440 (see the __ARMEB__ branch).
		 */
		fsg_plat_eth[0].hwaddr[0] = readb(f + 0x3C0421);
		fsg_plat_eth[0].hwaddr[1] = readb(f + 0x3C0420);
		fsg_plat_eth[0].hwaddr[2] = readb(f + 0x3C0427);
		fsg_plat_eth[0].hwaddr[3] = readb(f + 0x3C0426);
		fsg_plat_eth[0].hwaddr[4] = readb(f + 0x3C0425);
		fsg_plat_eth[0].hwaddr[5] = readb(f + 0x3C0424);

		/* was 0x3C0439: BE 0x3C043B ^ 3 = 0x3C0438, matching the
		 * XOR-3 pattern every other byte here follows */
		fsg_plat_eth[1].hwaddr[0] = readb(f + 0x3C0438);
		fsg_plat_eth[1].hwaddr[1] = readb(f + 0x3C043F);
		fsg_plat_eth[1].hwaddr[2] = readb(f + 0x3C043E);
		fsg_plat_eth[1].hwaddr[3] = readb(f + 0x3C043D);
		fsg_plat_eth[1].hwaddr[4] = readb(f + 0x3C043C);
		fsg_plat_eth[1].hwaddr[5] = readb(f + 0x3C0443);
#endif
		iounmap(f);
	}

	printk(KERN_INFO "FSG: Using MAC address %pM for port 0\n",
		fsg_plat_eth[0].hwaddr);
	printk(KERN_INFO "FSG: Using MAC address %pM for port 1\n",
		fsg_plat_eth[1].hwaddr);
}
/*
 * msm_ispif_init - map, wire up and reset the ISPIF hardware
 * @ispif:        driver state (must not be NULL)
 * @csid_version: CSID hardware revision; >= V30 additionally requires
 *                the CSI clock-mux region to be mapped
 *
 * Maps the register regions, installs the IRQ handler, enables the AHB
 * clock and resets the block.  On success the state advances to
 * ISPIF_POWER_UP.  On any failure, everything acquired up to that point
 * is released again.  Returns 0 or a negative errno.
 */
static int msm_ispif_init(struct ispif_device *ispif,
	uint32_t csid_version)
{
	int rc = 0;

	BUG_ON(!ispif);

	/* Refuse double initialization. */
	if (ispif->ispif_state == ISPIF_POWER_UP) {
		pr_err("%s: ispif already initted state = %d\n", __func__,
			ispif->ispif_state);
		rc = -EPERM;
		return rc;
	}

	/* can we set to zero? */
	ispif->applied_intf_cmd[VFE0].intf_cmd = 0xFFFFFFFF;
	ispif->applied_intf_cmd[VFE0].intf_cmd1 = 0xFFFFFFFF;
	ispif->applied_intf_cmd[VFE1].intf_cmd = 0xFFFFFFFF;
	ispif->applied_intf_cmd[VFE1].intf_cmd1 = 0xFFFFFFFF;
	memset(ispif->sof_count, 0, sizeof(ispif->sof_count));

	ispif->csid_version = csid_version;

	if (ispif->csid_version >= CSID_VERSION_V30) {
		if (!ispif->clk_mux_mem || !ispif->clk_mux_io) {
			pr_err("%s csi clk mux mem %p io %p\n", __func__,
				ispif->clk_mux_mem, ispif->clk_mux_io);
			rc = -ENOMEM;
			return rc;
		}
		ispif->clk_mux_base = ioremap(ispif->clk_mux_mem->start,
			resource_size(ispif->clk_mux_mem));
		if (!ispif->clk_mux_base) {
			pr_err("%s: clk_mux_mem ioremap failed\n", __func__);
			rc = -ENOMEM;
			return rc;
		}
	}

	ispif->base = ioremap(ispif->mem->start,
		resource_size(ispif->mem));
	if (!ispif->base) {
		rc = -ENOMEM;
		pr_err("%s: nomem\n", __func__);
		/* was "goto end", which leaked clk_mux_base */
		goto error_clk_mux;
	}

	rc = request_irq(ispif->irq->start, msm_io_ispif_irq,
		IRQF_TRIGGER_RISING, "ispif", ispif);
	if (rc) {
		pr_err("%s: request_irq error = %d\n", __func__, rc);
		goto error_irq;
	}

	rc = msm_ispif_clk_ahb_enable(ispif, 1);
	if (rc) {
		pr_err("%s: ahb_clk enable failed", __func__);
		goto error_ahb;
	}

	msm_ispif_reset_hw(ispif);

	rc = msm_ispif_reset(ispif);
	if (rc == 0) {
		ispif->ispif_state = ISPIF_POWER_UP;
		CDBG("%s: power up done\n", __func__);
		goto end;
	}

	/* Reset failed: undo the AHB clock enable above before falling
	 * through the shared unwind path (was left enabled before). */
	msm_ispif_clk_ahb_enable(ispif, 0);
error_ahb:
	free_irq(ispif->irq->start, ispif);
error_irq:
	iounmap(ispif->base);
error_clk_mux:
	/* Only mapped for csid_version >= V30; NULL otherwise. */
	if (ispif->clk_mux_base) {
		iounmap(ispif->clk_mux_base);
		ispif->clk_mux_base = NULL;
	}
end:
	return rc;
}
/*
 * s3c_rtc_probe - platform probe for the S3C24xx/S3C64xx RTC
 *
 * Resolves the tick and alarm IRQ numbers, claims and maps the register
 * region, powers the RTC block on, registers it with the RTC class and
 * programs the initial tick frequency.  Results are stored in the
 * file-scope globals s3c_rtc_tickno / s3c_rtc_alarmno / s3c_rtc_mem /
 * s3c_rtc_base / s3c_rtc_cpu_type.  Returns 0 or a negative errno; on
 * failure all claimed resources are released in reverse order.
 */
static int __devinit s3c_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;
	struct resource *res;
	int ret;

	pr_debug("%s: probe=%p\n", __func__, pdev);

	/* find the IRQs */
	s3c_rtc_tickno = platform_get_irq(pdev, 1);
	if (s3c_rtc_tickno < 0) {
		dev_err(&pdev->dev, "no irq for rtc tick\n");
		return -ENOENT;
	}

	s3c_rtc_alarmno = platform_get_irq(pdev, 0);
	if (s3c_rtc_alarmno < 0) {
		dev_err(&pdev->dev, "no irq for alarm\n");
		return -ENOENT;
	}

	pr_debug("s3c2410_rtc: tick irq %d, alarm irq %d\n",
		 s3c_rtc_tickno, s3c_rtc_alarmno);

	/* get the memory region */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "failed to get memory region resource\n");
		return -ENOENT;
	}

	s3c_rtc_mem = request_mem_region(res->start, res->end-res->start+1,
					 pdev->name);
	if (s3c_rtc_mem == NULL) {
		dev_err(&pdev->dev, "failed to reserve memory region\n");
		ret = -ENOENT;
		goto err_nores;
	}

	s3c_rtc_base = ioremap(res->start, res->end - res->start + 1);
	if (s3c_rtc_base == NULL) {
		dev_err(&pdev->dev, "failed ioremap()\n");
		ret = -EINVAL;
		goto err_nomap;
	}

	/* check to see if everything is setup correctly */
	/* power the block on before touching RTCCON below */
	s3c_rtc_enable(pdev, 1);

	pr_debug("s3c2410_rtc: RTCCON=%02x\n",
		 readb(s3c_rtc_base + S3C2410_RTCCON));

	device_init_wakeup(&pdev->dev, 1);

	/* register RTC and exit */
	rtc = rtc_device_register("s3c", &pdev->dev, &s3c_rtcops,
				  THIS_MODULE);
	if (IS_ERR(rtc)) {
		dev_err(&pdev->dev, "cannot attach rtc\n");
		ret = PTR_ERR(rtc);
		goto err_nortc;
	}

	/* NOTE(review): s3c_rtc_cpu_type and max_user_freq are assigned
	 * only after rtc_device_register(); if registration can invoke
	 * s3c_rtcops callbacks that consult s3c_rtc_cpu_type, they would
	 * briefly see a stale value — confirm against the rtc core. */
	s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data;

	if (s3c_rtc_cpu_type == TYPE_S3C64XX)
		rtc->max_user_freq = 32768;
	else
		rtc->max_user_freq = 128;

	platform_set_drvdata(pdev, rtc);

	s3c_rtc_setfreq(&pdev->dev, 1);

	return 0;

 err_nortc:
	s3c_rtc_enable(pdev, 0);
	iounmap(s3c_rtc_base);

 err_nomap:
	release_resource(s3c_rtc_mem);

 err_nores:
	return ret;
}
int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret) { struct ucc_slow_private *uccs; u32 i; struct ucc_slow *us_regs; u32 gumr; struct qe_bd *bd; u32 id; u32 command; int ret = 0; if (!us_info) return -EINVAL; /* check if the UCC port number is in range. */ if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) { printk(KERN_ERR "%s: illegal UCC number", __FUNCTION__); return -EINVAL; } /* * Set mrblr * Check that 'max_rx_buf_length' is properly aligned (4), unless * rfw is 1, meaning that QE accepts one byte at a time, unlike normal * case when QE accepts 32 bits at a time. */ if ((!us_info->rfw) && (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) { printk(KERN_ERR "max_rx_buf_length not aligned."); return -EINVAL; } uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL); if (!uccs) { printk(KERN_ERR "%s: Cannot allocate private data", __FUNCTION__); return -ENOMEM; } /* Fill slow UCC structure */ uccs->us_info = us_info; /* Set the PHY base address */ uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow)); if (uccs->us_regs == NULL) { printk(KERN_ERR "%s: Cannot map UCC registers", __FUNCTION__); return -ENOMEM; } uccs->saved_uccm = 0; uccs->p_rx_frame = 0; us_regs = uccs->us_regs; uccs->p_ucce = (u16 *) & (us_regs->ucce); uccs->p_uccm = (u16 *) & (us_regs->uccm); #ifdef STATISTICS uccs->rx_frames = 0; uccs->tx_frames = 0; uccs->rx_discarded = 0; #endif /* STATISTICS */ /* Get PRAM base */ uccs->us_pram_offset = qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM); if (IS_MURAM_ERR(uccs->us_pram_offset)) { printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __FUNCTION__); ucc_slow_free(uccs); return -ENOMEM; } id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, QE_CR_PROTOCOL_UNSPECIFIED, uccs->us_pram_offset); uccs->us_pram = qe_muram_addr(uccs->us_pram_offset); /* Init Guemr register */ if ((ret = ucc_init_guemr((struct ucc_common 
*) (us_info->regs)))) { printk(KERN_ERR "%s: cannot init GUEMR", __FUNCTION__); ucc_slow_free(uccs); return ret; } /* Set UCC to slow type */ if ((ret = ucc_set_type(us_info->ucc_num, (struct ucc_common *) (us_info->regs), UCC_SPEED_TYPE_SLOW))) { printk(KERN_ERR "%s: cannot set UCC type", __FUNCTION__); ucc_slow_free(uccs); return ret; } out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length); INIT_LIST_HEAD(&uccs->confQ); /* Allocate BDs. */ uccs->rx_base_offset = qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd), QE_ALIGNMENT_OF_BD); if (IS_MURAM_ERR(uccs->rx_base_offset)) { printk(KERN_ERR "%s: cannot allocate RX BDs", __FUNCTION__); uccs->rx_base_offset = 0; ucc_slow_free(uccs); return -ENOMEM; } uccs->tx_base_offset = qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd), QE_ALIGNMENT_OF_BD); if (IS_MURAM_ERR(uccs->tx_base_offset)) { printk(KERN_ERR "%s: cannot allocate TX BDs", __FUNCTION__); uccs->tx_base_offset = 0; ucc_slow_free(uccs); return -ENOMEM; } /* Init Tx bds */ bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset); for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) { /* clear bd buffer */ out_be32(&bd->buf, 0); /* set bd status and length */ out_be32((u32 *) bd, 0); bd++; } /* for last BD set Wrap bit */ out_be32(&bd->buf, 0); out_be32((u32 *) bd, cpu_to_be32(T_W)); /* Init Rx bds */ bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset); for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) { /* set bd status and length */ out_be32((u32*)bd, 0); /* clear bd buffer */ out_be32(&bd->buf, 0); bd++; } /* for last BD set Wrap bit */ out_be32((u32*)bd, cpu_to_be32(R_W)); out_be32(&bd->buf, 0); /* Set GUMR (For more details see the hardware spec.). 
*/ /* gumr_h */ gumr = us_info->tcrc; if (us_info->cdp) gumr |= UCC_SLOW_GUMR_H_CDP; if (us_info->ctsp) gumr |= UCC_SLOW_GUMR_H_CTSP; if (us_info->cds) gumr |= UCC_SLOW_GUMR_H_CDS; if (us_info->ctss) gumr |= UCC_SLOW_GUMR_H_CTSS; if (us_info->tfl) gumr |= UCC_SLOW_GUMR_H_TFL; if (us_info->rfw) gumr |= UCC_SLOW_GUMR_H_RFW; if (us_info->txsy) gumr |= UCC_SLOW_GUMR_H_TXSY; if (us_info->rtsm) gumr |= UCC_SLOW_GUMR_H_RTSM; out_be32(&us_regs->gumr_h, gumr); /* gumr_l */ gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc | us_info->diag | us_info->mode; if (us_info->tci) gumr |= UCC_SLOW_GUMR_L_TCI; if (us_info->rinv) gumr |= UCC_SLOW_GUMR_L_RINV; if (us_info->tinv) gumr |= UCC_SLOW_GUMR_L_TINV; if (us_info->tend) gumr |= UCC_SLOW_GUMR_L_TEND; out_be32(&us_regs->gumr_l, gumr); /* Function code registers */ /* if the data is in cachable memory, the 'global' */ /* in the function code should be set. */ uccs->us_pram->tfcr = uccs->us_pram->rfcr = us_info->data_mem_part | QE_BMR_BYTE_ORDER_BO_MOT; /* rbase, tbase are offsets from MURAM base */ out_be16(&uccs->us_pram->rbase, uccs->us_pram_offset); out_be16(&uccs->us_pram->tbase, uccs->us_pram_offset); /* Mux clocking */ /* Grant Support */ ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support); /* Breakpoint Support */ ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support); /* Set Tsa or NMSI mode. */ ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa); /* If NMSI (not Tsa), set Tx and Rx clock. */ if (!us_info->tsa) { /* Rx clock routing */ if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock, COMM_DIR_RX)) { printk(KERN_ERR "%s: illegal value for RX clock", __FUNCTION__); ucc_slow_free(uccs); return -EINVAL; } /* Tx clock routing */ if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock, COMM_DIR_TX)) { printk(KERN_ERR "%s: illegal value for TX clock", __FUNCTION__); ucc_slow_free(uccs); return -EINVAL; } } /* Set interrupt mask register at UCC level. 
*/ out_be16(&us_regs->uccm, us_info->uccm_mask); /* First, clear anything pending at UCC level, * otherwise, old garbage may come through * as soon as the dam is opened. */ /* Writing '1' clears */ out_be16(&us_regs->ucce, 0xffff); /* Issue QE Init command */ if (us_info->init_tx && us_info->init_rx) command = QE_INIT_TX_RX; else if (us_info->init_tx) command = QE_INIT_TX; else command = QE_INIT_RX; /* We know at least one is TRUE */ id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num); qe_issue_cmd(command, id, QE_CR_PROTOCOL_UNSPECIFIED, 0); *uccs_ret = uccs; return 0; }