void __iomem *
ioremap (unsigned long offset, unsigned long size)
{
	u64 attr;
	unsigned long gran_base, gran_size;

	/*
	 * For things in kern_memmap, we must use the same attribute
	 * as the rest of the kernel.  For more details, see
	 * Documentation/ia64/aliasing.txt.
	 */
	attr = kern_mem_attribute(offset, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(offset);
	else if (attr & EFI_MEMORY_UC)
		return __ioremap(offset, size);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported for the whole granule, we prefer that.
	 */
	gran_base = GRANULEROUNDDOWN(offset);
	gran_size = GRANULEROUNDUP(offset + size) - gran_base;
	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(offset);

	return __ioremap(offset, size);
}
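/*
 * A minimal usage sketch (not from the sources above): how a driver
 * might consume this ia64 ioremap().  The caller does not care whether
 * it got a cached (WB) or uncached (UC) mapping; the read/write
 * accessors work either way.  EXAMPLE_MMIO_BASE/SIZE are hypothetical
 * placeholders, not real addresses.
 */
#include <linux/io.h>

#define EXAMPLE_MMIO_BASE	0x80000000UL	/* hypothetical device base */
#define EXAMPLE_MMIO_SIZE	0x1000UL

static int example_probe(void)
{
	void __iomem *regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	u32 id;

	if (!regs)
		return -ENOMEM;
	id = readl(regs);	/* valid for both WB and UC mappings */
	iounmap(regs);
	return id ? 0 : -ENODEV;
}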
static int h3600_dual_sleeve_pcmcia_init(struct pcmcia_init *init)
{
	dual_pcmcia_sleeve[0] = (struct linkup_l1110 *)__ioremap(0x1a000000, PAGE_SIZE, 0);
	dual_pcmcia_sleeve[1] = (struct linkup_l1110 *)__ioremap(0x19000000, PAGE_SIZE, 0);

	writel(LINKUP_PRC_S2 | LINKUP_PRC_S1, &dual_pcmcia_sleeve[0]->prc);
	writel(LINKUP_PRC_S2 | LINKUP_PRC_S1 | LINKUP_PRC_SSP, &dual_pcmcia_sleeve[1]->prc);

	return sa1100_h3600_common_pcmcia_init(init);
}
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);

	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory)) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
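/*
 * Why the change_page_attr() call above matters: when the target range
 * is ordinary RAM, the kernel's direct (cached) mapping of those pages
 * still exists, so the new uncached alias must be matched by marking
 * the direct-map pages uncached too, or the CPU may serve stale cache
 * lines.  A hedged caller sketch; the address is a hypothetical
 * RAM-backed region shared with a device.
 */
static void example_nocache_alias(void)
{
	void __iomem *shared = ioremap_nocache(0x1000000, PAGE_SIZE);

	if (!shared)
		return;
	writel(0xdeadbeef, shared);	/* reaches memory, not just the cache */
	iounmap(shared);		/* pairs with the attribute change */
}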
int drm_ioremap(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_device_iomap iomap;
	int ret;

	DRM_DEBUG("\n");

	iomap.paddr = map->offset;
	iomap.size = map->size;

	ret = __ioremap(dev->devinfo, &iomap);
	if (ret) {
		DRM_ERROR("__ioremap failed: paddr=0x%x, size=0x%x",
		    map->offset, map->size);
		return (ret);
	}

	map->handle = (void *)iomap.kvaddr;
	map->acc_handle = iomap.acc_handle;

	DRM_DEBUG("map->handle=%p, map->size=%x",
	    (void *)map->handle, map->size);

	return (0);
}
/* return IRQ if found, or 0 otherwise */
int detect_rf2(int iobase)
{
	volatile unsigned char *base, *sh;
	int irq = 0;

	base = (volatile unsigned char *)__ioremap(0xEE000000, 4096, 0);
	if (base == NULL) {	/* __ioremap() returns NULL on failure */
		printk(KERN_ERR "ide-tsrf2: cannot map 0xEE000000\n");
		return 0;
	}
	sh = base + iobase;

	if (sh[0] == 0x8e) {	/* found board */
		printk("TS-RF2 detected at I/O address 0x%X, ", iobase);
		sh[2] = 0xE9;	/* make sure unused UART is off */
		if (sh[1] & (1 << 6)) {
			printk("IRQ 6\n");
			irq = 64 + 6;
		} else if (sh[1] & (1 << 7)) {
			printk("IRQ 7\n");
			irq = 64 + 7;
		} else {
			printk("NO IRQ (ignoring!)\n");
		}
	}
	__iounmap(base);	/* unmap the base, not the offset pointer */
	return irq;
}
void __iomem *
ioremap_nocache (unsigned long offset, unsigned long size)
{
	if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
		return NULL;

	return __ioremap(offset, size);
}
void __iomem *
ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
		return NULL;

	return __ioremap(phys_addr, size);
}
static void off104(void)
{
	volatile unsigned long *tmp;

	tmp = (volatile unsigned long *)__ioremap(0xE8000000, 4096, 0);
	if (tmp == NULL) {	/* __ioremap() returns NULL on failure */
		printk(KERN_ERR "tsuart1: cannot map 0xE8000000\n");
		return;
	}
	tmp[0x30 / 4] = saved_A;
	tmp[0x34 / 4] = saved_B;
	__iounmap(tmp);
}
void __iomem *ioremap_flags(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC);

	return __ioremap(addr, size, flags);
}
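/*
 * A hedged usage sketch: callers of ioremap_flags() pick the cache
 * policy by passing page flags, e.g. a writethrough mapping for a
 * register block that is mostly read.  The helper name is hypothetical;
 * the flag combination mirrors the qib_pcie_ddinit() snippets below.
 */
static void __iomem *example_map_writethrough(phys_addr_t base,
					      unsigned long len)
{
	return ioremap_flags(base, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU);
}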
static int __init setup_areas(struct spu *spu)
{
	struct table {char* name; unsigned long addr; unsigned long size;};
	static const unsigned long shadow_flags = _PAGE_NO_CACHE | 3;

	spu_pdata(spu)->shadow = __ioremap(spu_pdata(spu)->shadow_addr,
					   sizeof(struct spe_shadow),
					   shadow_flags);
	if (!spu_pdata(spu)->shadow) {
		pr_debug("%s:%d: ioremap shadow failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	spu->local_store = (__force void *)ioremap_flags(spu->local_store_phys,
							 LS_SIZE, _PAGE_NO_CACHE);
	if (!spu->local_store) {
		pr_debug("%s:%d: ioremap local_store failed\n",
			 __func__, __LINE__);
		goto fail_ioremap;
	}

	spu->problem = ioremap(spu->problem_phys,
			       sizeof(struct spu_problem));
	if (!spu->problem) {
		pr_debug("%s:%d: ioremap problem failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	spu->priv2 = ioremap(spu_pdata(spu)->priv2_addr,
			     sizeof(struct spu_priv2));
	if (!spu->priv2) {
		pr_debug("%s:%d: ioremap priv2 failed\n", __func__, __LINE__);
		goto fail_ioremap;
	}

	dump_areas(spu_pdata(spu)->spe_id, spu_pdata(spu)->priv2_addr,
		   spu->problem_phys, spu->local_store_phys,
		   spu_pdata(spu)->shadow_addr);
	dump_areas(spu_pdata(spu)->spe_id, (unsigned long)spu->priv2,
		   (unsigned long)spu->problem,
		   (unsigned long)spu->local_store,
		   (unsigned long)spu_pdata(spu)->shadow);

	return 0;

fail_ioremap:
	spu_unmap(spu);
	return -ENOMEM;
}
static int __devinit
chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
{
	struct fb_info *p = &chipsfb_info;
	unsigned long addr, size;
	unsigned short cmd;

	if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
		return -ENODEV;
	addr = pci_resource_start(dp, 0);
	size = pci_resource_len(dp, 0);
	if (addr == 0)
		return -ENODEV;
	if (p->screen_base != NULL)
		return -EBUSY;
	if (!request_mem_region(addr, size, "chipsfb"))
		return -EBUSY;

#ifdef __BIG_ENDIAN
	addr += 0x800000;	/* Use big-endian aperture */
#endif

	/* we should use pci_enable_device here, but,
	   the device doesn't declare its I/O ports in its BARs
	   so pci_enable_device won't turn on I/O responses */
	pci_read_config_word(dp, PCI_COMMAND, &cmd);
	cmd |= 3;	/* enable memory and IO space */
	pci_write_config_word(dp, PCI_COMMAND, cmd);

#ifdef CONFIG_PMAC_BACKLIGHT
	/* turn on the backlight */
	set_backlight_enable(1);
#endif /* CONFIG_PMAC_BACKLIGHT */

	p->screen_base = __ioremap(addr, 0x200000, _PAGE_NO_CACHE);
	if (p->screen_base == NULL) {
		release_mem_region(addr, size);
		return -ENOMEM;
	}
	p->device = &dp->dev;
	init_chips(p, addr);

#ifdef CONFIG_PMAC_PBOOK
	pmu_register_sleep_notifier(&chips_sleep_notifier);
#endif /* CONFIG_PMAC_PBOOK */

	/* Clear the entire framebuffer */
	memset(p->screen_base, 0, 0x100000);

	pci_set_drvdata(dp, p);
	return 0;
}
static void __iomem *iowa_ioremap(unsigned long addr, unsigned long size,
				  unsigned long flags)
{
	struct iowa_bus *bus;
	void __iomem *res = __ioremap(addr, size, flags);
	int busno;

	bus = iowa_pci_find(0, addr);
	if (bus != NULL) {
		busno = bus - iowa_busses;
		PCI_SET_ADDR_TOKEN(res, busno + 1);
	}
	return res;
}
static int __init clps6700_init_skt(int nr)
{
	struct clps6700_skt *skt;
	int ret;

	skt = kmalloc(sizeof(struct clps6700_skt), GFP_KERNEL);
	if (!skt)
		return -ENOMEM;

	memset(skt, 0, sizeof(struct clps6700_skt));

	spin_lock_init(&skt->ev_lock);

	skt->nr       = nr;
	skt->physbase = nr ? CS5_PHYS_BASE : CS4_PHYS_BASE;
	skt->pmr      = PMR_AUTOIDLE | PMR_MCPE | PMR_CDWEAK;
	skt->cpcr     = CPCR_PDIR(PCTL1 | PCTL0);
	skt->cpcr_3v3 = CPCR_PON(PCTL0);
	skt->cpcr_5v0 = CPCR_PON(PCTL0);	/* we only do 3v3 */
	skt->cur_pmr  = skt->pmr;

	skt->regbase = (u_int)__ioremap(skt->physbase + CLPS6700_REG_BASE,
					CLPS6700_REG_SIZE, 0);
	ret = -ENOMEM;
	if (!skt->regbase)
		goto err_free;

	skts[nr] = skt;

	ret = request_irq(IRQ_EINT3, clps6700_interrupt, SA_SHIRQ,
			  "pcmcia", skt);
	if (ret) {
		printk(KERN_ERR "clps6700: unable to grab irq%d (%d)\n",
		       IRQ_EINT3, ret);
		goto err_unmap;
	}

	return 0;

err_unmap:
	iounmap((void *)skt->regbase);
err_free:
	kfree(skt);
	skts[nr] = NULL;
	return ret;
}
/*
 * Mask off bits 0xFC000 in register 0xE8000038.  This sets C14-C19 on
 * the PC104 bus to GPIO, which is needed for UART #8 and UART #9
 * signals to appear on the header.
 */
int semioff104(void)
{
	volatile unsigned long *tmp;

	tmp = (volatile unsigned long *)__ioremap(0xE8000000, 4096, 0);
	if (tmp == NULL) {	/* __ioremap() returns NULL on failure */
		printk(KERN_ERR "tsuart1: cannot map 0xE8000000\n");
		return 0;
	}
	if (tmp[0x38 / 4] & 0xFC000) {
		/* already set to not GPIO; reject the change */
		__iounmap(tmp);
		return 0;
	} else {
		tmp[0x38 / 4] &= 0x03FFF;
		__iounmap(tmp);
		return 1;
	}
}
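/*
 * off104() above restores saved_A/saved_B, so some setup path must have
 * captured those registers first.  A hypothetical sketch of that
 * save-side helper, inferred only from the restore code; the name
 * on104() and everything beyond offsets 0x30/0x34 are assumptions.
 * saved_A/saved_B are the same module globals off104() writes back.
 */
static int on104(void)
{
	volatile unsigned long *tmp;

	tmp = (volatile unsigned long *)__ioremap(0xE8000000, 4096, 0);
	if (tmp == NULL)
		return 0;
	saved_A = tmp[0x30 / 4];	/* capture state for off104() */
	saved_B = tmp[0x34 / 4];
	__iounmap(tmp);
	return 1;
}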
/*
 * Do remaining PCIe setup, once dd is allocated, and save away
 * fields required to re-initialize after a chip reset, or for
 * various other purposes
 */
int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
		    const struct pci_device_id *ent)
{
	unsigned long len;
	resource_size_t addr;

	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	addr = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);
	qib_cdbg(VERBOSE, "regbase (0) %llx len %ld vend %x/%x "
		 "driver_data %p\n", (unsigned long long)addr, len,
		 ent->vendor, ent->device, pci_get_drvdata(pdev));

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU);
#else
	dd->kregbase = ioremap_nocache(addr, len);
#endif

	if (!dd->kregbase) {
		qib_dbg("Unable to map io addr %llx to kvirt, failing\n",
			(unsigned long long)addr);
		return -ENOMEM;
	}
	dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
	dd->physaddr = addr;	/* used for io_remap, etc. */
	/* for user mmap */
	qib_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p through %p\n",
		 (unsigned long long)addr, dd->kregbase, dd->kregend);

	/*
	 * Save BARs to rewrite after device reset.  Save all 64 bits of
	 * BAR, just in case.
	 */
	dd->pcibar0 = addr;
	dd->pcibar1 = addr >> 32;
	dd->deviceid = ent->device;	/* save for later use */
	dd->vendorid = ent->vendor;

	return 0;
}
static int __init cbe_find_pmd_mmio(int cpu, struct cbe_pervasive *p)
{
	struct device_node *node;
	unsigned int *int_servers;
	char *addr;
	unsigned long real_address;
	unsigned int size;

	struct pmd_regs __iomem *pmd_mmio_area;
	int hardid, thread;
	int proplen;

	pmd_mmio_area = NULL;
	hardid = get_hard_smp_processor_id(cpu);
	for (node = NULL; (node = of_find_node_by_type(node, "cpu"));) {
		int_servers = (void *) get_property(node,
				"ibm,ppc-interrupt-server#s", &proplen);
		if (!int_servers) {
			printk(KERN_WARNING "%s misses "
				"ibm,ppc-interrupt-server#s property",
				node->full_name);
			continue;
		}
		for (thread = 0; thread < proplen / sizeof (int); thread++) {
			if (hardid == int_servers[thread]) {
				addr = get_property(node, "pervasive", NULL);
				goto found;
			}
		}
	}

	printk(KERN_WARNING "%s: CPU %d not found\n", __FUNCTION__, cpu);
	return -EINVAL;

found:
	real_address = *(unsigned long*) addr;
	addr += sizeof (unsigned long);
	size = *(unsigned int*) addr;

	pr_debug("pervasive area for CPU %d at %lx, size %x\n",
		 cpu, real_address, size);
	p->regs = __ioremap(real_address, size, _PAGE_NO_CACHE);
	p->thread = thread;
	return 0;
}
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	csize = min_t(size_t, csize, PAGE_SIZE);

	if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
		vaddr = __va(pfn << PAGE_SHIFT);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
	} else {
		vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
		csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
		iounmap(vaddr);
	}

	return csize;
}
/*
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);

	if (userbuf) {
		if (copy_to_user((char __user *)buf, (vaddr + offset), csize)) {
			iounmap(vaddr);
			return -EFAULT;
		}
	} else
		memcpy(buf, (vaddr + offset), csize);

	iounmap(vaddr);
	return csize;
}
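/*
 * A hedged caller sketch: how a vmcore-style reader might walk
 * copy_oldmem_page() across a byte range that spans page boundaries.
 * example_read_oldmem() is illustrative, not the kernel's own reader.
 */
static ssize_t example_read_oldmem(char *buf, size_t count, u64 *ppos,
				   int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	pfn = (unsigned long)(*ppos / PAGE_SIZE);
	offset = (unsigned long)(*ppos % PAGE_SIZE);

	while (count) {
		/* never cross a page boundary in one call */
		nr_bytes = min(count, (size_t)PAGE_SIZE - offset);
		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;
		*ppos += tmp;
		count -= tmp;
		buf += tmp;
		read += tmp;
		++pfn;
		offset = 0;
	}
	return read;
}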
/*
 * Do remaining PCIe setup, once dd is allocated, and save away
 * fields required to re-initialize after a chip reset, or for
 * various other purposes
 */
int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
		    const struct pci_device_id *ent)
{
	unsigned long len;
	resource_size_t addr;

	dd->pcidev = pdev;
	pci_set_drvdata(pdev, dd);

	addr = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU);
#else
	dd->kregbase = ioremap_nocache(addr, len);
#endif

	if (!dd->kregbase)
		return -ENOMEM;

	dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
	dd->physaddr = addr;	/* used for io_remap, etc. */

	/*
	 * Save BARs to rewrite after device reset.  Save all 64 bits of
	 * BAR, just in case.
	 */
	dd->pcibar0 = addr;
	dd->pcibar1 = addr >> 32;
	dd->deviceid = ent->device;	/* save for later use */
	dd->vendorid = ent->vendor;

	return 0;
}
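/*
 * Why the BARs are saved above: a chip reset can wipe PCI config space,
 * after which the driver rewrites them.  A hedged sketch of that
 * restore step (the real driver's reset path does more than this):
 */
static void example_rewrite_bars(struct qib_devdata *dd)
{
	u16 cmd;

	pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, dd->pcibar0);
	pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, dd->pcibar1);

	/* re-enable memory decoding and bus mastering */
	pci_read_config_word(dd->pcidev, PCI_COMMAND, &cmd);
	cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
	pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
}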
static int z2_open(struct block_device *bdev, fmode_t mode)
{
	int device;
	int max_z2_map = (Z2RAM_SIZE / Z2RAM_CHUNKSIZE) *
		sizeof(z2ram_map[0]);
	int max_chip_map = (amiga_chip_size / Z2RAM_CHUNKSIZE) *
		sizeof(z2ram_map[0]);
	int rc = -ENOMEM;

	device = MINOR(bdev->bd_dev);

	if (current_device != -1 && current_device != device) {
		rc = -EBUSY;
		goto err_out;
	}

	if (current_device == -1) {
		z2_count   = 0;
		chip_count = 0;
		list_count = 0;
		z2ram_size = 0;

		if (device >= Z2MINOR_MEMLIST1 && device <= Z2MINOR_MEMLIST4) {
			int index = device - Z2MINOR_MEMLIST1 + 1;
			unsigned long size, paddr, vaddr;

			if (index >= m68k_realnum_memory) {
				printk(KERN_ERR DEVICE_NAME
				       ": no such entry in z2ram_map\n");
				goto err_out;
			}

			paddr = m68k_memory[index].addr;
			size = m68k_memory[index].size & ~(Z2RAM_CHUNKSIZE - 1);

#ifdef __powerpc__
			/* reserve the vmalloc range before ioremapping it */
			{
				vfree(vmalloc(size));
			}

			vaddr = (unsigned long)__ioremap(paddr, size,
							 _PAGE_WRITETHRU);
#else
			vaddr = (unsigned long)z_remap_nocache_nonser(paddr, size);
#endif
			z2ram_map = kmalloc((size / Z2RAM_CHUNKSIZE) *
					    sizeof(z2ram_map[0]), GFP_KERNEL);
			if (z2ram_map == NULL) {
				printk(KERN_ERR DEVICE_NAME
				       ": cannot get mem for z2ram_map\n");
				goto err_out;
			}

			while (size) {
				z2ram_map[z2ram_size++] = vaddr;
				size -= Z2RAM_CHUNKSIZE;
				vaddr += Z2RAM_CHUNKSIZE;
				list_count++;
			}

			if (z2ram_size != 0)
				printk(KERN_INFO DEVICE_NAME
				       ": using %iK List Entry %d Memory\n",
				       list_count * Z2RAM_CHUNK1024, index);
		} else
			switch (device) {
			case Z2MINOR_COMBINED:
				z2ram_map = kmalloc(max_z2_map + max_chip_map,
						    GFP_KERNEL);
				if (z2ram_map == NULL) {
					printk(KERN_ERR DEVICE_NAME
					       ": cannot get mem for z2ram_map\n");
					goto err_out;
				}

				get_z2ram();
				get_chipram();

				if (z2ram_size != 0)
					printk(KERN_INFO DEVICE_NAME
					       ": using %iK Zorro II RAM and %iK Chip RAM (Total %dK)\n",
					       z2_count * Z2RAM_CHUNK1024,
					       chip_count * Z2RAM_CHUNK1024,
					       (z2_count + chip_count) * Z2RAM_CHUNK1024);
				break;
			case Z2MINOR_Z2ONLY:
				z2ram_map = kmalloc(max_z2_map, GFP_KERNEL);
				if (z2ram_map == NULL) {
					printk(KERN_ERR DEVICE_NAME
					       ": cannot get mem for z2ram_map\n");
					goto err_out;
				}

				get_z2ram();

				if (z2ram_size != 0)
					printk(KERN_INFO DEVICE_NAME
					       ": using %iK of Zorro II RAM\n",
					       z2_count * Z2RAM_CHUNK1024);
				break;
			case Z2MINOR_CHIPONLY:
				z2ram_map = kmalloc(max_chip_map, GFP_KERNEL);
				if (z2ram_map == NULL) {
					printk(KERN_ERR DEVICE_NAME
					       ": cannot get mem for z2ram_map\n");
					goto err_out;
				}

				get_chipram();

				if (z2ram_size != 0)
					printk(KERN_INFO DEVICE_NAME
					       ": using %iK Chip RAM\n",
					       chip_count * Z2RAM_CHUNK1024);
				break;
			default:
				rc = -ENODEV;
				goto err_out;
			}

		if (z2ram_size == 0) {
			printk(KERN_NOTICE DEVICE_NAME
			       ": no unused ZII/Chip RAM found\n");
			goto err_out_kfree;
		}

		current_device = device;
		z2ram_size <<= Z2RAM_CHUNKSHIFT;
		set_capacity(z2ram_gendisk, z2ram_size >> 9);
	}

	return 0;

err_out_kfree:
	kfree(z2ram_map);
err_out:
	return rc;
}
static int __init init_ocelot_maps(void)
{
	void *pld;
	int nr_parts;
	unsigned char brd_status;

	printk(KERN_INFO "Momenco Ocelot MTD mappings: Flash 0x%x at 0x%x, NVRAM 0x%x at 0x%x\n",
	       FLASH_WINDOW_SIZE, FLASH_WINDOW_ADDR,
	       NVRAM_WINDOW_SIZE, NVRAM_WINDOW_ADDR);

	/* First check whether the flash jumper is present */
	pld = ioremap(OCELOT_PLD, 0x10);
	if (!pld) {
		printk(KERN_NOTICE "Failed to ioremap Ocelot PLD\n");
		return -EIO;
	}
	brd_status = readb(pld + 4);
	iounmap(pld);

	/* Now ioremap the NVRAM space */
	ocelot_nvram_map.map_priv_1 =
		(unsigned long)ioremap_nocache(NVRAM_WINDOW_ADDR, NVRAM_WINDOW_SIZE);
	if (!ocelot_nvram_map.map_priv_1) {
		printk(KERN_NOTICE "Failed to ioremap Ocelot NVRAM space\n");
		return -EIO;
	}
	/* ocelot_nvram_map.map_priv_2 = ocelot_nvram_map.map_priv_1; */

	/* And do the RAM probe on it to get an MTD device */
	nvram_mtd = do_map_probe("map_ram", &ocelot_nvram_map);
	if (!nvram_mtd) {
		printk("NVRAM probe failed\n");
		goto fail_1;
	}
	nvram_mtd->module = THIS_MODULE;
	nvram_mtd->erasesize = 16;

	/* Now map the flash space */
	ocelot_flash_map.map_priv_1 =
		(unsigned long)ioremap_nocache(FLASH_WINDOW_ADDR, FLASH_WINDOW_SIZE);
	if (!ocelot_flash_map.map_priv_1) {
		printk(KERN_NOTICE "Failed to ioremap Ocelot flash space\n");
		goto fail_2;
	}
	/* Now the cached version */
	ocelot_flash_map.map_priv_2 =
		(unsigned long)__ioremap(FLASH_WINDOW_ADDR, FLASH_WINDOW_SIZE, 0);
	if (!ocelot_flash_map.map_priv_2) {
		/* Doesn't matter if it failed. Just use the uncached version */
		ocelot_flash_map.map_priv_2 = ocelot_flash_map.map_priv_1;
	}

	/* Only probe for flash if the write jumper is present */
	if (brd_status & 0x40) {
		flash_mtd = do_map_probe("jedec", &ocelot_flash_map);
	} else {
		printk(KERN_NOTICE "Ocelot flash write jumper not present. Treating as ROM\n");
	}

	/* If that failed or the jumper's absent, pretend it's ROM */
	if (!flash_mtd) {
		flash_mtd = do_map_probe("map_rom", &ocelot_flash_map);
		/* If we're treating it as ROM, set the erase size */
		if (flash_mtd)
			flash_mtd->erasesize = 0x10000;
	}
	if (!flash_mtd)
		goto fail_3;

	add_mtd_device(nvram_mtd);
	flash_mtd->module = THIS_MODULE;

	nr_parts = parse_redboot_partitions(flash_mtd, &parsed_parts);
	if (nr_parts)
		add_mtd_partitions(flash_mtd, parsed_parts, nr_parts);
	else
		add_mtd_device(flash_mtd);

	return 0;

fail_3:
	iounmap((void *)ocelot_flash_map.map_priv_1);
	if (ocelot_flash_map.map_priv_2 &&
	    ocelot_flash_map.map_priv_2 != ocelot_flash_map.map_priv_1)
		iounmap((void *)ocelot_flash_map.map_priv_2);
fail_2:
	map_destroy(nvram_mtd);
fail_1:
	iounmap((void *)ocelot_nvram_map.map_priv_1);
	return -ENXIO;
}
static int __devinit pasemi_dma_probe(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	struct pasemi_softc *sc;
	int ret, i;

	DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);

	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return -ENOMEM;

	softc_device_init(sc, DRV_NAME, 1, pasemi_methods);

	pci_set_drvdata(pdev, sc);

	spin_lock_init(&sc->sc_chnlock);

	sc->sc_sessions = (struct pasemi_session **)
		kzalloc(PASEMI_INITIAL_SESSIONS *
			sizeof(struct pasemi_session *), GFP_ATOMIC);
	if (sc->sc_sessions == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
	sc->sc_lastchn = 0;
	sc->base_irq = pdev->irq + 6;
	sc->base_chan = 6;
	sc->sc_cid = -1;
	sc->dma_pdev = pdev;

	sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!sc->iob_pdev) {
		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
		ret = -ENODEV;
		goto out;
	}

	/* This is hardcoded and ugly, but we have some firmware versions
	 * who don't provide the register space in the device tree. Luckily
	 * they are at well-known locations so we can just do the math here.
	 */
	sc->dma_regs = ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
	sc->iob_regs = ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
	if (!sc->dma_regs || !sc->iob_regs) {
		dev_err(&pdev->dev, "Can't map registers\n");
		ret = -ENODEV;
		goto out;
	}

	dma_status = __ioremap(0xfd800000, 0x1000, 0);
	if (!dma_status) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "Can't map dmastatus space\n");
		goto out;
	}

	sc->tx = (struct pasemi_fnu_txring *)
		kzalloc(sizeof(struct pasemi_fnu_txring) * 8, GFP_KERNEL);
	if (!sc->tx) {
		ret = -ENOMEM;
		goto out;
	}

	/* Initialize the h/w */
	out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
		 (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
		  PAS_DMA_COM_CFG_FWF));
	out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
		sc->sc_num_channels++;
		ret = pasemi_dma_setup_tx_resources(sc, i);
		if (ret)
			goto out;
	}

	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
					 CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
		ret = -ENXIO;
		goto out;
	}

	/* register algorithms with the framework */
	printk(DRV_NAME ":");

	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);

	return 0;

out:
	pasemi_dma_remove(pdev);
	return ret;
}
static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
{
	struct fb_info *p;
	unsigned long addr, size;
	unsigned short cmd;
	int rc = -ENODEV;

	if (pci_enable_device(dp) < 0) {
		dev_err(&dp->dev, "Cannot enable PCI device\n");
		goto err_out;
	}

	if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
		goto err_disable;
	addr = pci_resource_start(dp, 0);
	size = pci_resource_len(dp, 0);
	if (addr == 0)
		goto err_disable;

	p = framebuffer_alloc(0, &dp->dev);
	if (p == NULL) {
		dev_err(&dp->dev, "Cannot allocate framebuffer structure\n");
		rc = -ENOMEM;
		goto err_disable;
	}

	if (pci_request_region(dp, 0, "chipsfb") != 0) {
		dev_err(&dp->dev, "Cannot request framebuffer\n");
		rc = -EBUSY;
		goto err_release_fb;
	}

#ifdef __BIG_ENDIAN
	addr += 0x800000;	/* Use big-endian aperture */
#endif

	/* we should use pci_enable_device here, but,
	   the device doesn't declare its I/O ports in its BARs
	   so pci_enable_device won't turn on I/O responses */
	pci_read_config_word(dp, PCI_COMMAND, &cmd);
	cmd |= 3;	/* enable memory and IO space */
	pci_write_config_word(dp, PCI_COMMAND, cmd);

#ifdef CONFIG_PMAC_BACKLIGHT
	/* turn on the backlight */
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		pmac_backlight->props.power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
#endif /* CONFIG_PMAC_BACKLIGHT */

#ifdef CONFIG_PPC
	p->screen_base = __ioremap(addr, 0x200000, _PAGE_NO_CACHE);
#else
	p->screen_base = ioremap(addr, 0x200000);
#endif
	if (p->screen_base == NULL) {
		dev_err(&dp->dev, "Cannot map framebuffer\n");
		rc = -ENOMEM;
		goto err_release_pci;
	}

	pci_set_drvdata(dp, p);

	init_chips(p, addr);

	if (register_framebuffer(p) < 0) {
		dev_err(&dp->dev, "C&T 65550 framebuffer failed to register\n");
		goto err_unmap;
	}

	dev_info(&dp->dev, "fb%d: Chips 65550 frame buffer"
		 " (%dK RAM detected)\n",
		 p->node, p->fix.smem_len / 1024);

	return 0;

err_unmap:
	iounmap(p->screen_base);
err_release_pci:
	pci_release_region(dp, 0);
err_release_fb:
	framebuffer_release(p);
err_disable:
err_out:
	return rc;
}
/*
 * int reg_open(struct inode *inode, struct file *filp)
 *
 * Description:
 *	Open and initialize the ADC device.
 */
int reg_open(struct inode *inode, struct file *filp)
{
	unsigned long val;

	MOD_INC_USE_COUNT;	/* mark the module as in use */
	printk("<1>Opening adc...\n");

	/* Check that the memory regions are available.
	 * (An earlier, commented-out revision checked and mapped each
	 * register individually -- SYS_SW_LOCK, ADC_CLK_DIV, DEVICE_CFG,
	 * ADC_RESULT, ADC_SWITCH, ADC_SW_LOCK, 4 bytes each -- which the
	 * page-sized mappings below supersede.)
	 */
	if (check_mem_region(SYS_PAGE, 1024)) {
		printk("DUMB: memory region in use: SYS_PAGE\n");
		return -EBUSY;
	}
	if (check_mem_region(ADC_PAGE, 1024)) {
		printk("DUMB: memory region in use: ADC_PAGE\n");
		return -EBUSY;
	}

	/* Claim and map the register pages */
	request_mem_region(SYS_PAGE, 1024, "sys_page");
	sys_page = __ioremap(SYS_PAGE, 1024, 0);
	request_mem_region(ADC_PAGE, 1024, "adc_page");
	adc_page = __ioremap(ADC_PAGE, 1024, 0);

	printk("dumb: sys_page remap = %p\n", sys_page);
	printk("dumb: adc_page remap = %p\n", adc_page);

	/* Configure the registers.  Each write to a protected register
	 * must be preceded by writing 0xAA to the matching software lock. */
	val = *(sys_page + ADC_CLK_DIV);
	*(sys_page + SYS_SW_LOCK) = 0xAA;
	*(sys_page + ADC_CLK_DIV) = val | 0x80000000;	/* clock divided by 16 */
	/* *device_cfg = 0x08000D00; <-- fails (p. 91 of the EP... manual) */

	val = *(sys_page + DEVICE_CFG);
	*(sys_page + SYS_SW_LOCK) = 0xAA;
	*(sys_page + DEVICE_CFG) = val | 0x20000;	/* mask to set ADCEN */

	val = *(sys_page + DEVICE_CFG);
	*(sys_page + SYS_SW_LOCK) = 0xAA;
	*(sys_page + DEVICE_CFG) = val & ~0x04;		/* mask to clear ADCPD */

	val = *(adc_page + ADC_SWITCH);
	*(adc_page + ADC_SW_LOCK) = 0xAA;
	*(adc_page + ADC_SWITCH) = 0x0608;		/* ADC0 on pin 27 */
	/* (Interrupts are left disabled: ADC_INT_EN stays at 0.) */

	printk("<1>adc init done\n");
	return 0;
}
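/*
 * A hypothetical companion read path for the reg_open() above, using
 * the ADC_RESULT offset that appears only in the superseded
 * per-register code.  It assumes a conversion result is already
 * available; the real hardware's polling/interrupt handshake is not
 * shown in these sources.
 */
static unsigned long example_adc_read(void)
{
	/* ADC_RESULT is assumed to be a word offset into adc_page */
	return *(adc_page + ADC_RESULT);
}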
void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	return __ioremap(addr, size, _PAGE_NO_CACHE);
}
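/*
 * A minimal, hedged sketch of the usual driver pattern around this
 * wrapper: reserve the physical range, map it uncached, touch it only
 * through the accessors, then tear everything down.  The EXAMPLE_*
 * names are placeholders, not real hardware addresses.
 */
#include <linux/io.h>
#include <linux/ioport.h>

#define EXAMPLE_REG_BASE	0xfe000000UL	/* hypothetical register block */
#define EXAMPLE_REG_SIZE	0x100UL

static int example_map_and_poke(void)
{
	void __iomem *regs;

	if (!request_mem_region(EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE, "example"))
		return -EBUSY;
	regs = ioremap(EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE);
	if (!regs) {
		release_mem_region(EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE);
		return -ENOMEM;
	}
	writel(0x1, regs);	/* never dereference an __iomem pointer directly */
	(void)readl(regs);
	iounmap(regs);
	release_mem_region(EXAMPLE_REG_BASE, EXAMPLE_REG_SIZE);
	return 0;
}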
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	/* folding MEM_NON_CACHEABLE into the physical address selects
	 * the uncached alias of the same memory on this architecture */
	return __ioremap(phys_addr | MEM_NON_CACHEABLE, size, 0);
}
/*
 * Main initialization routine
 */
static int __init h1910_init(void)
{
	struct nand_chip *this;
	const char *part_type = 0;
	int mtd_parts_nb = 0;
	struct mtd_partition *mtd_parts = 0;
	void __iomem *nandaddr;

	if (!machine_is_h1900())
		return -ENODEV;

	nandaddr = (void __iomem *)__ioremap(0x08000000, 0x1000, 0, 1);
	if (!nandaddr) {
		printk("Failed to ioremap nand flash.\n");
		return -ENOMEM;
	}

	/* Allocate memory for MTD device structure and private data */
	h1910_nand_mtd = kmalloc(sizeof(struct mtd_info) +
				 sizeof(struct nand_chip), GFP_KERNEL);
	if (!h1910_nand_mtd) {
		printk("Unable to allocate h1910 NAND MTD device structure.\n");
		iounmap((void *)nandaddr);
		return -ENOMEM;
	}

	/* Get pointer to private data */
	this = (struct nand_chip *)(&h1910_nand_mtd[1]);

	/* Initialize structures */
	memset((char *)h1910_nand_mtd, 0, sizeof(struct mtd_info));
	memset((char *)this, 0, sizeof(struct nand_chip));

	/* Link the private data with the MTD structure */
	h1910_nand_mtd->priv = this;

	/*
	 * Enable VPEN
	 */
	GPSR(37) = GPIO_bit(37);

	/* insert callbacks */
	this->IO_ADDR_R = nandaddr;
	this->IO_ADDR_W = nandaddr;
	this->hwcontrol = h1910_hwcontrol;
	/* unknown whether that was correct or not so we will just do it like this */
	this->dev_ready = NULL;
	/* 15 us command delay time */
	this->chip_delay = 50;
	this->eccmode = NAND_ECC_SOFT;
	this->options = NAND_NO_AUTOINCR;

	/* Scan to find existence of the device */
	if (nand_scan(h1910_nand_mtd, 1)) {
		printk(KERN_NOTICE "No NAND device - returning -ENXIO\n");
		kfree(h1910_nand_mtd);
		iounmap((void *)nandaddr);
		return -ENXIO;
	}

#ifdef CONFIG_MTD_CMDLINE_PARTS
	mtd_parts_nb = parse_cmdline_partitions(h1910_nand_mtd, &mtd_parts,
						"h1910-nand");
	if (mtd_parts_nb > 0)
		part_type = "command line";
	else
		mtd_parts_nb = 0;
#endif
	if (mtd_parts_nb == 0) {
		mtd_parts = partition_info;
		mtd_parts_nb = NUM_PARTITIONS;
		part_type = "static";
	}

	/* Register the partitions */
	printk(KERN_NOTICE "Using %s partition definition\n", part_type);
	add_mtd_partitions(h1910_nand_mtd, mtd_parts, mtd_parts_nb);

	/* Return happy */
	return 0;
}
void __iomem *ioremap_flags(phys_addr_t addr, unsigned long size,
			    unsigned long flags)
{
	return __ioremap(addr, size, flags);
}
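/*
 * A hedged closing example: ioremap() above bakes in _PAGE_NO_CACHE,
 * while ioremap_flags() lets the caller add bits such as _PAGE_GUARDED
 * for strictly ordered device access on powerpc.  The helper name is a
 * placeholder.
 */
static void __iomem *example_map_device(phys_addr_t base, unsigned long len)
{
	/* uncached and guarded: no speculative or reordered accesses */
	return ioremap_flags(base, len, _PAGE_NO_CACHE | _PAGE_GUARDED);
}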