/*
 * Create and attach the fbd(4) child for the BCM2835 framebuffer.
 *
 * Queries the firmware for the framebuffer configuration, maps the
 * framebuffer memory, fills in sc->info for the vt/fbd layers and
 * attaches an "fbd" child device.  Returns 0 on success or an errno.
 */
static int
bcm_fb_setup_fbd(struct bcmsc_softc *sc)
{
	struct bcm2835_fb_config fb;
	device_t fbd;
	int err;

	/* Negotiate the framebuffer geometry with the firmware. */
	err = bcm_fb_init(sc, &fb);
	if (err)
		return (err);

	memset(&sc->info, 0, sizeof(sc->info));
	sc->info.fb_name = device_get_nameunit(sc->dev);

	/* Map the framebuffer physical range into kernel VA. */
	sc->info.fb_vbase = (intptr_t)pmap_mapdev(fb.base, fb.size);
	sc->info.fb_pbase = fb.base;
	sc->info.fb_size = fb.size;
	sc->info.fb_bpp = sc->info.fb_depth = fb.bpp;
	sc->info.fb_stride = fb.pitch;
	sc->info.fb_width = fb.xres;
	sc->info.fb_height = fb.yres;
	sc->info.fb_flags = FB_FLAG_MEMATTR;
	sc->info.fb_memattr = VM_MEMATTR_WRITE_COMBINING;

	/*
	 * When byte swapping is requested, install a console palette
	 * with the red/blue channel offsets exchanged for 24/32bpp.
	 */
	if (sc->fbswap) {
		switch (sc->info.fb_bpp) {
		case 24:
			vt_generate_cons_palette(sc->info.fb_cmap,
			    COLOR_FORMAT_RGB, 0xff, 0, 0xff, 8, 0xff, 16);
			sc->info.fb_cmsize = 16;
			break;
		case 32:
			vt_generate_cons_palette(sc->info.fb_cmap,
			    COLOR_FORMAT_RGB, 0xff, 16, 0xff, 8, 0xff, 0);
			sc->info.fb_cmsize = 16;
			break;
		}
	}

	/* Attach the generic fbd child; undo the mapping on failure. */
	fbd = device_add_child(sc->dev, "fbd", device_get_unit(sc->dev));
	if (fbd == NULL) {
		device_printf(sc->dev, "Failed to add fbd child\n");
		pmap_unmapdev(sc->info.fb_vbase, sc->info.fb_size);
		return (ENXIO);
	} else if (device_probe_and_attach(fbd) != 0) {
		device_printf(sc->dev, "Failed to attach fbd device\n");
		device_delete_child(sc->dev, fbd);
		pmap_unmapdev(sc->info.fb_vbase, sc->info.fb_size);
		return (ENXIO);
	}

	device_printf(sc->dev, "%dx%d(%dx%d@%d,%d) %dbpp\n", fb.xres, fb.yres,
	    fb.vxres, fb.vyres, fb.xoffset, fb.yoffset, fb.bpp);
	device_printf(sc->dev,
	    "fbswap: %d, pitch %d, base 0x%08x, screen_size %d\n",
	    sc->fbswap, fb.pitch, fb.base, fb.size);

	return (0);
}
/*
 * Tear down a bus-space mapping.  The tag is irrelevant on this
 * platform; the handle is the kernel VA that was handed out when the
 * range was mapped, so undoing the pmap mapping is all that is needed.
 */
void
bus_space_unmap(bus_space_tag_t bst __unused, bus_space_handle_t bsh,
    bus_size_t size)
{
	vm_offset_t va;

	va = (vm_offset_t)bsh;
	pmap_unmapdev(va, size);
}
static void xbox_init(void) { char* ptr; if (!arch_i386_is_xbox) return; /* register our poweroff function */ EVENTHANDLER_REGISTER (shutdown_final, xbox_poweroff, NULL, SHUTDOWN_PRI_LAST); /* * Some XBOX loaders, such as Cromwell, have a flaw which cause the * nve(4) driver to fail attaching to the NIC. * * This is because they leave the NIC running; this will cause the * Nvidia driver to fail as the NIC does not return any sensible * values and thus fails attaching (using an error 0x5, this means * it cannot find a valid PHY) * * We bluntly tell the NIC to stop whatever it's doing; this makes * nve(4) attach correctly. As the NIC always resides at * 0xfef00000-0xfef003ff on an XBOX, we simply hardcode this address. */ ptr = pmap_mapdev (0xfef00000, 0x400); *(uint32_t*)(ptr + 0x188) = 0; /* clear adapter control field */ pmap_unmapdev ((vm_offset_t)ptr, 0x400); }
/*
 * Release a BIOS mapping handed out by the matching map routine.
 *
 * For the boot display on x86/ia64 the BIOS is a shadow copy that was
 * mapped directly with pmap_mapdev(); otherwise the mapping came from
 * the PCIR_BIOS resource and is released through the resource layer.
 */
void
vga_pci_unmap_bios(device_t dev, void *bios)
{
	struct vga_resource *vr;

	if (bios == NULL) {
		return;
	}

#if defined(__amd64__) || defined(__i386__) || defined(__ia64__)
	if (vga_pci_is_boot_display(dev)) {
		/* We mapped the BIOS shadow copy located at 0xC0000. */
		pmap_unmapdev((vm_offset_t)bios, VGA_PCI_BIOS_SHADOW_SIZE);
		return;
	}
#endif

	/*
	 * Look up the PCIR_BIOS resource in our softc.  It should match
	 * the address we returned previously.
	 */
	vr = lookup_res(device_get_softc(dev), PCIR_BIOS);
	KASSERT(vr->vr_res != NULL, ("vga_pci_unmap_bios: bios not mapped"));
	KASSERT(rman_get_virtual(vr->vr_res) == bios,
	    ("vga_pci_unmap_bios: mismatch"));
	vga_pci_release_resource(dev, NULL, SYS_RES_MEMORY, PCIR_BIOS,
	    vr->vr_res);
}
/*
 * Attach the on-chip OTP controller: allocate its register window and
 * retire the temporary boot-time mapping once the bus resource is live.
 */
static int
ocotp_attach(device_t dev)
{
	struct ocotp_softc *sc;
	int err, rid;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Allocate bus_space resources. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "Cannot allocate memory resources\n");
		err = ENXIO;
		goto out;
	}

	/* Publish the softc for early consumers of the OTP registers. */
	ocotp_sc = sc;

	/* We're done with the temporary mapping now. */
	if (ocotp_regs != NULL)
		pmap_unmapdev((vm_offset_t)ocotp_regs, ocotp_size);

	err = 0;
out:
	/* On any failure, detach undoes the partial setup above. */
	if (err != 0)
		ocotp_detach(dev);
	return (err);
}
void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev) { if (!map->handle || !map->size) return; pmap_unmapdev((vm_offset_t) map->handle, map->size); }
/*
 * ACPI OSL: read an 8/16/32-bit value from physical memory.
 *
 * The target range is mapped temporarily, read through a volatile
 * pointer of the requested width, and unmapped again.
 *
 * Fix: the original switch had no default case, so an unsupported
 * Width (e.g. 0 or 64) would map Width/8 bytes, leave *Value
 * uninitialized and still return AE_OK.  Validate the width up front
 * and fail with AE_BAD_PARAMETER instead.
 */
ACPI_STATUS
AcpiOsReadMemory(ACPI_PHYSICAL_ADDRESS Address, UINT32 *Value, UINT32 Width)
{
	void *LogicalAddress;

	/* Only the widths we can store into a UINT32 are supported. */
	if (Width != 8 && Width != 16 && Width != 32)
		return (AE_BAD_PARAMETER);

	LogicalAddress = pmap_mapdev(Address, Width / 8);
	if (LogicalAddress == NULL)
		return (AE_NOT_EXIST);

	switch (Width) {
	case 8:
		*Value = *(volatile uint8_t *)LogicalAddress;
		break;
	case 16:
		*Value = *(volatile uint16_t *)LogicalAddress;
		break;
	case 32:
		*Value = *(volatile uint32_t *)LogicalAddress;
		break;
	}

	pmap_unmapdev((vm_offset_t)LogicalAddress, Width / 8);

	return (AE_OK);
}
/*
 * Tear down a bus-space mapping.  Only memory-space handles carry a
 * pmap mapping; I/O port handles are plain port numbers and need no
 * teardown.
 */
void
bus_space_unmap(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t size)
{
	if (tag != X86_BUS_SPACE_MEM)
		return;

	pmap_unmapdev(bsh, size);
}
/*
 * Deactivate a nexus-managed resource.
 *
 * Memory and I/O port resources had a bus-space/pmap mapping created
 * at activation time which is torn down here before the rman state is
 * updated; IRQ resources are deactivated through the INTRNG layer
 * when it is compiled in.
 */
static int
nexus_deactivate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
	bus_size_t psize;
	bus_space_handle_t vaddr;

	if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
		psize = (bus_size_t)rman_get_size(r);
		vaddr = rman_get_bushandle(r);
		if (vaddr != 0) {
#ifdef FDT
			bus_space_unmap(fdtbus_bs_tag, vaddr, psize);
#else
			pmap_unmapdev((vm_offset_t)vaddr, (vm_size_t)psize);
#endif
			/* Clear the stale mapping info from the resource. */
			rman_set_virtual(r, NULL);
			rman_set_bushandle(r, 0);
		}
	} else if (type == SYS_RES_IRQ) {
#ifdef INTRNG
		intr_deactivate_irq(child, r);
#endif
	}
	return (rman_deactivate_resource(r));
}
/*
 * Locate the ACPI RSDP and record which root table (RSDT or XSDT)
 * later lookups should walk.
 *
 * The RSDP is searched for first in the EBDA window (when the EBDA
 * address is known) and then in the high BIOS read-only area.  On
 * success, sdt_search_func/sdt_search_paddr are set according to the
 * RSDP revision; the temporary search mapping is dropped on all paths.
 */
static void
sdt_probe(void)
{
	const ACPI_TABLE_RSDP *rsdp;
	vm_size_t mapsz;
	uint8_t *ptr;

	if (ebda_addr != 0) {
		mapsz = ACPI_EBDA_WINDOW_SIZE;
		ptr = pmap_mapdev(ebda_addr, mapsz);
		rsdp = sdt_rsdp_search(ptr, mapsz);
		if (rsdp == NULL) {
			SDT_VPRINTF("RSDP not in EBDA\n");
			pmap_unmapdev((vm_offset_t)ptr, mapsz);
			ptr = NULL;
			mapsz = 0;
		} else {
			SDT_VPRINTF("RSDP in EBDA\n");
			goto found_rsdp;
		}
	}

	/* Fall back to the conventional high BIOS window. */
	mapsz = ACPI_HI_RSDP_WINDOW_SIZE;
	ptr = pmap_mapdev(ACPI_HI_RSDP_WINDOW_BASE, mapsz);
	rsdp = sdt_rsdp_search(ptr, mapsz);
	if (rsdp == NULL) {
		kprintf("sdt_probe: no RSDP\n");
		pmap_unmapdev((vm_offset_t)ptr, mapsz);
		return;
	} else {
		SDT_VPRINTF("RSDP in BIOS mem\n");
	}

found_rsdp:
	/* Revision 2 RSDPs provide an XSDT; older ones only an RSDT. */
	if (rsdp->Revision != 2 /* || AcpiGbl_DoNotUseXsdt */) {
		sdt_search_func = sdt_search_rsdt;
		sdt_search_paddr = rsdp->RsdtPhysicalAddress;
	} else {
		sdt_search_func = sdt_search_xsdt;
		sdt_search_paddr = rsdp->XsdtPhysicalAddress;
	}
	pmap_unmapdev((vm_offset_t)ptr, mapsz);
}
/*
 * Locate the ACPI RSDP and record which root table (RSDT or XSDT)
 * later lookups should walk.
 *
 * The RSDP is searched for first in the EBDA window (when the EBDA
 * address is known) and then in the high BIOS read-only area.  On
 * success, sdt_search_func/sdt_search_paddr are set according to the
 * RSDP revision; the temporary search mapping is dropped on all paths.
 */
static void
sdt_probe(void)
{
	const struct acpi_rsdp *rsdp;
	vm_size_t mapsz;
	uint8_t *ptr;

	if (ebda_addr != 0) {
		mapsz = ACPI_RSDP_EBDA_MAPSZ;
		ptr = pmap_mapdev(ebda_addr, mapsz);
		rsdp = sdt_rsdp_search(ptr, mapsz);
		if (rsdp == NULL) {
			SDT_VPRINTF("RSDP not in EBDA\n");
			pmap_unmapdev((vm_offset_t)ptr, mapsz);
			ptr = NULL;
			mapsz = 0;
		} else {
			SDT_VPRINTF("RSDP in EBDA\n");
			goto found_rsdp;
		}
	}

	/* Fall back to the conventional high BIOS window. */
	mapsz = ACPI_RSDP_BIOS_MAPSZ;
	ptr = pmap_mapdev(ACPI_RSDP_BIOS_MAPADDR, mapsz);
	rsdp = sdt_rsdp_search(ptr, mapsz);
	if (rsdp == NULL) {
		kprintf("sdt_probe: no RSDP\n");
		pmap_unmapdev((vm_offset_t)ptr, mapsz);
		return;
	} else {
		SDT_VPRINTF("RSDP in BIOS mem\n");
	}

found_rsdp:
	/* Revision 2 RSDPs provide an XSDT; older ones only an RSDT. */
	if (rsdp->rsdp_rev != 2) {
		sdt_search_func = sdt_search_rsdt;
		sdt_search_paddr = rsdp->rsdp_rsdt;
	} else {
		sdt_search_func = sdt_search_xsdt;
		sdt_search_paddr = rsdp->rsdp_xsdt;
	}
	pmap_unmapdev((vm_offset_t)ptr, mapsz);
}
/*
 * Tear down an AT91 bus-space mapping.  Addresses inside the
 * AT91_BASE..AT91_BASE+0xff00000 window are part of a permanent
 * mapping and must not be removed; everything else was mapped
 * dynamically and is released here.
 */
static void
at91_bs_unmap(bus_space_tag_t tag, bus_space_handle_t h, bus_size_t size)
{
	vm_offset_t va = (vm_offset_t)h;

	if (va < AT91_BASE || va > AT91_BASE + 0xff00000)
		pmap_unmapdev(va, size);
}
/*
 * Deactivate a nexus resource.  Memory resources were mapped into KVA
 * at activation time; undo that mapping before updating the rman state.
 */
static int
nexus_deactivate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
	vm_offset_t va;

	if (type == SYS_RES_MEMORY) {
		va = (vm_offset_t)rman_get_virtual(r);
		pmap_unmapdev(va, rman_get_size(r));
	}
	return (rman_deactivate_resource(r));
}
/*
 * Undo the local-bus controller bank setup: for every configured bank
 * disable its LAW window and drop the kernel mapping of its range.
 * Banks with a zero size were never configured and are skipped.
 */
static void
lbc_banks_unmap(struct lbc_softc *sc)
{
	int bank;

	for (bank = 0; bank < LBC_DEV_MAX; bank++) {
		if (sc->sc_banks[bank].size != 0) {
			law_disable(OCP85XX_TGTIF_LBC, sc->sc_banks[bank].pa,
			    sc->sc_banks[bank].size);
			pmap_unmapdev(sc->sc_banks[bank].va,
			    sc->sc_banks[bank].size);
		}
	}
}
/*
 * Deactivate a nexus resource.  Memory resources carry a device
 * mapping established at activation; tear it down here before letting
 * rman mark the resource inactive.
 */
static int
nexus_deactivate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
	vm_offset_t va;

	if (type == SYS_RES_MEMORY) {
		va = (vm_offset_t)rman_get_virtual(r);
		pmap_unmapdev(va, rman_get_size(r));
	}

	return (rman_deactivate_resource(r));
}
/*
 * Map a full ACPI system description table given its physical address.
 *
 * The header is mapped first to learn the table's total length, that
 * temporary mapping is dropped, and then the whole table is mapped.
 * Returns NULL when the advertised length is smaller than a header,
 * i.e. the table is clearly malformed.  The caller unmaps the result.
 */
void *
sdt_sdth_map(vm_paddr_t paddr)
{
	struct acpi_sdth *sdth;
	vm_size_t mapsz;

	sdth = pmap_mapdev(paddr, sizeof(*sdth));
	mapsz = sdth->sdth_len;
	pmap_unmapdev((vm_offset_t)sdth, sizeof(*sdth));

	if (mapsz < sizeof(*sdth))
		return NULL;
	return pmap_mapdev(paddr, mapsz);
}
/*
 * Map a full ACPI system description table given its physical address.
 *
 * The header is mapped first to learn the table's total length, that
 * temporary mapping is dropped, and then the whole table is mapped.
 * Returns NULL when the advertised length is smaller than a header,
 * i.e. the table is clearly malformed.  The caller unmaps the result.
 */
void *
sdt_sdth_map(vm_paddr_t paddr)
{
	ACPI_TABLE_HEADER *sdth;
	vm_size_t mapsz;

	sdth = pmap_mapdev(paddr, sizeof(*sdth));
	mapsz = sdth->Length;
	pmap_unmapdev((vm_offset_t)sdth, sizeof(*sdth));

	if (mapsz < sizeof(*sdth))
		return NULL;
	return pmap_mapdev(paddr, mapsz);
}
/*
 * Detach the core memory controller: remove the ECC and thermal child
 * devices, detach the rest of the bus hierarchy, and drop the MCH
 * register window mapping if one was established.
 */
static int
coremctl_detach(device_t dev)
{
	struct coremctl_softc *sc;

	sc = device_get_softc(dev);

	if (sc->sc_ecc != NULL)
		device_delete_child(dev, sc->sc_ecc);
	if (sc->sc_temp != NULL)
		device_delete_child(dev, sc->sc_temp);
	bus_generic_detach(dev);

	if (sc->sc_mch != NULL)
		pmap_unmapdev((vm_offset_t)sc->sc_mch, MCH_CORE_SIZE);

	return 0;
}
/*
 * Deactivate a ZBbus PCI resource.  Only I/O port resources are
 * handled locally (their activation created a pmap mapping that must
 * be undone); everything else goes to the generic bus method.
 */
static int
zbpci_deactivate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
	if (type == SYS_RES_IOPORT) {
		pmap_unmapdev((vm_offset_t)rman_get_virtual(r),
		    rman_get_size(r));
		return (rman_deactivate_resource(r));
	}

	return (bus_generic_deactivate_resource(bus, child, type, rid, r));
}
/*
 * Undo the local-bus controller range setup: unmap each configured
 * range and disable its LAW window.  Ranges are packed from index 0,
 * so the first zero-sized entry marks the end of the configured set
 * and stops the scan.
 */
static void
lbc_banks_unmap(struct lbc_softc *sc)
{
	int idx;

	for (idx = 0; idx < LBC_DEV_MAX; idx++) {
		if (sc->sc_range[idx].size == 0)
			break;

		pmap_unmapdev(sc->sc_range[idx].kva, sc->sc_range[idx].size);
		law_disable(OCP85XX_TGTIF_LBC, sc->sc_range[idx].addr,
		    sc->sc_range[idx].size);
	}
}
/*
 * Deactivate a nexus resource.
 *
 * Memory ranges above 1MB were mapped with pmap_mapdev() when the
 * resource was activated and are unmapped here; ISA-range memory
 * below 1MB and I/O ports carry no pmap mapping.
 *
 * NOTE(review): psize is computed as r_end - r_start, although rman
 * ranges are normally inclusive (size = end - start + 1).  Presumably
 * this mirrors the computation used at activation time — verify
 * against the matching activate routine before changing it.
 */
static int
nexus_deactivate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
	/*
	 * If this is a memory resource, unmap it.
	 */
	if ((rman_get_bustag(r) == I386_BUS_SPACE_MEM) &&
	    (r->r_end >= 1024 * 1024)) {
		u_int32_t psize;

		psize = r->r_end - r->r_start;
		pmap_unmapdev((vm_offset_t)rman_get_virtual(r), psize);
	}
	return (rman_deactivate_resource(r));
}
/*
 * Deactivate an OFW PCI resource.  Memory and I/O port windows were
 * mapped into KVA at activation time; undo that mapping before the
 * rman bookkeeping is updated.
 */
static int
ofw_pci_deactivate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *res)
{
	u_int32_t psize;

	if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
		psize = rman_get_size(res);
		pmap_unmapdev((vm_offset_t)rman_get_virtual(res), psize);
	}

	return (rman_deactivate_resource(res));
}
/*
 * Deactivate a nexus resource.  Memory and I/O port ranges carry a
 * device mapping established at activation time; tear it down before
 * letting rman mark the resource inactive.
 */
static int
nexus_deactivate_resource(device_t bus __unused, device_t child __unused,
    int type __unused, int rid __unused, struct resource *r)
{
	bus_size_t psize;

	switch (type) {
	case SYS_RES_MEMORY:
	case SYS_RES_IOPORT:
		psize = rman_get_size(r);
		pmap_unmapdev((vm_offset_t)rman_get_virtual(r), psize);
		break;
	default:
		break;
	}

	return (rman_deactivate_resource(r));
}
/*
 * Release a mapping created by drm_ioremap().
 *
 * FreeBSD: the handle is a pmap_mapdev() VA, so a single unmap call
 * suffices.  NetBSD: mappings are reference counted through the
 * fullmap bookkeeping, and the underlying bus-space mapping is only
 * torn down when the last user goes away.
 */
void
drm_ioremapfree(drm_local_map_t *map)
{
#if defined(__FreeBSD__)
	pmap_unmapdev((vm_offset_t) map->handle, map->size);
#elif defined(__NetBSD__)
	if (map->fullmap == NULL) {
		DRM_INFO("drm_ioremapfree called for unknown map\n");
		return;
	}

	if (map->fullmap->mapped > 0) {
		map->fullmap->mapped--;
		if (map->fullmap->mapped == 0)
			bus_space_unmap(map->bst, map->fullmap->bsh,
			    map->fullmap->size);
	}
#endif
}
/*
 * Deactivate a nexus resource: drop the bus-space (or pmap) mapping
 * recorded in the resource's bushandle, clear the now-stale mapping
 * info from the resource, and update the rman state.
 */
static int
nexus_deactivate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
	bus_size_t psize;
	bus_space_handle_t vaddr;

	psize = (bus_size_t)rman_get_size(r);
	vaddr = rman_get_bushandle(r);

	if (vaddr != 0) {
#ifdef FDT
		bus_space_unmap(fdtbus_bs_tag, vaddr, psize);
#else
		pmap_unmapdev((vm_offset_t)vaddr, (vm_size_t)psize);
#endif
		/* Invalidate the cached VA/handle in the resource. */
		rman_set_virtual(r, NULL);
		rman_set_bushandle(r, 0);
	}

	return (rman_deactivate_resource(r));
}
/*
 * Deactivate a nexus resource.  Memory resources were mapped with
 * pmap_mapdev() and are unmapped here; on PC98 the bus-space handle is
 * itself an allocated object that must additionally be freed.
 */
static int
nexus_deactivate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
	/*
	 * If this is a memory resource, unmap it.
	 */
	if (type == SYS_RES_MEMORY) {
		pmap_unmapdev((vm_offset_t)rman_get_virtual(r),
		    rman_get_size(r));
	}
#ifdef PC98
	if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
		bus_space_handle_t bh;

		/* PC98 handles carry their own allocation size. */
		bh = rman_get_bushandle(r);
		i386_bus_space_handle_free(rman_get_bustag(r), bh, bh->bsh_sz);
	}
#endif
	return (rman_deactivate_resource(r));
}
/* ARGSUSED */
/*
 * Read/write handler for /dev/mem and /dev/kmem.
 *
 * Transfers proceed at most one page at a time.  /dev/kmem takes
 * kernel virtual addresses: direct-map addresses are used as-is,
 * other KVAs are validated with kernacc() and translated to physical
 * via pmap_extract(), then handled like /dev/mem.  /dev/mem takes
 * physical addresses: the direct map is used when the address is
 * below dmaplimit, otherwise a temporary private mapping is made.
 */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	void *p;
	ssize_t orig_resid;
	u_long v, vd;
	u_int c;
	int error;

	error = 0;
	orig_resid = uio->uio_resid;
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			/* Skip exhausted iovecs. */
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		v = uio->uio_offset;
		/* Clamp so the transfer never crosses a page boundary. */
		c = ulmin(iov->iov_len, PAGE_SIZE - (u_int)(v & PAGE_MASK));
		switch (dev2unit(dev)) {
		case CDEV_MINOR_KMEM:
			/*
			 * Since c is clamped to be less or equal than
			 * PAGE_SIZE, the uiomove() call does not
			 * access past the end of the direct map.
			 */
			if (v >= DMAP_MIN_ADDRESS &&
			    v < DMAP_MIN_ADDRESS + dmaplimit) {
				error = uiomove((void *)v, c, uio);
				break;
			}

			if (!kernacc((void *)v, c, uio->uio_rw == UIO_READ ?
			    VM_PROT_READ : VM_PROT_WRITE)) {
				error = EFAULT;
				break;
			}

			/*
			 * If the extracted address is not accessible
			 * through the direct map, then we make a
			 * private (uncached) mapping because we can't
			 * depend on the existing kernel mapping
			 * remaining valid until the completion of
			 * uiomove().
			 *
			 * XXX We cannot provide access to the
			 * physical page 0 mapped into KVA.
			 */
			v = pmap_extract(kernel_pmap, v);
			if (v == 0) {
				error = EFAULT;
				break;
			}
			/* FALLTHROUGH */
		case CDEV_MINOR_MEM:
			if (v < dmaplimit) {
				vd = PHYS_TO_DMAP(v);
				error = uiomove((void *)vd, c, uio);
				break;
			}
			if (v > cpu_getmaxphyaddr()) {
				error = EFAULT;
				break;
			}
			/* Beyond the direct map: use a transient mapping. */
			p = pmap_mapdev(v, PAGE_SIZE);
			error = uiomove(p, c, uio);
			pmap_unmapdev((vm_offset_t)p, PAGE_SIZE);
			break;
		}
	}
	/*
	 * Don't return error if any byte was written. Read and write
	 * can return error only if no i/o was performed.
	 */
	if (uio->uio_resid != orig_resid)
		error = 0;
	return (error);
}
void sdt_sdth_unmap(struct acpi_sdth *sdth) { pmap_unmapdev((vm_offset_t)sdth, sdth->sdth_len); }
/*
 * Generic bus-space unmap: the handle is the kernel VA returned when
 * the range was mapped, so undoing the pmap mapping is sufficient.
 */
void
generic_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
{
	vm_offset_t va;

	va = (vm_offset_t)h;
	pmap_unmapdev(va, size);
}
/*
 * Attach the E3-1200 memory controller ECC monitor.
 *
 * Reads the capability register to report the supported DDR3 speed
 * and whether ECC is enabled, maps the MCHBAR register window to check
 * that at least one DIMM channel actually runs with ECC, and finally
 * starts the periodic error-polling callout.
 */
static int
ecc_e31200_attach(device_t dev)
{
	struct ecc_e31200_softc *sc = device_get_softc(dev);
	uint32_t capa, dmfc, mch_barlo, mch_barhi;
	uint64_t mch_bar;
	int bus, slot;

	dev = sc->ecc_device; /* XXX */

	bus = pci_get_bus(dev);
	slot = pci_get_slot(dev);

	capa = pcib_read_config(dev, bus, slot, 0, PCI_E31200_CAPID0_A, 4);

	/* Decode the DRAM maximum frequency capability field. */
	dmfc = __SHIFTOUT(capa, PCI_E31200_CAPID0_A_DMFC);
	if (dmfc == PCI_E31200_CAPID0_A_DMFC_1333) {
		ecc_printf(sc, "CAP DDR3 1333 ");
	} else if (dmfc == PCI_E31200_CAPID0_A_DMFC_1067) {
		ecc_printf(sc, "CAP DDR3 1067 ");
	} else if (dmfc == PCI_E31200_CAPID0_A_DMFC_ALL) {
		ecc_printf(sc, "no CAP ");
	} else {
		ecc_printf(sc, "unknown DMFC %#x\n", dmfc);
		return 0;
	}

	if (capa & PCI_E31200_CAPID0_A_ECCDIS) {
		kprintf("NON-ECC\n");
		return 0;
	} else {
		kprintf("ECC\n");
	}

	/* The MCHBAR is split across two 32-bit config registers. */
	mch_barlo = pcib_read_config(dev, bus, slot, 0,
	    PCI_E31200_MCHBAR_LO, 4);
	mch_barhi = pcib_read_config(dev, bus, slot, 0,
	    PCI_E31200_MCHBAR_HI, 4);

	mch_bar = (uint64_t)mch_barlo | (((uint64_t)mch_barhi) << 32);
	if (bootverbose)
		ecc_printf(sc, "MCHBAR %jx\n", (uintmax_t)mch_bar);

	if (mch_bar & PCI_E31200_MCHBAR_LO_EN) {
		uint64_t map_addr = mch_bar & PCI_E31200_MCHBAR_ADDRMASK;
		uint32_t dimm_ch0, dimm_ch1;

		sc->ecc_addr = pmap_mapdev_uncacheable(map_addr,
		    MCH_E31200_SIZE);

		if (bootverbose) {
			ecc_printf(sc, "LOG0_C0 %#x\n",
			    CSR_READ_4(sc, MCH_E31200_ERRLOG0_C0));
			ecc_printf(sc, "LOG0_C1 %#x\n",
			    CSR_READ_4(sc, MCH_E31200_ERRLOG0_C1));
		}

		dimm_ch0 = CSR_READ_4(sc, MCH_E31200_DIMM_CH0);
		dimm_ch1 = CSR_READ_4(sc, MCH_E31200_DIMM_CH1);

		if (bootverbose) {
			ecc_e31200_chaninfo(sc, dimm_ch0, "channel0");
			ecc_e31200_chaninfo(sc, dimm_ch1, "channel1");
		}

		/*
		 * If neither channel has ECC active, drop the MCHBAR
		 * mapping and bail out without starting the callout.
		 */
		if (((dimm_ch0 | dimm_ch1) & MCH_E31200_DIMM_ECC) == 0) {
			ecc_printf(sc, "No ECC active\n");
			pmap_unmapdev((vm_offset_t)sc->ecc_addr,
			    MCH_E31200_SIZE);
			return 0;
		}
	}

	ecc_e31200_status(sc);
	callout_init_mp(&sc->ecc_callout);
	callout_reset(&sc->ecc_callout, hz, ecc_e31200_callout, sc);

	return 0;
}