/*
 * Map (or, with RDR, register and map) the persistent HiSilicon log
 * buffers and publish their virtual addresses through the file-scope
 * log_buf_info / res_log_buf / exception_log_buf pointers.
 * Always returns 0 and sets hilog_loaded.
 */
static int __init hisi_log_init(void)
{
	int rc = 0;
#ifdef CONFIG_HISI_RDR
	u32 rdr_int = 0;

	/* Register each buffer with the RDR allocator, then translate the
	 * returned token into a kernel address via field_addr(). */
	RDR_ASSERT(rdr_afreg(rdr_int, RDR_LOG_BUF_INFO, RDR_STR, LOG_INFO_BUF_LEN));
	log_buf_info = (volatile log_buffer_head *)field_addr(u32, rdr_int);

	RDR_ASSERT(rdr_afreg(rdr_int, RDR_LOG_KERNEL_BUF, RDR_STR, KERNEL_LOG_BUF_LEN));
	res_log_buf = (volatile unsigned char *)field_addr(u32, rdr_int);

	RDR_ASSERT(rdr_afreg(rdr_int, RDR_LOG_EXCEPTION_BUF, RDR_STR, EXCEPTION_LOG_BUF_LEN));
	exception_log_buf = (volatile unsigned char *) field_addr(u32, rdr_int);
#else
	/* No RDR: map the fixed physical regions write-combined.
	 * NOTE(review): the ioremap_wc() results are not NULL-checked; a
	 * failed mapping would be dereferenced later — confirm intentional. */
	log_buf_info = (volatile log_buffer_head *)ioremap_wc((volatile uint32_t *)HISI_LOG_INFO_BASE, LOG_INFO_BUF_LEN);
	res_log_buf = (volatile unsigned char *)ioremap_wc((volatile uint32_t *)HISI_KERNEL_LOG_BASE, KERNEL_LOG_BUF_LEN);
	exception_log_buf = (volatile unsigned char *)ioremap_wc((volatile uint32_t *)HISI_EXCEPTION_LOG_BASE, EXCEPTION_LOG_BUF_LEN);
	printk("log_buf_info is 0x%p\n", log_buf_info);
	printk("res_log_buf is 0x%p\n", res_log_buf);
#endif
	pr_info("hisi_log_init ok\n");
	hilog_loaded = 1;
	return rc;
}
/*
 * Map the BSP shared-memory windows — on-chip SRAM (unless it is secure)
 * and the shared DDR region — write-combined, recording physical address,
 * size and virtual address in g_mem_ctrl. Always returns 0.
 */
static int __init bsp_shared_mem_init(void)
{
#ifndef CONFIG_SRAM_SECURE
	/* SRAM window: skipped entirely when the SRAM is secure. */
	g_mem_ctrl.sram_phy_addr = (void *)HI_SRAM_MEM_ADDR;
	g_mem_ctrl.sram_mem_size = HI_SRAM_SIZE;
	g_mem_ctrl.sram_virt_addr =
		(void *)ioremap_wc((unsigned long)g_mem_ctrl.sram_phy_addr,
				   g_mem_ctrl.sram_mem_size);
#endif

	/* Shared DDR window, always mapped. */
	g_mem_ctrl.sddr_phy_addr = (void *)HI_SHARED_DDR_BASE_ADDR;
	g_mem_ctrl.sddr_mem_size = HI_SHARED_DDR_SIZE;
	g_mem_ctrl.sddr_virt_addr =
		(void *)ioremap_wc((unsigned long)g_mem_ctrl.sddr_phy_addr,
				   g_mem_ctrl.sddr_mem_size);

	s_mem_pr_err("ok!\n");
	return 0;
}
/*
 * Map the platform-reserved framebuffer memory for one FIMD window
 * (virtual-framebuffer config only); otherwise defer to
 * s3cfb_map_video_memory(). Returns 0 on success, -ENOMEM if the
 * reserved region cannot be mapped.
 */
static int s3cfb_map_default_video_memory(struct fb_info *fb)
{
#if defined(CONFIG_FB_S3C_VIRTUAL)
	struct fb_fix_screeninfo *fix = &fb->fix;
	struct s3cfb_window *win = fb->par;
	/* NOTE(review): fbdev is not declared here — presumably a file-scope
	 * global; confirm it is initialised before this runs. */
	struct s3c_platform_fb *pdata = to_fb_plat(fbdev->dev);

	/* Memory owned by another driver: nothing to map. */
	if (win->owner == DMA_MEM_OTHER)
		return 0;

	fix->smem_start = pdata->pmem_start[win->id];
	fb->screen_base = ioremap_wc(fix->smem_start, pdata->pmem_size[win->id]);
	if (!fb->screen_base)
		return -ENOMEM;
	else
		dev_info(fbdev->dev, "[fb%d] dma: 0x%08x, cpu: 0x%08x, "
			"size: 0x%08x\n", win->id,
			(unsigned int)fix->smem_start,
			(unsigned int)fb->screen_base, fix->smem_len);

	/* NOTE(review): fix->smem_len is never assigned on this path; the
	 * memset assumes the caller set it and that it does not exceed
	 * pdata->pmem_size[win->id] — confirm against the call site. */
	memset(fb->screen_base, 0, fix->smem_len);
	win->owner = DMA_MEM_FIMD;
#else
	s3cfb_map_video_memory(fb);
#endif
	return 0;
}
static bool hifi_check_img_loaded(void) { #ifdef PLATFORM_HI3XXX bool dsp_loaded = false; unsigned int* img_loaded = (unsigned int*)ioremap_wc(DRV_DSP_LOADED_INDICATE, 0x4); if (NULL == img_loaded) { loge("remap hifi img loaded fail.\n"); g_om_data.dsp_loaded_sign = 0xffffffff; return false; } g_om_data.dsp_loaded_sign = *img_loaded; if (0xA5A55A5A == g_om_data.dsp_loaded_sign) { loge("hifi img is not be loaded.\n"); } else if (g_om_data.dsp_loaded_sign > 0) { loge("hifi img is loaded fail: 0x%x.\n", g_om_data.dsp_loaded_sign); } else { logi("hifi img be loaded.\n"); dsp_loaded = true; } iounmap(img_loaded); return dsp_loaded; #endif #ifdef PLATFORM_HI6XXX return true; #endif }
/**
 * pci_iomap_wc_range - create a virtual WC mapping cookie for a PCI BAR
 * @dev: PCI device that owns the BAR
 * @bar: BAR number
 * @offset: map memory at the given offset in BAR
 * @maxlen: max length of the memory to map; pass %0 to map up to the end
 *
 * Returns a __iomem cookie for the requested BAR region, usable with
 * ioread*()/iowrite*(), mapped write-combined when the resource is MMIO.
 * I/O-port BARs cannot be write-combined and yield NULL, as do empty
 * BARs and offsets at or beyond the BAR's length.
 */
void __iomem *pci_iomap_wc_range(struct pci_dev *dev, int bar,
				 unsigned long offset, unsigned long maxlen)
{
	unsigned long flags = pci_resource_flags(dev, bar);
	resource_size_t start = pci_resource_start(dev, bar);
	resource_size_t len = pci_resource_len(dev, bar);

	/* PIO space cannot be write-combined. */
	if (flags & IORESOURCE_IO)
		return NULL;

	/* Empty BAR, or offset past its end. */
	if (!start || len <= offset)
		return NULL;

	start += offset;
	len -= offset;
	if (maxlen && len > maxlen)
		len = maxlen;

	if (!(flags & IORESOURCE_MEM))
		return NULL;	/* neither IO nor MEM — nothing to map */

	return ioremap_wc(start, len);
}
/* ------------------------------------------------------------------------- */ static int omap3epfb_alloc_shmem(struct fb_info *info, struct omap3epfb_sharedbuf *buf) { int size = PAGE_ALIGN(OMAP3EPFB_SHARED_MEM_SIZE); int stat; BUG_ON(sizeof(dma_addr_t) != sizeof(uint32_t)); stat = omap_vram_alloc(OMAPFB_MEMTYPE_SDRAM, OMAP3EPFB_SHARED_MEM_SIZE, &buf->phys); if(stat) { dev_err(info->device, "Failed to allocate shared memory (phys): %d\n", stat); return -ENOMEM; } buf->p =(omap3epqe_rdwr_un *)ioremap_wc(buf->phys, size); if (!buf->p) { dev_err(info->device, "Failed to allocate shared memory (virt)\n"); omap_vram_free(buf->phys, size); return -ENOMEM; } buf->size = size; dev_dbg(info->device, "allocated %Zd bytes @%08lx / @%08lx (phys)\n", size, (unsigned long)buf->p, (unsigned long)buf->phys); memset(buf->p, 0, OMAP3EPFB_SHARED_MEM_SIZE); return 0; }
/*
 * Allocate a display buffer from OMAP VRAM with extra headroom of
 * 'offset_lines' scan lines in front of it, and WC-map the visible part.
 *
 * @info:         owning framebuffer device (logging only)
 * @buf:          out — physical/aligned/line addresses, mapping, size
 * @size:         visible buffer size in bytes
 * @offset_lines: number of leading scan lines to reserve before buf->phys
 *
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.
 */
static int omap3epfb_alloc_buffer(struct fb_info *info,
				  struct omap3epfb_buffer *buf,
				  size_t size, unsigned int offset_lines)
{
	int stat;
	void __iomem *virt;
	struct omap3epfb_par *par = info->par;
	/* Bytes occupied by the reserved lines (4 pixels per byte here). */
	long lines = par->mode.pxres * offset_lines / 4;
	unsigned long off = PAGE_ALIGN(lines);

	stat = omap_vram_alloc(OMAPFB_MEMTYPE_SDRAM, size + off,
			       &buf->phys_aligned);
	if (stat) {
		dev_err(info->device,
			"Failed to allocate memory (phys): %d\n", stat);
		return -ENOMEM;
	}

	buf->phys = buf->phys_aligned + off;
	buf->phys_lines = buf->phys - lines;

	virt = ioremap_wc(buf->phys, size);
	if (!virt) {
		dev_err(info->device, "Failed to allocate memory (virt)\n");
		/*
		 * BUGFIX: free exactly what was allocated (size + off);
		 * the old code freed only 'size', leaking 'off' bytes of
		 * VRAM on every mapping failure.
		 */
		omap_vram_free(buf->phys_aligned, size + off);
		return -ENOMEM;
	}

	buf->virt = virt;
	buf->size = size;

	dev_dbg(info->device, "allocated %Zd bytes @%08lx / @%08lx (phys)\n",
		size, (unsigned long)buf->virt, (unsigned long)buf->phys);

	return 0;
}
/*
 * Map the statically-reserved FIMD memory bank as this window's
 * framebuffer. Returns 0 on success (or when another driver owns the
 * memory), -ENOMEM if the reserved bank cannot be mapped.
 */
static int s3cfb_map_default_video_memory(struct fb_info *fb)
{
	struct fb_fix_screeninfo *fix = &fb->fix;
	struct s3cfb_window *win = fb->par;
	int reserved_size = 0;

	/* Memory owned by another driver: nothing to map. */
	if (win->owner == DMA_MEM_OTHER)
		return 0;

	/* Framebuffer comes from the platform-reserved FIMD bank 1. */
	fix->smem_start = s3c_get_media_memory_bank(S3C_MDEV_FIMD, 1);
	reserved_size = s3c_get_media_memsize_bank(S3C_MDEV_FIMD, 1);
	fb->screen_base = ioremap_wc(fix->smem_start, reserved_size);
	if (!fb->screen_base)
		return -ENOMEM;
	else
		/* NOTE(review): fbdev is not declared in this function —
		 * presumably a file-scope global; confirm. */
		dev_info(fbdev->dev, "[fb%d] dma: 0x%08x, cpu: 0x%08x, "
			"size: 0x%08x\n", win->id,
			(unsigned int)fix->smem_start,
			(unsigned int)fb->screen_base, fix->smem_len);

	/* NOTE(review): fix->smem_len is never assigned here; the memset
	 * relies on the caller having set it to <= reserved_size — confirm. */
	memset(fb->screen_base, 0, fix->smem_len);
	win->owner = DMA_MEM_FIMD;

	return 0;
}
/*
 * Lazily create the external-memory mspace allocator: size the region
 * (reserving a 32MB slack and capping at a third of vmalloc space), map
 * it, and hand the mapping to create_mspace_with_base(). BUG()s when no
 * external memory is configured. Idempotent once extmem_mspace is set.
 */
static void extmem_init(void)
{
	if (extmem_mspace == NULL) {
		/* Cap: one third of vmalloc space, rounded down to 32MB. */
		size_t extmem_vmalloc_limit = (VMALLOC_TOTAL/3)& ~(0x02000000-1);
		/* Keep 32MB (0x02000000) of headroom out of the mspace. */
		if (extmem_mspace_size > 0x02000000) {
			extmem_mspace_size -= 0x02000000;
		}
		else {
			/* Too small: pull in the LCA reserved region first. */
			extmem_get_lca_reserved_mem();
			if (extmem_mspace_size > 0x02000000) {
				extmem_mspace_size -= 0x02000000;
			}
		}
		if (extmem_mspace_size > extmem_vmalloc_limit) {
			printk(KERN_WARNING "[EXT_MEM] extmem_mspace_size: 0x%zx over limit: 0x%zx\n",
				extmem_mspace_size, extmem_vmalloc_limit);
			extmem_mspace_size = extmem_vmalloc_limit;
		}
		/* size_t: '<= 0' is effectively '== 0' — no memory at all. */
		if (extmem_mspace_size <= 0) {
			printk(KERN_ERR "[EXT_MEM] no extmem, need check config\n");
			BUG();
		}
#ifdef CONFIG_ARM64
		/* ARM64 has no ioremap_cached here; use write-combining. */
		extmem_mspace_base = (void*) ioremap_wc(extmem_phys_base, extmem_mspace_size);
#else
		extmem_mspace_base = (void*) ioremap_cached(extmem_phys_base, extmem_mspace_size);
#endif
		/* NOTE(review): the ioremap result is not NULL-checked before
		 * being handed to create_mspace_with_base — confirm. */
		extmem_mspace = create_mspace_with_base(extmem_mspace_base, extmem_mspace_size, 1);
		extmem_printk("[EXT_MEM] extmem_phys_base: %p, extmem_mspace_size: 0x%zx, extmem_mspace: %p\n",
			(void *)extmem_phys_base, extmem_mspace_size, extmem_mspace);
	}
}
int bcm21553_l2_evt_buf_init(void) { void *temp; /* Allocate one word of memory with the following property: * TEX,C,B = 100,0,0 * This memory is used for sw work-around for L2 quiesce. */ temp = dma_alloc_coherent(NULL, SZ_1K, (dma_addr_t *)&l2_evt_phys_buf, GFP_ATOMIC); if (!temp) { pr_err("%s: dma_alloc_coherent for L2 evict buf failed\n", __func__); return -ENOMEM; } l2_evt_virt_buf = (u32)ioremap_wc(l2_evt_phys_buf, 4); if (!l2_evt_virt_buf) { pr_err("%s: ioremap_wc for L2 evict buf failed\n", __func__); return -ENOMEM; } return 0; }
void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev) { if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) map->handle = agp_remap(map->offset, map->size, dev); else map->handle = ioremap_wc(map->offset, map->size); }
/*
 * Write-combined mapping of a window of the mapper's physical range.
 * Returns the mapping, or NULL (with an error log) when the running
 * kernel does not provide ioremap_wc().
 */
void * td_mapper_map_wc(struct td_mapper *m, uint64_t off, uint64_t size)
{
#if !defined(KABI__ioremap_wc)
	pr_err("kernel lacks support for ioremap_wc()\n");
	return NULL;
#else
	return ioremap_wc(m->phys_base + off, size);
#endif
}
/*
 * Map all register blocks and DDR regions the Balong BSP needs and store
 * the virtual addresses in file-scope g_*_virt globals. Device register
 * blocks are mapped uncached (ioremap_nocache); DDR data regions are
 * mapped write-combined (ioremap_wc). Always returns 0.
 * NOTE(review): none of the ioremap results are NULL-checked — a failed
 * mapping would be dereferenced later; confirm this is acceptable.
 */
int balong_ioremap_init(void)
{
	g_ap_sysctrl_regbase_addr_virt = ioremap_nocache(HI_AP_SYSCTRL_BASE_ADDR, HI_AP_SYSCTRL_REG_SIZE);
	g_ap_syscnt_regbase_addr_virt = ioremap_nocache(HI_AP_SYS_CNT_BASE_ADDR, HI_AP_SYS_CNT_SIZE);
	/* Shared-data DDR regions: write-combined, not uncached. */
	g_ddr_share_mem_addr_virt = ioremap_wc(DDR_SHARED_MEM_ADDR, DDR_SHARED_MEM_SIZE);
	g_socp_mem_addr_virt = ioremap_wc(DDR_SOCP_ADDR, DDR_SOCP_SIZE);
	g_ddr_gu_addr_virt = ioremap_wc(DDR_GU_ADDR, DDR_GU_SIZE);
	g_ddr_mntn_addr_virt = ioremap_wc(DDR_MNTN_ADDR, DDR_MNTN_SIZE);
	g_ipf_regbase_addr_virt = ioremap_nocache(HI_IPF_REGBASE_ADDR, HI_IPF_REG_SIZE);
	g_socp_regbase_addr_virt = ioremap_nocache(HI_SOCP_REGBASE_ADDR, HI_SOCP_REG_SIZE);
	g_ipc_regbase_addr_virt = ioremap_nocache(HI_IPCM_REGBASE_ADDR, HI_IPCM_REG_SIZE);
	/* The BBP system timer may share a block with sysctrl or syscnt. */
#if (HI_BBP_SYSTIME_BASE_ADDR != HI_AP_SYSCTRL_BASE_ADDR)
	g_bbp_systimer_regbase_addr_virt = g_ap_syscnt_regbase_addr_virt;
#else
	g_bbp_systimer_regbase_addr_virt = g_ap_sysctrl_regbase_addr_virt;
#endif
	/* Both LPM3 TCM banks are covered by one contiguous mapping. */
	g_lpm3_tcm_virt = ioremap_wc(HI_M3TCM0_MEM_ADDR, HI_M3TCM0_MEM_SIZE + HI_M3TCM1_MEM_SIZE);
	g_sram_virt = ioremap_wc(HI_SRAM_MEM_BASE_ADDR, HI_SRAM_MEM_SIZE);
	g_modem_sysctrl_base_addr_virt = ioremap_nocache(HI_SYSCTRL_BASE_ADDR, HI_SYSCTRL_REG_SIZE);
	/* NOTE(review): this string literal appears split across a raw
	 * newline in the source — likely an extraction artifact; confirm
	 * against the original file before building. */
	printk("################ balong_ioremap_init ok! 
#####################\n");
	printk("ap sysctrl regbase: v: %p p: 0x%x ok!\n",g_ap_sysctrl_regbase_addr_virt, HI_AP_SYSCTRL_BASE_ADDR);
	printk("share memory: v: %p p: 0x%x ok!\n",g_ddr_share_mem_addr_virt, DDR_SHARED_MEM_ADDR);
	printk("gu memory: v: %p p: 0x%x ok!\n",g_ddr_gu_addr_virt, DDR_GU_ADDR);
	printk("mntn memory: v: %p p: 0x%x ok!\n",g_ddr_mntn_addr_virt, DDR_MNTN_ADDR);
	printk("ipf regbase: v: %p p: 0x%x ok!\n",g_ipf_regbase_addr_virt, HI_IPF_REGBASE_ADDR);
	printk("socp regbase: v: %p p: 0x%x ok!\n",g_socp_regbase_addr_virt, HI_SOCP_REGBASE_ADDR);
	printk("ipc regbase: v: %p p: 0x%x ok!\n",g_ipc_regbase_addr_virt, HI_IPCM_REGBASE_ADDR);
	printk("syscnt_regbase: v: %p p: 0x%x ok!\n",g_ap_syscnt_regbase_addr_virt, HI_AP_SYS_CNT_BASE_ADDR);
	printk("lpm3 tcm: v: %p p: 0x%x ok!\n",g_lpm3_tcm_virt, HI_M3TCM0_MEM_ADDR);
	printk("sram: v: %p p: 0x%x ok!\n",g_sram_virt, HI_SRAM_MEM_BASE_ADDR);
	printk("modem sysctrl : v: %p p: 0x%x ok!\n",g_modem_sysctrl_base_addr_virt, HI_SYSCTRL_BASE_ADDR);
	return 0;
}
/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT and MEMREMAP_WC
 *
 * "ioremap" for ranges known to have no I/O side effects, so the __iomem
 * annotation does not apply. The requested mapping types are attempted
 * in the order WB, WT, WC until one succeeds:
 *
 * MEMREMAP_WB - the architecture's default System RAM mapping (usually a
 * read-allocate write-back cache). When the range is System RAM, the
 * existing direct map is returned instead of building a new mapping.
 *
 * MEMREMAP_WT - writes bypass or are written through the cache and are
 * never cache-dirty as far as the program can observe. Fails on System
 * RAM.
 *
 * MEMREMAP_WC - write-combining: writes may be coalesced (e.g. in CPU
 * write buffers) but the mapping is otherwise uncached. Fails on System
 * RAM.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	void *ret = NULL;
	int is_ram = region_intersects(offset, size, IORESOURCE_SYSTEM_RAM,
				       IORES_DESC_NONE);

	if (!flags)
		return NULL;

	/* Partially-RAM ranges cannot be mapped consistently. */
	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
			  &offset, (unsigned long) size);
		return NULL;
	}

	/* WB first: it may be satisfied straight from the direct map. */
	if (flags & MEMREMAP_WB) {
		if (is_ram == REGION_INTERSECTS)
			ret = try_ram_remap(offset, size);
		if (!ret)
			ret = arch_memremap_wb(offset, size);
	}

	/*
	 * Any remaining mapping type would create a new virtual alias of
	 * System RAM with conflicting attributes; refuse that.
	 */
	if (!ret && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
			  &offset, (unsigned long) size);
		return NULL;
	}

	if (!ret && (flags & MEMREMAP_WT))
		ret = ioremap_wt(offset, size);

	if (!ret && (flags & MEMREMAP_WC))
		ret = ioremap_wc(offset, size);

	return ret;
}
/*****************************************************************************
 Function    : drv_hifireset_cbfun
 Description : Called by the low-level software when the HIFI DSP is reset
               on its own; clears the HIFI NV flag in shared memory and
               kicks the mediaserver watchdog before the reset.
 Input       : DRV_RESET_CB_MOMENT_E eparam - whether this call happens
               before or after the reset
               int userdata - opaque user data (unused here)
 Output      : none
 Return      : int - 0 on success, non-zero on failure
 *****************************************************************************/
static int drv_hifireset_cbfun(DRV_RESET_CB_MOMENT_E eparam, int userdata)
{
	int iresult = 0;
#ifdef _DRV_LLT_
	/* Unit-test build: whole body compiled out. */
#else
	unsigned int *hifi_nv_vir_addr = NULL;
	phys_addr_t hifi_nv_phy_addr = 0;
	if (MDRV_RESET_CB_BEFORE == eparam) {
		/* clean hifi nv flag */
		/* The NV flag lives right after the dynamic-address share
		 * structure at the start of HIFI shared memory. */
		hifi_nv_phy_addr = (phys_addr_t)(HIFI_SYS_MEM_ADDR + sizeof(CARM_HIFI_DYN_ADDR_SHARE_STRU));
		/* nv flag is 4byte */
		hifi_nv_vir_addr = (unsigned int*)ioremap_wc(hifi_nv_phy_addr, 4);
		if (NULL == hifi_nv_vir_addr) {
			/* Best-effort: log and continue with the reset. */
			printk(KERN_INFO"RESET LOG %s: hifi_nv_vir_addr ioremap fail.\n", __FUNCTION__);
		}
		else {
			(*hifi_nv_vir_addr) = 0;
			printk(KERN_INFO"RESET LOG %s: hifi_nv_phy_addr = 0x%x, hifi_nv_vir_addr = 0x%p, (*hifi_nv_vir_addr) = 0x%x\n",
				__FUNCTION__, (unsigned int )hifi_nv_phy_addr, hifi_nv_vir_addr, (*hifi_nv_vir_addr));
			iounmap(hifi_nv_vir_addr);
		}
		printk(KERN_INFO"RESET LOG: reset mediaserver task! before\n");
		sochifi_watchdog_send_event();
		printk(KERN_INFO"RESET LOG: reset mediaserver task! after\n");
	}
	else {
		/* MDRV_RESET_CB_AFTER: nothing to do. */
	}
	printk(KERN_INFO"RESET LOG: HIFI cb fun %d run ok(%d)\n", eparam, iresult);
#endif
	return iresult;
}
/*
 * Copy the HIFI DSP's in-memory log out to the user buffer described by
 * the misc_io_dump_buf_param at 'arg'. When the caller passes
 * buf_size == 0 the DSP-side log is also reset. Returns the number of
 * bytes delivered to userspace, or 0 on any error.
 */
int hifi_get_dmesg(unsigned long arg)
{
	int ret = OK;
	struct misc_io_dump_buf_param dump_info;
	void * dump_info_user_buf = NULL;
	/* Bytes the DSP has written past the reserved header region. */
	unsigned int len = (unsigned int)(*g_om_data.dsp_log_cur_addr) - (DRV_DSP_UART_TO_MEM + DRV_DSP_UART_TO_MEM_RESERVE_SIZE);

	/* Sanity-check against the log area size (guards a bogus cursor). */
	if (len > (DRV_DSP_UART_TO_MEM_SIZE - DRV_DSP_UART_TO_MEM_RESERVE_SIZE)) {
		loge("len is larger: %d.\n", len);
		return 0;
	}

	if (copy_from_user(&dump_info, (void*)arg, sizeof(struct misc_io_dump_buf_param))) {
		loge("copy_from_user fail.\n");
		return 0;
	}

	/* Temporarily map the whole DSP log region write-combined. */
	g_om_data.dsp_log_addr = (char*)ioremap_wc(DRV_DSP_UART_TO_MEM, DRV_DSP_UART_TO_MEM_SIZE);
	if (NULL == g_om_data.dsp_log_addr) {
		loge("dsp log ioremap_wc Error.\n");
		return 0;
	}

	s_dsp_dump_info[0].data_addr = g_om_data.dsp_log_addr + DRV_DSP_UART_TO_MEM_RESERVE_SIZE;

	dump_info_user_buf = INT_TO_ADDR(dump_info.user_buf_l, dump_info.user_buf_h);
	logi("get msg: len:%d from:%p to:%p.\n", len, s_dsp_dump_info[0].data_addr, dump_info_user_buf);

	ret = copy_to_user(dump_info_user_buf, s_dsp_dump_info[0].data_addr, len);
	if (OK != ret) {
		loge("copy_to_user fail, ret is %d.\n", ret);
		/* copy_to_user returns bytes NOT copied; report what made it. */
		len -= ret;
	}

	/* buf_size == 0 is the "read and reset" request: rewind the DSP's
	 * write cursor and clear the log area. */
	if (0 == dump_info.buf_size) {
		*g_om_data.dsp_log_cur_addr = DRV_DSP_UART_TO_MEM + DRV_DSP_UART_TO_MEM_RESERVE_SIZE;
		memset(s_dsp_dump_info[0].data_addr, 0, s_dsp_dump_info[0].data_len);
	}

	if(g_om_data.dsp_log_addr) {
		iounmap(g_om_data.dsp_log_addr);
		g_om_data.dsp_log_addr = NULL;
	}

	return (int)len;
}
/*
 * WC-map the VRFB rotation plane 'rot' so that 'height' lines of it are
 * CPU-accessible via vrfb->vaddr[rot]. Returns 0 on success, -ENOMEM if
 * the mapping fails (vaddr[rot] is then NULL).
 */
int omap_vrfb_map_angle(struct vrfb *vrfb, u16 height, u8 rot)
{
	/* Bytes spanned by 'height' full VRFB lines at this pixel depth. */
	unsigned long len = height * OMAP_VRFB_LINE_LEN * vrfb->bytespp;

	vrfb->vaddr[rot] = ioremap_wc(vrfb->paddr[rot], len);
	if (!vrfb->vaddr[rot]) {
		printk(KERN_ERR "vrfb: ioremap failed\n");
		return -ENOMEM;
	}

	DBG("ioremapped vrfb area %d of size %lu into %p\n",
	    rot, len, vrfb->vaddr[rot]);

	return 0;
}
/*
 * Obtain framebuffer memory for a FIMD window from whichever backing
 * store the kernel config provides: CMA, the bootmem media bank, or a
 * fresh write-combined DMA allocation. Returns 0 on success, a negative
 * errno on failure.
 */
int s3cfb_map_default_video_memory(struct s3cfb_global *fbdev,
					struct fb_info *fb, int fimd_id)
{
	struct fb_fix_screeninfo *fix = &fb->fix;
	struct s3cfb_window *win = fb->par;
#ifdef CONFIG_CMA
	struct cma_info mem_info;
	int err;
#endif

	/* Memory owned by another driver: nothing to allocate. */
	if (win->owner == DMA_MEM_OTHER)
		return 0;

#ifdef CONFIG_CMA
	/* CMA path: allocate smem_len bytes from the FIMD CMA region. */
	err = cma_info(&mem_info, fbdev->dev, CMA_REGION_FIMD);
	if (err)
		return err;
	fix->smem_start = (dma_addr_t)cma_alloc
		(fbdev->dev, CMA_REGION_FIMD, (size_t)fix->smem_len, 0);
	if (IS_ERR_VALUE(fix->smem_start)) {
		return -EBUSY;
	}
	fb->screen_base = cma_get_virt(fix->smem_start, fix->smem_len, 1);
#elif defined(CONFIG_S5P_MEM_BOOTMEM)
	/* Bootmem path: use the statically reserved FIMD bank, WC-mapped. */
	fix->smem_start = s5p_get_media_memory_bank(S5P_MDEV_FIMD, 1);
	fix->smem_len = s5p_get_media_memsize_bank(S5P_MDEV_FIMD, 1);
	fb->screen_base = ioremap_wc(fix->smem_start, fix->smem_len);
#else
	/* Fallback: plain write-combined DMA allocation. */
	fb->screen_base = dma_alloc_writecombine(fbdev->dev,
						PAGE_ALIGN(fix->smem_len),
						(unsigned int *)
						&fix->smem_start, GFP_KERNEL);
#endif
	if (!fb->screen_base)
		return -ENOMEM;
	else
		dev_info(fbdev->dev, "[fb%d] dma: 0x%08x, cpu: 0x%08x, "
			"size: 0x%08x\n", win->id,
			(unsigned int)fix->smem_start,
			(unsigned int)fb->screen_base, fix->smem_len);

	memset(fb->screen_base, 0, fix->smem_len);
	win->owner = DMA_MEM_FIMD;

	return 0;
}
static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf) { struct perf_mw *mw; int rc; mw = &perf->mw; rc = ntb_mw_get_range(ntb, 0, &mw->phys_addr, &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size); if (rc) return rc; perf->mw.vbase = ioremap_wc(mw->phys_addr, mw->phys_size); if (!mw->vbase) return -ENOMEM; return 0; }
/*
 * Initialise peer memory window 'idx': look up its base/size and WC-map
 * it into mw->local. Returns 0 on success, the lookup error, or -EFAULT
 * when the mapping fails.
 */
static int tool_init_mw(struct tool_ctx *tc, int idx)
{
	struct tool_mw *mw = &tc->mws[idx];
	phys_addr_t base;
	int rc = ntb_peer_mw_get_addr(tc->ntb, idx, &base, &mw->win_size);

	if (rc)
		return rc;

	mw->tc = tc;
	mw->idx = idx;
	mw->local = ioremap_wc(base, mw->win_size);

	return mw->local ? 0 : -EFAULT;
}
/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: BUS offset to map
 * @size: Size of map
 *
 * Managed ioremap_wc(): the mapping is recorded as a device resource and
 * automatically unmapped on driver detach. Returns the mapping, or NULL
 * on allocation/mapping failure.
 */
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	void __iomem **slot;
	void __iomem *va;

	/* Reserve the devres slot first so failure leaves no mapping. */
	slot = devres_alloc(devm_ioremap_release, sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return NULL;

	va = ioremap_wc(offset, size);
	if (!va) {
		devres_free(slot);
		return NULL;
	}

	*slot = va;
	devres_add(dev, slot);

	return va;
}
/*
 * Map and validate the NCT (NVIDIA Configuration Table) partition: cache
 * its header and check the magic ID. Idempotent. Returns 0 on success,
 * -ENOTSUPP when unconfigured, -EIO on mapping failure, -ENOKEY on a
 * bad magic (the mapping is released in that case).
 */
static int __init tegra_nct_init(void)
{
	if (tegra_nct_initialized)
		return 0;

	if (tegra_nck_start == 0 || tegra_nck_size == 0) {
		pr_err("tegra_nct: not configured\n");
		return -ENOTSUPP;
	}

	nct_ptr = ioremap_wc(tegra_nck_start, tegra_nck_size);
	if (!nct_ptr) {
		pr_err("tegra_nct: failed to ioremap memory at 0x%08lx\n",
		       tegra_nck_start);
		return -EIO;
	}

	/* Snapshot the header locally before inspecting it. */
	memcpy(&nct_head, nct_ptr, sizeof(nct_head));
	wmb();

	pr_info("%s: magic(0x%x),vid(0x%x),pid(0x%x),ver(V%x.%x),rev(%d)\n",
		__func__, nct_head.magicId, nct_head.vendorId,
		nct_head.productId, (nct_head.version >> 16) & 0xFFFF,
		(nct_head.version & 0xFFFF), nct_head.revision);

	if (nct_head.magicId != NCT_MAGIC_ID) {
		pr_err("%s: magic ID error (0x%x/0x%x)\n", __func__,
		       nct_head.magicId, NCT_MAGIC_ID);
		iounmap(nct_ptr);
		return -ENOKEY;
	}

	tegra_nct_initialized = true;

	return 0;
}
/*
 * Read the HIFI DSP power-status word through a temporary WC mapping.
 * Returns true when the status equals DRV_DSP_POWER_ON, false otherwise
 * (including when the status register cannot be mapped).
 */
bool hifi_is_power_on(void)
{
	unsigned int *status_reg;
	bool powered;

	status_reg = (unsigned int *)ioremap_wc(DRV_DSP_POWER_STATUS_ADDR, 0x4);
	if (NULL == status_reg) {
		loge("ioremap_wc hifi_power_status_addr fail.\n");
		return false;
	}

	powered = (DRV_DSP_POWER_ON == readl(status_reg));

	iounmap(status_reg);

	return powered;
}
/*
 * Get the interleave buffer for slot 'at', sized 'size' bytes, tracking
 * the framebuffer physical address 'paddr'. Reuses the existing VRAM
 * chunk when the size is unchanged, otherwise releases it and allocates
 * a new one, WC-mapped into IB->buffer. Returns the slot, or NULL on
 * allocation/mapping failure.
 */
static struct InterleaveBuffer* allocInterleaveBufferAt(int at, int size, u32 paddr)
{
	struct InterleaveBuffer *IB = &(interleave_buffers[at]);
	int r;

	if (IB->buffer != NULL) {
		if (IB->size == size) {
			printk("S3D : size same re-use interlave buffer\n");
			IB->fbbuffer_paddr = paddr;
			return IB;
		}
		/* Size changed: drop the old mapping and VRAM chunk.
		 * BUGFIX: the old code freed the VRAM but never iounmap'd
		 * the ioremap_wc() mapping, leaking virtual space. */
		iounmap(IB->buffer);
		omap_vram_free(IB->paddr, IB->size);
		IB->buffer = NULL;
	}

	r = omap_vram_alloc(OMAP_VRAM_MEMTYPE_SDRAM, size, &IB->paddr);
	if (!r) {
		IB->buffer = ioremap_wc(IB->paddr, size);
		/* BUGFIX: ioremap_wc() can fail; the old code handed back a
		 * slot with a NULL mapping and leaked the VRAM chunk. */
		if (IB->buffer == NULL) {
			omap_vram_free(IB->paddr, size);
			printk("S3D : alloc fail for 0x%x\n", paddr);
			return NULL;
		}
		IB->size = size;
		IB->fbbuffer_paddr = paddr;
		printk("S3D : allocat %dth buffer success. size:%d, vaddr:%p paddr:0x%lx for 0x%x\n",
		       at, size, IB->buffer, IB->paddr, paddr );
		return IB;
	} else {
		printk("S3D : alloc fail for 0x%x\n", paddr);
		return NULL;
	}
}
/*
 * Back an fb_info with framebuffer memory: reuse externally-owned memory,
 * map the platform-reserved region when it is big enough, or fall back to
 * a fresh write-combined DMA allocation. Returns 0 on success, -ENOMEM
 * when no memory could be obtained.
 */
static int s3cfb_map_video_memory(struct fb_info *fb)
{
	struct fb_fix_screeninfo *fix = &fb->fix;
	struct s3cfb_window *win = fb->par;
	struct s3cfb_global *fbdev =
		platform_get_drvdata(to_platform_device(fb->device));
	struct s3c_platform_fb *pdata = to_fb_plat(fbdev->dev);

	/* Memory owned by another driver: just record its address/size. */
	if (win->owner == DMA_MEM_OTHER) {
		fix->smem_start = win->other_mem_addr;
		fix->smem_len = win->other_mem_size;
		return 0;
	}

	/* Already mapped earlier — nothing to do. */
	if (fb->screen_base)
		return 0;

	if (pdata && pdata->pmem_start[win->id] &&
	    (pdata->pmem_size[win->id] >= fix->smem_len)) {
		/* Platform reserved a large-enough region: WC-map it. */
		fix->smem_start = pdata->pmem_start[win->id];
		fb->screen_base = ioremap_wc(fix->smem_start,
					     pdata->pmem_size[win->id]);
	} else
		fb->screen_base = dma_alloc_writecombine(fbdev->dev,
							 PAGE_ALIGN(fix->smem_len),
							 (unsigned int *)
							 &fix->smem_start,
							 GFP_KERNEL);

	if (!fb->screen_base)
		return -ENOMEM;

	dev_info(fbdev->dev, "[fb%d] dma: 0x%08x, cpu: 0x%08x, "
		"size: 0x%08x\n", win->id,
		(unsigned int)fix->smem_start,
		(unsigned int)fb->screen_base, fix->smem_len);

	memset(fb->screen_base, 0, fix->smem_len);
	win->owner = DMA_MEM_FIMD;

	return 0;
}
/*
 * Initialise the JPEG boot-logo parser: probe the logo image in memory,
 * WC-map the physical pages behind it (plus padding), allocate the
 * parser's private state and kick the decode hardware.
 *
 * Returns PARSER_FOUND on success, PARSER_UNFOUND when no JPEG is
 * recognised or hardware init fails, -ENOMEM on allocation failure.
 */
static int jpeg_init(logo_object_t *plogo)
{
	int logo_size;
	void __iomem *vaddr;
	jpeg_private_t *priv;

	vaddr = (void __iomem *)plogo->para.mem_addr;
	amlog_mask_level(LOG_MASK_PARSER,LOG_LEVEL_LOW,"logo vaddr:0x%p\n ",vaddr);

	if ((logo_size = parse_jpeg_info(vaddr, plogo)) <= 0)
		return PARSER_UNFOUND;

	/* Remap the logo's physical pages write-combined, with padding for
	 * the tail-zero swap below. */
	vaddr = ioremap_wc((unsigned int)virt_to_phys(plogo->para.mem_addr),
			   logo_size + PADDINGSIZE);
	if (NULL == vaddr) {
		amlog_mask_level(LOG_MASK_PARSER,LOG_LEVEL_LOW,"remapping logo data failed\n");
		return -ENOMEM;
	}

	priv = (jpeg_private_t *)kmalloc(sizeof(jpeg_private_t), GFP_KERNEL);
	/*
	 * BUGFIX: kmalloc() returns NULL on failure, never an ERR_PTR, so
	 * the old IS_ERR(priv) check could not catch an allocation
	 * failure. Also release the mapping on this path (it was leaked).
	 */
	if (priv == NULL) {
		amlog_mask_level(LOG_MASK_PARSER,LOG_LEVEL_LOW,"can't alloc memory for jpeg private data\n");
		iounmap(vaddr);
		return -ENOMEM;
	}
	memset(priv, 0, sizeof(jpeg_private_t));

	priv->vf.width = plogo->parser->logo_pic_info.width;
	priv->vf.height = plogo->parser->logo_pic_info.height;
	plogo->parser->priv = priv;
	g_jpeg_parser = priv;
	priv->vaddr = vaddr;

	swap_tailzero_data((u8 *)vaddr, logo_size);

	if (hardware_init(plogo, logo_size) != SUCCESS) {
		/* priv/vaddr stay registered via plogo->parser for later
		 * teardown, matching the original behavior. */
		return PARSER_UNFOUND;
	}

	amlog_mask_level(LOG_MASK_PARSER,LOG_LEVEL_LOW,"jpeg parser hardware init ok\n");
	plogo->parser->logo_pic_info.size = logo_size;

	return PARSER_FOUND;
}
/*
 * TTM backend hook: fill in the bus-address information (base, offset,
 * size, iomem flag) needed to CPU-map a buffer object placed in the
 * given memory type. Returns 0 on success, -EINVAL for unmappable
 * placements or VRAM outside the visible aperture.
 */
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Legacy DRM helper: write-combined mapping of a local map's bus range
 * into map->handle (NULL on failure; callers must check). 'dev' is
 * unused here but kept for interface symmetry with the other legacy
 * ioremap helpers.
 */
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev)
{
	map->handle = ioremap_wc(map->offset, map->size);
}
int mrfld_gtt_init(struct psb_gtt *pg, int resume) { struct drm_device *dev = pg->dev; struct drm_psb_private *dev_priv = dev->dev_private; unsigned gtt_pages; unsigned long stolen_size, vram_stolen_size, ci_stolen_size; unsigned long rar_stolen_size; unsigned i, num_pages; unsigned pfn_base; uint32_t ci_pages, vram_pages; uint32_t tt_pages; uint32_t *ttm_gtt_map; int ret = 0; uint32_t pte; pg->initialized = 1; pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE); /* fix me: video mmu has hw bug to access 0x0D0000000, * then make gatt start at 0x0e000,0000 */ pg->mmu_gatt_start = PSB_MEM_TT_START; pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE) >> PAGE_SHIFT; pci_read_config_dword(dev->pdev, MRFLD_BGSM, &pg->pge_ctl); pg->gtt_phys_start = pg->pge_ctl & PAGE_MASK; pci_read_config_dword(dev->pdev, MRFLD_MSAC, >t_pages); printk(KERN_INFO "01 gtt_pages = 0x%x \n", gtt_pages); gtt_pages &= _APERTURE_SIZE_MASK; gtt_pages >>= _APERTURE_SIZE_POS; printk(KERN_INFO "02 gtt_pages = 0x%x \n", gtt_pages); switch (gtt_pages) { case _1G_APERTURE: gtt_pages = _1G_APERTURE_SIZE >> PAGE_SHIFT; break; case _512M_APERTURE: gtt_pages = _512M_APERTURE_SIZE >> PAGE_SHIFT; break; case _256M_APERTURE: gtt_pages = _256M_APERTURE_SIZE >> PAGE_SHIFT; break; default: DRM_ERROR("%s, invalded aperture size.\n", __func__); gtt_pages = _1G_APERTURE_SIZE >> PAGE_SHIFT; } gtt_pages >>= PAGE_SHIFT; gtt_pages *= 4; printk(KERN_INFO "03 gtt_pages = 0x%x \n", gtt_pages); /* HW removed the PSB_BSM, SW/FW needs it. 
*/ pci_read_config_dword(dev->pdev, PSB_BSM, &pg->stolen_base); vram_stolen_size = pg->gtt_phys_start - pg->stolen_base - PAGE_SIZE; /* CI is not included in the stolen size since the TOPAZ MMU bug */ ci_stolen_size = dev_priv->ci_region_size; /* Don't add CI & RAR share buffer space * managed by TTM to stolen_size */ stolen_size = vram_stolen_size; rar_stolen_size = dev_priv->rar_region_size; printk(KERN_INFO "GMMADR(region 0) start: 0x%08x (%dM).\n", pg->gatt_start, pg->gatt_pages / 256); printk(KERN_INFO "GTT (can map %dM RAM), and actual RAM base 0x%08x.\n", gtt_pages * 4, pg->gtt_phys_start); printk(KERN_INFO "Stole memory information \n"); printk(KERN_INFO " base in RAM: 0x%x \n", pg->stolen_base); printk(KERN_INFO " size: %luK, calculated by (GTT RAM base) - (Stolen base).\n", vram_stolen_size / 1024); if (ci_stolen_size > 0) printk(KERN_INFO "CI Stole memory: RAM base = 0x%08x, size = %lu M \n", dev_priv->ci_region_start, ci_stolen_size / 1024 / 1024); if (rar_stolen_size > 0) printk(KERN_INFO "RAR Stole memory: RAM base = 0x%08x, size = %lu M \n", dev_priv->rar_region_start, rar_stolen_size / 1024 / 1024); if (resume && (gtt_pages != pg->gtt_pages) && (stolen_size != pg->stolen_size)) { DRM_ERROR("GTT resume error.\n"); ret = -EINVAL; goto out_err; } pg->gtt_pages = gtt_pages; pg->stolen_size = stolen_size; pg->vram_stolen_size = vram_stolen_size; pg->ci_stolen_size = ci_stolen_size; pg->rar_stolen_size = rar_stolen_size; pg->gtt_map = ioremap_nocache(pg->gtt_phys_start, gtt_pages << PAGE_SHIFT); if (!pg->gtt_map) { DRM_ERROR("Failure to map gtt.\n"); ret = -ENOMEM; goto out_err; } pg->vram_addr = ioremap_wc(pg->stolen_base, stolen_size); if (!pg->vram_addr) { DRM_ERROR("Failure to map stolen base.\n"); ret = -ENOMEM; goto out_err; } DRM_INFO("%s: vram kernel virtual address %p\n", __FUNCTION__, pg->vram_addr); tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ? 
(pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT; ttm_gtt_map = pg->gtt_map + tt_pages / 2; /* * insert vram stolen pages. */ pfn_base = pg->stolen_base >> PAGE_SHIFT; vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT; printk(KERN_INFO "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n", num_pages, pfn_base, 0); for (i = 0; i < num_pages; ++i) { pte = psb_gtt_mask_pte(pfn_base + i, 0); iowrite32(pte, pg->gtt_map + i); } /* * Init rest of gtt managed by IMG. */ pfn_base = page_to_pfn(dev_priv->scratch_page); pte = psb_gtt_mask_pte(pfn_base, 0); for (; i < tt_pages / 2 - 1; ++i) iowrite32(pte, pg->gtt_map + i); /* * insert CI stolen pages */ pfn_base = dev_priv->ci_region_start >> PAGE_SHIFT; ci_pages = num_pages = ci_stolen_size >> PAGE_SHIFT; printk(KERN_INFO "Set up %d CI stolen pages starting at 0x%08x, GTT offset %dK\n", num_pages, pfn_base, (ttm_gtt_map - pg->gtt_map) * 4); for (i = 0; i < num_pages; ++i) { pte = psb_gtt_mask_pte(pfn_base + i, 0); iowrite32(pte, ttm_gtt_map + i); } /* * insert RAR stolen pages */ if (rar_stolen_size != 0) { pfn_base = dev_priv->rar_region_start >> PAGE_SHIFT; num_pages = rar_stolen_size >> PAGE_SHIFT; printk(KERN_INFO "Set up %d RAR stolen pages starting at 0x%08x, GTT offset %dK\n", num_pages, pfn_base, (ttm_gtt_map - pg->gtt_map + i) * 4); for (; i < num_pages + ci_pages; ++i) { pte = psb_gtt_mask_pte(pfn_base + i - ci_pages, 0); iowrite32(pte, ttm_gtt_map + i); } }
/*
 * Create the fbdev console framebuffer: allocate a GEM object, pin and
 * fence it in the GTT, register it with the fb helper and expose it to
 * the fbdev core through a write-combined mapping of the GTT aperture.
 * Returns 0 on success, a negative errno on failure (all intermediate
 * state is unwound via the goto chain).
 */
static int intelfb_create(struct intel_fbdev *ifbdev,
			  struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = ifbdev->helper.dev;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_mode_fb_cmd mode_cmd;
	struct drm_i915_gem_object *obj;
	struct device *device = &dev->pdev->dev;
	int size, ret;

	/* we don't do packed 24bpp */
	if (sizes->surface_bpp == 24)
		sizes->surface_bpp = 32;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.bpp = sizes->surface_bpp;
	/* Pitch rounded up to a 64-byte multiple. */
	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
	mode_cmd.depth = sizes->surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_alloc_object(dev, size);
	if (!obj) {
		DRM_ERROR("failed to allocate framebuffer\n");
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&dev->struct_mutex);

	/* Flush everything out, we'll be doing GTT only from now on */
	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
	if (ret) {
		DRM_ERROR("failed to pin fb: %d\n", ret);
		goto out_unref;
	}

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	info->par = ifbdev;

	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
	if (ret)
		goto out_unpin;

	fb = &ifbdev->ifb.base;

	ifbdev->helper.fb = fb;
	ifbdev->helper.fbdev = info;

	strcpy(info->fix.id, "inteldrmfb");

	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &intelfb_ops;

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		/* NOTE(review): fb_alloc_cmap's own error code is
		 * overwritten with -ENOMEM here — confirm intentional. */
		ret = -ENOMEM;
		goto out_unpin;
	}
	/* setup aperture base/size for vesafb takeover */
	info->aperture_base = dev->mode_config.fb_base;
	if (!IS_GEN2(dev))
		info->aperture_size = pci_resource_len(dev->pdev, 2);
	else
		info->aperture_size = pci_resource_len(dev->pdev, 0);

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len = size;

	/* CPU access goes through the GTT aperture, write-combined. */
	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
	if (!info->screen_base) {
		ret = -ENOSPC;
		goto out_unpin;
	}
	info->screen_size = size;

//	memset(info->screen_base, 0, size);

	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);

	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;

	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
		      fb->width, fb->height,
		      obj->gtt_offset, obj);

	mutex_unlock(&dev->struct_mutex);
	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unpin:
	i915_gem_object_unpin(obj);
out_unref:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}