/*
 * Initialize the per-CPU clockevent device for RealView PBA8.
 * Maps the system controller and TIMER0/1 register blocks, then hands
 * them to the RealView timer driver with pba8_timer0_handler as the
 * event handler. Returns VMM_OK on success or a VMM error code.
 */
int __init arch_cpu_clockevent_init(void)
{
	virtual_addr_t sctl_base;
	int err;

	/* Map system controller registers */
	sctl_base = vmm_host_iomap(REALVIEW_SCTL_BASE, 0x1000);

	/* Map timer0/1 register block (kept mapped for the handler) */
	pba8_timer0_base = vmm_host_iomap(REALVIEW_PBA8_TIMER0_1_BASE, 0x1000);

	/* Setup timer0 as the clockevent source */
	err = realview_timer_init(sctl_base, pba8_timer0_base,
				  REALVIEW_TIMER1_EnSel,
				  IRQ_PBA8_TIMER0_1, pba8_timer0_handler);
	if (err) {
		return err;
	}

	/* The system controller is only needed during setup */
	err = vmm_host_iounmap(sctl_base, 0x1000);
	if (err) {
		return err;
	}

	return VMM_OK;
}
/*
 * Register SP804 timer0 as the system clockchip (Versatile Express CA9x4).
 * Switches the timer reference clock to 1MHz TIMCLK via the system
 * controller, then initializes timer0 as a 1MHz clockchip.
 */
int __init arch_clockchip_init(void)
{
	virtual_addr_t sysctl;
	u32 reg;
	int err;

	/* Map system control registers */
	sysctl = vmm_host_iomap(V2M_SYSCTL, 0x1000);

	/* Select 1MHz TIMCLK as the reference clock for SP804 timers */
	reg = vmm_readl((void *)sysctl);
	reg |= SCCTRL_TIMEREN0SEL_TIMCLK;
	vmm_writel(reg, (void *)sysctl);

	/* Done with the system control registers */
	err = vmm_host_iounmap(sysctl, 0x1000);
	if (err) {
		return err;
	}

	/* Map timer0 registers (kept mapped for the clockchip) */
	ca9x4_timer0_base = vmm_host_iomap(V2M_TIMER0, 0x1000);

	/* Register timer0 as a clockchip with rating 300 @ 1MHz */
	err = sp804_clockchip_init(ca9x4_timer0_base, IRQ_V2M_TIMER0,
				   "sp804_timer0", 300, 1000000, 0);
	if (err) {
		return err;
	}

	return VMM_OK;
}
/*
 * Reset the EXYNOS4 board.
 *
 * The active path triggers a software reset by writing 1 to the PMU
 * SWRESET register. An alternative watchdog-based reset sequence is kept
 * under "#if 0" for reference. If the reset takes effect this function
 * never returns; otherwise it reports failure after a 500ms grace delay.
 *
 * Returns VMM_EFAIL (only reached when the reset did not happen).
 */
int arch_board_reset(void)
{
#if 0
	/* Disabled: watchdog-triggered reset via S3C2410-style WDT */
	void *wdt_ptr = (void *)vmm_host_iomap(EXYNOS4_PA_WATCHDOG, 0x100);

	if (wdt_ptr) {
		u32 perir_reg;
		/* Map the CMU clock-gate register controlling the WDT clock */
		void *cmu_ptr = (void *)vmm_host_iomap(EXYNOS4_PA_CMU +
						       EXYNOS4_CLKGATE_IP_PERIR,
						       sizeof(perir_reg));
		if (cmu_ptr) {
			vmm_printf("%s: CMU reg is at 0x%08x + 0x%08x\n",
				   __func__, EXYNOS4_PA_CMU,
				   EXYNOS4_CLKGATE_IP_PERIR);
			/* Disable the watchdog before reprogramming it */
			vmm_writel(0, wdt_ptr + S3C2410_WTCON);
			/* enable the WDT clock if it is not already enabled */
			perir_reg = vmm_readl(cmu_ptr);
			vmm_printf("%s: CMU PERIR reg is 0x%08x\n",
				   __func__, perir_reg);
			if (!(perir_reg & (1 << 14))) {
				/* Bit 14 gates the WDT clock — turn it on */
				perir_reg |= (1 << 14);
				vmm_printf
				    ("%s: enabling WDT in PERIR: writing 0x%08x\n",
				     __func__, perir_reg);
				vmm_writel(perir_reg, cmu_ptr);
			}
			/* Short timeout then re-enable WDT to force a reset */
			vmm_writel(0x80, wdt_ptr + S3C2410_WTDAT);
			vmm_writel(0x80, wdt_ptr + S3C2410_WTCNT);
			vmm_writel(0x2025, wdt_ptr + S3C2410_WTCON);
			vmm_host_iounmap((virtual_addr_t) cmu_ptr,
					 sizeof(perir_reg));
		}
		vmm_host_iounmap((virtual_addr_t) wdt_ptr, 0x100);
	}
#else
	/* Active path: PMU software reset */
	void *pmu_ptr = (void *)vmm_host_iomap(EXYNOS4_PA_PMU + EXYNOS_SWRESET,
					       sizeof(u32));

	if (pmu_ptr) {
		/* Trigger a Software reset */
		vmm_writel(0x1, pmu_ptr);
		vmm_host_iounmap((virtual_addr_t) pmu_ptr, sizeof(u32));
	}
#endif
	/* Give the reset time to take effect */
	vmm_mdelay(500);

	/* Still executing => the reset did not happen */
	vmm_printf("%s: failed\n", __func__);

	return VMM_EFAIL;
}
/*
 * Initialize the OMAP3 SDRAM controller (SDRC) and SMS.
 *
 * @sdrc_cs0: timing parameters for chip-select 0 (stored, not applied yet)
 * @sdrc_cs1: timing parameters for chip-select 1 (stored, not applied yet)
 *
 * Maps the SDRC and SMS register windows (idempotent — mappings are
 * cached in file globals), puts both modules in smart-idle mode, and
 * programs SDRC_POWER. Returns VMM_OK or VMM_EFAIL on mapping failure.
 */
int __init omap3_sdrc_init(struct omap3_sdrc_params *sdrc_cs0,
			   struct omap3_sdrc_params *sdrc_cs1)
{
	u32 l;

	/* This function does the task same as omap2_init_common_devices() of
	 * <linux>/arch/arm/mach-omap2/io.c */

	if(!omap3_sdrc_base) {
		omap3_sdrc_base = vmm_host_iomap(OMAP3_SDRC_BASE,
						 OMAP3_SDRC_SIZE);
		if(!omap3_sdrc_base) {
			return VMM_EFAIL;
		}
	}

	if(!omap3_sms_base) {
		omap3_sms_base = vmm_host_iomap(OMAP3_SMS_BASE,
						OMAP3_SMS_SIZE);
		if(!omap3_sms_base) {
			return VMM_EFAIL;
		}
	}

	/* Initiaize SDRC as per omap2_sdrc_init() of
	 * <linux>/arch/arm/mach-omap2/sdrc.c */

	/* Set SMS SYSCONFIG idle-mode field (bits 4:3) to smart-idle (0x2) */
	l = sms_read_reg(SMS_SYSCONFIG);
	l &= ~(0x3 << 3);
	l |= (0x2 << 3);
	sms_write_reg(l, SMS_SYSCONFIG);

	/* Same smart-idle setting for the SDRC SYSCONFIG */
	l = sdrc_read_reg(SDRC_SYSCONFIG);
	l &= ~(0x3 << 3);
	l |= (0x2 << 3);
	sdrc_write_reg(l, SDRC_SYSCONFIG);

	/* Stash the caller's timing tables for later reprogramming */
	sdrc_init_params_cs0 = sdrc_cs0;
	sdrc_init_params_cs1 = sdrc_cs1;

	/* XXX Enable SRFRONIDLEREQ here also? */
	/*
	 * PWDENA should not be set due to 34xx erratum 1.150 - PWDENA
	 * can cause random memory corruption
	 */
	l = (1 << SDRC_POWER_EXTCLKDIS_SHIFT) |
	    (1 << SDRC_POWER_PAGEPOLICY_SHIFT);
	sdrc_write_reg(l, SDRC_POWER);

	/* FIXME: Reprogram SDRC timing parameters as per
	 * _omap2_init_reprogram_sdrc() function of
	 * <linux>/arch/arm/mach-omap2/io.c */

	return VMM_OK;
}
/*
 * Write a sequence of values into host physical memory.
 *
 * @cdev: character device for error messages
 * @addr: starting physical address (rounded down to @wsz alignment)
 * @wsz:  word size in bytes (1, 2 or 4; other values are silently skipped)
 * @valc: number of values to write
 * @valv: values as decimal strings (parsed base 10)
 *
 * The target region is accessed one page at a time: the page containing
 * the current address is io-mapped, written through, and remapped
 * whenever the write position crosses a page boundary.
 * NOTE(review): the vmm_host_iomap() results are not checked — assumes
 * the mapping cannot fail; confirm against vmm_host_iomap semantics.
 *
 * Returns VMM_OK, or the error code from a failed unmap.
 */
int cmd_memory_modify(struct vmm_chardev *cdev,
		      physical_addr_t addr, u32 wsz, int valc, char **valv)
{
	int rc, w = 0;
	bool page_mapped;
	virtual_addr_t page_va, addr_offset;
	physical_addr_t page_pa;

	/* Align the start address down to the word size */
	addr = addr - (addr & (wsz - 1));

	/* Map the page containing the first target address */
	page_pa = addr - (addr & VMM_PAGE_MASK);
	page_va = vmm_host_iomap(page_pa, VMM_PAGE_SIZE);
	page_mapped = TRUE;

	while (w < valc) {
		/* Remap when the write position leaves the current page */
		if (page_pa != (addr - (addr & VMM_PAGE_MASK))) {
			if (page_mapped) {
				rc = vmm_host_iounmap(page_va, VMM_PAGE_SIZE);
				if (rc) {
					vmm_cprintf(cdev,
						    "Error: Failed to unmap memory.\n");
					return rc;
				}
				page_mapped = FALSE;
			}
			page_pa = addr - (addr & VMM_PAGE_MASK);
			page_va = vmm_host_iomap(page_pa, VMM_PAGE_SIZE);
			page_mapped = TRUE;
		}

		/* Offset of the target word within the mapped page */
		addr_offset = (addr & VMM_PAGE_MASK);
		switch (wsz) {
		case 1:
			*((u8 *)(page_va + addr_offset)) =
					(u8)vmm_str2uint(valv[w], 10);
			break;
		case 2:
			*((u16 *)(page_va + addr_offset)) =
					(u16)vmm_str2uint(valv[w], 10);
			break;
		case 4:
			*((u32 *)(page_va + addr_offset)) =
					(u32)vmm_str2uint(valv[w], 10);
			break;
		default:
			break;
		};
		addr += wsz;
		w++;
	}

	/* Release the final mapping */
	if (page_mapped) {
		rc = vmm_host_iounmap(page_va, VMM_PAGE_SIZE);
		if (rc) {
			vmm_cprintf(cdev, "Error: Failed to unmap memory.\n");
			return rc;
		}
		page_mapped = FALSE;
	}

	return VMM_OK;
}
/*
 * Per-CPU SMP bring-up checks against the Snoop Control Unit.
 * Lazily maps the SCU, records the optional per-CPU clear/release
 * mailbox addresses from the device tree, and verifies that @cpu is
 * within the SCU core count and SMP-capable.
 */
static int __init scu_cpu_init(struct vmm_devtree_node *node,
			       unsigned int cpu)
{
	struct vmm_devtree_node *scu_node;
	physical_addr_t pa;
	u32 ncores;
	int err;

	/* Locate and map the SCU registers on first use */
	if (!scu_base) {
		scu_node = vmm_devtree_find_matching(NULL, scu_matches);
		if (!scu_node) {
			return VMM_ENODEV;
		}
		err = vmm_devtree_regmap(scu_node, &scu_base, 0);
		if (err) {
			return err;
		}
	}

	/* Map the optional per-CPU clear address (0 when absent) */
	err = vmm_devtree_read_physaddr(node,
			VMM_DEVTREE_CPU_CLEAR_ADDR_ATTR_NAME, &pa);
	clear_addr[cpu] = err ? 0x0 : vmm_host_iomap(pa, VMM_PAGE_SIZE);

	/* Map the optional per-CPU release address (0 when absent) */
	err = vmm_devtree_read_physaddr(node,
			VMM_DEVTREE_CPU_RELEASE_ADDR_ATTR_NAME, &pa);
	release_addr[cpu] = err ? 0x0 : vmm_host_iomap(pa, VMM_PAGE_SIZE);

	/* The requested CPU must exist according to the SCU */
	ncores = scu_get_core_count((void *)scu_base);
	if (ncores <= cpu) {
		return VMM_ENOSYS;
	}

	/* ... and it must be flagged SMP-capable */
	if (!scu_cpu_core_is_smp((void *)scu_base, cpu)) {
		return VMM_ENOSYS;
	}

	return VMM_OK;
}
/*
 * Scan the known ACPI search areas for the Root System Description
 * Pointer (RSDP).
 *
 * Each candidate area is io-mapped and searched; areas that do not
 * contain the RSDP are unmapped again. On success the winning area is
 * intentionally left mapped, since the returned address points into it.
 *
 * Returns the virtual address of the RSDP, or 0 when not found.
 */
static virtual_addr_t __init find_root_system_descriptor(void)
{
	struct acpi_search_area *carea = &acpi_areas[0];
	virtual_addr_t area_map;
	virtual_size_t area_size;
	virtual_addr_t rsdp_base = 0;

	while (carea->area_name) {
		vmm_printf("Search for RSDP in %s... ", carea->area_name);
		area_size = carea->phys_end - carea->phys_start;
		area_map = vmm_host_iomap(carea->phys_start, area_size);
		BUG_ON((void *)area_map == NULL);
		if ((rsdp_base =
		     locate_rsdp_in_area(area_map, area_size)) != 0) {
			vmm_printf("found.\n");
			break;
		}
		rsdp_base = 0;
		/* FIX: unmap using the size of the area that was actually
		 * mapped. The old code advanced carea BEFORE computing the
		 * unmap size, so it unmapped with the NEXT area's bounds
		 * (and read past the array on the last iteration). */
		vmm_host_iounmap(area_map, area_size);
		vmm_printf("not found.\n");
		carea++;
	}

	if (likely(rsdp_base))
		vmm_printf("RSDP Base: 0x%x\n", rsdp_base);

	return rsdp_base;
}
int arch_defterm_init(void) { omap3_uart_base = vmm_host_iomap(OMAP3_UART_BASE, 0x1000); uart_lowlevel_init("st16654", omap3_uart_base, 4, OMAP3_UART_BAUD, OMAP3_UART_INCLK); return VMM_OK; }
/*
 * Bring up the PBA8 PL011 UART as the default terminal.
 * Maps the UART register window and initializes the low-level PL011
 * driver with the board's baud rate and input clock.
 */
int __init arch_defterm_init(void)
{
	/* Map UART registers; the mapping stays live for console use */
	virtual_addr_t base = vmm_host_iomap(PBA8_DEFAULT_UART_BASE, 0x1000);

	pba8_defterm_base = base;
	pl011_lowlevel_init(pba8_defterm_base,
			    PBA8_DEFAULT_UART_BAUD,
			    PBA8_DEFAULT_UART_INCLK);

	return VMM_OK;
}
/*
 * Initialize the OMAP interrupt controller (INTC).
 *
 * @base:  physical base of the INTC register window
 * @nrirq: number of IRQ lines to wire up
 *
 * Soft-resets the controller, enables autoidle, and attaches the INTC
 * chip with fast-EOI handling to every host IRQ line.
 */
int __init intc_init(physical_addr_t base, u32 nrirq)
{
	u32 irq, regval;

	intc_base = vmm_host_iomap(base, 0x1000);
	intc_nrirq = nrirq;

	/* Request a soft reset of the controller */
	regval = intc_read(INTC_SYSCONFIG);
	regval |= INTC_SYSCONFIG_SOFTRST_M;
	intc_write(INTC_SYSCONFIG, regval);

	/* Busy-wait until the controller reports reset completion */
	while (!(intc_read(INTC_SYSSTATUS) & INTC_SYSSTATUS_RESETDONE_M))
		;

	/* Enable autoidle */
	intc_write(INTC_SYSCONFIG, INTC_SYSCONFIG_AUTOIDLE_M);

	/* Attach chip + fast-EOI handler to every host IRQ line */
	for (irq = 0; irq < intc_nrirq; irq++) {
		vmm_host_irq_set_chip(irq, &intc_chip);
		vmm_host_irq_set_handler(irq, vmm_handle_fast_eoi);
	}

	/* Register the callback used to identify the active IRQ */
	vmm_host_irq_set_active_callback(intc_active_irq);

	return VMM_OK;
}
/*
 * Load a module image from physical memory.
 *
 * @cdev:      character device for status/error messages
 * @phys_addr: physical address of the module image
 * @phys_size: size of the module image in bytes
 *
 * Maps the image, passes it to the module loader, and unmaps it again.
 * Returns VMM_OK, or an error from mapping, loading, or unmapping.
 */
static int cmd_module_load(struct vmm_chardev *cdev,
			   physical_addr_t phys_addr,
			   physical_size_t phys_size)
{
	int rc;
	virtual_addr_t mod_va;
	virtual_size_t mod_sz = phys_size;

	mod_va = vmm_host_iomap(phys_addr, mod_sz);
	if (!mod_va) {
		/* FIX: previously the iomap result was passed to the
		 * module loader unchecked; report the failure instead. */
		vmm_cprintf(cdev, "Error: Failed to map memory.\n");
		return VMM_ENOMEM;
	}

	if ((rc = vmm_modules_load(mod_va, mod_sz))) {
		vmm_host_iounmap(mod_va);
		return rc;
	} else {
		vmm_cprintf(cdev, "Loaded module succesfully\n");
	}

	rc = vmm_host_iounmap(mod_va);
	if (rc) {
		vmm_cprintf(cdev, "Error: Failed to unmap memory.\n");
		return rc;
	}

	return VMM_OK;
}
static virtual_addr_t find_root_system_descriptor(void) { struct acpi_search_area *carea = &acpi_areas[0]; virtual_addr_t area_map; virtual_addr_t rsdp_base = 0; while (carea->area_name) { area_map = vmm_host_iomap(carea->phys_start, (carea->phys_end - carea->phys_start)); BUG_ON((void *)area_map == NULL, "Failed to map the %s for RSDP search.\n", carea->area_name); if ((rsdp_base = locate_rsdp_in_area(area_map, (carea->phys_end - carea->phys_start))) != 0) { break; } rsdp_base = 0; carea++; vmm_host_iounmap(area_map, (carea->phys_end - carea->phys_start)); } return rsdp_base; }
/*
 * Minimal clk_get() stub for EXYNOS4.
 *
 * Ignores @dev and @name and returns an io-mapping of the CMU
 * CLKGATE_IP_PERIR register, cast to an opaque "struct clk *".
 *
 * NOTE(review): the returned pointer is not a real struct clk and the
 * mapping is never unmapped — presumably companion clk_* stubs treat it
 * as an opaque register handle; verify against callers.
 */
static struct clk *clk_get(struct vmm_device *dev, const char *name)
{
	void *cmu_ptr = (void *)vmm_host_iomap(EXYNOS4_PA_CMU +
					       EXYNOS4_CLKGATE_IP_PERIR,
					       sizeof(u32));
	return cmu_ptr;
}
int versatile_clcd_setup(struct clcd_fb *fb, unsigned long framesize) { int rc; u32 use_dma, val[2]; void *screen_base; unsigned long smem_len; physical_addr_t smem_pa; if (!fb->dev->node) { return VMM_EINVALID; } if (vmm_devtree_read_u32(fb->dev->node, "use_dma", &use_dma)) { use_dma = 0; } if (use_dma) { smem_len = framesize; screen_base = (void *)vmm_host_alloc_pages( VMM_SIZE_TO_PAGE(smem_len), VMM_MEMORY_READABLE | VMM_MEMORY_WRITEABLE); if (!screen_base) { vmm_printf("CLCD: unable to alloc framebuffer\n"); return VMM_ENOMEM; } rc = vmm_host_va2pa((virtual_addr_t)screen_base, &smem_pa); if (rc) { return rc; } } else { rc = vmm_devtree_read_u32_array(fb->dev->node, "framebuffer", val, 2); if (rc) { return rc; } smem_pa = val[0]; smem_len = val[1]; if (smem_len < framesize) { return VMM_ENOMEM; } screen_base = (void *)vmm_host_iomap(smem_pa, smem_len); if (!screen_base) { vmm_printf("CLCD: unable to map framebuffer\n"); return VMM_ENOMEM; } } fb->fb.screen_base = screen_base; fb->fb.fix.smem_start = smem_pa; fb->fb.fix.smem_len = smem_len; return 0; }
int vmm_cpu_clocksource_init(void) { int rc; virtual_addr_t sctl_base; /* Map control registers */ sctl_base = vmm_host_iomap(REALVIEW_SCTL_BASE, 0x1000); /* Map timer registers */ pba8_timer2_base = vmm_host_iomap(REALVIEW_PBA8_TIMER2_3_BASE, 0x1000); pba8_timer3_base = pba8_timer2_base + 0x20; /* Initialize timers */ rc = realview_timer_init(sctl_base, pba8_timer2_base, REALVIEW_TIMER3_EnSel, IRQ_PBA8_TIMER2_3, NULL); if (rc) { return rc; } rc = realview_timer_init(sctl_base, pba8_timer3_base, REALVIEW_TIMER4_EnSel, IRQ_PBA8_TIMER2_3, NULL); if (rc) { return rc; } /* Unmap control register */ rc = vmm_host_iounmap(sctl_base, 0x1000); if (rc) { return rc; } /* Configure timer3 as free running source */ rc = realview_timer_counter_setup(pba8_timer3_base); if (rc) { return rc; } realview_timer_enable(pba8_timer3_base); return VMM_OK; }
/*
 * Initialize the RealView PBA8 GIC: distributor first, then the CPU
 * interface. Both register blocks stay mapped for interrupt handling.
 */
int vmm_pic_init(void)
{
	virtual_addr_t dist_base, cpu_base;
	int err;

	/* Distributor */
	dist_base = vmm_host_iomap(REALVIEW_PBA8_GIC_DIST_BASE, 0x1000);
	err = realview_gic_dist_init(0, dist_base, IRQ_PBA8_GIC_START);
	if (err) {
		return err;
	}

	/* CPU interface */
	cpu_base = vmm_host_iomap(REALVIEW_PBA8_GIC_CPU_BASE, 0x1000);
	err = realview_gic_cpu_init(0, cpu_base);
	if (err) {
		return err;
	}

	return VMM_OK;
}
/*
 * Map the OMAP3 32KHz sync timer registers and enable its interface
 * clock. Idempotent: a second call is a no-op.
 */
int __init omap3_s32k_init(void)
{
	/* Already initialized? Nothing to do. */
	if (omap35x_32k_synct_base) {
		return VMM_OK;
	}

	omap35x_32k_synct_base = vmm_host_iomap(OMAP3_S32K_BASE, 0x1000);

	/* Enable I-clock for S32K */
	omap3_cm_setbits(OMAP3_WKUP_CM, OMAP3_CM_ICLKEN_WKUP,
			 OMAP3_CM_ICLKEN_WKUP_EN_32KSYNC_M);

	return VMM_OK;
}
/*
 * Final board init for Versatile Express CA15x4.
 * Probes the north-bridge device tree node, routes stdio to uart0 if
 * present, and (with CONFIG_RTC) syncs the wall clock from rtc0.
 */
int __init arch_board_final_init(void)
{
	struct vmm_devtree_node *node;
	struct vmm_chardev *cdev;
#if defined(CONFIG_RTC)
	struct vmm_rtcdev *rdev;
#endif
	int err;

	/* All VMM API's are available here */
	/* We can register a Board specific resource here */

#if 0 /* FIXME: */
	/* Map control registers */
	ca15x4_sys_base = vmm_host_iomap(VEXPRESS_SYS_BASE, 0x1000);
	/* Unlock Lockable registers */
	vmm_writel(VEXPRESS_SYS_LOCKVAL,
		   (void *)(ca15x4_sys_base + VEXPRESS_SYS_LOCK_OFFSET));
#endif

	/* Probe the north-bridge using the device driver framework */
	node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
				   VMM_DEVTREE_HOSTINFO_NODE_NAME
				   VMM_DEVTREE_PATH_SEPARATOR_STRING "nbridge");
	if (!node) {
		return VMM_ENOTAVAIL;
	}
	err = vmm_devdrv_probe(node, NULL);
	if (err) {
		return err;
	}

	/* Route stdio to uart0 if such a character device exists */
	cdev = vmm_chardev_find("uart0");
	if (cdev) {
		vmm_stdio_change_indevice(cdev);
		vmm_stdio_change_outdevice(cdev);
	}

	/* Syncup wall-clock time from rtc0 */
#if defined(CONFIG_RTC)
	rdev = vmm_rtcdev_find("rtc0");
	if (rdev) {
		err = vmm_rtcdev_sync_wallclock(rdev);
		if (err) {
			return err;
		}
	}
#endif

	return VMM_OK;
}
/*
 * Initialize the Versatile clocksource using SP804 timer1.
 * Selects the 1MHz TIMCLK for timer2's enable-select field in the
 * system controller, then registers timer1 (at offset 0x20 in the
 * TIMER0/1 page) as a free-running 1MHz clocksource.
 */
int __init arch_clocksource_init(void)
{
	virtual_addr_t sysctl;
	u32 reg;
	int err;

	/* Map system controller registers */
	sysctl = vmm_host_iomap(VERSATILE_SCTL_BASE, 0x1000);

	/*
	 * set clock frequency:
	 *      REALVIEW_REFCLK is 32KHz
	 *      REALVIEW_TIMCLK is 1MHz
	 */
	reg = vmm_readl((void *)sysctl);
	reg |= (VERSATILE_TIMCLK << VERSATILE_TIMER2_EnSel);
	vmm_writel(reg, (void *)sysctl);

	/* Done with the system controller */
	err = vmm_host_iounmap(sysctl, 0x1000);
	if (err) {
		return err;
	}

	/* Timer1 sits 0x20 into the TIMER0/1 register page */
	sp804_timer1_base = vmm_host_iomap(VERSATILE_TIMER0_1_BASE, 0x1000);
	sp804_timer1_base += 0x20;

	/* Register timer1 as a 1MHz clocksource, rating 300 */
	err = sp804_clocksource_init(sp804_timer1_base,
				     "sp804_timer1", 300, 1000000, 20);
	if (err) {
		return err;
	}

	return VMM_OK;
}
/*
 * One-time global init for the OMAP3 general purpose timers.
 *
 * @gpt_count: number of entries in @cfg
 * @cfg:       per-timer configuration table (stored in a file global)
 *
 * Maps each timer's register page and records the virtual base in the
 * config entry. Idempotent: once omap3_gpt_config is set, later calls
 * are no-ops. Returns VMM_OK or VMM_EFAIL on mapping failure.
 */
int __init omap3_gpt_global_init(u32 gpt_count, struct omap3_gpt_cfg *cfg)
{
	u32 i; /* FIX: was int, compared against u32 gpt_count */

	if (!omap3_gpt_config) {
		omap3_gpt_config = cfg;
		for (i = 0; i < gpt_count; i++) {
			omap3_gpt_config[i].base_va =
				vmm_host_iomap(omap3_gpt_config[i].base_pa,
					       0x1000);
			if (!omap3_gpt_config[i].base_va) {
				/* FIX: clear the global so a later call can
				 * retry; previously a failed init left
				 * omap3_gpt_config set, turning every retry
				 * into a silent no-op with dangling entries.
				 * NOTE(review): earlier successful mappings
				 * are not unmapped here — confirm the iounmap
				 * signature for this file before adding it. */
				omap3_gpt_config = NULL;
				return VMM_EFAIL;
			}
		}
	}

	return VMM_OK;
}
/*
 * Initialize the CA15x4 clocksource using SP804 timer1.
 * Selects 1MHz TIMCLK in the system controller, initializes timer1
 * without an IRQ handler, and starts it as a free-running counter.
 */
int __init arch_cpu_clocksource_init(void)
{
	virtual_addr_t sysctl;
	u32 reg;
	int err;

	/* Map system control registers */
	sysctl = vmm_host_iomap(V2M_SYSCTL, 0x1000);

	/* Select 1MHz TIMCLK as the reference clock for SP804 timers */
	reg = vmm_readl((void *)sysctl);
	reg |= SCCTRL_TIMEREN1SEL_TIMCLK;
	vmm_writel(reg, (void *)sysctl);

	/* Done with the system control registers */
	err = vmm_host_iounmap(sysctl, 0x1000);
	if (err) {
		return err;
	}

	/* Map timer1 registers (kept mapped for the clocksource) */
	ca15x4_timer1_base = vmm_host_iomap(V2M_TIMER1, 0x1000);

	/* Basic init, no IRQ handler needed for a clocksource */
	err = sp804_timer_init(ca15x4_timer1_base, IRQ_V2M_TIMER1, NULL);
	if (err) {
		return err;
	}

	/* Start timer1 as a free running counter */
	err = sp804_timer_counter_start(ca15x4_timer1_base);
	if (err) {
		return err;
	}
	sp804_timer_enable(ca15x4_timer1_base);

	return VMM_OK;
}
/*
 * Register SP804 timer0 as the system clockchip (Versatile).
 * Selects the 1MHz TIMCLK for timer1's enable-select field in the
 * system controller, then initializes timer0 as a 1MHz clockchip.
 */
int __init arch_clockchip_init(void)
{
	virtual_addr_t sysctl;
	u32 reg;
	int err;

	/* Map system controller registers */
	sysctl = vmm_host_iomap(VERSATILE_SCTL_BASE, 0x1000);

	/*
	 * set clock frequency:
	 *      REALVIEW_REFCLK is 32KHz
	 *      REALVIEW_TIMCLK is 1MHz
	 */
	reg = vmm_readl((void *)sysctl);
	reg |= (VERSATILE_TIMCLK << VERSATILE_TIMER1_EnSel);
	vmm_writel(reg, (void *)sysctl);

	/* Done with the system controller */
	err = vmm_host_iounmap(sysctl, 0x1000);
	if (err) {
		return err;
	}

	/* Map timer0 registers (kept mapped for the clockchip) */
	sp804_timer0_base = vmm_host_iomap(VERSATILE_TIMER0_1_BASE, 0x1000);

	/* Register timer0 as a clockchip with rating 300 @ 1MHz */
	err = sp804_clockchip_init(sp804_timer0_base, INT_TIMERINT0_1,
				   "sp804_timer0", 300, 1000000, 0);
	if (err) {
		return err;
	}

	return VMM_OK;
}
/*
 * Shut down the EXYNOS4 board.
 * No dedicated power-off exists; a PMU software reset is issued
 * instead. If the reset works this never returns; otherwise failure
 * is reported after a 500ms grace delay.
 */
int arch_board_shutdown(void)
{
	/* FIXME: For now we do a soft reset */
	void *pmu_ptr;

	pmu_ptr = (void *)vmm_host_iomap(EXYNOS4_PA_PMU + EXYNOS_SWRESET,
					 sizeof(u32));
	if (pmu_ptr) {
		/* Trigger a Software reset */
		vmm_writel(0x1, pmu_ptr);
		vmm_host_iounmap((virtual_addr_t) pmu_ptr, sizeof(u32));
	}

	/* Give the reset time to take effect */
	vmm_mdelay(500);

	/* Still executing => the reset did not happen */
	vmm_printf("%s: failed\n", __func__);

	return VMM_EFAIL;
}
/*
 * Read an ACPI system description table from physical memory.
 *
 * @addr: physical address of the table
 * @tb:   destination buffer; when NULL, only the table length is returned
 * @size: capacity of @tb in bytes
 * @name: expected table signature (e.g. "RSDT")
 *
 * Validates the signature and checksum before copying the full table.
 * Returns the table length, or VMM_EFAIL on any failure.
 *
 * NOTE(review): the sdt_va mapping is never unmapped on any path —
 * confirm whether leaking one page per call is intentional here.
 */
static int acpi_read_sdt_at(physical_addr_t addr,
			    struct acpi_sdt_hdr *tb,
			    size_t size,
			    const char *name)
{
	struct acpi_sdt_hdr hdr;
	void *sdt_va = NULL;

	sdt_va = (void *)vmm_host_iomap(addr, PAGE_SIZE);
	if (unlikely(!sdt_va)) {
		/* FIX: format string was missing the %s for __func__, so
		 * the address argument was consumed by %x as garbage. */
		vmm_printf("ACPI ERROR: %s: Failed to map physical address 0x%x.\n",
			   __func__, addr);
		return VMM_EFAIL;
	}

	/* if NULL is supplied, we only return the size of the table */
	if (tb == NULL) {
		vmm_memcpy(&hdr, sdt_va, sizeof(struct acpi_sdt_hdr));
		return hdr.len;
	}

	/* Copy just the header first so signature/length can be checked */
	vmm_memcpy(tb, sdt_va, sizeof(struct acpi_sdt_hdr));

	if (acpi_check_signature((const char *)tb->signature,
				 (const char *)name)) {
		vmm_printf("ACPI ERROR: acpi %s signature does not match\n",
			   name);
		return VMM_EFAIL;
	}

	if (size < tb->len) {
		vmm_printf("ACPI ERROR: acpi buffer too small for %s\n", name);
		return VMM_EFAIL;
	}

	/* Copy the full table and verify its checksum */
	vmm_memcpy(tb, sdt_va, size);

	if (acpi_check_csum(tb, tb->len)) {
		vmm_printf("ACPI ERROR: acpi %s checksum does not match\n",
			   name);
		return VMM_EFAIL;
	}

	return tb->len;
}
/*
 * Register the 32KHz sync counter at @base as a clocksource.
 * The register mapping is stored in the clocksource private pointer
 * and stays live for the clocksource's lifetime.
 */
int __init s32k_clocksource_init(physical_addr_t base)
{
	virtual_addr_t regs;
	int err;

	/* Map registers and stash them in the clocksource private data */
	regs = vmm_host_iomap(base, 0x1000);
	s32k_clksrc.priv = (void *)regs;

	/* Derive mult/shift from the 32KHz counter frequency */
	vmm_clocks_calc_mult_shift(&s32k_clksrc.mult,
				   &s32k_clksrc.shift,
				   S32K_FREQ_HZ, VMM_NSEC_PER_SEC, 10);

	/* Register clocksource */
	err = vmm_clocksource_register(&s32k_clksrc);
	if (err) {
		return err;
	}

	return VMM_OK;
}
/*
 * Final board init for RealView PBA8.
 * Maps the system registers, unlocks the lockable register set,
 * probes the north-bridge device tree node, and routes stdio to
 * uart0 when present.
 */
int __init arch_board_final_init(void)
{
	struct vmm_devtree_node *node;
	struct vmm_chardev *cdev;
	int err;

	/* All VMM API's are available here */
	/* We can register a Board specific resource here */

	/* Map system registers; the mapping stays live */
	pba8_sys_base = vmm_host_iomap(REALVIEW_SYS_BASE, 0x1000);

	/* Unlock Lockable registers */
	vmm_writel(REALVIEW_SYS_LOCKVAL,
		   (void *)(pba8_sys_base + REALVIEW_SYS_LOCK_OFFSET));

	/* Probe the north-bridge using the device driver framework */
	node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
				   VMM_DEVTREE_HOSTINFO_NODE_NAME
				   VMM_DEVTREE_PATH_SEPARATOR_STRING "nbridge");
	if (!node) {
		return VMM_ENOTAVAIL;
	}
	err = vmm_devdrv_probe(node);
	if (err) {
		return err;
	}

	/* Route stdio to uart0 if such a character device exists */
	cdev = vmm_chardev_find("uart0");
	if (cdev) {
		vmm_stdio_change_device(cdev);
	}

	return VMM_OK;
}
static int sram_probe(struct vmm_device *dev, const struct vmm_devtree_nodeid *nodeid) { void *virt_base = NULL; struct sram_dev *sram = NULL; physical_addr_t start = 0; virtual_size_t size = 0; int ret = VMM_OK; ret = vmm_devtree_regaddr(dev->of_node, &start, 0); if (VMM_OK != ret) { vmm_printf("%s: Failed to get device base\n", dev->name); return ret; } ret = vmm_devtree_regsize(dev->of_node, &size, 0); if (VMM_OK != ret) { vmm_printf("%s: Failed to get device size\n", dev->name); goto err_out; } virt_base = (void *)vmm_host_iomap(start, size); if (NULL == virt_base) { vmm_printf("%s: Failed to get remap memory\n", dev->name); ret = VMM_ENOMEM; goto err_out; } sram = vmm_devm_zalloc(dev, sizeof(*sram)); if (!sram) { vmm_printf("%s: Failed to allocate structure\n", dev->name); ret = VMM_ENOMEM; goto err_out; } sram->clk = devm_clk_get(dev, NULL); if (VMM_IS_ERR(sram->clk)) sram->clk = NULL; else clk_prepare_enable(sram->clk); sram->pool = devm_gen_pool_create(dev, SRAM_GRANULARITY_LOG); if (!sram->pool) { vmm_printf("%s: Failed to create memory pool\n", dev->name); ret = VMM_ENOMEM; } ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base, start, size); if (ret < 0) { vmm_printf("%s: Failed to add memory chunk\n", dev->name); goto err_out; } vmm_devdrv_set_data(dev, sram); vmm_printf("%s: SRAM pool: %ld KiB @ 0x%p\n", dev->name, size / 1024, virt_base); return 0; err_out: if (sram->pool) gen_pool_destroy(sram->pool); #if 0 if (sram->clk) clk_disable_unprepare(sram->clk); #endif /* 0 */ if (sram) vmm_free(sram); sram = NULL; if (virt_base) vmm_host_iounmap((virtual_addr_t)virt_base); virt_base = NULL; return ret; }
/*
 * Parse the ACPI tables: locate the RSDP, read the RSDT, then walk
 * and process every system description table it references.
 *
 * Returns VMM_OK on success or VMM_EFAIL on any failure; mappings
 * created along the way are released via the goto-cleanup chain.
 */
int __init acpi_init(void)
{
	int i, nr_sys_hdr, ret = VMM_EFAIL;
	struct acpi_rsdp *root_desc = NULL;
	struct acpi_rsdt rsdt, *prsdt;

	vmm_printf("Starting to parse ACPI tables...\n");

	root_desc = (struct acpi_rsdp *)find_root_system_descriptor();
	if (root_desc == NULL) {
		vmm_printf("ACPI ERROR: No root system descriptor"
			   " table found!\n");
		goto rdesc_fail;
	}

	if (root_desc->rsdt_addr == 0) {
		vmm_printf("ACPI ERROR: No root descriptor found"
			   " in RSD Pointer!\n");
		goto rsdt_fail;
	}

	prsdt = (struct acpi_rsdt *)vmm_host_iomap(root_desc->rsdt_addr,
						   PAGE_SIZE);
	if (unlikely(!prsdt)) {
		/* FIX: format string was missing the %s for __func__, so
		 * the address argument was consumed by %x as garbage. */
		vmm_printf("ACPI ERROR: %s: Failed to map physical address 0x%x.\n",
			   __func__, root_desc->rsdt_addr);
		goto rsdt_fail;
	}

	if (acpi_read_sdt_at(prsdt, (struct acpi_sdt_hdr *)&rsdt,
			     sizeof(struct acpi_rsdt), RSDT_SIGNATURE) < 0) {
		goto sdt_fail;
	}

	/* Number of 32-bit table pointers following the RSDT header */
	nr_sys_hdr = (rsdt.hdr.len - sizeof(struct acpi_sdt_hdr))/sizeof(u32);

	for (i = 0; i < nr_sys_hdr; i++) {
		struct acpi_sdt_hdr *hdr;
		char sign[32];

		memset(sign, 0, sizeof(sign));
		hdr = (struct acpi_sdt_hdr *)
			vmm_host_iomap(rsdt.data[i], PAGE_SIZE);
		if (hdr == NULL) {
			vmm_printf("ACPI ERROR: Cannot read header at 0x%x\n",
				   rsdt.data[i]);
			goto sdt_fail;
		}
		memcpy(sign, hdr->signature, SDT_SIGN_LEN);
		sign[SDT_SIGN_LEN] = 0;

		if (process_acpi_sdt_table((char *)sign, (u32 *)hdr)
							!= VMM_OK) {
			vmm_host_iounmap((virtual_addr_t)hdr);
			goto sdt_fail;
		}
		vmm_host_iounmap((virtual_addr_t)hdr);
	}

	ret = VMM_OK;

sdt_fail:
	vmm_host_iounmap((virtual_addr_t)prsdt);
rsdt_fail:
	vmm_host_iounmap((virtual_addr_t)root_desc);
rdesc_fail:
	return ret;
}
/*
 * Parse the ACPI tables into a lazily-allocated global context.
 *
 * Locates the RSDP, reads the RSDT, records the signature/length of
 * every referenced system table in acpi_ctxt->sdt_trans[], and maps
 * the MADT ("APIC") table for later use. Idempotent: once acpi_ctxt
 * is set, later calls return VMM_OK immediately.
 *
 * NOTE(review): the per-table hdr mappings are never unmapped (the
 * iounmap call is commented out) — presumably kept alive so the
 * recorded headers stay accessible; confirm. The success path also
 * never unmaps the RSDP search area.
 *
 * Returns VMM_OK or VMM_EFAIL (context freed on failure).
 */
int acpi_init(void)
{
	int i;

	if (!acpi_ctxt) {
		acpi_ctxt = vmm_malloc(sizeof(struct acpi_context));
		if (!acpi_ctxt) {
			vmm_printf("ACPI ERROR: Failed to allocate memory for"
				   " ACPI context.\n");
			return VMM_EFAIL;
		}

		acpi_ctxt->root_desc =
			(struct acpi_rsdp *)find_root_system_descriptor();
		acpi_ctxt->rsdt = NULL;

		if (acpi_ctxt->root_desc == NULL) {
			vmm_printf("ACPI ERROR: No root system descriptor"
				   " table found!\n");
			goto rdesc_fail;
		}

		if (acpi_ctxt->root_desc->rsdt_addr == 0) {
			vmm_printf("ACPI ERROR: No root descriptor found"
				   " in RSD Pointer!\n");
			goto rsdt_fail;
		}

		acpi_ctxt->rsdt =
			(struct acpi_rsdt *)vmm_malloc(sizeof(struct acpi_rsdt));
		if (!acpi_ctxt->rsdt)
			goto rsdt_fail;

		if (acpi_read_sdt_at(acpi_ctxt->root_desc->rsdt_addr,
				     (struct acpi_sdt_hdr *)acpi_ctxt->rsdt,
				     sizeof(struct acpi_rsdt),
				     RSDT_SIGNATURE) < 0) {
			goto sdt_fail;
		}

		/* Number of 32-bit table pointers after the RSDT header */
		acpi_ctxt->nr_sys_hdr = (acpi_ctxt->rsdt->hdr.len
				- sizeof(struct acpi_sdt_hdr))/sizeof(u32);

		/* Record signature and length of every referenced table */
		for (i = 0; i < acpi_ctxt->nr_sys_hdr; i++) {
			struct acpi_sdt_hdr *hdr;

			hdr = (struct acpi_sdt_hdr *)
				vmm_host_iomap(acpi_ctxt->rsdt->data[i],
					       PAGE_SIZE);
			if (hdr == NULL) {
				vmm_printf("ACPI ERROR: Cannot read header at 0x%x\n",
					   acpi_ctxt->rsdt->data[i]);
				goto sdt_fail;
			}

			vmm_memcpy(&acpi_ctxt->sdt_trans[i].signature,
				   &hdr->signature, SDT_SIGN_LEN);
			acpi_ctxt->sdt_trans[i].signature[SDT_SIGN_LEN] = '\0';
			acpi_ctxt->sdt_trans[i].length = hdr->len;
			/* Mapping intentionally(?) kept — see NOTE above */
			//vmm_host_iounmap((virtual_addr_t)hdr, PAGE_SIZE);
		}

		/* Map the MADT for interrupt controller discovery */
		acpi_ctxt->madt_hdr = (struct acpi_madt_hdr *)
			vmm_host_iomap(acpi_get_table_base("APIC"), PAGE_SIZE);
		if (acpi_ctxt->madt_hdr == NULL)
			goto sdt_fail;
	}

	return VMM_OK;

sdt_fail:
	vmm_free(acpi_ctxt->rsdt);
rsdt_fail:
	vmm_host_iounmap((virtual_addr_t)acpi_ctxt->root_desc, PAGE_SIZE);
rdesc_fail:
	vmm_free(acpi_ctxt);
	acpi_ctxt = NULL;
	return VMM_EFAIL;
}
/*
 * Hex-dump a range of host physical memory to @cdev.
 *
 * @cdev: character device to print to
 * @addr: starting physical address (rounded down to @wsz alignment)
 * @wsz:  word size in bytes (1, 2 or 4; other values print nothing)
 * @wcnt: number of words to dump
 *
 * The region is accessed one page at a time: the page containing the
 * current address is io-mapped, read, and remapped whenever the read
 * position crosses a page boundary. A fresh address label is printed
 * every 16 bytes.
 * NOTE(review): the vmm_host_iomap() results are not checked — assumes
 * the mapping cannot fail; confirm against vmm_host_iomap semantics.
 *
 * Returns VMM_OK, or the error code from a failed unmap.
 */
int cmd_memory_dump(struct vmm_chardev *cdev,
		    physical_addr_t addr, u32 wsz, u32 wcnt)
{
	int rc;
	u32 w;
	bool page_mapped;
	virtual_addr_t page_va, addr_offset;
	physical_addr_t page_pa;

	/* Align the start address down to the word size */
	addr = addr - (addr & (wsz - 1));

	/* Header line; width depends on physical address size */
	if (sizeof(physical_addr_t) == sizeof(u64)) {
		vmm_cprintf(cdev, "Host physical memory "
				  "0x%016llx - 0x%016llx:",
				  (u64)addr, (u64)(addr + wsz*wcnt));
	} else {
		vmm_cprintf(cdev, "Host physical memory "
				  "0x%08x - 0x%08x:",
				  (u32)addr, (u32)(addr + wsz*wcnt));
	}

	/* Map the page containing the first address */
	w = 0;
	page_pa = addr - (addr & VMM_PAGE_MASK);
	page_va = vmm_host_iomap(page_pa, VMM_PAGE_SIZE);
	page_mapped = TRUE;

	while (w < wcnt) {
		/* Remap when the read position leaves the current page */
		if (page_pa != (addr - (addr & VMM_PAGE_MASK))) {
			if (page_mapped) {
				rc = vmm_host_iounmap(page_va, VMM_PAGE_SIZE);
				if (rc) {
					vmm_cprintf(cdev,
						    "Error: Failed to unmap memory.\n");
					return rc;
				}
				page_mapped = FALSE;
			}
			page_pa = addr - (addr & VMM_PAGE_MASK);
			page_va = vmm_host_iomap(page_pa, VMM_PAGE_SIZE);
			page_mapped = TRUE;
		}

		/* New line with address label every 16 bytes */
		if (!(w * wsz & 0x0000000F)) {
			if (sizeof(physical_addr_t) == sizeof(u64)) {
				vmm_cprintf(cdev, "\n%016llx:", addr);
			} else {
				vmm_cprintf(cdev, "\n%08x:", addr);
			}
		}

		/* Offset of the current word within the mapped page */
		addr_offset = (addr & VMM_PAGE_MASK);
		switch (wsz) {
		case 1:
			vmm_cprintf(cdev, " %02x",
				    *((u8 *)(page_va + addr_offset)));
			break;
		case 2:
			vmm_cprintf(cdev, " %04x",
				    *((u16 *)(page_va + addr_offset)));
			break;
		case 4:
			vmm_cprintf(cdev, " %08x",
				    *((u32 *)(page_va + addr_offset)));
			break;
		default:
			break;
		};
		addr += wsz;
		w++;
	}
	vmm_cprintf(cdev, "\n");

	/* Release the final mapping */
	if (page_mapped) {
		rc = vmm_host_iounmap(page_va, VMM_PAGE_SIZE);
		if (rc) {
			vmm_cprintf(cdev, "Error: Failed to unmap memory.\n");
			return rc;
		}
		page_mapped = FALSE;
	}

	return VMM_OK;
}