/**
 * Parse the ACPI tables at boot: locate the RSDP, map and validate the
 * RSDT, then walk every system descriptor table it references and hand
 * each one to process_acpi_sdt_table().
 *
 * Returns VMM_OK on success, VMM_EFAIL on any failure.
 *
 * NOTE(review): the rsdt_fail path unmaps root_desc, which implies
 * find_root_system_descriptor() returns an iomapped pointer — confirm.
 */
int __init acpi_init(void)
{
	int i, nr_sys_hdr, ret = VMM_EFAIL;
	struct acpi_rsdp *root_desc = NULL;
	struct acpi_rsdt rsdt, *prsdt;

	vmm_printf("Starting to parse ACPI tables...\n");

	root_desc = (struct acpi_rsdp *)find_root_system_descriptor();
	if (root_desc == NULL) {
		vmm_printf("ACPI ERROR: No root system descriptor"
			   " table found!\n");
		goto rdesc_fail;
	}

	if (root_desc->rsdt_addr == 0) {
		vmm_printf("ACPI ERROR: No root descriptor found"
			   " in RSD Pointer!\n");
		goto rsdt_fail;
	}

	prsdt = (struct acpi_rsdt *)vmm_host_iomap(root_desc->rsdt_addr,
						   PAGE_SIZE);
	if (unlikely(!prsdt)) {
		/* BUG FIX: format string passed __func__ without a
		 * matching %s conversion (printf arg mismatch is UB). */
		vmm_printf("ACPI ERROR: %s: Failed to map physical"
			   " address 0x%x.\n",
			   __func__, root_desc->rsdt_addr);
		goto rsdt_fail;
	}

	/* Copy the RSDT out of the mapping, checking its signature. */
	if (acpi_read_sdt_at(prsdt, (struct acpi_sdt_hdr *)&rsdt,
			     sizeof(struct acpi_rsdt),
			     RSDT_SIGNATURE) < 0) {
		goto sdt_fail;
	}

	/* Number of 32-bit table pointers following the RSDT header. */
	nr_sys_hdr = (rsdt.hdr.len - sizeof(struct acpi_sdt_hdr))/sizeof(u32);

	for (i = 0; i < nr_sys_hdr; i++) {
		struct acpi_sdt_hdr *hdr;
		char sign[32];

		memset(sign, 0, sizeof(sign));

		hdr = (struct acpi_sdt_hdr *)vmm_host_iomap(rsdt.data[i],
							    PAGE_SIZE);
		if (hdr == NULL) {
			vmm_printf("ACPI ERROR: Cannot read header at 0x%x\n",
				   rsdt.data[i]);
			goto sdt_fail;
		}

		/* NUL-terminate the raw 4-byte table signature. */
		memcpy(sign, hdr->signature, SDT_SIGN_LEN);
		sign[SDT_SIGN_LEN] = 0;

		if (process_acpi_sdt_table((char *)sign, (u32 *)hdr)
		    != VMM_OK) {
			vmm_host_iounmap((virtual_addr_t)hdr);
			goto sdt_fail;
		}
		vmm_host_iounmap((virtual_addr_t)hdr);
	}

	ret = VMM_OK;

sdt_fail:
	vmm_host_iounmap((virtual_addr_t)prsdt);
rsdt_fail:
	vmm_host_iounmap((virtual_addr_t)root_desc);
rdesc_fail:
	return ret;
}
static int sram_probe(struct vmm_device *dev, const struct vmm_devtree_nodeid *nodeid) { void *virt_base = NULL; struct sram_dev *sram = NULL; physical_addr_t start = 0; virtual_size_t size = 0; int ret = VMM_OK; ret = vmm_devtree_regaddr(dev->of_node, &start, 0); if (VMM_OK != ret) { vmm_printf("%s: Failed to get device base\n", dev->name); return ret; } ret = vmm_devtree_regsize(dev->of_node, &size, 0); if (VMM_OK != ret) { vmm_printf("%s: Failed to get device size\n", dev->name); goto err_out; } virt_base = (void *)vmm_host_iomap(start, size); if (NULL == virt_base) { vmm_printf("%s: Failed to get remap memory\n", dev->name); ret = VMM_ENOMEM; goto err_out; } sram = vmm_devm_zalloc(dev, sizeof(*sram)); if (!sram) { vmm_printf("%s: Failed to allocate structure\n", dev->name); ret = VMM_ENOMEM; goto err_out; } sram->clk = devm_clk_get(dev, NULL); if (VMM_IS_ERR(sram->clk)) sram->clk = NULL; else clk_prepare_enable(sram->clk); sram->pool = devm_gen_pool_create(dev, SRAM_GRANULARITY_LOG); if (!sram->pool) { vmm_printf("%s: Failed to create memory pool\n", dev->name); ret = VMM_ENOMEM; } ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base, start, size); if (ret < 0) { vmm_printf("%s: Failed to add memory chunk\n", dev->name); goto err_out; } vmm_devdrv_set_data(dev, sram); vmm_printf("%s: SRAM pool: %ld KiB @ 0x%p\n", dev->name, size / 1024, virt_base); return 0; err_out: if (sram->pool) gen_pool_destroy(sram->pool); #if 0 if (sram->clk) clk_disable_unprepare(sram->clk); #endif /* 0 */ if (sram) vmm_free(sram); sram = NULL; if (virt_base) vmm_host_iounmap((virtual_addr_t)virt_base); virt_base = NULL; return ret; }
/*
 * Lazily build the global ACPI context (acpi_ctxt): locate the RSDP,
 * copy the RSDT into heap memory, record the signature and length of
 * every referenced system descriptor table, and keep the MADT ("APIC")
 * header mapped for later use.
 *
 * Returns VMM_OK on success (including when the context already
 * exists), VMM_EFAIL on any failure.
 */
int acpi_init(void)
{
	int i;

	/* Initialize only once; a non-NULL acpi_ctxt means a previous
	 * call already succeeded. */
	if (!acpi_ctxt) {
		acpi_ctxt = vmm_malloc(sizeof(struct acpi_context));
		if (!acpi_ctxt) {
			vmm_printf("ACPI ERROR: Failed to allocate memory for"
				   " ACPI context.\n");
			return VMM_EFAIL;
		}

		/* Locate the Root System Description Pointer.
		 * NOTE(review): the rsdt_fail path iounmaps root_desc,
		 * which implies find_root_system_descriptor() returns an
		 * iomapped pointer — confirm. */
		acpi_ctxt->root_desc =
			(struct acpi_rsdp *)find_root_system_descriptor();
		acpi_ctxt->rsdt = NULL;

		if (acpi_ctxt->root_desc == NULL) {
			vmm_printf("ACPI ERROR: No root system descriptor"
				   " table found!\n");
			goto rdesc_fail;
		}

		if (acpi_ctxt->root_desc->rsdt_addr == 0) {
			vmm_printf("ACPI ERROR: No root descriptor found"
				   " in RSD Pointer!\n");
			goto rsdt_fail;
		}

		acpi_ctxt->rsdt = (struct acpi_rsdt *)
			vmm_malloc(sizeof(struct acpi_rsdt));
		if (!acpi_ctxt->rsdt)
			goto rsdt_fail;

		/* Copy the RSDT out of physical memory, validating its
		 * "RSDT" signature and checksum. */
		if (acpi_read_sdt_at(acpi_ctxt->root_desc->rsdt_addr,
				     (struct acpi_sdt_hdr *)acpi_ctxt->rsdt,
				     sizeof(struct acpi_rsdt),
				     RSDT_SIGNATURE) < 0) {
			goto sdt_fail;
		}

		/* Number of 32-bit table pointers after the RSDT header.
		 * NOTE(review): not bounded against the capacity of
		 * sdt_trans[] — confirm the array is large enough. */
		acpi_ctxt->nr_sys_hdr = (acpi_ctxt->rsdt->hdr.len
			- sizeof(struct acpi_sdt_hdr))/sizeof(u32);

		/* Cache each table's NUL-terminated signature and length
		 * in the translation array. */
		for (i = 0; i < acpi_ctxt->nr_sys_hdr; i++) {
			struct acpi_sdt_hdr *hdr;

			hdr = (struct acpi_sdt_hdr *)
				vmm_host_iomap(acpi_ctxt->rsdt->data[i],
					       PAGE_SIZE);
			if (hdr == NULL) {
				vmm_printf("ACPI ERROR: Cannot read header at 0x%x\n",
					   acpi_ctxt->rsdt->data[i]);
				goto sdt_fail;
			}

			vmm_memcpy(&acpi_ctxt->sdt_trans[i].signature,
				   &hdr->signature, SDT_SIGN_LEN);
			acpi_ctxt->sdt_trans[i].signature[SDT_SIGN_LEN] = '\0';
			acpi_ctxt->sdt_trans[i].length = hdr->len;

			/* NOTE(review): hdr is never unmapped (the call
			 * below is commented out), so each iteration leaks
			 * a PAGE_SIZE mapping — confirm whether keeping
			 * the tables mapped is intentional. */
			//vmm_host_iounmap((virtual_addr_t)hdr, PAGE_SIZE);
		}

		/* Keep the MADT ("APIC" signature) header mapped; it is
		 * stored in the context for later interrupt setup. */
		acpi_ctxt->madt_hdr = (struct acpi_madt_hdr *)
			vmm_host_iomap(acpi_get_table_base("APIC"),
				       PAGE_SIZE);
		if (acpi_ctxt->madt_hdr == NULL)
			goto sdt_fail;
	}

	return VMM_OK;

	/* Unwind in reverse acquisition order. */
sdt_fail:
	vmm_free(acpi_ctxt->rsdt);
rsdt_fail:
	vmm_host_iounmap((virtual_addr_t)acpi_ctxt->root_desc, PAGE_SIZE);
rdesc_fail:
	vmm_free(acpi_ctxt);
	acpi_ctxt = NULL;

	return VMM_EFAIL;
}
static void clk_put(struct clk *clk) { vmm_host_iounmap((virtual_addr_t) clk, sizeof(u32)); }
/** * This is a temporary solution until we have a clock management * API */ void clk_put(struct clk *clk) { vmm_host_iounmap((virtual_addr_t)clk); }
/**
 * Dump wcnt words of wsz bytes (1, 2 or 4) of host physical memory
 * starting at addr, mapping one page at a time.
 *
 * Returns VMM_OK on success, negative error code on map/unmap failure.
 */
int cmd_memory_dump(struct vmm_chardev *cdev, physical_addr_t addr,
		    u32 wsz, u32 wcnt)
{
	int rc;
	u32 w;
	bool page_mapped;
	virtual_addr_t page_va, addr_offset;
	physical_addr_t page_pa;

	/* Align the start address down to the word size
	 * (assumes wsz is a power of two). */
	addr = addr - (addr & (wsz - 1));

	if (sizeof(physical_addr_t) == sizeof(u64)) {
		vmm_cprintf(cdev, "Host physical memory "
				  "0x%016llx - 0x%016llx:",
			    (u64)addr, (u64)(addr + wsz*wcnt));
	} else {
		vmm_cprintf(cdev, "Host physical memory "
				  "0x%08x - 0x%08x:",
			    (u32)addr, (u32)(addr + wsz*wcnt));
	}

	w = 0;
	page_pa = addr - (addr & VMM_PAGE_MASK);
	page_va = vmm_host_iomap(page_pa, VMM_PAGE_SIZE);
	/* BUG FIX: the iomap result was never checked before use. */
	if (!page_va) {
		vmm_cprintf(cdev, "Error: Failed to map memory.\n");
		return VMM_EFAIL;
	}
	page_mapped = TRUE;

	while (w < wcnt) {
		/* Remap whenever addr crosses into a new page. */
		if (page_pa != (addr - (addr & VMM_PAGE_MASK))) {
			if (page_mapped) {
				rc = vmm_host_iounmap(page_va, VMM_PAGE_SIZE);
				if (rc) {
					vmm_cprintf(cdev,
						"Error: Failed to unmap memory.\n");
					return rc;
				}
				page_mapped = FALSE;
			}
			page_pa = addr - (addr & VMM_PAGE_MASK);
			page_va = vmm_host_iomap(page_pa, VMM_PAGE_SIZE);
			if (!page_va) {
				vmm_cprintf(cdev,
					"Error: Failed to map memory.\n");
				return VMM_EFAIL;
			}
			page_mapped = TRUE;
		}

		/* New output row every 16 bytes. */
		if (!(w * wsz & 0x0000000F)) {
			if (sizeof(physical_addr_t) == sizeof(u64)) {
				vmm_cprintf(cdev, "\n%016llx:", addr);
			} else {
				vmm_cprintf(cdev, "\n%08x:", addr);
			}
		}

		addr_offset = (addr & VMM_PAGE_MASK);
		switch (wsz) {
		case 1:
			vmm_cprintf(cdev, " %02x",
				    *((u8 *)(page_va + addr_offset)));
			break;
		case 2:
			vmm_cprintf(cdev, " %04x",
				    *((u16 *)(page_va + addr_offset)));
			break;
		case 4:
			vmm_cprintf(cdev, " %08x",
				    *((u32 *)(page_va + addr_offset)));
			break;
		default:
			break;
		}

		addr += wsz;
		w++;
	}
	vmm_cprintf(cdev, "\n");

	if (page_mapped) {
		rc = vmm_host_iounmap(page_va, VMM_PAGE_SIZE);
		if (rc) {
			vmm_cprintf(cdev, "Error: Failed to unmap memory.\n");
			return rc;
		}
		page_mapped = FALSE;
	}

	return VMM_OK;
}
/**
 * Copy bcnt bytes of host physical memory from saddr to daddr,
 * mapping one source page and one destination page at a time and
 * copying the largest page-bounded span on each pass.
 *
 * Returns VMM_OK on success, negative error code on map/unmap failure.
 */
int cmd_memory_copy(struct vmm_chardev *cdev, physical_addr_t daddr,
		    physical_addr_t saddr, u32 bcnt)
{
	int rc;
	u32 b = 0, b2copy;
	bool dpage_mapped, spage_mapped;
	virtual_addr_t dva, dpage_va, sva, spage_va;
	physical_addr_t dpage_pa, spage_pa;

	dpage_pa = daddr - (daddr & VMM_PAGE_MASK);
	dpage_va = vmm_host_iomap(dpage_pa, VMM_PAGE_SIZE);
	/* BUG FIX: iomap results were never checked before use. */
	if (!dpage_va) {
		vmm_cprintf(cdev, "Error: Failed to map memory.\n");
		return VMM_EFAIL;
	}
	dpage_mapped = TRUE;

	spage_pa = saddr - (saddr & VMM_PAGE_MASK);
	spage_va = vmm_host_iomap(spage_pa, VMM_PAGE_SIZE);
	if (!spage_va) {
		vmm_cprintf(cdev, "Error: Failed to map memory.\n");
		vmm_host_iounmap(dpage_va, VMM_PAGE_SIZE);
		return VMM_EFAIL;
	}
	spage_mapped = TRUE;

	while (b < bcnt) {
		/* Remap the destination when daddr enters a new page. */
		if (dpage_pa != (daddr - (daddr & VMM_PAGE_MASK))) {
			if (dpage_mapped) {
				rc = vmm_host_iounmap(dpage_va, VMM_PAGE_SIZE);
				if (rc) {
					vmm_cprintf(cdev,
						"Error: Failed to unmap memory.\n");
					return rc;
				}
				dpage_mapped = FALSE;
			}
			dpage_pa = daddr - (daddr & VMM_PAGE_MASK);
			dpage_va = vmm_host_iomap(dpage_pa, VMM_PAGE_SIZE);
			if (!dpage_va) {
				vmm_cprintf(cdev,
					"Error: Failed to map memory.\n");
				if (spage_mapped)
					vmm_host_iounmap(spage_va,
							 VMM_PAGE_SIZE);
				return VMM_EFAIL;
			}
			dpage_mapped = TRUE;
		}
		dva = dpage_va + (virtual_addr_t)(daddr & VMM_PAGE_MASK);

		/* Remap the source when saddr enters a new page. */
		if (spage_pa != (saddr - (saddr & VMM_PAGE_MASK))) {
			if (spage_mapped) {
				rc = vmm_host_iounmap(spage_va, VMM_PAGE_SIZE);
				if (rc) {
					vmm_cprintf(cdev,
						"Error: Failed to unmap memory.\n");
					return rc;
				}
				spage_mapped = FALSE;
			}
			spage_pa = saddr - (saddr & VMM_PAGE_MASK);
			spage_va = vmm_host_iomap(spage_pa, VMM_PAGE_SIZE);
			if (!spage_va) {
				vmm_cprintf(cdev,
					"Error: Failed to map memory.\n");
				if (dpage_mapped)
					vmm_host_iounmap(dpage_va,
							 VMM_PAGE_SIZE);
				return VMM_EFAIL;
			}
			spage_mapped = TRUE;
		}
		sva = spage_va + (virtual_addr_t)(saddr & VMM_PAGE_MASK);

		/* Copy up to whichever page boundary comes first. */
		if ((daddr & VMM_PAGE_MASK) < (saddr & VMM_PAGE_MASK)) {
			b2copy = VMM_PAGE_SIZE - (u32)(saddr & VMM_PAGE_MASK);
		} else {
			b2copy = VMM_PAGE_SIZE - (u32)(daddr & VMM_PAGE_MASK);
		}
		b2copy = ((bcnt - b) < b2copy) ? (bcnt - b) : b2copy;

		vmm_memcpy((void *)dva, (void *)sva, b2copy);

		b += b2copy;
		daddr += b2copy;
		saddr += b2copy;
	}

	vmm_cprintf(cdev, "Copied %d (0x%x) bytes.\n", b, b);

	if (dpage_mapped) {
		rc = vmm_host_iounmap(dpage_va, VMM_PAGE_SIZE);
		if (rc) {
			vmm_cprintf(cdev, "Error: Failed to unmap memory.\n");
			return rc;
		}
		dpage_mapped = FALSE;
	}
	if (spage_mapped) {
		rc = vmm_host_iounmap(spage_va, VMM_PAGE_SIZE);
		if (rc) {
			vmm_cprintf(cdev, "Error: Failed to unmap memory.\n");
			return rc;
		}
		spage_mapped = FALSE;
	}

	return VMM_OK;
}