/* Deliver the virtual timer interrupt to a VCPU by pulsing the
 * per-CPU irq line low then high (edge injection).
 */
static void generic_virt_irq_inject(struct vmm_vcpu *vcpu,
				    struct generic_timer_context *cntx)
{
	int err;
	u32 virq = cntx->virt_timer_irq;

	if (!virq) {
		vmm_printf("%s: Virtual timer irq not available (VCPU=%s)\n",
			   __func__, vcpu->name);
		return;
	}

	/* Drive the line low first ... */
	err = vmm_devemu_emulate_percpu_irq(vcpu->guest, virq, vcpu->subid, 0);
	if (err) {
		vmm_printf("%s: Emulate VCPU=%s irq=%d level=0 failed\n",
			   __func__, vcpu->name, virq);
	}

	/* ... then high, to form a rising edge */
	err = vmm_devemu_emulate_percpu_irq(vcpu->guest, virq, vcpu->subid, 1);
	if (err) {
		vmm_printf("%s: Emulate VCPU=%s irq=%d level=1 failed\n",
			   __func__, vcpu->name, virq);
	}
}
static int mterm_main(void *udata) { size_t cmds_len; char cmds[MTERM_CMD_STRING_SIZE]; /* Print Banner */ vmm_printf("%s", VMM_BANNER_STRING); /* Main loop of VMM */ while (1) { /* Show prompt */ vmm_printf("XVisor# "); vmm_memset(cmds, 0, sizeof(cmds)); /* Get command string */ vmm_gets(cmds, MTERM_CMD_STRING_SIZE, '\n'); cmds_len = vmm_strlen(cmds); if (cmds_len > 0) { if (cmds[cmds_len - 1] == '\r') cmds[cmds_len - 1] = '\0'; /* Execute command string */ vmm_cmdmgr_execute_cmdstr(vmm_stdio_device(), cmds); } } return VMM_OK; }
int vmm_netport_init(void) { int rc; struct vmm_class *c; vmm_printf("Initialize Network Port Framework\n"); c = vmm_malloc(sizeof(struct vmm_class)); if (!c) return VMM_EFAIL; INIT_LIST_HEAD(&c->head); strcpy(c->name, VMM_NETPORT_CLASS_NAME); INIT_LIST_HEAD(&c->classdev_list); rc = vmm_devdrv_register_class(c); if (rc) { vmm_printf("Failed to register %s class\n", VMM_NETPORT_CLASS_NAME); vmm_free(c); return rc; } return VMM_OK; }
/* Reset the Exynos4 board.
 *
 * The active path triggers a reset by writing the PMU software-reset
 * register. The "#if 0" path is a disabled legacy implementation that
 * forced a cold reset through the S3C2410-style watchdog; kept for
 * reference.
 *
 * On success this function never returns; reaching the end means the
 * reset did not take effect and VMM_EFAIL is returned.
 */
int arch_board_reset(void)
{
#if 0
	/* Disabled: watchdog-based cold reset */
	void *wdt_ptr = (void *)vmm_host_iomap(EXYNOS4_PA_WATCHDOG, 0x100);
	if (wdt_ptr) {
		u32 perir_reg;
		void *cmu_ptr = (void *)vmm_host_iomap(EXYNOS4_PA_CMU +
						EXYNOS4_CLKGATE_IP_PERIR,
						sizeof(perir_reg));
		if (cmu_ptr) {
			vmm_printf("%s: CMU reg is at 0x%08x + 0x%08x\n",
				   __func__, EXYNOS4_PA_CMU,
				   EXYNOS4_CLKGATE_IP_PERIR);
			/* Disable the watchdog before reprogramming it */
			vmm_writel(0, wdt_ptr + S3C2410_WTCON);
			/* enable the WDT clock if it is not already enabled */
			perir_reg = vmm_readl(cmu_ptr);
			vmm_printf("%s: CMU PERIR reg is 0x%08x\n",
				   __func__, perir_reg);
			if (!(perir_reg & (1 << 14))) {
				perir_reg |= (1 << 14);
				vmm_printf
				    ("%s: enabling WDT in PERIR: writing 0x%08x\n",
				     __func__, perir_reg);
				vmm_writel(perir_reg, cmu_ptr);
			}
			/* Tiny reload value + enabled watchdog with reset
			 * bit set => near-immediate expiry and reset */
			vmm_writel(0x80, wdt_ptr + S3C2410_WTDAT);
			vmm_writel(0x80, wdt_ptr + S3C2410_WTCNT);
			vmm_writel(0x2025, wdt_ptr + S3C2410_WTCON);
			vmm_host_iounmap((virtual_addr_t) cmu_ptr,
					 sizeof(perir_reg));
		}
		vmm_host_iounmap((virtual_addr_t) wdt_ptr, 0x100);
	}
#else
	/* Active path: PMU software reset register */
	void *pmu_ptr = (void *)vmm_host_iomap(EXYNOS4_PA_PMU + EXYNOS_SWRESET,
					       sizeof(u32));
	if (pmu_ptr) {
		/* Trigger a Software reset */
		vmm_writel(0x1, pmu_ptr);
		vmm_host_iounmap((virtual_addr_t) pmu_ptr, sizeof(u32));
	}
#endif
	/* Give the reset time to take effect; still running => it failed */
	vmm_mdelay(500);
	vmm_printf("%s: failed\n", __func__);
	return VMM_EFAIL;
}
/* Report either a failed assertion (when a condition string is given)
 * or an unconditional deadloop location. Always returns TRUE.
 */
BOOLEAN DeadloopHelper(const char *assert_condition,
		       const char *func_name,
		       const char *file_name,
		       UINT32 line_num,
		       UINT32 access_level)
{
	(void)access_level;

	if (assert_condition) {
		vmm_printf("VMM assert (%s) failed\n\t in %s() at %s:%d\n",
			   assert_condition, func_name, file_name, line_num);
	} else {
		vmm_printf("Deadloop in %s() - %s:%d\n",
			   func_name, file_name, line_num);
	}

	return TRUE;
}
/*
 * Read a cpu's enable method from the device tree and
 * record it in smp_cpu_ops.
 */
static int __init smp_read_ops(struct vmm_devtree_node *dn, int cpu)
{
	const char *enable_method;
	int err;

	err = vmm_devtree_read_string(dn,
				      VMM_DEVTREE_ENABLE_METHOD_ATTR_NAME,
				      &enable_method);
	if (err) {
		/*
		 * The boot CPU may not have an enable method (e.g. when
		 * spin-table is used for secondaries). Don't warn spuriously.
		 */
		if (cpu != 0)
			vmm_printf("%s: missing enable-method property\n",
				   dn->name);
		return err;
	}

	smp_cpu_ops[cpu] = smp_get_ops(enable_method);
	if (!smp_cpu_ops[cpu]) {
		vmm_printf("%s: unsupported enable-method property: %s\n",
			   dn->name, enable_method);
		return VMM_ENOTAVAIL;
	}

	return 0;
}
/* Scan the well-known physical areas (acpi_areas[], NULL-name
 * terminated) for the ACPI RSDP and return its virtual address,
 * or 0 when not found.
 *
 * NOTE(review): on success the containing area is deliberately left
 * mapped — the returned address points into that mapping, so it must
 * not be unmapped here. Only areas that did NOT contain the RSDP are
 * unmapped.
 */
static virtual_addr_t __init find_root_system_descriptor(void)
{
	struct acpi_search_area *carea = &acpi_areas[0];
	virtual_addr_t area_map;
	virtual_addr_t rsdp_base = 0;
	virtual_size_t sz = 0;

	while (carea->area_name) {
		vmm_printf("Search for RSDP in %s... ", carea->area_name);
		sz = carea->phys_end - carea->phys_start;
		area_map = vmm_host_memmap(carea->phys_start, sz,
					   VMM_MEMORY_FLAGS_NORMAL_NOCACHE);
		BUG_ON((void *)area_map == NULL);
		if ((rsdp_base = locate_rsdp_in_area(area_map, sz)) != 0) {
			/* Found: keep the mapping alive (see NOTE above) */
			vmm_printf("found.\n");
			break;
		}
		rsdp_base = 0;
		carea++;
		vmm_host_memunmap(area_map);
		vmm_printf("not found.\n");
	}

	if (likely(rsdp_base))
		vmm_printf("RSDP Base: 0x%x\n", rsdp_base);

	return rsdp_base;
}
/* Handler for exceptions taken from an unexpected EL2 mode: dump the
 * syndrome/fault registers and the current VCPU state, then panic.
 * Never returns.
 */
void do_bad_mode(arch_regs_t *regs, unsigned long mode)
{
	u64 esr = mrs(esr_el2);
	u64 far = mrs(far_el2);
	u64 elr = mrs(elr_el2);
	u32 ec = (esr & ESR_EC_MASK) >> ESR_EC_SHIFT;
	u32 il = (esr & ESR_IL_MASK) >> ESR_IL_SHIFT;
	u32 iss = (esr & ESR_ISS_MASK) >> ESR_ISS_SHIFT;
	struct vmm_vcpu *vcpu = vmm_scheduler_current_vcpu();

	vmm_printf("%s: CPU%d VCPU=%s unexpected exception\n",
		   __func__, vmm_smp_processor_id(),
		   (vcpu) ? vcpu->name : "(NULL)");
	vmm_printf("%s: ESR=0x%016lx EC=0x%x IL=0x%x ISS=0x%x\n",
		   __func__, esr, ec, il, iss);
	vmm_printf("%s: ELR=0x%016lx FAR=0x%016lx HPFAR=0x%016lx\n",
		   __func__, elr, far, mrs(hpfar_el2));
	cpu_vcpu_dump_user_reg(regs);
	vmm_panic("%s: please reboot ...\n", __func__);
}
/* Grow the extended host-IRQ descriptor array and its allocation
 * bitmap by HOST_IRQEXT_CHUNK entries.
 *
 * Returns VMM_OK on success, VMM_ENOMEM when either reallocation fails.
 *
 * NOTE(review): correctness of the failure path depends on the
 * project's realloc(ptr, old_size, new_size) contract. If that realloc
 * frees the old buffer, then when the bitmap reallocation fails below,
 * vmm_free(irqs) releases the freshly grown array while iectrl.irqs
 * still points at already-freed memory — verify realloc's semantics.
 */
static int _irqext_expand(void)
{
	unsigned int old_size = iectrl.count;
	unsigned int new_size = iectrl.count + HOST_IRQEXT_CHUNK;
	struct vmm_host_irq **irqs = NULL;
	unsigned long *bitmap = NULL;

	irqs = realloc(iectrl.irqs,
		       old_size * sizeof (struct vmm_host_irq *),
		       new_size * sizeof (struct vmm_host_irq *));
	if (!irqs) {
		vmm_printf("%s: Failed to reallocate extended IRQ array from "
			   "%d to %d bytes\n", __func__, old_size, new_size);
		return VMM_ENOMEM;
	}

	/* The size variables are reused here as bitmap sizes */
	old_size = BITMAP_SIZE(old_size);
	new_size = BITMAP_SIZE(new_size);
	bitmap = realloc(iectrl.bitmap, old_size, new_size);
	if (!bitmap) {
		vmm_printf("%s: Failed to reallocate extended IRQ bitmap from "
			   "%d to %d bytes\n", __func__, old_size, new_size);
		vmm_free(irqs);
		return VMM_ENOMEM;
	}

	/* Commit both new buffers and the new capacity atomically
	 * from the caller's point of view */
	iectrl.irqs = irqs;
	iectrl.bitmap = bitmap;
	iectrl.count += HOST_IRQEXT_CHUNK;

	return VMM_OK;
}
/* Read an ACPI system description table from its mapped virtual
 * address into the caller's buffer, validating signature, size and
 * checksum.
 *
 * @sdt_va: virtual address where the table is mapped
 * @tb:     destination buffer, or NULL to query the table length only
 * @size:   capacity of @tb in bytes
 * @name:   expected table signature (for validation and messages)
 *
 * Returns the table length on success, VMM_EFAIL on validation failure.
 */
static int __init acpi_read_sdt_at(void *sdt_va,
				   struct acpi_sdt_hdr * tb,
				   size_t size,
				   const char * name)
{
	struct acpi_sdt_hdr hdr;

	/* if NULL is supplied, we only return the size of the table */
	if (tb == NULL) {
		memcpy(&hdr, sdt_va, sizeof(struct acpi_sdt_hdr));
		return hdr.len;
	}

	memcpy(tb, sdt_va, sizeof(struct acpi_sdt_hdr));

	if (acpi_check_signature((const char *)tb->signature,
				 (const char *)name)) {
		vmm_printf("ACPI ERROR: acpi %s signature does not match\n",
			   name);
		return VMM_EFAIL;
	}

	if (size < tb->len) {
		vmm_printf("ACPI ERROR: acpi buffer too small for %s\n",
			   name);
		return VMM_EFAIL;
	}

	/* Copy only the table's actual length (tb->len <= size here);
	 * copying 'size' bytes would read past the end of the mapped
	 * table. */
	memcpy(tb, sdt_va, tb->len);

	if (acpi_check_csum(tb, tb->len)) {
		vmm_printf("ACPI ERROR: acpi %s checksum does not match\n",
			   name);
		return VMM_EFAIL;
	}

	return tb->len;
}
/* PIO-read 'blkcount' blocks of 'blksize' bytes from the MMCI (PL180)
 * data FIFO into 'dest', then wait for the data-block-end condition
 * and report any error flagged in the controller status register.
 *
 * Returns VMM_OK on success; VMM_ETIMEDOUT, VMM_EILSEQ or VMM_EIO on
 * data timeout, CRC failure, RX overrun, or short transfer.
 */
static int mmci_read_bytes(struct mmc_host *mmc, u32 *dest, u32 blkcount,
			   u32 blksize)
{
	u32 *tempbuff = dest;
	/* 64-bit product: blkcount * blksize may not fit in 32 bits */
	u64 xfercount = (u64)blkcount * blksize;
	struct mmci_host *host = mmc_priv(mmc);
	u32 status, status_err;

	debug("%s: read_bytes: blkcount=%u blksize=%u\n",
	      __func__, blkcount, blksize);

	/* Drain the RX FIFO one word at a time until the whole transfer
	 * has been read or an error bit appears */
	status = vmm_readl(&host->base->status);
	status_err = status &
		(SDI_STA_DCRCFAIL | SDI_STA_DTIMEOUT | SDI_STA_RXOVERR);
	while ((!status_err) && (xfercount >= sizeof(u32))) {
		if (status & SDI_STA_RXDAVL) {
			*(tempbuff) = vmm_readl(&host->base->fifo);
			tempbuff++;
			xfercount -= sizeof(u32);
		}
		status = vmm_readl(&host->base->status);
		status_err = status &
			(SDI_STA_DCRCFAIL | SDI_STA_DTIMEOUT |
			 SDI_STA_RXOVERR);
	}

	/* Busy-wait for data-block-end (or an error condition) */
	status_err = status &
		(SDI_STA_DCRCFAIL | SDI_STA_DTIMEOUT | SDI_STA_DBCKEND |
		 SDI_STA_RXOVERR);
	while (!status_err) {
		status = vmm_readl(&host->base->status);
		status_err = status &
			(SDI_STA_DCRCFAIL | SDI_STA_DTIMEOUT |
			 SDI_STA_DBCKEND | SDI_STA_RXOVERR);
	}

	if (status & SDI_STA_DTIMEOUT) {
		vmm_printf("%s: Read data timed out, "
			   "xfercount: %llu, status: 0x%08X\n",
			   __func__, xfercount, status);
		return VMM_ETIMEDOUT;
	} else if (status & SDI_STA_DCRCFAIL) {
		vmm_printf("%s: Read data bytes CRC error: 0x%x\n",
			   __func__, status);
		return VMM_EILSEQ;
	} else if (status & SDI_STA_RXOVERR) {
		vmm_printf("%s: Read data RX overflow error\n", __func__);
		return VMM_EIO;
	}

	/* Acknowledge/clear all static status flags */
	vmm_writel(SDI_ICR_MASK, &host->base->status_clear);

	/* A non-zero remainder means the transfer ended short */
	if (xfercount) {
		vmm_printf("%s: Read data error, xfercount: %llu\n",
			   __func__, xfercount);
		return VMM_EIO;
	}

	return VMM_OK;
}
int versatile_clcd_setup(struct clcd_fb *fb, unsigned long framesize) { int rc; u32 use_dma, val[2]; void *screen_base; unsigned long smem_len; physical_addr_t smem_pa; if (!fb->dev->node) { return VMM_EINVALID; } if (vmm_devtree_read_u32(fb->dev->node, "use_dma", &use_dma)) { use_dma = 0; } if (use_dma) { smem_len = framesize; screen_base = (void *)vmm_host_alloc_pages( VMM_SIZE_TO_PAGE(smem_len), VMM_MEMORY_READABLE | VMM_MEMORY_WRITEABLE); if (!screen_base) { vmm_printf("CLCD: unable to alloc framebuffer\n"); return VMM_ENOMEM; } rc = vmm_host_va2pa((virtual_addr_t)screen_base, &smem_pa); if (rc) { return rc; } } else { rc = vmm_devtree_read_u32_array(fb->dev->node, "framebuffer", val, 2); if (rc) { return rc; } smem_pa = val[0]; smem_len = val[1]; if (smem_len < framesize) { return VMM_ENOMEM; } screen_base = (void *)vmm_host_iomap(smem_pa, smem_len); if (!screen_base) { vmm_printf("CLCD: unable to map framebuffer\n"); return VMM_ENOMEM; } } fb->fb.screen_base = screen_base; fb->fb.fix.smem_start = smem_pa; fb->fb.fix.smem_len = smem_len; return 0; }
/* Move data for an active SDHCI transfer: service PIO ready interrupts
 * block by block (advancing the SDMA boundary address when the
 * controller supports SDMA) until the data-end interrupt is observed.
 *
 * Returns VMM_OK on completion, VMM_EFAIL on a controller error,
 * VMM_ETIMEDOUT when the transfer makes no progress in time.
 */
static int sdhci_transfer_data(struct sdhci_host *host,
			       struct mmc_data *data, u32 start_addr)
{
	u32 ctrl, stat, rdy, mask, timeout, block = 0;

	/* Select the SDMA engine in the host control register */
	if (host->sdhci_caps & SDHCI_CAN_DO_SDMA) {
		ctrl = sdhci_readl(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		ctrl |= SDHCI_CTRL_SDMA;
		sdhci_writel(host, ctrl, SDHCI_HOST_CONTROL);
	}

	timeout = 1000000;
	rdy = SDHCI_INT_SPACE_AVAIL | SDHCI_INT_DATA_AVAIL;
	mask = SDHCI_DATA_AVAILABLE | SDHCI_SPACE_AVAILABLE;
	do {
		stat = sdhci_readl(host, SDHCI_INT_STATUS);
		if (stat & SDHCI_INT_ERROR) {
			vmm_printf("%s: Error detected in status(0x%X)!\n",
				   __func__, stat);
			return VMM_EFAIL;
		}
		if (stat & rdy) {
			/* Interrupt says ready but present-state
			 * disagrees: poll again */
			if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)) {
				continue;
			}
			sdhci_writel(host, rdy, SDHCI_INT_STATUS);
			sdhci_transfer_pio(host, data);
			data->dest += data->blocksize;
			if (++block >= data->blocks) {
				break;
			}
		}
		if (host->sdhci_caps & SDHCI_CAN_DO_SDMA) {
			if (stat & SDHCI_INT_DMA_END) {
				/* Ack and restart DMA at the next
				 * boundary-aligned address */
				sdhci_writel(host, SDHCI_INT_DMA_END,
					     SDHCI_INT_STATUS);
				start_addr &=
					~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1);
				start_addr += SDHCI_DEFAULT_BOUNDARY_SIZE;
				sdhci_writel(host, start_addr,
					     SDHCI_DMA_ADDRESS);
			}
		}
		if (timeout-- > 0) {
			vmm_udelay(10);
		} else {
			vmm_printf("%s: Transfer data timeout\n", __func__);
			return VMM_ETIMEDOUT;
		}
	} while (!(stat & SDHCI_INT_DATA_END));

	return VMM_OK;
}
/* Report whether the profiler is currently active. */
static int cmd_profile_status(struct vmm_chardev * cdev, char *dummy)
{
	const char *msg = vmm_profiler_isactive()
				? "profile function is running\n"
				: "profile function is not running\n";

	vmm_printf("%s", msg);

	return VMM_OK;
}
/* Hand a received packet of 'len' bytes up the NE2000 stack,
 * rejecting oversized frames.
 */
void push_packet_len(struct nic_priv_data *dp, int len)
{
	vmm_printf("pushed len = %d\n", len);

	if (len >= 2000) {
		vmm_printf("NE2000: packet too big\n");
		return;
	}

	/* FIXME: Just pass it to the upper layer */
	dp83902a_recv(dp, len);
	/* NetReceive(&pbuf[0], len); */
}
/* Read the NE2000-compatible station-address PROM: program the chip
 * for a 32-byte remote DMA read, identify the board, and extract the
 * MAC address into mac_addr when the PROM markers are valid.
 */
int get_prom(struct nic_priv_data *dp, u8* mac_addr)
{
	struct {
		unsigned char value, offset;
	} program_seq[] = {
		{E8390_NODMA+E8390_PAGE0+E8390_STOP, E8390_CMD}, /* Select page 0 */
		{0x48, EN0_DCFG},	/* Set byte-wide (0x48) access. */
		{0x00, EN0_RCNTLO},	/* Clear the count regs. */
		{0x00, EN0_RCNTHI},
		{0x00, EN0_IMR},	/* Mask completion irq. */
		{0xFF, EN0_ISR},
		{E8390_RXOFF, EN0_RXCR},	/* 0x20 Set to monitor */
		{E8390_TXOFF, EN0_TXCR},	/* 0x02 and loopback mode. */
		{32, EN0_RCNTLO},
		{0x00, EN0_RCNTHI},
		{0x00, EN0_RSARLO},	/* DMA starting at 0x0000. */
		{0x00, EN0_RSARHI},
		{E8390_RREAD+E8390_START, E8390_CMD},
	};
	u8 prom[32];
	int i, j;

	/* Put the chip into a known state, then run the read sequence */
	pcnet_reset_8390(dp);
	for (i = 0; i < sizeof (program_seq) / sizeof (program_seq[0]); i++)
		n2k_outb (dp, program_seq[i].value, program_seq[i].offset);

	/* Pull the 32 PROM bytes out of the remote DMA data port */
	for (i = 0; i < 32; i++)
		prom[i] = n2k_inb (dp, PCNET_DATAPORT);

	/* Try to identify the board from the leading PROM bytes */
	for (i = 0; hw_info[i].dev_name != NULL; i++) {
		if ((prom[0] == hw_info[i].a0) &&
		    (prom[2] == hw_info[i].a1) &&
		    (prom[4] == hw_info[i].a2)) {
			vmm_printf("%s detected.\n", hw_info[i].dev_name);
			break;
		}
	}

	/* Valid PROMs carry 0x57 markers; MAC bytes sit at even offsets */
	if ((prom[28] == 0x57) && (prom[30] == 0x57)) {
		vmm_printf ("MAC address is ");
		for (j = 0; j < 6; j++) {
			if (j)
				vmm_printf(":");
			mac_addr[j] = prom[j << 1];
			vmm_printf ("%02x", mac_addr[j]);
		}
		vmm_printf ("\n");
	}

	return VMM_OK;
}
static int virtio_net_connect(struct virtio_device *dev, struct virtio_emulator *emu) { int i, rc; char *attr; struct virtio_net_dev *ndev; struct vmm_netswitch *nsw; ndev = vmm_zalloc(sizeof(struct virtio_net_dev)); if (!ndev) { vmm_printf("Failed to allocate virtio net device....\n"); return VMM_EFAIL; } ndev->vdev = dev; vmm_snprintf(ndev->name, VIRTIO_DEVICE_MAX_NAME_LEN, "%s", dev->name); ndev->port = vmm_netport_alloc(ndev->name, VMM_NETPORT_DEF_QUEUE_SIZE); ndev->port->mtu = VIRTIO_NET_MTU; ndev->port->link_changed = virtio_net_set_link; ndev->port->can_receive = virtio_net_can_receive; ndev->port->switch2port_xfer = virtio_net_switch2port_xfer; ndev->port->priv = ndev; rc = vmm_netport_register(ndev->port); if (rc) { vmm_netport_free(ndev->port); vmm_free(ndev); return rc; } attr = vmm_devtree_attrval(dev->edev->node, "switch"); if (attr) { nsw = vmm_netswitch_find((char *)attr); if (!nsw) { vmm_printf("%s: Cannot find netswitch \"%s\"\n", __func__, (char *)attr); } else { vmm_netswitch_port_add(nsw, ndev->port); } } for (i = 0; i < 6; i++) { ndev->config.mac[i] = vmm_netport_mac(ndev->port)[i]; } ndev->config.status = VIRTIO_NET_S_LINK_UP; dev->emu_data = ndev; return VMM_OK; }
/** * clk_register_fixed_rate - register fixed-rate clock with the clock framework * @dev: device that is registering this clock * @name: name of this clock * @parent_name: name of clock's parent * @flags: framework-specific flags * @fixed_rate: non-adjustable clock rate */ struct clk *clk_register_fixed_rate(struct vmm_device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned long fixed_rate) { struct clk_fixed_rate *fixed; struct clk *clk; struct clk_init_data init; /* allocate fixed-rate clock */ fixed = vmm_zalloc(sizeof(struct clk_fixed_rate)); if (!fixed) { vmm_printf("%s: could not allocate fixed clk\n", __func__); return NULL; } init.name = name; init.ops = &clk_fixed_rate_ops; init.flags = flags | CLK_IS_BASIC; init.parent_names = (parent_name ? &parent_name: NULL); init.num_parents = (parent_name ? 1 : 0); /* struct clk_fixed_rate assignments */ fixed->fixed_rate = fixed_rate; fixed->hw.init = &init; /* register the clock */ clk = clk_register(dev, &fixed->hw); if (!clk) vmm_free(fixed); return clk; }
/* * Detect which LCD panel is connected, and return the appropriate * clcd_panel structure. Note: we do not have any information on * the required timings for the 8.4in panel, so we presently assume * VGA timings. */ static int versatile_clcd_setup(struct clcd_fb *fb) { void *sys_clcd = (void *)versatile_sys_base + VERSATILE_SYS_CLCD_OFFSET; const char *panel_name; u32 val; val = vmm_readl(sys_clcd) & SYS_CLCD_ID_MASK; if (val == SYS_CLCD_ID_SANYO_3_8) panel_name = "Sanyo TM38QV67A02A"; else if (val == SYS_CLCD_ID_SANYO_2_5) { panel_name = "Sanyo QVGA Portrait"; } else if (val == SYS_CLCD_ID_EPSON_2_2) panel_name = "Epson L2F50113T00"; else if (val == SYS_CLCD_ID_VGA) panel_name = "VGA"; else { vmm_printf("CLCD: unknown LCD panel ID 0x%08x, " "using VGA\n", val); panel_name = "VGA"; } fb->panel = versatile_clcd_get_panel(panel_name); if (!fb->panel) return VMM_EINVALID; return versatile_clcd_setup_dma(fb, 1024 * 1024); }
int register_netdev(struct net_device *ndev) { int rc = VMM_OK; if (ndev == NULL) { return VMM_EFAIL; } if (ndev->netdev_ops && ndev->netdev_ops->ndo_init) { rc = ndev->netdev_ops->ndo_init(ndev); if (rc != VMM_OK) { vmm_printf("%s: Device %s Failed during initializaion" "with err %d!!!!\n", __func__ , ndev->name, rc); goto fail_ndev_reg; } } ndev->state &= ~NETDEV_UNINITIALIZED; ndev->state |= NETDEV_REGISTERED; rc = netdev_register_port(ndev); return rc; fail_ndev_reg: return rc; }
/* Apply the requested clock rate and bus width to the MMCI (PL180)
 * clock control register. ios->clock is updated to the rate actually
 * programmed (output clock = clock_in / (clkdiv + 2)).
 */
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 sdi_clkcr;

	sdi_clkcr = vmm_readl(&host->base->clock);

	/* Ramp up the clock rate */
	if (ios->clock) {
		u32 clkdiv = 0;
		u32 tmp_clock;

		if (ios->clock >= mmc->f_max) {
			clkdiv = 0;
			ios->clock = mmc->f_max;
		} else {
			/* Initial divisor estimate from the divider
			 * formula clock_in / (clkdiv + 2) */
			clkdiv = udiv32(host->clock_in, ios->clock) - 2;
		}

		/* Bump the divisor until the output does not exceed the
		 * requested rate */
		tmp_clock = udiv32(host->clock_in, (clkdiv + 2));
		while (tmp_clock > ios->clock) {
			clkdiv++;
			tmp_clock = udiv32(host->clock_in, (clkdiv + 2));
		}

		if (clkdiv > SDI_CLKCR_CLKDIV_MASK)
			clkdiv = SDI_CLKCR_CLKDIV_MASK;

		/* Report the rate actually achieved back to the caller */
		tmp_clock = udiv32(host->clock_in, (clkdiv + 2));
		ios->clock = tmp_clock;
		sdi_clkcr &= ~(SDI_CLKCR_CLKDIV_MASK);
		sdi_clkcr |= clkdiv;
	}

	/* Set the bus width */
	if (ios->bus_width) {
		u32 buswidth = 0;

		switch (ios->bus_width) {
		case 1:
			buswidth |= SDI_CLKCR_WIDBUS_1;
			break;
		case 4:
			buswidth |= SDI_CLKCR_WIDBUS_4;
			break;
		case 8:
			buswidth |= SDI_CLKCR_WIDBUS_8;
			break;
		default:
			vmm_printf("%s: Invalid bus width: %d\n",
				   __func__, ios->bus_width);
			break;
		}
		sdi_clkcr &= ~(SDI_CLKCR_WIDBUS_MASK);
		sdi_clkcr |= buswidth;
	}

	vmm_writel(sdi_clkcr, &host->base->clock);

	/* Let the new clock settings settle before the next command */
	vmm_udelay(CLK_CHANGE_DELAY);
}
/* Hex-dump an Ethernet frame buffer: destination MAC, source MAC,
 * ethertype, then the payload as a continuous hex string.
 *
 * NOTE(review): assumes buflen >= 14 (a full Ethernet header) —
 * confirm against callers.
 */
static void mbuf_data_dump(char *buf, unsigned int buflen)
{
	/* Fix: print through an unsigned view — passing (signed) char to
	 * %02x sign-extends bytes >= 0x80 into "ffffff80"-style output. */
	const unsigned char *p = (const unsigned char *)buf;
	unsigned int index;

	vmm_printf("%02x:%02x:%02x:%02x:%02x:%02x ",
		   p[0], p[1], p[2], p[3], p[4], p[5]);
	vmm_printf("%02x:%02x:%02x:%02x:%02x:%02x ",
		   p[6], p[7], p[8], p[9], p[10], p[11]);
	vmm_printf("%02x%02x\n", p[12], p[13]);

	for (index = 14; index < buflen; ++index) {
		vmm_printf("%02x", p[index]);
	}

	vmm_printf("\n");
}
/* Handle a software interrupt (SVC) taken from guest context: either
 * forward it to the guest as an exception (when the guest was in user
 * mode) or emulate it as a hypercall.
 */
void do_soft_irq(arch_regs_t * uregs)
{
	int rc = VMM_OK;
	struct vmm_vcpu * vcpu;

	/* Only exceptions coming from user mode are expected here */
	if ((uregs->cpsr & CPSR_MODE_MASK) != CPSR_MODE_USER) {
		vmm_panic("%s: unexpected exception\n", __func__);
	}

	vmm_scheduler_irq_enter(uregs, TRUE);

	vcpu = vmm_scheduler_current_vcpu();

	/* If vcpu priviledge is user then generate exception
	 * and return without emulating instruction */
	if ((arm_priv(vcpu)->cpsr & CPSR_MODE_MASK) == CPSR_MODE_USER) {
		vmm_vcpu_irq_assert(vcpu, CPU_SOFT_IRQ, 0x0);
	} else {
		/* Emulate the hypercall; decode as Thumb or ARM based on
		 * the guest's instruction-set state */
		if (uregs->cpsr & CPSR_THUMB_ENABLED) {
			rc = cpu_vcpu_hypercall_thumb(vcpu, uregs,
						      *((u32 *)uregs->pc));
		} else {
			rc = cpu_vcpu_hypercall_arm(vcpu, uregs,
						    *((u32 *)uregs->pc));
		}
	}

	if (rc) {
		vmm_printf("%s: error %d\n", __func__, rc);
	}

	vmm_scheduler_irq_exit(uregs);
}
/* Service a guest read from the virtio-PCI legacy configuration
 * space. Returns VMM_OK, or VMM_EINVALID for an unknown offset.
 */
int virtio_pci_config_read(struct virtio_pci_dev *m,
			   u32 offset, void *dst, u32 dst_len)
{
	int err = VMM_OK;
	u32 *val = dst;

	switch (offset) {
	case VMM_VIRTIO_PCI_HOST_FEATURES:
		*val = m->dev.emu->get_host_features(&m->dev);
		break;
	case VMM_VIRTIO_PCI_QUEUE_PFN:
		*val = m->dev.emu->get_pfn_vq(&m->dev, m->config.queue_sel);
		break;
	case VMM_VIRTIO_PCI_QUEUE_NUM:
		*val = m->dev.emu->get_size_vq(&m->dev, m->config.queue_sel);
		break;
	case VMM_VIRTIO_PCI_STATUS:
		*val = m->config.status;
		break;
	case VMM_VIRTIO_PCI_ISR:
		/* reading from the ISR also clears it. */
		*val = m->config.interrupt_state;
		m->config.interrupt_state = 0;
		vmm_devemu_emulate_irq(m->guest, m->irq, 0);
		break;
	default:
		vmm_printf("%s: guest=%s offset=0x%x\n", __func__,
			   m->guest->name, offset);
		err = VMM_EINVALID;
		break;
	}

	return err;
}
static int imx_src_probe(struct vmm_device *dev, const struct vmm_devtree_nodeid *nodeid) { int ret = VMM_OK; struct vmm_devtree_node *np = dev->node; u32 val; ret = vmm_devtree_request_regmap(np, (virtual_addr_t *)&src_base, 0, "i.MX Reset Control"); if (VMM_OK != ret) { vmm_printf("Failed to retrive %s register mapping\n"); return ret; } imx_reset_controller.node = np; #ifdef CONFIG_RESET_CONTROLLER reset_controller_register(&imx_reset_controller); #endif /* CONFIG_RESET_CONTROLLER */ /* * force warm reset sources to generate cold reset * for a more reliable restart */ spin_lock(&scr_lock); val = readl_relaxed(src_base + SRC_SCR); val &= ~(1 << BP_SRC_SCR_WARM_RESET_ENABLE); writel_relaxed(val, src_base + SRC_SCR); spin_unlock(&scr_lock); return 0; }
int scsi_inquiry(struct scsi_request *srb, struct scsi_transport *tr, void *priv) { int retry, rc = VMM_OK; unsigned long datalen; if (!srb || !srb->data || (srb->datalen < 36) || !tr || !tr->transport) { return VMM_EINVALID; } datalen = srb->datalen; retry = 5; do { memset(&srb->cmd, 0, sizeof(srb->cmd)); srb->cmd[0] = SCSI_INQUIRY; srb->cmd[1] = srb->lun << 5; srb->cmd[4] = 36; srb->datalen = 36; srb->cmdlen = 12; rc = tr->transport(srb, tr, priv); DPRINTF("%s: inquiry returns %d\n", __func__, rc); if (rc == VMM_OK) break; } while (--retry); srb->datalen = datalen; if (!retry) { vmm_printf("%s: error in inquiry\n", __func__); return VMM_EFAIL; } return rc; }
/*---------------------------------------------------------------------------*/
/* Add (or refresh) a neighbor-table entry binding 'ipaddr' to the
 * link-layer address in 'addr'. An unused entry is preferred; an
 * existing entry for the same IP is updated; otherwise the oldest
 * entry is evicted.
 */
void uip_neighbor_add(uip_ipaddr_t ipaddr, struct uip_neighbor_addr *addr)
{
	int i, oldest;
	u8_t oldest_time;

	vmm_printf("Adding neighbor with link address %02x:%02x:%02x:%02x:%02x:%02x\n",
		   addr->addr.addr[0], addr->addr.addr[1], addr->addr.addr[2],
		   addr->addr.addr[3], addr->addr.addr[4], addr->addr.addr[5]);

	/* Find the first unused entry or the oldest used entry. */
	oldest_time = 0;
	oldest = 0;
	for(i = 0; i < ENTRIES; ++i) {
		if(entries[i].time == MAX_TIME) {
			oldest = i;
			break;
		}
		/* Fix: compare against the IP address being added, not the
		 * link-layer address struct ('addr') as the original did. */
		if(uip_ipaddr_cmp(entries[i].ipaddr, ipaddr)) {
			oldest = i;
			break;
		}
		if(entries[i].time > oldest_time) {
			oldest = i;
			oldest_time = entries[i].time;
		}
	}

	/* Use the oldest or first free entry (either pointed to by the
	   "oldest" variable). */
	entries[oldest].time = 0;
	uip_ipaddr_copy(entries[oldest].ipaddr, ipaddr);
	memcpy(&entries[oldest].addr, addr, sizeof(struct uip_neighbor_addr));
}
const char *realview_clcd_panel_name(void) { u32 val; const char *panel_name, *vga_panel_name; void *sys_clcd; if (!realview_sysreg_base) { return NULL; } sys_clcd = realview_sysreg_base + REALVIEW_SYS_CLCD_OFFSET; val = vmm_readl(sys_clcd) & REALVIEW_SYS_CLCD_ID_MASK; /* XVGA, 16bpp * (Assuming machine is always realview-pb-a8 and not realview-eb) */ vga_panel_name = "XVGA"; if (val == REALVIEW_SYS_CLCD_ID_SANYO_3_8) panel_name = "Sanyo TM38QV67A02A"; else if (val == REALVIEW_SYS_CLCD_ID_SANYO_2_5) panel_name = "Sanyo QVGA Portrait"; else if (val == REALVIEW_SYS_CLCD_ID_EPSON_2_2) panel_name = "Epson L2F50113T00"; else if (val == REALVIEW_SYS_CLCD_ID_VGA) panel_name = vga_panel_name; else { vmm_printf("CLCD: unknown LCD panel ID 0x%08x, using VGA\n", val); panel_name = vga_panel_name; } return panel_name; }
/* Program the SD clock to at most 'clock' Hz: pick a divisor per the
 * controller's spec version, enable the internal clock, wait for it to
 * stabilise, then gate it to the card.
 *
 * Returns VMM_OK on success (including clock==0, which just stops the
 * clock), VMM_EFAIL when the internal clock never stabilises.
 */
static int sdhci_set_clock(struct mmc_host *mmc, u32 clock)
{
	struct sdhci_host *host = (struct sdhci_host *)mmc->priv;
	u32 div, clk, timeout;

	/* Stop the clock before reprogramming */
	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0) {
		return VMM_OK;
	}

	if ((host->sdhci_version & SDHCI_SPEC_VER_MASK) >= SDHCI_SPEC_300) {
		/* Version 3.00 divisors must be a multiple of 2. */
		if (mmc->f_max <= clock)
			div = 1;
		else {
			for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
			     div += 2) {
				if (udiv32(mmc->f_max, div) <= clock) {
					break;
				}
			}
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if (udiv32(mmc->f_max, div) <= clock) {
				break;
			}
		}
	}
	/* The register encodes divisor/2 */
	div >>= 1;

	if (host->ops.set_clock) {
		host->ops.set_clock(host, div);
	}

	/* Split the divisor across the low and high register fields and
	 * enable the internal clock */
	clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;
	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = 20;
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		 & SDHCI_CLOCK_INT_STABLE)) {
		if (timeout == 0) {
			vmm_printf("%s: Internal clock never stabilised.\n",
				   __func__);
			return VMM_EFAIL;
		}
		timeout--;
		vmm_udelay(1000);
	}

	/* Gate the stabilised clock out to the card */
	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	return VMM_OK;
}
/* Initialize the block device framework by registering its device
 * driver class.
 *
 * Returns VMM_OK on success, VMM_ENOMEM / VMM_EOVERFLOW / the class
 * registration error code on failure.
 */
static int __init vmm_blockdev_init(void)
{
	struct vmm_class *c;
	int err;

	vmm_printf("Initialize Block Device Framework\n");

	c = vmm_malloc(sizeof(struct vmm_class));
	if (!c) {
		return VMM_ENOMEM;
	}

	INIT_LIST_HEAD(&c->head);
	/* Bounded copy; report overflow rather than truncating */
	if (strlcpy(c->name, VMM_BLOCKDEV_CLASS_NAME, sizeof(c->name)) >=
	    sizeof(c->name)) {
		err = VMM_EOVERFLOW;
		goto free_class;
	}
	INIT_LIST_HEAD(&c->classdev_list);

	err = vmm_devdrv_register_class(c);
	if (err) {
		goto free_class;
	}

	return VMM_OK;

free_class:
	vmm_free(c);
	return err;
}