/* Find the interrupt parent of a device tree node.
 *
 * Walks upward starting at @child: when the node carries an
 * "interrupt-parent" attribute, that phandle is followed; otherwise the
 * natural tree parent is used. The walk stops at the first node that has
 * a "#interrupt-cells" attribute (i.e. an interrupt controller) or when
 * the walk runs off the tree (p == NULL).
 *
 * Returns the interrupt parent node, or NULL.
 * NOTE(review): the returned node appears to carry the reference taken
 * during the walk — confirm the caller is expected to dref it.
 */
struct vmm_devtree_node *vmm_devtree_irq_find_parent(
					struct vmm_devtree_node *child)
{
	struct vmm_devtree_node *p;
	const u32 *parp;

	if (!child) {
		return NULL;
	}

	/* Hold a reference on the node currently being examined */
	vmm_devtree_ref_node(child);
	do {
		parp = vmm_devtree_attrval(child, "interrupt-parent");
		if (parp == NULL) {
			/* No explicit interrupt-parent: fall back to
			 * the natural tree parent.
			 */
			p = child->parent;
			vmm_devtree_ref_node(child->parent);
		} else {
			/* Follow the interrupt-parent phandle */
			p = vmm_devtree_find_node_by_phandle(
						vmm_be32_to_cpu(*parp));
		}
		/* Drop the reference on the node we just left */
		vmm_devtree_dref_node(child);
		child = p;
	} while (p && vmm_devtree_attrval(p, "#interrupt-cells") == NULL);

	return p;
}
/* Initialize the default terminal (PL011 UART) for the Versatile
 * Express board. Register base, input clock and baudrate come from the
 * motherboard/iofpga/uart0 device tree node, with 24 MHz / 115200 as
 * fallbacks when the attributes are absent.
 */
int __init arch_defterm_init(void)
{
	int err;
	u32 *aval;
	struct vmm_devtree_node *uart_node;

	/* Locate uart0 under the motherboard iofpga */
	uart_node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
					VMM_DEVTREE_HOSTINFO_NODE_NAME
					VMM_DEVTREE_PATH_SEPARATOR_STRING
					"motherboard"
					VMM_DEVTREE_PATH_SEPARATOR_STRING
					"iofpga"
					VMM_DEVTREE_PATH_SEPARATOR_STRING
					"uart0");
	if (!uart_node) {
		return VMM_ENODEV;
	}

	/* Map the UART register block */
	err = vmm_devtree_regmap(uart_node, &v2m_defterm_base, 0);
	if (err) {
		return err;
	}

	/* Input clock: device tree value or 24 MHz default */
	aval = vmm_devtree_attrval(uart_node,
				   VMM_DEVTREE_CLOCK_RATE_ATTR_NAME);
	v2m_defterm_inclk = aval ? *aval : 24000000;

	/* Baudrate: device tree value or 115200 default */
	aval = vmm_devtree_attrval(uart_node, "baudrate");
	v2m_defterm_baud = aval ? *aval : 115200;

	pl011_lowlevel_init(v2m_defterm_base,
			    v2m_defterm_baud, v2m_defterm_inclk);

	return VMM_OK;
}
/* Initialize the default terminal (8250-style UART) for the sun4i
 * board. Configuration is read from the /soc/uart0 device tree node;
 * missing attributes fall back to 24 MHz clock, 115200 baud, and
 * 4-byte register alignment.
 */
int __init arch_defterm_init(void)
{
	int err;
	u32 *aval;
	struct vmm_devtree_node *uart_node;

	/* Locate uart0 under the soc node */
	uart_node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
					VMM_DEVTREE_HOSTINFO_NODE_NAME
					VMM_DEVTREE_PATH_SEPARATOR_STRING
					"soc"
					VMM_DEVTREE_PATH_SEPARATOR_STRING
					"uart0");
	if (!uart_node) {
		return VMM_ENODEV;
	}

	/* Map the UART register block */
	err = vmm_devtree_regmap(uart_node, &sun4i_uart_port.base, 0);
	if (err) {
		return err;
	}

	/* Input clock: device tree value or 24 MHz default */
	aval = vmm_devtree_attrval(uart_node,
				   VMM_DEVTREE_CLOCK_RATE_ATTR_NAME);
	sun4i_uart_port.input_clock = aval ? *aval : 24000000;

	/* Baudrate: device tree value or 115200 default */
	aval = vmm_devtree_attrval(uart_node, "baudrate");
	sun4i_uart_port.baudrate = aval ? *aval : 115200;

	/* Register alignment: device tree value or 4 byte default */
	aval = vmm_devtree_attrval(uart_node, "reg_align");
	sun4i_uart_port.reg_align = aval ? *aval : 4;

	uart_8250_lowlevel_init(&sun4i_uart_port);

	return VMM_OK;
}
/* Read the host IRQ number at position @index from a node's
 * "interrupts" attribute.
 *
 * @node:  device tree node to read from
 * @irq:   output pointer for the (CPU-endian) IRQ number
 * @index: zero-based cell index inside the attribute
 *
 * Returns VMM_OK on success, VMM_EFAIL on bad arguments, or
 * VMM_ENOTAVAIL when the attribute is missing or too short.
 */
int vmm_devtree_irq_get(struct vmm_devtree_node *node, u32 *irq, int index)
{
	u32 alen;
	const char *aval;

	if (!node || !irq || index < 0) {
		return VMM_EFAIL;
	}

	aval = vmm_devtree_attrval(node, VMM_DEVTREE_INTERRUPTS_ATTR_NAME);
	if (!aval) {
		return VMM_ENOTAVAIL;
	}

	alen = vmm_devtree_attrlen(node, VMM_DEVTREE_INTERRUPTS_ATTR_NAME);
	/* Fixed: require the entire u32 cell at @index to lie within the
	 * attribute. The old check (alen <= index * sizeof(u32)) accepted
	 * a truncated attribute (e.g. alen == 2 with index == 0) and then
	 * read 4 bytes past the end of the attribute value.
	 */
	if (alen < ((index + 1) * sizeof(u32))) {
		return VMM_ENOTAVAIL;
	}

	/* Interrupt cells are stored big-endian in the device tree */
	aval += index * sizeof(u32);
	*irq = vmm_be32_to_cpu(*((u32 *)aval));

	return VMM_OK;
}
/* Create and start the "mterm" (management terminal) daemon thread.
 * Thread priority and time slice can be tuned via the "mterm_priority"
 * and "mterm_time_slice" attributes of the VMM information node;
 * otherwise framework defaults are used.
 */
static int __init daemon_mterm_init(void)
{
	u8 prio;
	u32 slice;
	const char *aval;
	struct vmm_devtree_node *vnode;

	/* Start from a clean control structure */
	vmm_memset(&mtctrl, 0, sizeof(mtctrl));

	/* Look up the VMM information node for mterm tunables */
	vnode = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
				    VMM_DEVTREE_VMMINFO_NODE_NAME);
	if (!vnode) {
		return VMM_EFAIL;
	}

	/* Thread priority: device tree override or default */
	aval = vmm_devtree_attrval(vnode, "mterm_priority");
	prio = aval ? *((u32 *)aval) : VMM_THREAD_DEF_PRIORITY;

	/* Thread time slice: device tree override or default */
	aval = vmm_devtree_attrval(vnode, "mterm_time_slice");
	slice = aval ? *((u32 *)aval) : VMM_THREAD_DEF_TIME_SLICE;

	/* Create the mterm thread; it is system critical */
	mtctrl.thread = vmm_threads_create("mterm", &mterm_main, NULL,
					   prio, slice);
	if (!mtctrl.thread) {
		vmm_panic("Creation of system critical thread failed.\n");
	}

	/* Start the mterm thread */
	vmm_threads_start(mtctrl.thread);

	return VMM_OK;
}
static int virtio_mmio_probe(struct vmm_guest *guest, struct vmm_emudev *edev, const struct vmm_devtree_nodeid *eid) { int rc = VMM_OK; const char *attr; struct virtio_mmio_dev *m; m = vmm_zalloc(sizeof(struct virtio_mmio_dev)); if (!m) { rc = VMM_EFAIL; goto virtio_mmio_probe_done; } m->guest = guest; vmm_snprintf(m->dev.name, VIRTIO_DEVICE_MAX_NAME_LEN, "%s/%s", guest->name, edev->node->name); m->dev.edev = edev; m->dev.tra = &mmio_tra; m->dev.tra_data = m; m->dev.guest = guest; m->config = (struct virtio_mmio_config) { .magic = {'v', 'i', 'r', 't'}, .version = 1, .vendor_id = 0x52535658, /* XVSR */ .queue_num_max = 256, }; attr = vmm_devtree_attrval(edev->node, "virtio_type"); if (attr) { m->config.device_id = *((u32 *)attr); } else { rc = VMM_EFAIL; goto virtio_mmio_probe_freestate_fail; } m->dev.id.type = m->config.device_id; rc = vmm_devtree_irq_get(edev->node, &m->irq, 0); if (rc) { goto virtio_mmio_probe_freestate_fail; } if ((rc = virtio_register_device(&m->dev))) { goto virtio_mmio_probe_freestate_fail; } edev->priv = m; goto virtio_mmio_probe_done; virtio_mmio_probe_freestate_fail: vmm_free(m); virtio_mmio_probe_done: return rc; }
/* Initialize the default terminal (i.MX UART). The chosen node's
 * console attribute names the UART device tree node; register base
 * and clock come from that node, baudrate defaults to 115200.
 */
int __init arch_defterm_init(void)
{
	int err;
	u32 *aval;
	const char *console;
	struct vmm_devtree_node *np;
	u32 imx_defterm_inclk;
	u32 imx_defterm_baud;

	/* The chosen node tells us which node is the console */
	np = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
				 VMM_DEVTREE_CHOSEN_NODE_NAME);
	if (!np) {
		return VMM_ENODEV;
	}
	console = vmm_devtree_attrval(np, VMM_DEVTREE_CONSOLE_ATTR_NAME);
	if (!console) {
		return VMM_ENODEV;
	}

	/* Resolve the console path to the UART node */
	np = vmm_devtree_getnode(console);
	if (!np) {
		return VMM_ENODEV;
	}

	/* Map the UART register block */
	err = vmm_devtree_regmap(np, &imx_defterm_base, 0);
	if (err) {
		return err;
	}

	/* Input clock is mandatory */
	err = vmm_devtree_clock_frequency(np, &imx_defterm_inclk);
	if (err) {
		return err;
	}

	/* Baudrate: device tree value or 115200 default */
	aval = vmm_devtree_attrval(np, "baudrate");
	imx_defterm_baud = aval ? *aval : 115200;

	imx_lowlevel_init(imx_defterm_base,
			  imx_defterm_baud, imx_defterm_inclk);

	return VMM_OK;
}
static int virtio_net_connect(struct virtio_device *dev, struct virtio_emulator *emu) { int i, rc; char *attr; struct virtio_net_dev *ndev; struct vmm_netswitch *nsw; ndev = vmm_zalloc(sizeof(struct virtio_net_dev)); if (!ndev) { vmm_printf("Failed to allocate virtio net device....\n"); return VMM_EFAIL; } ndev->vdev = dev; vmm_snprintf(ndev->name, VIRTIO_DEVICE_MAX_NAME_LEN, "%s", dev->name); ndev->port = vmm_netport_alloc(ndev->name, VMM_NETPORT_DEF_QUEUE_SIZE); ndev->port->mtu = VIRTIO_NET_MTU; ndev->port->link_changed = virtio_net_set_link; ndev->port->can_receive = virtio_net_can_receive; ndev->port->switch2port_xfer = virtio_net_switch2port_xfer; ndev->port->priv = ndev; rc = vmm_netport_register(ndev->port); if (rc) { vmm_netport_free(ndev->port); vmm_free(ndev); return rc; } attr = vmm_devtree_attrval(dev->edev->node, "switch"); if (attr) { nsw = vmm_netswitch_find((char *)attr); if (!nsw) { vmm_printf("%s: Cannot find netswitch \"%s\"\n", __func__, (char *)attr); } else { vmm_netswitch_port_add(nsw, ndev->port); } } for (i = 0; i < 6; i++) { ndev->config.mac[i] = vmm_netport_mac(ndev->port)[i]; } ndev->config.status = VIRTIO_NET_S_LINK_UP; dev->emu_data = ndev; return VMM_OK; }
static int virtio_blk_connect(struct virtio_device *dev, struct virtio_emulator *emu) { int rc; char *attr; struct virtio_blk_dev *bdev; bdev = vmm_zalloc(sizeof(struct virtio_blk_dev)); if (!bdev) { vmm_printf("Failed to allocate virtio block device....\n"); return VMM_ENOMEM; } bdev->vdev = dev; bdev->blk_client.notifier_call = &virtio_blk_notification; bdev->blk_client.priority = 0; rc = vmm_blockdev_register_client(&bdev->blk_client); if (rc) { vmm_free(bdev); return rc; } INIT_SPIN_LOCK(&bdev->blk_lock); attr = vmm_devtree_attrval(dev->edev->node, "blkdev"); if (attr) { if (strlcpy(bdev->blk_name,attr, sizeof(bdev->blk_name)) >= sizeof(bdev->blk_name)) { vmm_free(bdev); return VMM_EOVERFLOW; } bdev->blk = vmm_blockdev_find(bdev->blk_name); } else { bdev->blk_name[0] = 0; bdev->blk = NULL; } bdev->config.capacity = (bdev->blk) ? bdev->blk->num_blocks : 0; bdev->config.seg_max = VIRTIO_BLK_DISK_SEG_MAX, bdev->config.blk_size = (bdev->blk) ? bdev->blk->block_size : VIRTIO_BLK_SECTOR_SIZE; dev->emu_data = bdev; return VMM_OK; }
/* Check whether @node's "compatible" attribute contains an exact
 * match for @compat. The attribute is a list of NUL-separated
 * strings; each entry is compared in turn.
 *
 * Returns 1 on match, 0 otherwise (including a missing attribute).
 */
static int devtree_node_is_compatible(const struct vmm_devtree_node *node,
				      const char *compat)
{
	const char *entry;
	int remaining, entry_len;

	entry = vmm_devtree_attrval(node, VMM_DEVTREE_COMPATIBLE_ATTR_NAME);
	remaining = vmm_devtree_attrlen(node,
					VMM_DEVTREE_COMPATIBLE_ATTR_NAME);
	if (entry == NULL) {
		return 0;
	}

	/* Scan each NUL-terminated entry within the attribute */
	while (remaining > 0) {
		if (strcmp(entry, compat) == 0) {
			return 1;
		}
		entry_len = strlen(entry) + 1;
		entry += entry_len;
		remaining -= entry_len;
	}

	return 0;
}
void cmd_host_info(struct vmm_chardev *cdev) { char *attr; struct vmm_devtree_node *node; u32 total = vmm_host_ram_total_frame_count(); attr = NULL; node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING); if (node) { attr = vmm_devtree_attrval(node, VMM_DEVTREE_MODEL_ATTR_NAME); } if (attr) { vmm_cprintf(cdev, "%-20s: %s\n", "Host Name", attr); } else { vmm_cprintf(cdev, "%-20s: %s\n", "Host Name", CONFIG_BOARD); } vmm_cprintf(cdev, "%-20s: %u\n", "Total Online CPUs", vmm_num_online_cpus()); vmm_cprintf(cdev, "%-20s: %u MB\n", "Total VAPOOL", CONFIG_VAPOOL_SIZE_MB); vmm_cprintf(cdev, "%-20s: %u MB\n", "Total RAM", ((total *VMM_PAGE_SIZE) >> 20)); arch_board_print_info(cdev); }
/* Early architecture CPU init: parse early boot options from the
 * chosen node's bootargs attribute, if present.
 */
int __init arch_cpu_early_init(void)
{
	struct vmm_devtree_node *chosen;
	char *bootargs;

	/*
	 * Host virtual memory, device tree, heap is up.
	 * Do necessary early stuff like iomapping devices
	 * memory or boot time memory reservation here.
	 */
	chosen = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
				     VMM_DEVTREE_CHOSEN_NODE_NAME);
	if (!chosen) {
		return VMM_ENODEV;
	}

	bootargs = vmm_devtree_attrval(chosen,
				       VMM_DEVTREE_BOOTARGS_ATTR_NAME);
	if (bootargs) {
		vmm_parse_early_options(bootargs);
	}

	return VMM_OK;
}
/* Initialize architecture-specific state of a VCPU.
 *
 * For all VCPUs: clears the register file and sets PC, exception stack
 * and CPSR mode (user for normal VCPUs, supervisor for orphan VCPUs).
 * For normal VCPUs only: maps the device tree "compatible" string to an
 * emulated CPU model, allocates/resets the private banked-register
 * context, and derives the ARM feature set for that model.
 *
 * Returns VMM_OK or a VMM error code on unknown/missing compatible.
 */
int arch_vcpu_init(struct vmm_vcpu *vcpu)
{
	u32 ite, cpuid;
	const char *attr;

	/* Initialize User Mode Registers */
	/* For both Orphan & Normal VCPUs */
	memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));
	arm_regs(vcpu)->pc = vcpu->start_pc;
	arm_regs(vcpu)->sp_excp = vcpu->stack_va + vcpu->stack_sz - 4;
	if (vcpu->is_normal) {
		/* Guest VCPUs start in user mode with async aborts off */
		arm_regs(vcpu)->cpsr = CPSR_ZERO_MASK;
		arm_regs(vcpu)->cpsr |= CPSR_ASYNC_ABORT_DISABLED;
		arm_regs(vcpu)->cpsr |= CPSR_MODE_USER;
		arm_regs(vcpu)->sp = 0;
	} else {
		/* Orphan (hypervisor) VCPUs run in supervisor mode */
		arm_regs(vcpu)->cpsr = CPSR_ZERO_MASK;
		arm_regs(vcpu)->cpsr |= CPSR_ASYNC_ABORT_DISABLED;
		arm_regs(vcpu)->cpsr |= CPSR_MODE_SUPERVISOR;
		arm_regs(vcpu)->sp = arm_regs(vcpu)->sp_excp;
	}

	/* Initialize Supervisor Mode Registers */
	/* For only Normal VCPUs */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Map the compatible string to an emulated CPU model */
	attr = vmm_devtree_attrval(vcpu->node,
				   VMM_DEVTREE_COMPATIBLE_ATTR_NAME);
	if (!attr) {
		return VMM_EFAIL;
	}
	if (strcmp(attr, "armv5te,arm926ej") == 0) {
		cpuid = ARM_CPUID_ARM926;
	} else if (strcmp(attr, "armv6,arm11mp") == 0) {
		cpuid = ARM_CPUID_ARM11MPCORE;
	} else if (strcmp(attr, "armv7a,cortex-a8") == 0) {
		cpuid = ARM_CPUID_CORTEXA8;
	} else if (strcmp(attr, "armv7a,cortex-a9") == 0) {
		cpuid = ARM_CPUID_CORTEXA9;
	} else {
		return VMM_EFAIL;
	}
	if (!vcpu->reset_count) {
		/* First boot: allocate the private context.
		 * NOTE(review): vmm_zalloc() result is not checked here;
		 * a failed allocation would be dereferenced immediately
		 * below — confirm and add a NULL check.
		 */
		vcpu->arch_priv = vmm_zalloc(sizeof(arm_priv_t));
		arm_priv(vcpu)->cpsr = CPSR_ASYNC_ABORT_DISABLED |
				       CPSR_IRQ_DISABLED |
				       CPSR_FIQ_DISABLED |
				       CPSR_MODE_SUPERVISOR;
	} else {
		/* VCPU reset: clear all banked registers of every mode */
		for (ite = 0; ite < CPU_FIQ_GPR_COUNT; ite++) {
			arm_priv(vcpu)->gpr_usr[ite] = 0x0;
			arm_priv(vcpu)->gpr_fiq[ite] = 0x0;
		}
		arm_priv(vcpu)->sp_usr = 0x0;
		arm_priv(vcpu)->lr_usr = 0x0;
		arm_priv(vcpu)->sp_svc = 0x0;
		arm_priv(vcpu)->lr_svc = 0x0;
		arm_priv(vcpu)->spsr_svc = 0x0;
		arm_priv(vcpu)->sp_mon = 0x0;
		arm_priv(vcpu)->lr_mon = 0x0;
		arm_priv(vcpu)->spsr_mon = 0x0;
		arm_priv(vcpu)->sp_abt = 0x0;
		arm_priv(vcpu)->lr_abt = 0x0;
		arm_priv(vcpu)->spsr_abt = 0x0;
		arm_priv(vcpu)->sp_und = 0x0;
		arm_priv(vcpu)->lr_und = 0x0;
		arm_priv(vcpu)->spsr_und = 0x0;
		arm_priv(vcpu)->sp_irq = 0x0;
		arm_priv(vcpu)->lr_irq = 0x0;
		arm_priv(vcpu)->spsr_irq = 0x0;
		arm_priv(vcpu)->sp_fiq = 0x0;
		arm_priv(vcpu)->lr_fiq = 0x0;
		arm_priv(vcpu)->spsr_fiq = 0x0;
		/* Put the VCPU back into supervisor mode */
		cpu_vcpu_cpsr_update(vcpu, arm_regs(vcpu),
				     (CPSR_ZERO_MASK |
				      CPSR_ASYNC_ABORT_DISABLED |
				      CPSR_IRQ_DISABLED |
				      CPSR_FIQ_DISABLED |
				      CPSR_MODE_SUPERVISOR),
				     CPSR_ALLBITS_MASK);
	}
	if (!vcpu->reset_count) {
		/* Derive the emulated feature set from the CPU model */
		arm_priv(vcpu)->features = 0;
		switch (cpuid) {
		case ARM_CPUID_ARM926:
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_CACHE_TEST_CLEAN);
			break;
		case ARM_CPUID_ARM11MPCORE:
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_V6K);
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
			arm_set_feature(vcpu, ARM_FEATURE_VAPA);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			break;
		case ARM_CPUID_CORTEXA8:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA9:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		default:
			break;
		};
		/* Some features automatically imply others: */
		if (arm_feature(vcpu, ARM_FEATURE_V7)) {
			arm_set_feature(vcpu, ARM_FEATURE_VAPA);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_MPIDR);
			if (!arm_feature(vcpu, ARM_FEATURE_M)) {
				arm_set_feature(vcpu, ARM_FEATURE_V6K);
			} else {
				arm_set_feature(vcpu, ARM_FEATURE_V6);
			}
		}
		if (arm_feature(vcpu, ARM_FEATURE_V6K)) {
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_MVFR);
		}
		if (arm_feature(vcpu, ARM_FEATURE_V6)) {
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			if (!arm_feature(vcpu, ARM_FEATURE_M)) {
				arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			}
		}
		if (arm_feature(vcpu, ARM_FEATURE_V5)) {
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
		}
		if (arm_feature(vcpu, ARM_FEATURE_M)) {
			arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV);
		}
		if (arm_feature(vcpu, ARM_FEATURE_ARM_DIV)) {
			arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV);
		}
		if (arm_feature(vcpu, ARM_FEATURE_VFP4)) {
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
		}
		if (arm_feature(vcpu, ARM_FEATURE_VFP3)) {
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
		}
		if (arm_feature(vcpu, ARM_FEATURE_LPAE)) {
			arm_set_feature(vcpu, ARM_FEATURE_PXN);
		}
	}
	return cpu_vcpu_cp15_init(vcpu, cpuid);
}
/* Deferred system bring-up work.
 *
 * Initializes the remaining hypervisor frameworks (command manager,
 * device drivers, device emulation, character devices, virtual serial),
 * waits for secondary CPUs, loads modules, runs final CPU/board init,
 * frees init memory, and finally processes the chosen node: stdio
 * console selection, RTC wallclock sync, and boot command execution.
 */
static void system_init_work(struct vmm_work *work)
{
#define BOOTCMD_WIDTH 256
	int ret;
	char bcmd[BOOTCMD_WIDTH];
	const char *str;
	u32 c, freed;
	struct vmm_chardev *cdev;
#if defined(CONFIG_RTC)
	struct vmm_rtcdev *rdev;
#endif
	struct vmm_devtree_node *node, *node1;

	/* Initialize command manager */
	vmm_printf("Initialize Command Manager\n");
	ret = vmm_cmdmgr_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize device driver framework */
	vmm_printf("Initialize Device Driver Framework\n");
	ret = vmm_devdrv_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize device emulation framework */
	vmm_printf("Initialize Device Emulation Framework\n");
	ret = vmm_devemu_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize character device framework */
	vmm_printf("Initialize Character Device Framework\n");
	ret = vmm_chardev_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize virtual serial port framework */
	vmm_printf("Initialize Virtual Serial Port Framework\n");
	ret = vmm_vserial_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

#if defined(CONFIG_SMP)
	/* Poll for all present CPUs to become online */
	/* Note: There is a timeout of 1 second */
	/* Note: The modules might use SMP IPIs or might have per-cpu context
	 * so, we do this before vmm_modules_init() in-order to make sure that
	 * correct number of online CPUs are visible to all modules.
	 */
	ret = 1000;
	while(ret--) {
		int all_cpu_online = 1;

		for_each_present_cpu(c) {
			if (!vmm_cpu_online(c)) {
				all_cpu_online = 0;
			}
		}

		if (all_cpu_online) {
			break;
		}

		vmm_mdelay(1);
	}
#endif

	/* Initialize hypervisor modules */
	vmm_printf("Initialize Hypervisor Modules\n");
	ret = vmm_modules_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize cpu final */
	vmm_printf("Initialize CPU Final\n");
	ret = arch_cpu_final_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Initialize board final */
	vmm_printf("Initialize Board Final\n");
	ret = arch_board_final_init();
	if (ret) {
		vmm_panic("Error %d\n", ret);
	}

	/* Print status of present host CPUs */
	for_each_present_cpu(c) {
		if (vmm_cpu_online(c)) {
			vmm_printf("CPU%d: Online\n", c);
		} else {
			vmm_printf("CPU%d: Possible\n", c);
		}
	}
	vmm_printf("Brought Up %d CPUs\n", vmm_num_online_cpus());

	/* Free init memory */
	vmm_printf("Freeing init memory: ");
	freed = vmm_host_free_initmem();
	vmm_printf("%dK\n", freed);

	/* Process attributes in chosen node */
	node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
				   VMM_DEVTREE_CHOSEN_NODE_NAME);
	if (node) {
		/* Find character device based on console attribute.
		 * NOTE(review): str may be NULL when the console
		 * attribute is absent — confirm vmm_chardev_find() and
		 * vmm_devtree_getnode() tolerate a NULL argument.
		 */
		str = vmm_devtree_attrval(node,
					  VMM_DEVTREE_CONSOLE_ATTR_NAME);
		if (!(cdev = vmm_chardev_find(str))) {
			if ((node1 = vmm_devtree_getnode(str))) {
				cdev = vmm_chardev_find(node1->name);
			}
		}
		/* Set chosen console device as stdio device */
		if (cdev) {
			vmm_printf("Change stdio device to %s\n", cdev->name);
			vmm_stdio_change_device(cdev);
		}

#if defined(CONFIG_RTC)
		/* Find rtc device based on rtcdev attribute */
		str = vmm_devtree_attrval(node,
					  VMM_DEVTREE_RTCDEV_ATTR_NAME);
		if (!(rdev = vmm_rtcdev_find(str))) {
			if ((node1 = vmm_devtree_getnode(str))) {
				rdev = vmm_rtcdev_find(node1->name);
			}
		}
		/* Syncup wallclock time with chosen rtc device */
		if (rdev) {
			ret = vmm_rtcdev_sync_wallclock(rdev);
			vmm_printf("Syncup wallclock using %s", rdev->name);
			if (ret) {
				vmm_printf("(error %d)", ret);
			}
			vmm_printf("\n");
		}
#endif

		/* Execute boot commands: the attribute holds one or more
		 * NUL-separated command strings of total length c.
		 */
		str = vmm_devtree_attrval(node,
					  VMM_DEVTREE_BOOTCMD_ATTR_NAME);
		if (str) {
			c = vmm_devtree_attrlen(node,
						VMM_DEVTREE_BOOTCMD_ATTR_NAME);
			while (c) {
#if defined(CONFIG_VERBOSE_MODE)
				/* Print boot command */
				vmm_printf("bootcmd: %s\n", str);
#endif
				/* Execute boot command */
				strlcpy(bcmd, str, sizeof(bcmd));
				cdev = vmm_stdio_device();
				vmm_cmdmgr_execute_cmdstr(cdev, bcmd, NULL);
				/* Next boot command */
				c -= strlen(str) + 1;
				str += strlen(str) + 1;
			}
		}
	}
}
/* Probe routine for a generic UART: allocates a character device and
 * port state, reads configuration from the device tree node
 * (reg_align, reg_offset, baudrate; input clock via devdrv), performs
 * low-level hardware init, and registers the character device.
 *
 * Returns VMM_OK on success or a VMM error code.
 */
static int uart_driver_probe(struct vmm_device *dev,
			     const struct vmm_devid *devid)
{
	int rc;
	const char *attr;
	struct vmm_chardev *cd;
	struct uart_port *port;

	cd = vmm_malloc(sizeof(struct vmm_chardev));
	if (!cd) {
		rc = VMM_EFAIL;
		goto free_nothing;
	}
	vmm_memset(cd, 0, sizeof(struct vmm_chardev));

	port = vmm_malloc(sizeof(struct uart_port));
	if (!port) {
		rc = VMM_EFAIL;
		goto free_chardev;
	}
	vmm_memset(port, 0, sizeof(struct uart_port));

	/* NOTE(review): vmm_strcpy() is unbounded — confirm cd->name is
	 * always large enough for the node name.
	 */
	vmm_strcpy(cd->name, dev->node->name);
	cd->dev = dev;
	cd->ioctl = NULL;
	cd->read = uart_read;
	cd->write = uart_write;
	cd->priv = port;

	/* Map UART registers */
	rc = vmm_devdrv_ioremap(dev, &port->base, 0);
	if (rc) {
		goto free_port;
	}

	/* Register stride between UART registers; defaults to 1 byte */
	attr = vmm_devtree_attrval(dev->node, "reg_align");
	if (attr) {
		port->reg_align = *((u32 *)attr);
	} else {
		port->reg_align = 1;
	}

	/* Optional fixed offset added to the mapped base */
	attr = vmm_devtree_attrval(dev->node, "reg_offset");
	if (attr) {
		port->base += *((u32 *)attr);
	}

	/* Baudrate is mandatory for this driver.
	 * NOTE(review): failure paths after ioremap do not unmap the
	 * registers — confirm whether a matching unmap is required.
	 */
	attr = vmm_devtree_attrval(dev->node, "baudrate");
	if (!attr) {
		rc = VMM_EFAIL;
		goto free_port;
	}
	port->baudrate = *((u32 *)attr);
	port->input_clock = vmm_devdrv_clock_rate(dev);

	/* Call low-level init function */
	uart_lowlevel_init(port->base, port->reg_align,
			   port->baudrate, port->input_clock);

	rc = vmm_chardev_register(cd);
	if (rc) {
		goto free_port;
	}

	return VMM_OK;

free_port:
	vmm_free(port);
free_chardev:
	vmm_free(cd);
free_nothing:
	return rc;
}
static int imx_driver_probe(struct vmm_device *dev, const struct vmm_devtree_nodeid *devid) { int rc = VMM_EFAIL; const char *attr = NULL; struct imx_port *port = NULL; port = vmm_zalloc(sizeof(struct imx_port)); if (!port) { rc = VMM_ENOMEM; goto free_nothing; } if (strlcpy(port->cd.name, dev->node->name, sizeof(port->cd.name)) >= sizeof(port->cd.name)) { rc = VMM_EOVERFLOW; goto free_port; } port->cd.dev = dev; port->cd.ioctl = NULL; port->cd.read = imx_read; port->cd.write = imx_write; port->cd.priv = port; INIT_COMPLETION(&port->read_possible); #if defined(UART_IMX_USE_TXINTR) INIT_COMPLETION(&port->write_possible); #endif rc = vmm_devtree_regmap(dev->node, &port->base, 0); if (rc) { goto free_port; } port->mask = UCR1_RRDYEN | UCR1_RTSDEN; #if defined(UART_IMX_USE_TXINTR) port->mask |= UCR1_TRDYEN; #endif vmm_writel(port->mask, (void *)port->base + UCR1); attr = vmm_devtree_attrval(dev->node, "baudrate"); if (!attr) { rc = VMM_EFAIL; goto free_reg; } port->baudrate = *((u32 *) attr); rc = vmm_devtree_clock_frequency(dev->node, &port->input_clock); if (!attr) { rc = VMM_EFAIL; goto free_reg; } rc = vmm_devtree_irq_get(dev->node, &port->irq, 0); if (rc) { rc = VMM_EFAIL; goto free_reg; } if ((rc = vmm_host_irq_register(port->irq, dev->node->name, imx_irq_handler, port))) { goto free_reg; } /* Call low-level init function */ imx_lowlevel_init(port->base, port->baudrate, port->input_clock); port->mask = vmm_readl((void *)port->base + UCR1); rc = vmm_chardev_register(&port->cd); if (rc) { goto free_irq; } dev->priv = port; return rc; free_irq: vmm_host_irq_unregister(port->irq, port); free_reg: vmm_devtree_regunmap(dev->node, port->base, 0); free_port: vmm_free(port); free_nothing: return rc; }
/* Initialize the i.MX EPIT timer as a one-shot clockchip.
 *
 * Finds the first "freescale,epit-timer" compatible node, reads its
 * clock, timer number and IRQ, maps the registers, programs the timer
 * into a known state, then registers the IRQ handler and clockchip.
 *
 * Returns VMM_OK or a VMM error code (resources released on failure).
 */
int __cpuinit epit_clockchip_init(void)
{
	int rc = VMM_ENODEV;
	u32 clock, hirq, timer_num, *val;
	struct vmm_devtree_node *node;
	struct epit_clockchip *ecc;

	/* find the first epit compatible node */
	node = vmm_devtree_find_compatible(NULL, NULL,
					   "freescale,epit-timer");
	if (!node) {
		goto fail;
	}

	/* Read clock frequency */
	rc = vmm_devtree_clock_frequency(node, &clock);
	if (rc) {
		goto fail;
	}

	/* Read timer_num attribute */
	val = vmm_devtree_attrval(node, "timer_num");
	if (!val) {
		rc = VMM_ENOTAVAIL;
		goto fail;
	}
	timer_num = *val;

	/* Read irq attribute */
	rc = vmm_devtree_irq_get(node, &hirq, 0);
	if (rc) {
		goto fail;
	}

	/* allocate our struct */
	ecc = vmm_zalloc(sizeof(struct epit_clockchip));
	if (!ecc) {
		rc = VMM_ENOMEM;
		goto fail;
	}

	/* Map timer registers */
	rc = vmm_devtree_regmap(node, &ecc->base, 0);
	if (rc) {
		goto regmap_fail;
	}

	ecc->match_mask = 1 << timer_num;
	ecc->timer_num = timer_num;

	/* Setup clockchip */
	ecc->clkchip.name = node->name;
	ecc->clkchip.hirq = hirq;
	ecc->clkchip.rating = 300;
	ecc->clkchip.cpumask = vmm_cpumask_of(0);
	ecc->clkchip.features = VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&ecc->clkchip.mult,
				   &ecc->clkchip.shift,
				   VMM_NSEC_PER_SEC, clock, 10);
	ecc->clkchip.min_delta_ns =
			vmm_clockchip_delta2ns(MIN_REG_COMPARE,
					       &ecc->clkchip);
	ecc->clkchip.max_delta_ns =
			vmm_clockchip_delta2ns(MAX_REG_COMPARE,
					       &ecc->clkchip);
	ecc->clkchip.set_mode = epit_set_mode;
	ecc->clkchip.set_next_event = epit_set_next_event;
	ecc->clkchip.priv = ecc;

	/*
	 * Initialise to a known state (all timers off, and timing reset)
	 */
	vmm_writel(0x0, (void *)(ecc->base + EPITCR));
	/*
	 * Initialize the load register to the max value to decrement.
	 */
	vmm_writel(0xffffffff, (void *)(ecc->base + EPITLR));
	/*
	 * enable the timer, set it to the high reference clock,
	 * allow the timer to work in WAIT mode.
	 */
	vmm_writel(EPITCR_EN | EPITCR_CLKSRC_REF_HIGH | EPITCR_WAITEN,
		   (void *)(ecc->base + EPITCR));

	/* Register interrupt handler */
	rc = vmm_host_irq_register(hirq, ecc->clkchip.name,
				   &epit_timer_interrupt, ecc);
	if (rc) {
		goto irq_fail;
	}

	/* Register clockchip */
	rc = vmm_clockchip_register(&ecc->clkchip);
	if (rc) {
		goto register_fail;
	}

	return VMM_OK;

register_fail:
	vmm_host_irq_unregister(hirq, ecc);
irq_fail:
	vmm_devtree_regunmap(node, ecc->base, 0);
regmap_fail:
	vmm_free(ecc);
fail:
	return rc;
}
/* Initialize architecture register state of a VCPU (legacy variant).
 *
 * For all VCPUs: clears the register file, sets PC, and the CPSR mode
 * (user for normal VCPUs, supervisor for orphan VCPUs). For normal
 * VCPUs only: maps the device tree compatible string to an emulated
 * CPU model, allocates/resets the private banked-register context and
 * derives the ARM feature set.
 *
 * Returns VMM_OK or VMM_EFAIL on an unknown compatible string.
 */
int arch_vcpu_regs_init(struct vmm_vcpu * vcpu)
{
	u32 ite, cpuid;
	const char * attr;

	/* Initialize User Mode Registers */
	/* For both Orphan & Normal VCPUs */
	vmm_memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));
	arm_regs(vcpu)->pc = vcpu->start_pc;
	if (vcpu->is_normal) {
		/* Guest VCPUs start in user mode */
		arm_regs(vcpu)->cpsr = CPSR_ZERO_MASK;
		arm_regs(vcpu)->cpsr |= CPSR_ASYNC_ABORT_DISABLED;
		arm_regs(vcpu)->cpsr |= CPSR_MODE_USER;
	} else {
		/* Orphan VCPUs run in supervisor mode */
		arm_regs(vcpu)->cpsr = CPSR_ZERO_MASK;
		arm_regs(vcpu)->cpsr |= CPSR_ASYNC_ABORT_DISABLED;
		arm_regs(vcpu)->cpsr |= CPSR_MODE_SUPERVISOR;
		arm_regs(vcpu)->sp = vcpu->start_sp;
	}

	/* Initialize Supervisor Mode Registers */
	/* For only Normal VCPUs */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Map the compatible string to an emulated CPU model.
	 * NOTE(review): attr is not checked for NULL before
	 * vmm_strcmp() — confirm behavior when the attribute is
	 * missing.
	 */
	attr = vmm_devtree_attrval(vcpu->node,
				   VMM_DEVTREE_COMPATIBLE_ATTR_NAME);
	if (vmm_strcmp(attr, "ARMv7a,cortex-a8") == 0) {
		cpuid = ARM_CPUID_CORTEXA8;
	} else if (vmm_strcmp(attr, "ARMv5te,ARM926ej") == 0) {
		cpuid = ARM_CPUID_ARM926;
	} else {
		return VMM_EFAIL;
	}
	if (!vcpu->reset_count) {
		/* First boot: allocate and clear the private context.
		 * NOTE(review): vmm_malloc() result is not checked;
		 * a failed allocation would be dereferenced below.
		 */
		vcpu->arch_priv = vmm_malloc(sizeof(arm_priv_t));
		vmm_memset(arm_priv(vcpu), 0, sizeof(arm_priv_t));
		arm_priv(vcpu)->cpsr = CPSR_ASYNC_ABORT_DISABLED |
				       CPSR_IRQ_DISABLED |
				       CPSR_FIQ_DISABLED |
				       CPSR_MODE_SUPERVISOR;
	} else {
		/* VCPU reset: clear all banked registers of every mode */
		for (ite = 0; ite < CPU_FIQ_GPR_COUNT; ite++) {
			arm_priv(vcpu)->gpr_usr[ite] = 0x0;
			arm_priv(vcpu)->gpr_fiq[ite] = 0x0;
		}
		arm_priv(vcpu)->sp_usr = 0x0;
		arm_priv(vcpu)->lr_usr = 0x0;
		arm_priv(vcpu)->sp_svc = 0x0;
		arm_priv(vcpu)->lr_svc = 0x0;
		arm_priv(vcpu)->spsr_svc = 0x0;
		arm_priv(vcpu)->sp_mon = 0x0;
		arm_priv(vcpu)->lr_mon = 0x0;
		arm_priv(vcpu)->spsr_mon = 0x0;
		arm_priv(vcpu)->sp_abt = 0x0;
		arm_priv(vcpu)->lr_abt = 0x0;
		arm_priv(vcpu)->spsr_abt = 0x0;
		arm_priv(vcpu)->sp_und = 0x0;
		arm_priv(vcpu)->lr_und = 0x0;
		arm_priv(vcpu)->spsr_und = 0x0;
		arm_priv(vcpu)->sp_irq = 0x0;
		arm_priv(vcpu)->lr_irq = 0x0;
		arm_priv(vcpu)->spsr_irq = 0x0;
		arm_priv(vcpu)->sp_fiq = 0x0;
		arm_priv(vcpu)->lr_fiq = 0x0;
		arm_priv(vcpu)->spsr_fiq = 0x0;
		/* Put the VCPU back into supervisor mode */
		cpu_vcpu_cpsr_update(vcpu, arm_regs(vcpu),
				     (CPSR_ZERO_MASK |
				      CPSR_ASYNC_ABORT_DISABLED |
				      CPSR_IRQ_DISABLED |
				      CPSR_FIQ_DISABLED |
				      CPSR_MODE_SUPERVISOR),
				     CPSR_ALLBITS_MASK);
	}
	if (!vcpu->reset_count) {
		/* Derive the emulated feature set from the CPU model */
		arm_priv(vcpu)->features = 0;
		switch (cpuid) {
		case ARM_CPUID_ARM926:
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
			break;
		case ARM_CPUID_CORTEXA8:
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_V6K);
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			break;
		case ARM_CPUID_CORTEXA9:
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_V6K);
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			break;
		default:
			break;
		};
	}
#ifdef CONFIG_ARM32_FUNCSTATS
	/* Clear per-function statistics counters */
	for (ite=0; ite < ARM_FUNCSTAT_MAX; ite++) {
		arm_priv(vcpu)->funcstat[ite].function_name = NULL;
		arm_priv(vcpu)->funcstat[ite].entry_count = 0;
		arm_priv(vcpu)->funcstat[ite].exit_count = 0;
		arm_priv(vcpu)->funcstat[ite].time = 0;
	}
#endif
	return cpu_vcpu_cp15_init(vcpu, cpuid);
}
/* Bring up the lwIP network stack.
 *
 * Reads optional IP address, netmask and netswitch name from the
 * vmminfo/net/netstack device tree node (defaults: 192.168.0.1/24 and
 * the first netswitch), allocates and registers a netport, initializes
 * lwIP TCP/IP, adds the network interface and attaches the port to
 * the chosen netswitch.
 *
 * Returns VMM_OK or a VMM error code; panics when no netswitch exists.
 */
static int __init lwip_netstack_init(void)
{
	int rc;
	struct vmm_netswitch *nsw;
	struct vmm_devtree_node *node;
	const char *attrval;
	u8 ip[] = {192, 168, 0, 1};
	u8 mask[] = {255, 255, 255, 0};

	/* Clear lwIP state */
	memset(&lns, 0, sizeof(lns));

	/* Get netstack device tree node if available.
	 * NOTE(review): node may be NULL here and is passed directly to
	 * vmm_devtree_attrval() below — confirm that function tolerates
	 * a NULL node.
	 */
	node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
				   VMM_DEVTREE_VMMINFO_NODE_NAME
				   VMM_DEVTREE_PATH_SEPARATOR_STRING
				   VMM_DEVTREE_VMMNET_NODE_NAME
				   VMM_DEVTREE_PATH_SEPARATOR_STRING
				   VMM_DEVTREE_NETSTACK_NODE_NAME);

	/* Retrieve preferred IP address */
	attrval = vmm_devtree_attrval(node, "ipaddr");
	if (attrval) {
		/* Read ip address from netstack node */
		str2ipaddr(ip, attrval);
	}

	/* Retrieve preferred network mask */
	attrval = vmm_devtree_attrval(node, "netmask");
	if (attrval) {
		/* Read network mask from netstack node */
		str2ipaddr(mask, attrval);
	}

	/* Retrieve preferred netswitch */
	attrval = vmm_devtree_attrval(node, "netswitch");
	if (attrval) {
		/* Find netswitch with given name */
		nsw = vmm_netswitch_find(attrval);
	} else {
		/* Get the first netswitch */
		nsw = vmm_netswitch_get(0);
	}
	if (!nsw) {
		vmm_panic("No netswitch found\n");
	}

	/* Allocate a netport */
	lns.port = vmm_netport_alloc("lwip-netport",
				     VMM_NETPORT_DEF_QUEUE_SIZE);
	if (!lns.port) {
		vmm_printf("lwIP netport_alloc() failed\n");
		rc = VMM_EFAIL;
		goto fail;
	}

	/* Setup a netport */
	lns.port->mtu = 1500;
	lns.port->link_changed = lwip_set_link;
	lns.port->can_receive = lwip_can_receive;
	lns.port->switch2port_xfer = lwip_switch2port_xfer;
	lns.port->priv = &lns;

	/* Register a netport */
	rc = vmm_netport_register(lns.port);
	if (rc) {
		goto fail1;
	}

	/* Initialize lwIP + TCP/IP APIs */
	tcpip_init(NULL, NULL);

	/* Add netif; gateway defaults to our own address */
	IP4_ADDR(&lns.ipaddr, ip[0],ip[1],ip[2],ip[3]);
	IP4_ADDR(&lns.netmask, mask[0],mask[1],mask[2],mask[3]);
	IP4_ADDR(&lns.gw, ip[0],ip[1],ip[2],ip[3]);
	netif_add(&lns.nif, &lns.ipaddr, &lns.netmask, &lns.gw, &lns,
		  lwip_netstack_netif_init, ethernet_input);

	/* Set default netif */
	netif_set_default(&lns.nif);

	/* Attach netport with netswitch
	 * Note: This will cause netport link_change()
	 */
	rc = vmm_netswitch_port_add(nsw, lns.port);
	if (rc) {
		goto fail2;
	}

#if !defined(PING_USE_SOCKETS)
	/* Initialize RAW PCB for ping */
	ping_raw_init();
#endif

	return VMM_OK;

fail2:
	vmm_netport_unregister(lns.port);
fail1:
	vmm_netport_free(lns.port);
fail:
	return rc;
}