static int cmd_host_cpu_info(struct vmm_chardev *cdev)
{
	int rc;
	u32 c, khz;
	unsigned long hwid;
	char name[25];

	vmm_cprintf(cdev, "%-25s: %s\n", "CPU Type", CONFIG_CPU);
	vmm_cprintf(cdev, "%-25s: %d\n", "CPU Present Count",
		    vmm_num_present_cpus());
	vmm_cprintf(cdev, "%-25s: %d\n", "CPU Possible Count",
		    vmm_num_possible_cpus());
	vmm_cprintf(cdev, "%-25s: %u\n", "CPU Online Count",
		    vmm_num_online_cpus());
	vmm_cprintf(cdev, "\n");

	for_each_online_cpu(c) {
		rc = vmm_smp_map_hwid(c, &hwid);
		if (rc) {
			return rc;
		}

		vmm_sprintf(name, "CPU%d Hardware ID", c);
		vmm_cprintf(cdev, "%-25s: 0x%lx\n", name, hwid);

		vmm_sprintf(name, "CPU%d Estimated Speed", c);
		khz = vmm_delay_estimate_cpu_khz(c);
		vmm_cprintf(cdev, "%-25s: %d.%03d MHz\n", name,
			    udiv32(khz, 1000), umod32(khz, 1000));
	}
	vmm_cprintf(cdev, "\n");

	arch_cpu_print_info(cdev);

	return VMM_OK;
}
static int __init process_acpi_sdt_table(char *tab_sign, u32 *tab_data)
{
	struct vmm_devtree_node *node = vmm_devtree_getnode(
					VMM_DEVTREE_PATH_SEPARATOR_STRING
					VMM_DEVTREE_MOTHERBOARD_NODE_NAME);
	/* FIXME: First find if tab_sign already exists. */
	struct vmm_devtree_node *cnode = vmm_devtree_addnode(node, tab_sign);

	vmm_devtree_dref_node(node);

	if (!cnode) {
		return VMM_EFAIL;
	}

	if (!strncmp(tab_sign, APIC_SIGNATURE, strlen(APIC_SIGNATURE))) {
		/* MADT: extract IOAPIC and LAPIC details */
		struct acpi_madt_hdr *madt_hdr;
		madt_hdr = (struct acpi_madt_hdr *)tab_data;
		if (acpi_populate_ioapic_devtree(madt_hdr, cnode) != VMM_OK)
			return VMM_EFAIL;

		if (acpi_populate_lapic_devtree(madt_hdr, cnode) != VMM_OK)
			return VMM_EFAIL;
	} else if (!strncmp(tab_sign, HPET_SIGNATURE, strlen(HPET_SIGNATURE))) {
		struct acpi_hpet hpet_chip, *hpet;
		int nr_hpet_blks, i;
		char hpet_nm[256];

		if (acpi_read_sdt_at(tab_data,
				     (struct acpi_sdt_hdr *)&hpet_chip,
				     sizeof(struct acpi_hpet),
				     HPET_SIGNATURE) < 0) {
			return VMM_EFAIL;
		}

		hpet = (struct acpi_hpet *)tab_data;

		nr_hpet_blks = (hpet->hdr.len - sizeof(struct acpi_sdt_hdr))
			       / sizeof(struct acpi_timer_blocks);

		vmm_devtree_setattr(cnode, VMM_DEVTREE_NR_HPET_ATTR_NAME,
				    &nr_hpet_blks,
				    VMM_DEVTREE_ATTRTYPE_UINT32,
				    sizeof(nr_hpet_blks), FALSE);

		/* Add one child node per HPET timer block */
		for (i = 0; i < nr_hpet_blks; i++) {
			memset(hpet_nm, 0, sizeof(hpet_nm));
			vmm_sprintf(hpet_nm, VMM_DEVTREE_HPET_NODE_FMT, i);

			struct vmm_devtree_node *nnode =
					vmm_devtree_addnode(cnode, hpet_nm);
			BUG_ON(nnode == NULL);

			if (vmm_devtree_setattr(nnode,
					VMM_DEVTREE_HPET_ID_ATTR_NAME,
					&hpet->tmr_blks[i].asid,
					VMM_DEVTREE_ATTRTYPE_UINT32,
					sizeof(hpet->tmr_blks[i].asid),
					FALSE) != VMM_OK) {
				return VMM_EFAIL;
			}

			if (vmm_devtree_setattr(nnode,
					VMM_DEVTREE_HPET_PADDR_ATTR_NAME,
					&hpet->tmr_blks[i].base,
					VMM_DEVTREE_ATTRTYPE_PHYSADDR,
					sizeof(physical_addr_t),
					FALSE) != VMM_OK) {
				return VMM_EFAIL;
			}
		}
	}

	return VMM_OK;
}
int __cpuinit twd_clockchip_init(virtual_addr_t base,
				 virtual_addr_t ref_counter_addr,
				 u32 ref_counter_freq,
				 u32 ppi_hirq)
{
	int rc;
	u32 cpu = vmm_smp_processor_id();
	struct twd_clockchip *cc = &this_cpu(twd_cc);

	memset(cc, 0, sizeof(struct twd_clockchip));

	twd_caliberate_freq(base, ref_counter_addr, ref_counter_freq);

	vmm_sprintf(cc->name, "twd/%d", cpu);

	cc->base = base;
	cc->clkchip.name = cc->name;
	cc->clkchip.hirq = ppi_hirq;
	cc->clkchip.rating = 350;
	cc->clkchip.cpumask = vmm_cpumask_of(cpu);
	cc->clkchip.features =
		VMM_CLOCKCHIP_FEAT_PERIODIC | VMM_CLOCKCHIP_FEAT_ONESHOT;
	cc->clkchip.shift = 20;
	cc->clkchip.mult = vmm_clockchip_hz2mult(twd_freq_hz,
						 cc->clkchip.shift);
	cc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(0xF, &cc->clkchip);
	cc->clkchip.max_delta_ns =
		vmm_clockchip_delta2ns(0xFFFFFFFF, &cc->clkchip);
	cc->clkchip.set_mode = &twd_clockchip_set_mode;
	cc->clkchip.set_next_event = &twd_clockchip_set_next_event;
	cc->clkchip.expire = &twd_clockchip_expire;
	cc->clkchip.priv = cc;

	if (!cpu) {
		/* Register interrupt handler */
		if ((rc = vmm_host_irq_register(ppi_hirq, "twd",
						&twd_clockchip_irq_handler,
						cc))) {
			return rc;
		}

		/* Mark interrupt as per-cpu */
		if ((rc = vmm_host_irq_mark_per_cpu(ppi_hirq))) {
			return rc;
		}
	}

	/* Explicitly enable local timer PPI in GIC
	 * Note: Local timer requires PPI support hence requires GIC
	 */
	gic_enable_ppi(ppi_hirq);

	return vmm_clockchip_register(&cc->clkchip);
}
/* Look up a kernel symbol and return it in a text buffer. */
static __notrace int __sprint_symbol(char *buffer, unsigned long address,
				     int symbol_offset)
{
	const char *name;
	unsigned long offset, size;
	int len;

	address += symbol_offset;
	name = kallsyms_lookup(address, &size, &offset, buffer);
	if (!name)
		return vmm_sprintf(buffer, "0x%lx", address);

	if (name != buffer)
		strcpy(buffer, name);
	len = strlen(buffer);
	buffer += len;
	offset -= symbol_offset;

	len += vmm_sprintf(buffer, "+%#lx/%#lx", offset, size);

	return len;
}
static void cmd_host_cpu_info(struct vmm_chardev *cdev)
{
	u32 c, khz;
	char name[25];

	vmm_cprintf(cdev, "%-25s: %s\n", "CPU Type", CONFIG_CPU);
	vmm_cprintf(cdev, "%-25s: %d\n", "CPU Present Count",
		    vmm_num_present_cpus());
	vmm_cprintf(cdev, "%-25s: %d\n", "CPU Possible Count",
		    vmm_num_possible_cpus());
	vmm_cprintf(cdev, "%-25s: %u\n", "CPU Online Count",
		    vmm_num_online_cpus());

	for_each_online_cpu(c) {
		khz = vmm_delay_estimate_cpu_khz(c);
		vmm_sprintf(name, "CPU%d Speed", c);
		/* Zero-pad the kHz remainder so e.g. 3050 kHz prints
		 * as 3.050 MHz and not 3.50 MHz
		 */
		vmm_cprintf(cdev, "%-25s: %d.%03d MHz (Estimated)\n", name,
			    udiv32(khz, 1000), umod32(khz, 1000));
	}

	arch_cpu_print_info(cdev);
}
static int __init acpi_populate_lapic_devtree(struct acpi_madt_hdr *madt_hdr,
					      struct vmm_devtree_node *cnode)
{
	unsigned int idx = 0;
	int ret = VMM_OK;
	struct acpi_madt_lapic *lapic;
	char lapic_nm[256];

	for (;;) {
		lapic = (struct acpi_madt_lapic *)
			acpi_madt_get_typed_item(madt_hdr,
						 ACPI_MADT_TYPE_LAPIC, idx);
		if (!lapic)
			break;

		memset(lapic_nm, 0, sizeof(lapic_nm));
		vmm_sprintf(lapic_nm, VMM_DEVTREE_LAPIC_PCPU_NODE_FMT, idx);

		struct vmm_devtree_node *nnode =
				vmm_devtree_addnode(cnode, lapic_nm);
		if (!nnode) {
			ret = VMM_EFAIL;
			break;
		}

		if (vmm_devtree_setattr(nnode,
					VMM_DEVTREE_LAPIC_CPU_ID_ATTR_NAME,
					&lapic->acpi_cpu_id,
					VMM_DEVTREE_ATTRTYPE_UINT32,
					sizeof(lapic->acpi_cpu_id),
					FALSE) != VMM_OK) {
			ret = VMM_EFAIL;
			break;
		}

		if (vmm_devtree_setattr(nnode,
					VMM_DEVTREE_LAPIC_LAPIC_ID_ATTR_NAME,
					&lapic->apic_id,
					VMM_DEVTREE_ATTRTYPE_UINT32,
					sizeof(lapic->apic_id),
					FALSE) != VMM_OK) {
			ret = VMM_EFAIL;
			break;
		}

		idx++;
	}

	/* Record how many LAPIC nodes were added */
	vmm_devtree_setattr(cnode, VMM_DEVTREE_NR_LAPIC_ATTR_NAME,
			    &idx, VMM_DEVTREE_ATTRTYPE_UINT32,
			    sizeof(idx), FALSE);

	return ret;
}
static int __init acpi_populate_ioapic_devtree(struct acpi_madt_hdr *madt_hdr,
					       struct vmm_devtree_node *cnode)
{
	unsigned int idx = 0;
	int ret = VMM_OK;
	struct acpi_madt_ioapic *ioapic;
	char ioapic_nm[256];

	for (;;) {
		ioapic = (struct acpi_madt_ioapic *)
			 acpi_madt_get_typed_item(madt_hdr,
						  ACPI_MADT_TYPE_IOAPIC, idx);
		if (!ioapic)
			break;

		memset(ioapic_nm, 0, sizeof(ioapic_nm));
		vmm_sprintf(ioapic_nm, VMM_DEVTREE_IOAPIC_NODE_FMT, idx);

		struct vmm_devtree_node *nnode =
				vmm_devtree_addnode(cnode, ioapic_nm);
		if (!nnode) {
			ret = VMM_EFAIL;
			break;
		}

		if (vmm_devtree_setattr(nnode,
					VMM_DEVTREE_IOAPIC_PADDR_ATTR_NAME,
					&ioapic->address,
					VMM_DEVTREE_ATTRTYPE_PHYSADDR,
					sizeof(physical_addr_t),
					FALSE) != VMM_OK) {
			ret = VMM_EFAIL;
			break;
		}

		if (vmm_devtree_setattr(nnode,
					VMM_DEVTREE_IOAPIC_GINT_BASE_ATTR_NAME,
					&ioapic->global_int_base,
					VMM_DEVTREE_ATTRTYPE_UINT32,
					sizeof(ioapic->global_int_base),
					FALSE) != VMM_OK) {
			ret = VMM_EFAIL;
			break;
		}

		idx++;
	}

	/* Record how many IOAPIC nodes were added */
	vmm_devtree_setattr(cnode, VMM_DEVTREE_NR_IOAPIC_ATTR_NAME,
			    &idx, VMM_DEVTREE_ATTRTYPE_UINT32,
			    sizeof(idx), FALSE);

	return ret;
}
static int __cpuinit twd_clockchip_init(struct vmm_devtree_node *node)
{
	int rc;
	u32 ref_cnt_freq;
	virtual_addr_t ref_cnt_addr;
	u32 cpu = vmm_smp_processor_id();
	struct twd_clockchip *cc = &this_cpu(twd_cc);

	if (!twd_base) {
		rc = vmm_devtree_regmap(node, &twd_base, 0);
		if (rc) {
			goto fail;
		}
	}

	if (!twd_ppi_irq) {
		rc = vmm_devtree_irq_get(node, &twd_ppi_irq, 0);
		if (rc) {
			goto fail_regunmap;
		}
	}

	if (!twd_freq_hz) {
		/* First try to find TWD clock */
		if (!twd_clk) {
			twd_clk = of_clk_get(node, 0);
		}
		if (!twd_clk) {
			twd_clk = clk_get_sys("smp_twd", NULL);
		}

		if (twd_clk) {
			/* Use TWD clock to find frequency */
			rc = clk_prepare_enable(twd_clk);
			if (rc) {
				clk_put(twd_clk);
				goto fail_regunmap;
			}
			twd_freq_hz = clk_get_rate(twd_clk);
		} else {
			/* No TWD clock found hence caliberate */
			rc = vmm_devtree_regmap(node, &ref_cnt_addr, 1);
			if (rc) {
				/* Mapping failed so there is nothing
				 * to unmap here
				 */
				goto fail_regunmap;
			}
			if (vmm_devtree_read_u32(node, "ref-counter-freq",
						 &ref_cnt_freq)) {
				vmm_devtree_regunmap(node, ref_cnt_addr, 1);
				goto fail_regunmap;
			}
			twd_caliberate_freq(twd_base,
					    ref_cnt_addr, ref_cnt_freq);
			vmm_devtree_regunmap(node, ref_cnt_addr, 1);
		}
	}

	memset(cc, 0, sizeof(struct twd_clockchip));

	vmm_sprintf(cc->name, "twd/%d", cpu);

	cc->clkchip.name = cc->name;
	cc->clkchip.hirq = twd_ppi_irq;
	cc->clkchip.rating = 350;
	cc->clkchip.cpumask = vmm_cpumask_of(cpu);
	cc->clkchip.features =
		VMM_CLOCKCHIP_FEAT_PERIODIC | VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&cc->clkchip.mult, &cc->clkchip.shift,
				   VMM_NSEC_PER_SEC, twd_freq_hz, 10);
	cc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(0xF, &cc->clkchip);
	cc->clkchip.max_delta_ns =
		vmm_clockchip_delta2ns(0xFFFFFFFF, &cc->clkchip);
	cc->clkchip.set_mode = &twd_clockchip_set_mode;
	cc->clkchip.set_next_event = &twd_clockchip_set_next_event;
	cc->clkchip.priv = cc;

	if (vmm_smp_is_bootcpu()) {
		/* Register interrupt handler */
		if ((rc = vmm_host_irq_register(twd_ppi_irq, "twd",
						&twd_clockchip_irq_handler,
						cc))) {
			goto fail_regunmap;
		}

		/* Mark interrupt as per-cpu */
		if ((rc = vmm_host_irq_mark_per_cpu(twd_ppi_irq))) {
			goto fail_unreg_irq;
		}
	}

	/* Explicitly enable local timer PPI in GIC
	 * Note: Local timer requires PPI support hence requires GIC
	 */
	gic_enable_ppi(twd_ppi_irq);

	rc = vmm_clockchip_register(&cc->clkchip);
	if (rc) {
		goto fail_unreg_irq;
	}

	return VMM_OK;

fail_unreg_irq:
	if (vmm_smp_is_bootcpu()) {
		vmm_host_irq_unregister(twd_ppi_irq, cc);
	}
fail_regunmap:
	vmm_devtree_regunmap(node, twd_base, 0);
fail:
	return rc;
}
int __cpuinit twd_clockchip_init(virtual_addr_t ref_counter_addr,
				 u32 ref_counter_freq)
{
	int rc;
	u32 cpu = vmm_smp_processor_id();
	struct vmm_devtree_node *node;
	struct twd_clockchip *cc = &this_cpu(twd_cc);

	node = vmm_devtree_find_matching(NULL, twd_match);
	if (!node) {
		return VMM_ENODEV;
	}

	if (!twd_base) {
		rc = vmm_devtree_regmap(node, &twd_base, 0);
		if (rc) {
			return rc;
		}
	}

	if (!twd_ppi_irq) {
		rc = vmm_devtree_irq_get(node, &twd_ppi_irq, 0);
		if (rc) {
			return rc;
		}
	}

	twd_caliberate_freq(twd_base, ref_counter_addr, ref_counter_freq);

	memset(cc, 0, sizeof(struct twd_clockchip));

	vmm_sprintf(cc->name, "twd/%d", cpu);

	cc->clkchip.name = cc->name;
	cc->clkchip.hirq = twd_ppi_irq;
	cc->clkchip.rating = 350;
	cc->clkchip.cpumask = vmm_cpumask_of(cpu);
	cc->clkchip.features =
		VMM_CLOCKCHIP_FEAT_PERIODIC | VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&cc->clkchip.mult, &cc->clkchip.shift,
				   VMM_NSEC_PER_SEC, twd_freq_hz, 10);
	cc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(0xF, &cc->clkchip);
	cc->clkchip.max_delta_ns =
		vmm_clockchip_delta2ns(0xFFFFFFFF, &cc->clkchip);
	cc->clkchip.set_mode = &twd_clockchip_set_mode;
	cc->clkchip.set_next_event = &twd_clockchip_set_next_event;
	cc->clkchip.priv = cc;

	if (!cpu) {
		/* Register interrupt handler */
		if ((rc = vmm_host_irq_register(twd_ppi_irq, "twd",
						&twd_clockchip_irq_handler,
						cc))) {
			return rc;
		}

		/* Mark interrupt as per-cpu */
		if ((rc = vmm_host_irq_mark_per_cpu(twd_ppi_irq))) {
			return rc;
		}
	}

	/* Explicitly enable local timer PPI in GIC
	 * Note: Local timer requires PPI support hence requires GIC
	 */
	gic_enable_ppi(twd_ppi_irq);

	return vmm_clockchip_register(&cc->clkchip);
}
int uip_netport_init(void)
{
	struct vmm_netswitch *nsw;
	struct uip_port_state *s = &uip_port_state;
	struct uip_fw_netif *netif;
	uip_ipaddr_t ipaddr;
	char tname[64];

	uip_buf = vmm_malloc(UIP_BUFSIZE + 2);
	if (!uip_buf) {
		vmm_panic("%s: uip_buf alloc failed\n", __func__);
	}

	INIT_SPIN_LOCK(&s->lock);
	INIT_LIST_HEAD(&s->rxbuf);
	INIT_COMPLETION(&s->rx_possible);

	/* Get the first netswitch */
	nsw = vmm_netswitch_get(0);
	if (!nsw) {
		vmm_panic("No netswitch found\n");
	}

	/* Create a port-name */
	vmm_sprintf(tname, "%s-uip", nsw->name);

	/* Allocate a netport for this netswitch */
	s->port = vmm_netport_alloc(tname);
	if (!s->port) {
		vmm_printf("UIP->netport alloc failed\n");
		return VMM_EFAIL;
	}

	/* Allocate a uip_fw_netif */
	netif = vmm_malloc(sizeof(struct uip_fw_netif));
	if (!netif) {
		vmm_printf("UIP->netif alloc failed\n");
		return VMM_EFAIL;
	}

	/* Register the netport */
	s->port->mtu = UIP_BUFSIZE;
	s->port->link_changed = uip_set_link;
	s->port->can_receive = uip_can_receive;
	s->port->switch2port_xfer = uip_switch2port_xfer;
	s->port->priv = s;
	s->netif = netif;
	vmm_netport_register(s->port);

	/* Attach with the netswitch */
	vmm_netswitch_port_add(nsw, s->port);

	/* Notify our ethernet address */
	uip_setethaddr(((struct uip_eth_addr *)(s->port->macaddr)));

	/* Generate an IP address */
	uip_ipaddr(ipaddr, 192, 168, 0, 1);
	uip_fw_setipaddr(netif, ipaddr);
	uip_ipaddr(ipaddr, 255, 255, 255, 0);
	uip_fw_setnetmask(netif, ipaddr);

	/* Register the netif with uip stack */
	netif->output = &uip_netport_output;
	netif->priv = s;
	uip_fw_register(netif);

	/* Set this interface as default one */
	uip_fw_default(netif);

	return 0;
}