static void restart()
{
  int r;

  DPRINTF("Restarting from checkpoint.\n");
  if (g_kvm_fd > 0 && g_vm_fd > 0 && g_vcpu_fd > 0) {
    r = create_vm();
    if (r < 0) {
      DPRINTF("ERROR: Creating VMFD returned: %d\n", r);
      DPRINTF("WARNING: Please try checkpointing again\n");
      exit(-1);
    } else if (r > 0) {
      int i = 0;

      r = restore_id_map_addr();
      if (r < 0) {
        DPRINTF("ERROR: Restoring identity map addr returned: %d\n", r);
        DPRINTF("WARNING: Please try checkpointing again\n");
        exit(-1);
      }

      r = restore_tss_addr();
      if (r < 0) {
        DPRINTF("ERROR: Restoring tss addr returned: %d\n", r);
        DPRINTF("WARNING: Please try checkpointing again\n");
        exit(-1);
      }

      r = create_irqchip();
      if (r < 0) {
        DPRINTF("ERROR: Creating IRQCHIP returned: %d\n", r);
        DPRINTF("WARNING: Please try checkpointing again\n");
        exit(-1);
      }

      r = create_vcpu();
      if (r < 0) {
        DPRINTF("ERROR: Creating new VCPU returned: %d\n", r);
        DPRINTF("WARNING: Cannot continue\n");
        exit(-1);
      }

      /* Map the new VCPU fd at the address and length recorded before the
       * checkpoint, so existing pointers into the kvm_run area stay valid. */
      if (NEXT_FNC(mmap)(g_vcpu_mmap_addr, g_vcpu_mmap_length,
                         g_vcpu_mmap_prot, g_vcpu_mmap_flags | MAP_FIXED,
                         g_vcpu_fd, 0) == MAP_FAILED) {
        DPRINTF("ERROR: Mapping the new VCPU returned MAP_FAILED\n");
        DPRINTF("WARNING: Cannot continue\n");
        exit(-1);
      }

      r = NEXT_FNC(ioctl)(g_vcpu_fd, KVM_SET_SIGNAL_MASK, &g_kvm_sigmask);
      if (r < 0) {
        DPRINTF("ERROR: Setting VCPU Signal Mask returned: %d\n", r);
        exit(-1);
      }

      r = NEXT_FNC(ioctl)(g_vm_fd, KVM_IRQ_LINE_STATUS, &g_kvm_irq_level);
      if (r < 0) {
        DPRINTF("ERROR: Setting IRQ LINE status returned: %d\n", r);
        exit(-1);
      }

      r = NEXT_FNC(ioctl)(g_vm_fd, KVM_REGISTER_COALESCED_MMIO,
                          &g_kvm_coalesced_mmio_zone);
      if (r < 0) {
        DPRINTF("ERROR: Setting Coalesced MMIO Zone returned: %d\n", r);
        exit(-1);
      }

      /* Re-register every guest memory slot recorded before the checkpoint. */
      DPRINTF("Setting #%d memory regions\n", g_num_of_memory_regions);
      struct kvm_userspace_memory_region *mem;
      for (i = 0; i < g_num_of_memory_regions; i++) {
        mem = &g_kvm_mem_region[i];
        DPRINTF("slot:%X, flags:%X, start:%llX, size:%llX, ram:%llX\n",
                mem->slot, mem->flags, mem->guest_phys_addr,
                mem->memory_size, mem->userspace_addr);
        r = NEXT_FNC(ioctl)(g_vm_fd, KVM_SET_USER_MEMORY_REGION,
                            &g_kvm_mem_region[i]);
        if (r < 0) {
          DPRINTF("ERROR: Creating memory region #%d returned: %d\n", i, r);
          perror("ioctl(KVM_SET_USER_MEMORY_REGION)");
        }
      }

      /* See note in the ioctl() wrapper. */
      DPRINTF("Setting routing tables. ptr: %p...\n",
              g_kvm_gsi_routing_table);
      r = NEXT_FNC(ioctl)(g_vm_fd, KVM_SET_GSI_ROUTING,
                          g_kvm_gsi_routing_table);
      if (r < 0) {
        DPRINTF("ERROR: Setting routing table (#routes=%d) returned: %d\n",
                g_kvm_gsi_routing_table->nr, r);
      }

      r = create_pit2();
      if (r < 0) {
        DPRINTF("Creating PIT2 returned: %d\n", r);
      }

      r = restore_pit2();
      if (r < 0) {
        DPRINTF("ERROR: Restoring PIT2 returned: %d\n", r);
        DPRINTF("WARNING: Cannot continue\n");
        exit(-1);
      }

      /* Pull the listed IRQ lines low before restoring the IRQCHIP state. */
      int array[] = { 0, 1, 4, 8, 12 };
      g_kvm_irq_level.level = 0;
      for (i = 0; i < 5; i++) {
        g_kvm_irq_level.irq = array[i];
        r = NEXT_FNC(ioctl)(g_vm_fd, KVM_IRQ_LINE_STATUS, &g_kvm_irq_level);
        if (r < 0) {
          DPRINTF("ERROR: Resetting IRQ#%d LINE returned: %d\n",
                  g_kvm_irq_level.irq, r);
          exit(-1);
        }
      }

      r = restore_irqchip();
      if (r < 0) {
        DPRINTF("ERROR: Restoring IRQCHIP returned: %d\n", r);
        DPRINTF("WARNING: Cannot continue\n");
        exit(-1);
      }

      r = NEXT_FNC(ioctl)(g_vcpu_fd, KVM_TPR_ACCESS_REPORTING,
                          &g_kvm_tpr_access_ctl);
      if (r < 0) {
        DPRINTF("ERROR: Restoring the tpr access reporting returned: %d\n", r);
        DPRINTF("WARNING: Cannot continue\n");
        exit(-1);
      }

      r = NEXT_FNC(ioctl)(g_vcpu_fd, KVM_SET_VAPIC_ADDR, &g_kvm_vapic_addr);
      if (r < 0) {
        DPRINTF("ERROR: Restoring the vapic addr returned: %d\n", r);
        DPRINTF("WARNING: Cannot continue\n");
        exit(-1);
      }

      r = restore_registers();
      if (r < 0) {
        DPRINTF("ERROR: Restoring the registers returned: %d\n", r);
        DPRINTF("WARNING: Cannot continue\n");
        exit(-1);
      }
    }
  }
}

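/*
 * Illustrative sketch only, not part of the original plugin: one way the
 * checkpoint side could record KVM_SET_USER_MEMORY_REGION calls so that
 * restart() above can replay them into the re-created VM.  Only the globals
 * g_kvm_mem_region[] and g_num_of_memory_regions and the NEXT_FNC macro are
 * taken from the code above; the wrapper below, its bounds check, and
 * KVM_MAX_MEM_REGIONS are assumptions made for illustration.
 */
#include <stdarg.h>
#include <linux/kvm.h>

#define KVM_MAX_MEM_REGIONS 32   /* assumed capacity of the saved table */

extern struct kvm_userspace_memory_region g_kvm_mem_region[KVM_MAX_MEM_REGIONS];
extern int g_num_of_memory_regions;

int ioctl(int fd, unsigned long request, ...)
{
  va_list ap;
  void *arg;

  /* ioctl() is variadic; fetch the single argument pointer. */
  va_start(ap, request);
  arg = va_arg(ap, void *);
  va_end(ap);

  /* Remember each memory slot the VMM registers, so restart() can replay it. */
  if (request == KVM_SET_USER_MEMORY_REGION &&
      g_num_of_memory_regions < KVM_MAX_MEM_REGIONS) {
    g_kvm_mem_region[g_num_of_memory_regions++] =
      *(struct kvm_userspace_memory_region *)arg;
  }

  /* Forward the call to the real ioctl(). */
  return NEXT_FNC(ioctl)(fd, request, arg);
}
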
/** Create and initialize the structure of the VM abstraction */
vm_t *create_vm(uint32_t vm[][VMCONF_NUMCOLUNS]){
    static uint32_t vm_id = 1;     /* vm_id is the guest id */
    static uint32_t tlbindex = 0;  /* unique TLB entry index */
    vm_t *ret;
    ll_node_t *nd;
    uint32_t i;
    vcpu_t *vcpu;

    /* number of fixed TLB entries */
    uint32_t ntlbent = vm[0][2];

    /* allocate the list node and the vm_t descriptor in a single block */
    if(!(nd = (ll_node_t*)calloc(1, sizeof(ll_node_t) + sizeof(vm_t))))
        return NULL;

    ret = (vm_t*)((char*)nd + sizeof(ll_node_t));

    /* Memory map */
    ret->base_addr = vm[0][0];
    ret->size = vm[0][1];
    ret->id = vm_id++;
    ret->os_type = vm[0][3];
    ret->ntlbent = ntlbent;
    ret->init = 1;
    ret->tlbentries = NULL;

#ifdef STATICTLB
    /* allocate the fixed TLB entries of the VM; the idle VCPU owns none */
    if(ret->os_type != IDLEVCPU){
        ret->tlbentries = (struct tlbentry *)calloc(ntlbent, sizeof(struct tlbentry));
    }else{
        ret->tlbentries = NULL;
        ret->id = 0;
    }

    /* fill the TLB entries of the VM */
    for(i=0; i<ntlbent; i++, tlbindex++){
        ret->tlbentries[i].guestid = vm[i+1][0];
        ret->tlbentries[i].index = tlbindex;
        ret->tlbentries[i].entrylo0 = vm[i+1][1];
        if(ret->tlbentries[i].entrylo0)
            ret->tlbentries[i].lo0flags = ENTRYLO_V | ENTRYLO_D;
        ret->tlbentries[i].entrylo1 = vm[i+1][2];
        if(ret->tlbentries[i].entrylo1)
            ret->tlbentries[i].lo1flags = ENTRYLO_V | ENTRYLO_D;
        ret->tlbentries[i].pagemask = vm[i+1][3];
        ret->tlbentries[i].entryhi = vm[i+1][4];
        ret->tlbentries[i].coherency = vm[i+1][5];
        ret->tlbentries[i].onhardware = 0;
    }
#else
    /* allocate the guest memory map of the VM; the idle VCPU owns none */
    if(ret->os_type != IDLEVCPU){
        ret->vmmap = (memVMMap_t *)calloc(ntlbent, sizeof(memVMMap_t));
    }else{
        ret->vmmap = NULL;
        ret->id = 0;
    }

    /* fill the memory map entries of the VM */
    for(i=0; i<ntlbent; i++, tlbindex++){
        ret->vmmap[i].phyGuestBase = vm[i+1][1];
        ret->vmmap[i].vGuestBase = vm[i+1][4];
        ret->vmmap[i].size = vm[i+1][2];
        ret->vmmap[i].coherency = vm[i+1][5];
    }
#endif

    /* Set the VM entry point and scheduler */
    switch(ret->os_type){
        case BAREOS:
            vcpu = create_vcpu(ret, vm[0][5], 0, 0, vm[0][4], BAREOS);
            addVcpu_bestEffortList(vcpu);
            ll_append(&virtualmachines, nd);
            break;
        case GENERIC:
            vcpu = create_vcpu(ret, 0x80000000, 0, 0, vm[0][4], GENERIC);
            addVcpu_bestEffortList(vcpu);
            ll_append(&virtualmachines, nd);
            break;
        case HELLFIRE:
            vcpu = create_vcpu(ret, vm[0][5], 0, 0, vm[0][4], HELLFIRE);
            addVcpu_bestEffortList(vcpu);
            ll_append(&virtualmachines, nd);
            break;
        case BAREOS_RT:
            vcpu = create_vcpu(ret, 0x801000f4, 0, 0, vm[0][4], BAREOS_RT);
            addVcpu_servicesInitList(vcpu);
            ll_append(&virtualmachines_rt, nd);
            break;
        case LINUX:
            vcpu = create_vcpu(ret, vm[0][5], 0, 0, vm[0][4], LINUX);
            addVcpu_bestEffortList(vcpu);
            ll_append(&virtualmachines, nd);
            break;
        case IDLEVCPU:
            vcpu = create_vcpu(ret, vm[0][5], 0, 0, vm[0][4], IDLEVCPU);
            idle_vcpu = vcpu;
            ll_append(&virtualmachines, nd);
            break;
        default:
            Warning("OS type 0x%x not supported!\n", ret->os_type);
            break;
    }

    nd->ptr = ret;

    return ret;
}
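
/*
 * Illustrative sketch only, not part of the original source: the shape of the
 * configuration table that create_vm() indexes, reconstructed from the
 * accesses above.  Row 0 describes the VM itself (base address, size, number
 * of fixed mappings, OS type, the value forwarded as create_vcpu()'s fifth
 * argument, and the entry point); each following row describes one fixed
 * mapping, labeled here as in the STATICTLB branch.  The concrete addresses
 * and sizes are made-up placeholders, and VMCONF_NUMCOLUNS is assumed to be
 * at least 6.
 */
uint32_t vmconf_example[][VMCONF_NUMCOLUNS] = {
    /* base_addr    size        ntlbent  os_type  vcpu_arg  entry_point */
    {  0x80100000,  0x00100000, 1,       BAREOS,  0,        0x80100000 },
    /* guestid      entrylo0    entrylo1 pagemask entryhi      coherency */
    {  1,           0x00100000, 0,       0,       0x80100000,  0 },
};

/* Example call: vm_t *guest = create_vm(vmconf_example); */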