static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     entry->fields.dest_id, entry->fields.dest_mode,
		     entry->fields.delivery_mode, entry->fields.vector,
		     entry->fields.trig_mode);

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = entry->fields.dest_mode;
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = 0;

#ifdef CONFIG_X86
	/* Always deliver PIT interrupts (irq 0) to the BSP in physical mode. */
	if (irq == 0) {
		irqe.dest_mode = 0;
		/* need to read apic_id from the apic register since
		 * it can be rewritten */
		irqe.dest_id = ioapic->kvm->bsp_vcpu_id;
	}
#endif
	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
}
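/*
 * For reference, a sketch of the 3-bit delivery-mode encodings assumed by
 * these snippets (taken from the kernel's ioapic.h of this era, not from the
 * functions themselves). The "delivery_mode << 8" above shifts this field
 * into bits 10:8 of the local-APIC ICR-style format that
 * kvm_irq_delivery_to_apic() consumes.
 */
#define	IOAPIC_FIXED			0x0
#define	IOAPIC_LOWEST_PRIORITY		0x1
#define	IOAPIC_PMI			0x2
#define	IOAPIC_NMI			0x4
#define	IOAPIC_INIT			0x5
#define	IOAPIC_EXTINT			0x7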
static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     entry->fields.dest_id, entry->fields.dest_mode,
		     entry->fields.delivery_mode, entry->fields.vector,
		     entry->fields.trig_mode);

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = entry->fields.dest_mode;
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = 0;

	if (irq == RTC_GSI && line_status) {
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
					       ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

	return ret;
}
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		ioapic_debug("change redir index %x val %x\n", index, val);
		if (index >= IOAPIC_NUM_PINS)
			return;
		if (ioapic->ioregsel & 1) {
			ioapic->redirtbl[index].bits &= 0xffffffff;
			ioapic->redirtbl[index].bits |= (u64) val << 32;
		} else {
			ioapic->redirtbl[index].bits &= ~0xffffffffULL;
			ioapic->redirtbl[index].bits |= (u32) val;
			ioapic->redirtbl[index].fields.remote_irr = 0;
		}
		if (ioapic->irr & (1 << index))
			ioapic_service(ioapic, index);
		break;
	}
}
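/*
 * For orientation, a sketch of the redirection-table entry these functions
 * manipulate, following the layout in the kernel's arch/x86/kvm/ioapic.h of
 * this era (assumed here; the snippets themselves do not define it). The
 * 64-bit "bits" view and the "fields" view alias the same entry, which is
 * why the indirect-write path above can rewrite one 32-bit half at a time.
 * Each pin occupies two 32-bit indirect registers starting at 0x10, hence
 * index = (ioregsel - 0x10) >> 1.
 */
union kvm_ioapic_redirect_entry {
	u64 bits;
	struct {
		u8 vector;
		u8 delivery_mode:3;
		u8 dest_mode:1;
		u8 delivery_status:1;
		u8 polarity:1;
		u8 remote_irr:1;
		u8 trig_mode:1;
		u8 mask:1;
		u8 reserve:7;
		u8 reserved[4];
		u8 dest_id;
	} fields;
};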
static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     entry->fields.dest_id, entry->fields.dest_mode,
		     entry->fields.delivery_mode, entry->fields.vector,
		     entry->fields.trig_mode);

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = entry->fields.dest_mode;
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = 0;

#ifdef CONFIG_X86
	/* Always deliver PIT interrupts to vcpu 0 */
	if (irq == 0) {
		irqe.dest_mode = 0; /* Physical mode. */
		irqe.dest_id = ioapic->kvm->vcpus[0]->vcpu_id;
	}
#endif
	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
}
static void ioapic_inj_irq(struct kvm_ioapic *ioapic,
			   struct kvm_vcpu *vcpu,
			   u8 vector, u8 trig_mode, u8 delivery_mode)
{
	ioapic_debug("irq %d trig %d deliv %d\n", vector, trig_mode,
		     delivery_mode);

	ASSERT((delivery_mode == IOAPIC_FIXED) ||
	       (delivery_mode == IOAPIC_LOWEST_PRIORITY));

	kvm_apic_set_irq(vcpu, vector, trig_mode);
}
static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
				       u8 dest_mode)
{
	u32 mask = 0;
	int i;
	struct kvm *kvm = ioapic->kvm;
	struct kvm_vcpu *vcpu;

	ioapic_debug("dest %d dest_mode %d\n", dest, dest_mode);

	if (dest_mode == 0) {	/* Physical mode. */
		if (dest == 0xFF) {	/* Broadcast. */
			for (i = 0; i < KVM_MAX_VCPUS; ++i)
				if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
					mask |= 1 << i;
			return mask;
		}
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (!vcpu)
				continue;
			/* Check the APIC exists before matching against it. */
			if (vcpu->arch.apic &&
			    kvm_apic_match_physical_addr(vcpu->arch.apic,
							 dest)) {
				mask = 1 << i;
				break;
			}
		}
	} else if (dest != 0)	/* Logical mode, MDA non-zero. */
		for (i = 0; i < KVM_MAX_VCPUS; ++i) {
			vcpu = kvm->vcpus[i];
			if (!vcpu)
				continue;
			if (vcpu->arch.apic &&
			    kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
				mask |= 1 << vcpu->vcpu_id;
		}
	ioapic_debug("mask %x\n", mask);
	return mask;
}
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;

	if (entry->fields.mask)
		return -1;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     entry->fields.dest_id, entry->fields.dest_mode,
		     entry->fields.delivery_mode, entry->fields.vector,
		     entry->fields.trig_mode);

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = entry->fields.dest_mode;
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = 0;
	irqe.msi_redir_hint = false;

	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
		ioapic->irr_delivered |= 1 << irq;

	if (irq == RTC_GSI && line_status) {
		/*
		 * pending_eoi cannot ever become negative (see
		 * rtc_status_pending_eoi_check_valid) and the caller
		 * ensures that it is only called if it is >= zero
		 * (namely, if rtc_irq_check_coalesced returns false).
		 */
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
					       &ioapic->rtc_status.dest_map);
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;

	return ret;
}
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;
	union kvm_ioapic_redirect_entry *e;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		ioapic_debug("change redir index %x val %x\n", index, val);
		if (index >= IOAPIC_NUM_PINS)
			return;
		e = &ioapic->redirtbl[index];
		mask_before = e->fields.mask;
		if (ioapic->ioregsel & 1) {
			e->bits &= 0xffffffff;
			e->bits |= (u64) val << 32;
		} else {
			e->bits &= ~0xffffffffULL;
			e->bits |= (u32) val;
			e->fields.remote_irr = 0;
		}
		update_handled_vectors(ioapic);
		mask_after = e->fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm,
						KVM_IRQCHIP_IOAPIC,
						index, mask_after);
		if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
		    && ioapic->irr & (1 << index))
			ioapic_service(ioapic, index, false);
		kvm_vcpu_request_scan_ioapic(ioapic->kvm);
		break;
	}
}
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
	unsigned index;
	bool mask_before, mask_after;

	switch (ioapic->ioregsel) {
	case IOAPIC_REG_VERSION:
		/* Writes are ignored. */
		break;

	case IOAPIC_REG_APIC_ID:
		ioapic->id = (val >> 24) & 0xf;
		break;

	case IOAPIC_REG_ARB_ID:
		break;

	default:
		index = (ioapic->ioregsel - 0x10) >> 1;

		ioapic_debug("change redir index %x val %x\n", index, val);
		if (index >= IOAPIC_NUM_PINS)
			return;
		mask_before = ioapic->redirtbl[index].fields.mask;
		if (ioapic->ioregsel & 1) {
			ioapic->redirtbl[index].bits &= 0xffffffff;
			ioapic->redirtbl[index].bits |= (u64) val << 32;
		} else {
			ioapic->redirtbl[index].bits &= ~0xffffffffULL;
			ioapic->redirtbl[index].bits |= (u32) val;
			ioapic->redirtbl[index].fields.remote_irr = 0;
		}
		mask_after = ioapic->redirtbl[index].fields.mask;
		if (mask_before != mask_after)
			kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
		if (ioapic->redirtbl[index].fields.trig_mode == IOAPIC_LEVEL_TRIG
		    && ioapic->irr & (1 << index))
			ioapic_service(ioapic, index);
		break;
	}
}
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
	struct kvm_lapic_irq irqe;
	int ret;

	if (entry->fields.mask)
		return -1;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     entry->fields.dest_id, entry->fields.dest_mode,
		     entry->fields.delivery_mode, entry->fields.vector,
		     entry->fields.trig_mode);

	irqe.dest_id = entry->fields.dest_id;
	irqe.vector = entry->fields.vector;
	irqe.dest_mode = entry->fields.dest_mode;
	irqe.trig_mode = entry->fields.trig_mode;
	irqe.delivery_mode = entry->fields.delivery_mode << 8;
	irqe.level = 1;
	irqe.shorthand = 0;

	if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
		ioapic->irr &= ~(1 << irq);

	if (irq == RTC_GSI && line_status) {
		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
					       ioapic->rtc_status.dest_map);
		/*
		 * kvm_irq_delivery_to_apic() returns -1 when no vCPU was
		 * matched; never store a negative delivery count.
		 */
		ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
	} else
		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

	if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
		entry->fields.remote_irr = 1;

	return ret;
}
// The main function
int init(unsigned long magic, multiboot_info_t* hdr)
{
	setGDT();
	init_heap();
#ifdef SLAB
	slab_alloc_init();
#endif
	textInit();
	/**
	 * \todo Make complement_heap so that it allocates memory from pte
	 */
	complement_heap(&end, HEAPSIZE);
	addr_t tmp = (addr_t)hdr + offset;
	hdr = (multiboot_info_t*)tmp;

	if (magic != MULTIBOOT_BOOTLOADER_MAGIC) {
		printf("\nInvalid magic word: %X\n", magic);
		panic("");
	}
	if (hdr->flags & MULTIBOOT_INFO_MEMORY) {
		memsize = hdr->mem_upper;
		memsize += 1024;
	} else
		panic("No memory flags!");
	if (!(hdr->flags & MULTIBOOT_INFO_MEM_MAP))
		panic("Invalid memory map");
	mmap = (multiboot_memory_map_t*) hdr->mmap_addr;

	/** Build the memory map and allow for allocation */
	x86_pte_init();
	page_alloc_init(mmap, (unsigned int)hdr->mmap_length);
	vm_init();
#ifdef PA_DBG
	// endProg();
#endif

	/** In the process of being phased out */
	/** Set up paging administration */
	x86_page_init(memsize);
	mboot_page_setup(mmap, (uint32_t)hdr->mmap_length);
	mboot_map_modules((void*)hdr->mods_addr, hdr->mods_count);
	/** For now this is the temporary page table map */
	build_map(mmap, (unsigned int) hdr->mmap_length);
	/** End of deprecated code */

	task_init();
	page_init();
	printf(WELCOME); // The only screen output that should be maintained
	page_unmap_low_mem();
	pic_init();
	setIDT();
	setup_irq_data();
	if (dev_init() != -E_SUCCESS)
		panic("Couldn't initialise /dev");
	ol_pit_init(1024); // Program the PIT to 1024 Hz
	debug("Size of the heap: 0x%x\tStarting at: %x\n", HEAPSIZE, heap);
	acpi_init();

	ol_cpu_t cpu = kalloc(sizeof (*cpu));
	if (cpu == NULL)
		panic("OUT OF MEMORY!");
	ol_cpu_init(cpu);

	ol_ps2_init_keyboard();
	ol_apic_init(cpu);
	init_ioapic();
	ol_pci_init();
	debug("Little endian 0xf in net endian %x\n", htons(0xf));

#ifdef DBG
#ifdef __IOAPIC_DBG
	ioapic_debug();
#endif
#ifdef __MEMTEST
	ol_detach_all_devices(); /* frees all the PCI devices */
#endif
#ifdef __DBG_HEAP
	printf("Heap list:\n");
	ol_dbg_heap();
#endif
	printf("\nSome (temp) debug info:\n");
	printf("CPU vendor: %s\n", cpus->vendor);

	if (systables->magic == SYS_TABLE_MAGIC) {
		printf("RSDP ASCII signature: 0x%x%x\n",
		       *(((uint32_t*) systables->rsdp->signature) + 1),
		       *(((uint32_t*) systables->rsdp->signature)));
		printf("MP specification signature: 0x%x\n",
		       systables->mp->signature);
	}
#endif
#ifdef PA_DBG
	addr_t p = (addr_t)page_alloc();
	page_free((void*)p);
	printf("Allocated: %X\n", p);
	page_dump();
#endif
	core_loop();
	return 0; // To keep the compiler happy.
}
static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
{
	u8 dest = ioapic->redirtbl[irq].fields.dest_id;
	u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode;
	u8 delivery_mode = ioapic->redirtbl[irq].fields.delivery_mode;
	u8 vector = ioapic->redirtbl[irq].fields.vector;
	u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
	u32 deliver_bitmask;
	struct kvm_vcpu *vcpu;
	int vcpu_id;

	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
		     dest, dest_mode, delivery_mode, vector, trig_mode);

	deliver_bitmask = ioapic_get_delivery_bitmask(ioapic, dest, dest_mode);
	if (!deliver_bitmask) {
		ioapic_debug("no target on destination\n");
		return;
	}

	switch (delivery_mode) {
	case IOAPIC_LOWEST_PRIORITY:
		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
						deliver_bitmask);
#ifdef CONFIG_X86
		if (irq == 0)
			vcpu = ioapic->kvm->vcpus[0];
#endif
		if (vcpu != NULL)
			ioapic_inj_irq(ioapic, vcpu, vector,
				       trig_mode, delivery_mode);
		else
			ioapic_debug("null lowest prio vcpu: "
				     "mask=%x vector=%x delivery_mode=%x\n",
				     deliver_bitmask, vector,
				     IOAPIC_LOWEST_PRIORITY);
		break;
	case IOAPIC_FIXED:
#ifdef CONFIG_X86
		if (irq == 0)
			deliver_bitmask = 1;
#endif
		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
			if (!(deliver_bitmask & (1 << vcpu_id)))
				continue;
			deliver_bitmask &= ~(1 << vcpu_id);
			vcpu = ioapic->kvm->vcpus[vcpu_id];
			if (vcpu)
				ioapic_inj_irq(ioapic, vcpu, vector,
					       trig_mode, delivery_mode);
		}
		break;
	/* TODO: NMI */
	default:
		printk(KERN_WARNING "Unsupported delivery mode %d\n",
		       delivery_mode);
		break;
	}
}