/*
 * isr_init - populate the IDT and bring up interrupt handling.
 *
 * Order: CPU fault vectors first, then a catch-all entry for every
 * remaining vector, then hardware IRQs and the syscall gate.
 */
void isr_init() {
    int vec;

    fault_init();

    /* Point every vector from ISR_IRQ0 up to the table size at
     * isr_unknown, so an unexpected interrupt is handled as a fault
     * instead of jumping through an uninitialized gate. */
    for (vec = ISR_IRQ0; vec < NIDT; vec++) {
        idt_install(vec, (uint32_t)isr_unknown, SEL_KCODE << 3,
                    GATE_INT, IDT_PR | IDT_DPL_KERN);
    }

    irq_init();
    sys_init();
}
/*
 * paging_init() sets up the page tables
 *
 * Boot-time MMU initialization for s390: chooses the kernel address
 * space control element (ASCE) format, clears the top-level table,
 * loads the ASCE into the control registers, and hands the zone
 * layout to the core memory-management init.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;

	init_mm.pgd = swapper_pg_dir;
#ifdef CONFIG_64BIT
	/* Pick the region-table depth based on how far the vmalloc
	 * area reaches: beyond 4 TiB (1UL << 42) a deeper region-2
	 * table is required, otherwise region-3 suffices. */
	if (VMALLOC_END > (1UL << 42)) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
#else
	/* 31-bit mode: segment-table only, no region type bits. */
	asce_bits = _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	/* Fill the top-level table with empty entries of the chosen
	 * type before the hardware is pointed at it. */
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	/* NOTE(review): CR1/CR7/CR13 are loaded with the same kernel
	 * ASCE here — presumably primary/secondary/home space; confirm
	 * against the architecture manual before changing. */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	/* Restore an interrupt-enable mask; the magic constant is a
	 * PSW mask bit pattern — see arch_local_irq_restore(). */
	arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));

	/* init_mm is now attached to this (boot) CPU. */
	atomic_set(&init_mm.context.attach_count, 1);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/* Describe the zone layout: DMA below MAX_DMA_ADDRESS,
	 * everything else NORMAL up to max_low_pfn. */
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);

	fault_init();
}
/*
 * vm_create - allocate and wire up the kernel objects backing a VM.
 *
 * Builds, in order: the VM's CSpace (with a self-referencing root
 * slot), its VSpace (page directory + ASID pool assignment), a badged
 * copy of the VMM's endpoint for fault IPC, the VM's TCB, and finally
 * an ARM VCPU bound to that TCB.
 *
 * @name          human-readable VM name (pointer stored, not copied)
 * @priority      VMM priority; the VM thread runs at priority - 1
 * @vmm_endpoint  endpoint cap to badge and install as the fault EP
 * @vm_badge      badge stamped onto the fault endpoint copy
 * @vka/@simple/@vmm_vspace/@io_ops  allocator/bootinfo/vspace/IO hooks
 * @vm            out: structure populated with the created objects
 *
 * Returns the last recorded error code (0 on success).
 *
 * NOTE(review): every failure path here is an assert(); with NDEBUG
 * these checks vanish and errors are silently ignored. Consider real
 * error propagation with cleanup if this must survive release builds.
 */
int vm_create(const char* name, int priority, seL4_CPtr vmm_endpoint,
              seL4_Word vm_badge, vka_t *vka, simple_t *simple,
              vspace_t *vmm_vspace, ps_io_ops_t* io_ops, vm_t* vm)
{
    seL4_CapData_t null_cap_data = {{0}};
    seL4_CapData_t cspace_root_data;
    cspacepath_t src, dst;
    int err;

    /* Record bookkeeping state before any allocation. */
    vm->name = name;
    vm->ndevices = 0;
    vm->onode_head = NULL;
    vm->entry_point = NULL;
    vm->vka = vka;
    vm->simple = simple;
    vm->vmm_vspace = vmm_vspace;
    vm->io_ops = io_ops;
    vm->vchan_num_cons = 0;
    vm->vchan_cons = NULL;

    /* Create a cspace */
    err = vka_alloc_cnode_object(vka, VM_CSPACE_SIZE_BITS, &vm->cspace);
    assert(!err);
    vka_cspace_make_path(vka, vm->cspace.cptr, &src);
    /* Guard skips the unused upper bits so the VM addresses its own
     * small CSpace with short cap pointers. */
    cspace_root_data = seL4_CapData_Guard_new(0, 32 - VM_CSPACE_SIZE_BITS);
    /* Mint the CNode into itself at VM_CSPACE_SLOT so the VM holds a
     * cap to its own root. */
    dst.root = vm->cspace.cptr;
    dst.capPtr = VM_CSPACE_SLOT;
    dst.capDepth = VM_CSPACE_SIZE_BITS;
    err = vka_cnode_mint(&dst, &src, seL4_AllRights, cspace_root_data);
    assert(!err);

    /* Create a vspace */
    err = vka_alloc_page_directory(vka, &vm->pd);
    assert(!err);
    err = simple_ASIDPool_assign(simple, vm->pd.cptr);
    assert(err == seL4_NoError);
    err = sel4utils_get_vspace(vmm_vspace, &vm->vm_vspace, &vm->data, vka,
                               vm->pd.cptr, &vm_object_allocation_cb,
                               (void*)vm);
    assert(!err);

    /* Badge the endpoint */
    vka_cspace_make_path(vka, vmm_endpoint, &src);
    err = vka_cspace_alloc_path(vka, &dst);
    assert(!err);
    err = vka_cnode_mint(&dst, &src, seL4_AllRights,
                         seL4_CapData_Badge_new(vm_badge));
    assert(!err);
    /* Copy it to the cspace of the VM for fault IPC */
    src = dst;
    dst.root = vm->cspace.cptr;
    dst.capPtr = VM_FAULT_EP_SLOT;
    dst.capDepth = VM_CSPACE_SIZE_BITS;
    err = vka_cnode_copy(&dst, &src, seL4_AllRights);
    assert(!err);

    /* Create TCB */
    err = vka_alloc_tcb(vka, &vm->tcb);
    assert(!err);
    /* Fault EP slot is resolved in the VM's own CSpace; the VM runs
     * one priority level below its VMM. */
    err = seL4_TCB_Configure(vm_get_tcb(vm), VM_FAULT_EP_SLOT, priority - 1,
                             vm->cspace.cptr, cspace_root_data, vm->pd.cptr,
                             null_cap_data, 0, seL4_CapNull);
    assert(!err);

    /* Create VCPU */
    err = vka_alloc_vcpu(vka, &vm->vcpu);
    assert(!err);
    err = seL4_ARM_VCPU_SetTCB(vm->vcpu.cptr,
                               vm_get_tcb(vm));
    assert(!err);

    /* Initialise fault system */
    vm->fault = fault_init(vm);
    assert(vm->fault);

    return err;
}