/*!	Notifies the thread waiting for this team's image loading to complete.

	Looks up the current team's \c loading_info; if someone is waiting,
	publishes \a result and marks the load done, wakes the waiting thread,
	and — if \a suspend is \c true — suspends the calling thread itself.

	\param result	Status of the load, handed to the waiter via
					\c loading_info->result.
	\param suspend	If \c true, the caller suspends itself after waking the
					waiter (done while still holding the scheduler lock, so
					wake-up and self-suspension appear atomic to the waiter).

	Locking: takes the team lock first, then drops it before acquiring the
	scheduler spinlock — the two are never held simultaneously. The order of
	the statements below is part of that protocol; do not reorder.
*/
static void
notify_loading_app(status_t result, bool suspend)
{
	Team* team = thread_get_current_thread()->team;
	TeamLocker teamLocker(team);

	if (team->loading_info) {
		// there's indeed someone waiting
		struct team_loading_info* loadingInfo = team->loading_info;
		// detach the info from the team so nobody else sees it again
		team->loading_info = NULL;

		loadingInfo->result = result;
		loadingInfo->done = true;

		// we're done with the team stuff, get the scheduler lock instead
		teamLocker.Unlock();
		InterruptsSpinLocker schedulerLocker(gSchedulerLock);

		// wake up the waiting thread — only if it actually suspended;
		// otherwise it will observe loadingInfo->done on its own
		if (loadingInfo->thread->state == B_THREAD_SUSPENDED)
			scheduler_enqueue_in_run_queue(loadingInfo->thread);

		// suspend ourselves, if desired (scheduler lock is still held,
		// released by the locker after scheduler_reschedule() returns)
		if (suspend) {
			thread_get_current_thread()->next_state = B_THREAD_SUSPENDED;
			scheduler_reschedule();
		}
	}
}
/*!	Common entry point for hardware interrupts on x86.

	\param frame	The interrupt frame pushed by the low-level stub; the
					hardware vector is translated to a PIC-relative vector by
					subtracting \c ARCH_INTERRUPT_BASE.

	EOI ordering: for edge-triggered interrupts the end-of-interrupt is sent
	\em before dispatching handlers; for level-triggered ones it is sent
	\em after, so the line is not re-raised while still being serviced. If the
	current PIC does not claim the vector, the interrupt is APIC-originated
	(local interrupt, MSI, or IPI) and the local APIC gets the EOI instead.

	NOTE(review): if neither the reschedule nor the callback branch is taken,
	the function returns with interrupts still disabled — presumably the
	subsequent iframe restore (iret) re-enables them; confirm against the
	low-level interrupt stub.
*/
void
x86_hardware_interrupt(struct iframe* frame)
{
	int32 vector = frame->vector - ARCH_INTERRUPT_BASE;
	bool levelTriggered = false;
	Thread* thread = thread_get_current_thread();

	// drop spurious interrupts without acknowledging them
	if (sCurrentPIC->is_spurious_interrupt(vector)) {
		TRACE(("got spurious interrupt at vector %ld\n", vector));
		return;
	}

	levelTriggered = sCurrentPIC->is_level_triggered_interrupt(vector);

	if (!levelTriggered) {
		// edge-triggered: acknowledge before handling.
		// if it's not handled by the current pic then it's an apic generated
		// interrupt like local interrupts, msi or ipi.
		if (!sCurrentPIC->end_of_interrupt(vector))
			apic_end_of_interrupt();
	}

	// dispatch to the registered I/O interrupt handler(s)
	int_io_interrupt_handler(vector, levelTriggered);

	if (levelTriggered) {
		// level-triggered: acknowledge only after the handler has run,
		// so the still-asserted line doesn't retrigger immediately
		if (!sCurrentPIC->end_of_interrupt(vector))
			apic_end_of_interrupt();
	}

	cpu_status state = disable_interrupts();
	if (thread->cpu->invoke_scheduler) {
		// a handler requested a reschedule; do it under the thread's
		// scheduler lock before returning to the interrupted context
		SpinLocker schedulerLocker(thread->scheduler_lock);
		scheduler_reschedule(B_THREAD_READY);
		schedulerLocker.Unlock();
		restore_interrupts(state);
	} else if (thread->post_interrupt_callback != NULL) {
		// run a deferred callback: detach it first (with interrupts
		// disabled) so it cannot fire twice, then invoke it with
		// interrupts re-enabled
		void (*callback)(void*) = thread->post_interrupt_callback;
		void* data = thread->post_interrupt_data;
		thread->post_interrupt_callback = NULL;
		thread->post_interrupt_data = NULL;
		restore_interrupts(state);
		callback(data);
	}
}
/*
 * i686 kernel entry point, called from the multiboot boot stub.
 *
 * magic: multiboot magic value passed by the bootloader; must equal
 *        MULTIBOOT_BOOTLOADER_MAGIC or boot halts.
 * info:  multiboot information structure (command line, modules, memory map).
 *
 * Brings up the BSP step by step — console, command line, optional serial,
 * virtual/physical memory, the slab allocator, address spaces — running an
 * inline smoke test after each stage, then hand-builds a first user thread
 * and enters the scheduler. The statement order below is the boot order;
 * later stages depend on earlier ones.
 */
void i686_kmain(unsigned long magic, multiboot_info_t *info)
{
	bootvideo_cls();
	parse_cmdline(info->cmdline);
	if (use_serial)
		i686_tty_init(0, 9600);
	i686_kernel.debug = i686_debug;

	// refuse to continue without a multiboot-compliant loader
	if (magic != MULTIBOOT_BOOTLOADER_MAGIC) {
		i686_debug("Not booted from multiboot loader!\n");
		while (1);
	}
	i686_debug("mods_addr: %x\nmod_start: %x\n", info->mods_addr, 0);

	// bring up the BSP: virtual memory first, then physical, then the
	// kernel slab allocator on top of both
	i686_kernel.mutex = &i686_mutex;
	i686_kernel.bsp = (struct cpu *)i686_cpu_alloc();
	i686_kernel.bsp->kvirt = i686_virtmem_init(&i686_kernel);
	i686_kernel.phys = i686_physmem_alloc(&i686_kernel, info);
	kmem_init(i686_kernel.bsp->allocator);
	i686_kernel.bsp->v.init(i686_kernel.bsp);
	i686_debug("Location GDT entry: %x\n",
		((struct i686_cpu *)i686_kernel.bsp)->gdt);

	// smoke test: allocate one virtual page, back it with a physical
	// page, and write through the mapping
	virtaddr_t a;
	physaddr_t p;
	virtmem_error_t e1 = virtmem_kernel_alloc(i686_kernel.bsp->kvirt, &a, 1);
	assert(e1 == VIRTMEM_SUCCESS);
	physmem_error_t e2 = physmem_page_alloc(i686_kernel.bsp->localmem, 0, &p);
	assert(e2 == PHYSMEM_SUCCESS);
	virtmem_kernel_map_virt_to_phys(i686_kernel.bsp->kvirt, p, a);
	i686_debug("Allocated address: %x(->%x)\n", a, p);
	char *s = (char *)a;
	strcpy(s, "This shows the validity of this memory");
	i686_debug("%x contains: %s\n", a, s);

	// smoke test: create a slab cache and allocate from it
	struct kmem_cache *s1 = kmem_alloc(i686_kernel.bsp->allocator);
	kmem_cache_init(i686_kernel.bsp->allocator, s1, i686_kernel.bsp, "test",
		128, NULL, NULL);
	char *t1 = kmem_cache_alloc(s1);
	i686_debug("cache at %x provided us with %x\n", s1, t1);
	strcpy(t1, "This shows the validity of the slab allocation");
	i686_debug("%x contains: %s\n", t1, t1);

	// build a user address space with one mapped region
	i686_address_space_init();
	struct address_space *as;
	struct memory_region *mr;
	address_space_alloc(&as);
	memory_region_alloc(&mr);
	// map the new page directory into kernel space so we can inspect it
	// NOTE(review): this e1 is not asserted, unlike the first call
	e1 = virtmem_kernel_alloc(i686_kernel.bsp->kvirt, &a, 1);
	virtmem_kernel_map_virt_to_phys(i686_kernel.bsp->kvirt,
		(physaddr_t)as->pd, a);
	address_space_init_region(as, mr, (virtaddr_t)0x1000000, 0x2000);
	memory_region_set_flags(mr, 1, 1);
	memory_region_map(as, mr, NULL);

	// smoke test: round-trip a string kernel -> user -> kernel
	const char *teststr = "This is a test string to be copied to userspace.";
	char testcpybuf[128];
	// 0xeb 0xfe is x86 "jmp short $" — a two-byte infinite loop for the
	// user thread to spin in
	char opcodes[] = {0xeb, 0xfe};
	virtmem_copy_kernel_to_user(i686_kernel.bsp->kvirt, as->pd,
		(void *)0x1000ffc, (const void *)teststr, strlen(teststr) + 1);
	virtmem_copy_user_to_kernel(i686_kernel.bsp->kvirt, (void *)&testcpybuf,
		as->pd, (const void *)0x1000ffc, strlen(teststr) + 1);
	i686_debug("testcpybuf contains '%s'\n", testcpybuf);
	// plant the spin-loop opcodes at the region base for the first thread
	virtmem_copy_kernel_to_user(i686_kernel.bsp->kvirt, as->pd,
		(void *)0x1000000, (const void *)opcodes, 2);

	// create the first thread, switch to the user page directory, and
	// enter the scheduler; this function never returns
	struct thread *thr1;
	scheduler_thread_alloc(cpu()->sched, &thr1);
	thread_init(thr1, as);
	thr1->state = THREAD_RUNNABLE;
	scheduler_thread_add(cpu()->sched, thr1);
	scheduler_reschedule(cpu()->sched);
	virtmem_user_setup_kernelspace(i686_kernel.bsp->kvirt, as->pd);
	virtmem_set_context(i686_kernel.bsp->kvirt, as->pd);
	scheduler_resume(cpu()->sched);
	while (1);
}