Example 1
void _context_enter_userland(context_t *cxt)
{
  /* Set up a 16-byte aligned stack */
  uintptr_t stack = ((uintptr_t)cxt->stack) - 0x10;
  if(stack & 0xF)
    {
      stack &= 0xFFFFFFFFFFFFFFF0;
      stack += 0x10;
    }

  virtaddr_t *rsp = (virtaddr_t*)stack;
  virtaddr_t rbp = stack;

  /* Build the interrupt-return frame, highest address first */
  *(rsp--) = 0;                                /* Padding */
  *(rsp--) = (uint64_t)(GDT_KERNEL_DATA << 3); /* SS */
  *(rsp--) = rbp;                              /* RSP */
  *(rsp--) = (uint64_t)THREAD_FLAGS;           /* RFLAGS */
  *(rsp--) = (uint64_t)(GDT_KERNEL_CODE << 3); /* CS */
  *(rsp--) = cxt->rip;                         /* RIP */

  /* Reset rbp */
  rbp = (uint64_t)rsp;

  /* Initialise Registers */
  *(rsp--) = 0; //R15
  *(rsp--) = 0; //R14
  *(rsp--) = 0; //R13
  *(rsp--) = 0; //R12
  *(rsp--) = 0; //R11
  *(rsp--) = 0; //R10
  *(rsp--) = 0; //R9
  *(rsp--) = 0; //R8
  *(rsp--) = 0; //RDI
  *(rsp--) = 0; //RSI
  *(rsp--) = rbp;       //RBP
  *(rsp--) = 0; //RSP
  *(rsp--) = 0; //RBX
  *(rsp--) = 0; //RDX
  *(rsp--) = 0; //RCX
  *(rsp) = 0;           //RAX

  /* Update stack */
  cxt->stack = rsp;

  /* Enable user-mode */
  thread_get_current_thread_entry()->user_context = cxt;
  thread_get_current_thread_entry()->attribs |= THREAD_FLAG_ENTERUSER;

  /* Yield, like NOW */
  thread_switch();

  /* No escape from this path */
  for(;;);
}
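
For reference, the frame built above is what an x86-64 iretq consumes (RIP, CS, RFLAGS, RSP, SS, lowest address first), preceded by the general-purpose registers a switch stub would pop. A hypothetical struct view of the finished layout (names are illustrative, not from the original source):

/* Sketch only: cxt->stack points at the RAX slot after the code above. */
typedef struct {
    uint64_t rax, rcx, rdx, rbx, rsp_zero, rbp, rsi, rdi;
    uint64_t r8, r9, r10, r11, r12, r13, r14, r15;
    uint64_t rip;     /* consumed by iretq */
    uint64_t cs;
    uint64_t rflags;
    uint64_t rsp;
    uint64_t ss;
    uint64_t padding;
} enter_userland_frame_t;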
Example 2
/**
 *
 * Terminate the current process (maybe).  If the current process has
 * more than one running thread, only terminate the current thread.
 * The process is only completely terminated (as per process_join
 * wakeup and page table deallocation) when the final thread calls
 * process_finish().
 *
 * @param retval The return value of the process.  This is only used when the
 * final thread exits.
 *
 */
void process_finish(int retval)
{
    interrupt_status_t intr_status;
    thread_table_t *thread = thread_get_current_thread_entry();
    process_id_t pid = thread->process_id;

    if (retval < 0) {
        /* Not permitted! */
        retval = 0;
    }

    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    /* Mark the stack as free so new threads can reuse it. */
    process_free_stack(thread);

    if (--process_table[pid].threads == 0) {
        /* We are the last thread - kill process! */
        vm_destroy_pagetable(thread->pagetable);

        finish_given_process(pid, retval);
    }

    thread->pagetable = NULL;

    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);
    thread_finish();
}
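
The comment above refers to the process_join wakeup; for context, a minimal sketch of that counterpart, assuming the usual Buenos sleep-queue pattern (the PROCESS_FREE state name is an assumption):

int process_join(process_id_t pid)
{
    int retval;
    interrupt_status_t intr_status;

    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    /* Sleep until the target process has become a zombie. */
    while (process_table[pid].state != PROCESS_ZOMBIE) {
        sleepq_add(&process_table[pid]);
        spinlock_release(&process_table_slock);
        thread_switch();
        spinlock_acquire(&process_table_slock);
    }

    retval = process_table[pid].retval;
    process_table[pid].state = PROCESS_FREE; /* assumed free-slot state */

    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);
    return retval;
}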
Example 3
void process_run(process_id_t pid)
{
  context_t user_context;

  thread_table_t *my_thread = thread_get_current_thread_entry();

  /* If my process is a zombie, that means initialisation failed. */
  if (process_table[pid].state == PROCESS_ZOMBIE) {
    if (my_thread->pagetable) {
      vm_destroy_pagetable(my_thread->pagetable);
      my_thread->pagetable = NULL;
    }
    thread_finish();
  }

  process_set_pagetable(my_thread->pagetable);
  my_thread->process_id = pid;

  /* Initialize the user context. (Status register is handled by
     thread_goto_userland) */
  memoryset(&user_context, 0, sizeof(user_context));

  _context_set_ip(&user_context, process_table[pid].entry_point);
  _context_set_sp(&user_context, process_table[pid].stack_top);

  thread_goto_userland(&user_context);
}
Example 4
File: tlb.c Project: cfrost/buenos
/* TLB modified exception: verify that the faulting page is mapped
 * writable (dirty bit set), otherwise panic. */
void tlb_modified_exception(void) {
    tlb_exception_state_t tlb_es;
    _tlb_get_exception_state(&tlb_es);
    pagetable_t *current_pagetable;
    current_pagetable = thread_get_current_thread_entry()->pagetable;
    
    if (current_pagetable == NULL) {
        KERNEL_PANIC("Pagetable does not exist");
    }
    
    uint32_t i;
    for (i = 0; i < current_pagetable->valid_count; i++) {
        tlb_entry_t *entry = &current_pagetable->entries[i];
        /* Find the faulting page in the pagetable. */
        if (entry->VPN2 == tlb_es.badvpn2) {
            
            /* Check whether the faulting address is on the odd page
             * of the pair (see vm.c) and then check its dirty bit. */
            if (ADDR_IS_ON_ODD_PAGE(tlb_es.badvaddr)){
                KERNEL_ASSERT(entry->D1);
            } else {
                KERNEL_ASSERT(entry->D0);
            }
            return;
        }
    }
    KERNEL_PANIC("Unhandled TLB modified exception");
}
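
ADDR_IS_ON_ODD_PAGE comes from the Buenos VM headers; a plausible definition, assuming 4 KiB pages where each pagetable entry maps an even/odd page pair:

/* Assumed definition (see vm.c/vm.h): bit 12 of the virtual address
 * selects the odd page of an even/odd mapping pair. */
#define ADDR_IS_ON_ODD_PAGE(addr)  ((addr) & 0x00001000)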
Example 5
void process_finish(int retval) {
    interrupt_status_t intr_status;
    thread_table_t *thr = thread_get_current_thread_entry();
    process_id_t pid = process_get_current_process();
    
    int i;
    // remove parent references in other processes
    for (i = 0; i < PROCESS_MAX_PROCESSES; ++i) {
        intr_status = _interrupt_disable();
        spinlock_acquire(&process_table_slock);
        if (process_table[i].parent_id == pid)
            process_table[i].parent_id = -1;
        spinlock_release(&process_table_slock);
        _interrupt_set_state(intr_status);
    }
    
    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);
    
    process_table[pid].retval = retval;
    process_table[pid].state = PROCESS_ZOMBIE;
    
    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);
    
    // clean up virtual memory
    vm_destroy_pagetable(thr->pagetable);
    thr->pagetable = NULL;
    
    thread_finish();
}
Example 6
File: tlb.c Project: cfrost/buenos
void tlb_common_exception(void) {
    tlb_exception_state_t tlb_es;
    _tlb_get_exception_state(&tlb_es);
    pagetable_t *current_pagetable;
    current_pagetable = thread_get_current_thread_entry()->pagetable;
    if (current_pagetable == NULL) {
        KERNEL_PANIC("Pagetable does not exist");
    }
    uint32_t i;
    for (i = 0; i < current_pagetable->valid_count; i++) {
        tlb_entry_t *entry = &current_pagetable->entries[i];
        /* Find the faulting page in the pagetable and insert it into the TLB. */
        if (entry->VPN2 == tlb_es.badvpn2) {
            
            KERNEL_ASSERT(entry->VPN2 == tlb_es.badvaddr >> 13);

            /* Check whether the faulting address is on the odd page
             * of the pair (see vm.c) and then check its valid bit. */

            if (ADDR_IS_ON_ODD_PAGE(tlb_es.badvaddr)){
                KERNEL_ASSERT(entry->V1);
            } else {
                KERNEL_ASSERT(entry->V0);
            }

            /* Insert into a random TLB entry. */
            _tlb_write_random(&current_pagetable->entries[i]);
            return;
        }
    }
    KERNEL_PANIC("Unhandled TLB exception");
}
Example 7
/* Stop the process and the thread it runs in. Sets the return value as well */
void process_finish(int retval) {
  thread_table_t *thr;
  thr = thread_get_current_thread_entry();
  process_table[process_get_current_process()].state = ZOMBIE;
  process_table[process_get_current_process()].retval = retval;
  vm_destroy_pagetable(thr->pagetable);
  thr->pagetable = NULL;
  thread_finish();
}
Example 8
void setup_thread(thread_params_t *params)
{
    context_t user_context;
    uint32_t phys_page;
    int i;
    interrupt_status_t intr_status;
    thread_table_t *thread = thread_get_current_thread_entry();

    /* Copy thread parameters. */
    int arg = params->arg;
    void (*func)(int) = params->func;
    process_id_t pid = thread->process_id = params->pid;
    thread->pagetable = params->pagetable;
    params->done = 1; /* OK, we don't need params any more. */

    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    /* Set up userspace environment. */
    memoryset(&user_context, 0, sizeof(user_context));

    user_context.cpu_regs[MIPS_REGISTER_A0] = arg;
    user_context.pc = (uint32_t)func;

    /* Allocate thread stack */
    if (process_table[pid].bot_free_stack != 0) {
        /* Reuse old thread stack. */
        user_context.cpu_regs[MIPS_REGISTER_SP] =
            process_table[pid].bot_free_stack
            + CONFIG_USERLAND_STACK_SIZE*PAGE_SIZE
            - 4; /* Space for the thread argument */
        process_table[pid].bot_free_stack =
            *(uint32_t*)process_table[pid].bot_free_stack;
    } else {
        /* Allocate physical pages (frames) for the stack. */
        for (i = 0; i < CONFIG_USERLAND_STACK_SIZE; i++) {
            phys_page = pagepool_get_phys_page();
            KERNEL_ASSERT(phys_page != 0);
            vm_map(thread->pagetable, phys_page,
                    process_table[pid].stack_end - (i+1)*PAGE_SIZE, 1);
        }
        user_context.cpu_regs[MIPS_REGISTER_SP] =
            process_table[pid].stack_end-4; /* Space for the thread argument */
        process_table[pid].stack_end -= PAGE_SIZE*CONFIG_USERLAND_STACK_SIZE;
    }

    tlb_fill(thread->pagetable);

    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);

    thread_goto_userland(&user_context);
}
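
The free-stack list consumed above stores, in the first word of each free area, a link to the next free area. A hedged sketch of the matching process_free_stack (field names follow this example; the exact arithmetic is an assumption):

/* Sketch only: push the dying thread's stack area onto the process's
 * free-stack list. Assumes the caller holds process_table_slock. */
void process_free_stack(thread_table_t *thread)
{
    uint32_t sp = thread->user_context->cpu_regs[MIPS_REGISTER_SP];
    process_id_t pid = thread->process_id;
    uint32_t bot = (sp & PAGE_SIZE_MASK) + PAGE_SIZE
                   - CONFIG_USERLAND_STACK_SIZE*PAGE_SIZE;

    /* Link the old list head into the freed area and make it the new head. */
    *(uint32_t*)bot = process_table[pid].bot_free_stack;
    process_table[pid].bot_free_stack = bot;
}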
Example 9
/** Handles an interrupt (exception code 0). All interrupt handlers
 * that are registered for any of the occurred interrupts (hardware
 * 0-5, software 0-1) are called. The scheduler is called if a timer
 * interrupt (hardware 5) or a context switch request (software
 * interrupt 0) occurred, or if the currently running thread on the
 * processor is the idle thread.
 *
 * @param cause The Cause register from CP0
 */
void interrupt_handle(virtaddr_t cause) {
  int this_cpu, i;
    
  if(cause & INTERRUPT_CAUSE_SOFTWARE_0) {
    _interrupt_clear_sw0();
  }

  this_cpu = _interrupt_getcpu();

  /* Exceptions should be handled elsewhere: */
  if((cause  & 0x0000007c) != 0) {
    kprintf("Caught exception, cause %.8x, CPU %i\n", cause, this_cpu);
    KERNEL_PANIC("Exception in interrupt_handle");
  }


  /* Call appropriate interrupt handlers.  Handlers cannot be
   * unregistered, so after the first empty entry all others are
   * also empty.
   */
  for (i=0; i<CONFIG_MAX_DEVICES; i++) {
    if (interrupt_handlers[i].device == NULL)
      break;
        
    /* If this handler is registered for any of the interrupts
     * that occurred, call it.
     */
    if ((cause & interrupt_handlers[i].irq) != 0)
      interrupt_handlers[i].handler(interrupt_handlers[i].device);
  }


  /* Timer interrupt (HW5) or requested context switch (SW0)
   * Also call scheduler if we're running the idle thread.
   */
  if((cause & (INTERRUPT_CAUSE_SOFTWARE_0 |
               INTERRUPT_CAUSE_HARDWARE_5)) ||
     scheduler_current_thread[this_cpu] == IDLE_THREAD_TID) {
    scheduler_schedule();
        
    /* Until we have proper VM we must manually fill
       the TLB with pagetable entries before running code using
       given pagetable. Note that this method limits pagetable
       rows (possible mapping pairs) to 16 and can't be used
       with proper pagetables and VM.

       Note that if you remove this call (which you probably will when
       you implement proper VM), you must manually call _tlb_set_asid
       here. See the implementation of tlb_fill for details on how to do that.
    */
    tlb_fill(thread_get_current_thread_entry()->pagetable);
  }
}
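
Example 24 at the end of this list inlines exactly what tlb_fill does after scheduling; for reference, a sketch consistent with that code:

void tlb_fill(pagetable_t *pagetable)
{
    if (pagetable == NULL)
        return;

    /* Until there is a proper VM, the whole pagetable must fit in the TLB. */
    KERNEL_ASSERT(pagetable->valid_count <= (_tlb_get_maxindex() + 1));

    _tlb_write(pagetable->entries, 0, pagetable->valid_count);

    /* Only TLB entries tagged with the current thread's ASID should match. */
    _tlb_set_asid(pagetable->ASID);
}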
Example 10
void tlb_seek_insert(void)
{
  tlb_exception_state_t state;
  _tlb_get_exception_state(&state);
  pagetable_t *table = thread_get_current_thread_entry()->pagetable;
  for (int i = 0; i < (int)table->valid_count; i++) {
    if (table->entries[i].VPN2 == state.badvpn2) {
      _tlb_write_random(&table->entries[i]);
      return;
    }
  }
  KERNEL_PANIC("Access violation");
}
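
A helper like this would presumably be called from the TLB exception hooks dispatched in Example 23; a hypothetical wiring (the delegation itself is an assumption):

/* Hypothetical glue: a store fault resolves the same way as a load fault. */
void tlb_store_exception(void)
{
    tlb_seek_insert();
}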
Example 11
uint64_t *task_switch(uint64_t *stack)
{
  /* OK, we want to save the current stack */
  thread_table_t *task = thread_get_current_thread_entry();

  /* Is it a user task? */
  if(task->attribs & THREAD_FLAG_USERMODE)
    task->user_context->stack = stack;
  else
    task->context->stack = stack;

  /* Schedule */
  scheduler_schedule();

  /* Get new task */
  task = thread_get_current_thread_entry();

  /* Switch page directory */
  vmm_setcr3(task->context->pml4);

  /* Update TSS */
  tss_setstack(0, (uint64_t)task->context->stack);

  /* Test if this new task is set to
   * enter usermode */
  if(task->attribs & THREAD_FLAG_ENTERUSER)
    {
      task->attribs &= ~THREAD_FLAG_ENTERUSER;
      task->attribs |= THREAD_FLAG_USERMODE;
    }

  /* return new stack */
  if(task->attribs & THREAD_FLAG_USERMODE)
    return task->user_context->stack;
  else
    return task->context->stack;
}
Example 12
File: process.c Project: PtxDK/OSM
void* syscall_memlimit(void* new_end){
  uint32_t phys_page;
  interrupt_status_t intr_status;
  
  intr_status = _interrupt_disable();
  spinlock_acquire(&process_table_slock);
  
  process_control_block_t *curr_proc = process_get_current_process_entry();
  /* A NULL argument queries the current heap end. */
  if (new_end == NULL){
    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);
    return (void*)curr_proc->heap_end;
  }
  /* Refuse to shrink the heap: error out and return NULL. */
  if ((uint32_t)new_end < curr_proc->heap_end){
    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);
    return NULL;
  }
  /* Allocate physical pages and map them for every page between the
     current heap end and the new one, failing if memory runs out. */
  for (uint32_t i = (curr_proc->heap_end / PAGE_SIZE + 1);
       i <= ((uint32_t)new_end / PAGE_SIZE); i++){
    phys_page = physmem_allocblock();
    /* Out of physical memory: release the locks and fail. */
    if (phys_page == 0){
      spinlock_release(&process_table_slock);
      _interrupt_set_state(intr_status);
      return NULL;
    }
    /* Map the new page into the process's virtual address space. */
    vm_map(thread_get_current_thread_entry()->pagetable,
           phys_page, i * PAGE_SIZE, 1);
  }
  }
  /* Everything succeeded: commit the new heap end. */
  curr_proc->heap_end = (uint32_t)new_end;
  
  spinlock_release(&process_table_slock);
  _interrupt_set_state(intr_status);
  return new_end;
}
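
From userland this syscall behaves like sbrk: passing NULL queries the current heap end, and a larger address grows the heap. A hypothetical wrapper (heap_grow and the syscall stub name are illustrative):

/* Sketch of an sbrk-style wrapper over the memlimit syscall.
 * Returns the old heap end, or NULL if the kernel refused to grow. */
void *heap_grow(uint32_t bytes)
{
  void *old_end = syscall_memlimit(NULL);   /* query the current heap end */
  if (old_end == NULL)
    return NULL;
  if (syscall_memlimit((char*)old_end + bytes) == NULL)
    return NULL;                            /* out of memory */
  return old_end;
}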
Example 13
/* Stop the current process and the kernel thread in which it runs.
   Argument: the return value. */
void process_exit(int retval){
  process_id_t pid;
  thread_table_t * thr;
  interrupt_status_t intr_status;

  kprintf("start of exit, retval: %d\n", retval);
  if (retval < 0){
    return;
  }

  /* Get the process id and the current thread entry. */
  pid = process_get_current_process();
  thr = thread_get_current_thread_entry();

  /* Disable interrupts, saving the old state, then lock the table. */
  intr_status = _interrupt_disable();
  spinlock_acquire(&process_table_slock);

  /* Set the process state to ZOMBIE and record the return value. */
  process_table[pid].state = STATE_ZOMBIE;
  process_table[pid].retval = retval;

  /* Clean up virtual memory. */
  vm_destroy_pagetable(thr->pagetable);
  thr->pagetable = NULL;

  /* Wake anyone joining this process. */
  sleepq_wake_all(&process_table[pid]);

  /* Unlock the process table and restore the interrupt state. */
  spinlock_release(&process_table_slock);
  _interrupt_set_state(intr_status);

  kprintf("end of exit\n");

  thread_finish();
}
Example 14
void tlb_load_exception(void)
{
  tlb_exception_state_t state;
  pagetable_t* pagetable;

  /* Fetch the TLB exception state. */
  _tlb_get_exception_state(&state);

  pagetable = thread_get_current_thread_entry()->pagetable;

  /* Probe the TLB for the entry; a negative index means no match. */
  int tlb_index = _tlb_probe(pagetable->entries);
  if(tlb_index < 0) {
    KERNEL_PANIC("pagetable not found!");
  }

  /* Write the pagetable entries into random TLB slots. */
  for(uint32_t i = 0; i < pagetable->valid_count; i++) {
    _tlb_write_random((tlb_entry_t*) &pagetable->entries[i]);
  }
}
Example 15
int process_fork(void (*func)(int), int arg)
{
    TID_t tid;
    thread_table_t *thread = thread_get_current_thread_entry();
    process_id_t pid = thread->process_id;
    interrupt_status_t intr_status;
    thread_params_t params;
    params.done = 0;
    params.func = func;
    params.arg = arg;
    params.pid = pid;
    params.pagetable = thread->pagetable;

    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    tid = thread_create((void (*)(uint32_t))(setup_thread), (uint32_t)&params);

    if (tid < 0) {
        spinlock_release(&process_table_slock);
        _interrupt_set_state(intr_status);
        return -1;
    }

    process_table[pid].threads++;

    for(int i = 0 ; i < MAX_OPEN_FILES ; i++) {
        process_table[pid].open_files[i] = -1;
    }

    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);
    
    thread_run(tid);

    /* params will be deallocated when we return, so spin until the
       new thread has copied them (signalled via params.done). */
    while (!params.done);

    return tid;
}
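
A minimal usage sketch: fork a second userland thread of the current process (worker and its argument are illustrative):

/* Illustrative only: the new thread starts in userland at worker(arg). */
static void worker(int arg)
{
  (void)arg;
  for (;;);   /* thread body */
}

static int spawn_worker(void)
{
  int tid = process_fork(worker, 42);
  return (tid < 0) ? -1 : tid;   /* -1 means thread creation failed */
}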
Example 16
void process_finish(int retval) {
    interrupt_status_t intr_status;
    process_id_t cur = process_get_current_process();
    thread_table_t *thread = thread_get_current_thread_entry();

    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    process_table[cur].state = PROCESS_ZOMBIE;
    process_table[cur].retval = retval;

    /* Remember to destroy the pagetable! */
    vm_destroy_pagetable(thread->pagetable);
    thread->pagetable = NULL;

    sleepq_wake_all(&process_table[cur]);

    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);
    thread_finish();
}
Example 17
/**
 * Terminates the current process and sets a return value
 */
void process_finish(uint32_t retval) {
    interrupt_status_t intr_status;
    process_id_t pid;
    thread_table_t *my_thread;

    // Find out who we are.
    pid = process_get_current_process();
    my_thread = thread_get_current_thread_entry();

    // Ensure that we're the only ones touching the process table.
    intr_status = _interrupt_disable();
    spinlock_acquire(&process_table_slock);

    // Mark the stack as free so new threads can reuse it.
    process_free_stack(my_thread);

    if(--process_table[pid].threads == 0) {
        // Last thread in process; now we die.

        // Mark ourselves as dying.
        process_table[pid].retval = retval;
        process_table[pid].state = PROCESS_DYING;

        vm_destroy_pagetable(my_thread->pagetable);

        // Wake whomever may be sleeping for the process
        sleepq_wake(&process_table[pid]);
    }

    // Free our locks.
    spinlock_release(&process_table_slock);
    _interrupt_set_state(intr_status);

    my_thread->pagetable = NULL;

    // Kill the thread.
    thread_finish();
}
Example 18
/**
 *
 * Terminate the current process (maybe).  If the current process has
 * more than one running thread, only terminate the current thread.
 * The process is only completely terminated (as per process_join
 * wakeup and page table deallocation) when the final thread calls
 * process_finish().
 *
 * @param retval The return value of the process.  This is only used when the
 * final thread exits.
 *
 */
void process_finish(int retval)
{
  interrupt_status_t intr_status;
  thread_table_t *thread = thread_get_current_thread_entry();
  process_id_t pid = thread->process_id;

  if (retval < 0) {
    /* Not permitted! */
    retval = 0;
  }

  intr_status = _interrupt_disable();
  spinlock_acquire(&process_table_slock);

  vm_destroy_pagetable(thread->pagetable);

  finish_given_process(pid, retval);

  thread->pagetable = NULL;

  spinlock_release(&process_table_slock);
  _interrupt_set_state(intr_status);
  thread_finish();
}
Example 19
process_id_t process_get_current_process(void)
{
    return thread_get_current_thread_entry()->process_id;
}
Example 20
/**
 * Starts one userland process. The thread calling this function will
 * be used to run the process and will therefore never return from
 * this function. This function asserts that no errors occur in
 * process startup (the executable file exists and is a valid ELF
 * file, enough memory is available, file operations succeed...).
 * Therefore this function is not suitable to allow startup of
 * arbitrary processes.
 *
 * @param pid The process table index of the process to start; the
 * executable named in its process table entry is run in the userland
 * process
 */
void process_start(uint32_t pid)
{
    thread_table_t *my_entry;
    pagetable_t *pagetable;
    uint32_t phys_page;
    context_t user_context;
    uint32_t stack_bottom;
    elf_info_t elf;
    openfile_t file;
    const char* executable;

    int i;

    interrupt_status_t intr_status;

    my_entry = thread_get_current_thread_entry();
    my_entry->process_id = pid;
    executable = process_table[pid].executable;

    /* If the pagetable of this thread is not NULL, we are trying to
       run a userland process for a second time in the same thread.
       This is not possible. */
    KERNEL_ASSERT(my_entry->pagetable == NULL);

    pagetable = vm_create_pagetable(thread_get_current_thread());
    KERNEL_ASSERT(pagetable != NULL);

    intr_status = _interrupt_disable();
    my_entry->pagetable = pagetable;
    _interrupt_set_state(intr_status);

    file = vfs_open((char *)executable);
    /* Make sure the file existed and was a valid ELF file */
    KERNEL_ASSERT(file >= 0);
    KERNEL_ASSERT(elf_parse_header(&elf, file));

    /* Trivial and naive sanity check for entry point: */
    KERNEL_ASSERT(elf.entry_point >= PAGE_SIZE);

    /* Calculate the number of pages needed by the whole process
       (including userland stack). Since we don't have proper tlb
       handling code, all these pages must fit into TLB. */
    KERNEL_ASSERT(elf.ro_pages + elf.rw_pages + CONFIG_USERLAND_STACK_SIZE
		  <= _tlb_get_maxindex() + 1);

    /* Allocate and map stack */
    for(i = 0; i < CONFIG_USERLAND_STACK_SIZE; i++) {
        phys_page = pagepool_get_phys_page();
        KERNEL_ASSERT(phys_page != 0);
        vm_map(my_entry->pagetable, phys_page, 
               (USERLAND_STACK_TOP & PAGE_SIZE_MASK) - i*PAGE_SIZE, 1);
    }

    /* Allocate and map pages for the segments. We assume that
       segments begin at page boundary. (The linker script in tests
       directory creates this kind of segments) */
    for(i = 0; i < (int)elf.ro_pages; i++) {
        phys_page = pagepool_get_phys_page();
        KERNEL_ASSERT(phys_page != 0);
        vm_map(my_entry->pagetable, phys_page, 
               elf.ro_vaddr + i*PAGE_SIZE, 1);
    }

    for(i = 0; i < (int)elf.rw_pages; i++) {
        phys_page = pagepool_get_phys_page();
        KERNEL_ASSERT(phys_page != 0);
        vm_map(my_entry->pagetable, phys_page, 
               elf.rw_vaddr + i*PAGE_SIZE, 1);
    }

    /* Put the mapped pages into TLB. Here we again assume that the
       pages fit into the TLB. After writing proper TLB exception
       handling this call should be skipped. */
    //intr_status = _interrupt_disable();
    //tlb_fill(my_entry->pagetable);
    //_interrupt_set_state(intr_status);
    
    /* Now we may use the virtual addresses of the segments. */

    /* Zero the pages. */
    memoryset((void *)elf.ro_vaddr, 0, elf.ro_pages*PAGE_SIZE);
    memoryset((void *)elf.rw_vaddr, 0, elf.rw_pages*PAGE_SIZE);

    stack_bottom = (USERLAND_STACK_TOP & PAGE_SIZE_MASK) - 
        (CONFIG_USERLAND_STACK_SIZE-1)*PAGE_SIZE;
    memoryset((void *)stack_bottom, 0, CONFIG_USERLAND_STACK_SIZE*PAGE_SIZE);

    /* Copy segments */

    if (elf.ro_size > 0) {
	/* Make sure that the segment is in proper place. */
        KERNEL_ASSERT(elf.ro_vaddr >= PAGE_SIZE);
        KERNEL_ASSERT(vfs_seek(file, elf.ro_location) == VFS_OK);
        KERNEL_ASSERT(vfs_read(file, (void *)elf.ro_vaddr, elf.ro_size)
		      == (int)elf.ro_size);
    }

    if (elf.rw_size > 0) {
	/* Make sure that the segment is in proper place. */
        KERNEL_ASSERT(elf.rw_vaddr >= PAGE_SIZE);
        KERNEL_ASSERT(vfs_seek(file, elf.rw_location) == VFS_OK);
        KERNEL_ASSERT(vfs_read(file, (void *)elf.rw_vaddr, elf.rw_size)
		      == (int)elf.rw_size);
    }


    /* Set the dirty bit to zero (read-only) on read-only pages. */
    for(i = 0; i < (int)elf.ro_pages; i++) {
        vm_set_dirty(my_entry->pagetable, elf.ro_vaddr + i*PAGE_SIZE, 0);
    }

    /* Insert page mappings again to TLB to take read-only bits into use */
    //intr_status = _interrupt_disable();
    //tlb_fill(my_entry->pagetable);
    //_interrupt_set_state(intr_status);

    /* Initialize the user context. (Status register is handled by
       thread_goto_userland) */
    memoryset(&user_context, 0, sizeof(user_context));
    user_context.cpu_regs[MIPS_REGISTER_SP] = USERLAND_STACK_TOP;
    user_context.pc = elf.entry_point;

    vfs_close(file);

    thread_goto_userland(&user_context);

    KERNEL_PANIC("thread_goto_userland failed.");
}
Example 21
/**
 * Returns the process ID of the currently running thread.
 */
process_id_t process_get_current_process() {
	thread_table_t *my_thread = thread_get_current_thread_entry();
	return my_thread->process_id;
}
Example 22
/** Handles an interrupt (exception code 0). All interrupt handlers
 * that are registered for any of the occurred interrupts (hardware
 * 0-5, software 0-1) are called. The scheduler is called if a timer
 * interrupt (hardware 5) or a context switch request (software
 * interrupt 0) occurred, or if the currently running thread on the
 * processor is the idle thread.
 *
 * @param cause The Cause register from CP0
 */
void interrupt_handle(uint32_t cause) {
    int this_cpu, i;
    
    if(cause & INTERRUPT_CAUSE_SOFTWARE_0) {
        _interrupt_clear_sw0();
    }

    this_cpu = _interrupt_getcpu();

    /* Exceptions should be handled elsewhere: */
    if((cause  & 0x0000007c) != 0) {
	kprintf("Caught exception, cause %.8x, CPU %i\n", cause, this_cpu);
	KERNEL_PANIC("Exception in interrupt_handle");
    }


    /* Call appropriate interrupt handlers.  Handlers cannot be
     * unregistered, so after the first empty entry all others are
     * also empty.
     */
    for (i=0; i<CONFIG_MAX_DEVICES; i++) {
	if (interrupt_handlers[i].device == NULL)
	    break;
	
	/* If this handler is registered for any of the interrupts
	 * that occurred, call it.
	 */
	if ((cause & interrupt_handlers[i].irq) != 0)
	    interrupt_handlers[i].handler(interrupt_handlers[i].device);
    }


    /* Timer interrupt (HW5) or requested context switch (SW0)
     * Also call scheduler if we're running the idle thread.
     */
    if((cause & (INTERRUPT_CAUSE_SOFTWARE_0 |
		    INTERRUPT_CAUSE_HARDWARE_5)) ||
        scheduler_current_thread[this_cpu] == IDLE_THREAD_TID) {
      scheduler_schedule();
      tlb_fill(thread_get_current_thread_entry()->pagetable);
    }

    /*
        thread_table_t *thread = thread_get_current_thread_entry();
        process_id_t pid = thread->process_id;
        if (pid == -1) {
            // Not a process thread. Use the thread ID with the most
            // significant bit set as the ASID. Note: this limits both
            // PROCESS_MAX_PROCESSES and the number of kernel work
            // threads to 128, since the ASID is one byte and the ASID
            // address space is divided into two.
            uint8_t asid = thread_get_current_thread() | 0x80;
            _tlb_set_asid(asid);
            return;
        } else {
            // Use the PID as the ASID. This ensures that threads
            // within a process share the same ASID.
            _tlb_set_asid(pid);
        }
    */
}
Example 23
/** Handles an exception (code != 0) that occurred in user mode. Will
 * call appropriate handlers for the exception or panic if the
 * exception should not have occurred or does not (yet) have a handler.
 * Interrupts are disabled by EXL when this function is called, and
 * must be so when this function returns. Interrupts may be enabled
 * during execution of this function if needed.
 *
 * @param exception The exception code field from the CP0 Cause register
 */
void user_exception_handle(int exception)
{
    thread_table_t *my_entry;

    /* While interrupts are disabled here, they can be enabled when
       handling system calls and certain other exceptions if needed.
       For normal TLB exceptions it is not desirable that context is
       switched before TLB is filled. */
    _interrupt_disable();

    /* Clear EXL to make normal interrupt disable/enable work. */
    _interrupt_clear_EXL();

    /* Save usermode context to user_context for later reference in syscalls */
    my_entry = thread_get_current_thread_entry();
    my_entry->user_context = my_entry->context;

    switch(exception) {
    case EXCEPTION_TLBM:
        tlb_modified_exception();
        break;
    case EXCEPTION_TLBL:
        tlb_load_exception();
        break;
    case EXCEPTION_TLBS:
        tlb_store_exception();
        break;
    case EXCEPTION_ADDRL:
        KERNEL_PANIC("Address Error Load: not handled yet");
        break;
    case EXCEPTION_ADDRS:
        KERNEL_PANIC("Address Error Store: not handled yet");
        break;
    case EXCEPTION_BUSI:
        KERNEL_PANIC("Bus Error Instruction: not handled yet");
        break;
    case EXCEPTION_BUSD:
        KERNEL_PANIC("Bus Error Data: not handled yet");
        break;
    case EXCEPTION_SYSCALL:
        _interrupt_enable();
        syscall_handle(my_entry->user_context);
        _interrupt_disable();
        break;
    case EXCEPTION_BREAK:
        KERNEL_PANIC("Breakpoint: not handled yet");
        break;
    case EXCEPTION_RESVI:
        kprintf("Thread ID: %d\n", thread_get_current_thread());
        KERNEL_PANIC("Reserved instruction: not handled yet");
        break;
    case EXCEPTION_COPROC:
        KERNEL_PANIC("Coprocessor unusable: buggy assembler code?");
        break;
    case EXCEPTION_AOFLOW:
        KERNEL_PANIC("Arithmetic overflow: buggy assembler code?");
        break;
    case EXCEPTION_TRAP:
        KERNEL_PANIC("Trap: this just should not happen");
        break;
    default:
        KERNEL_PANIC("Unknown exception");
    }

    /* Interrupts are disabled by setting EXL after this point. */
    _interrupt_set_EXL();
    _interrupt_enable();

}
Example 24
/** Handles an interrupt (exception code 0). All interrupt handlers
 * that are registered for any of the occurred interrupts (hardware
 * 0-5, software 0-1) are called. The scheduler is called if a timer
 * interrupt (hardware 5) or a context switch request (software
 * interrupt 0) occurred, or if the currently running thread on the
 * processor is the idle thread.
 *
 * @param cause The Cause register from CP0
 */
void interrupt_handle(virtaddr_t cause) {
    int this_cpu, i;

    if(cause & INTERRUPT_CAUSE_SOFTWARE_0) {
        _interrupt_clear_sw0();
    }

    this_cpu = _interrupt_getcpu();

    /* Exceptions should be handled elsewhere: */
    if((cause  & 0x0000007c) != 0) {
        kprintf("Caught exception, cause %.8x, CPU %i\n", cause, this_cpu);
        KERNEL_PANIC("Exception in interrupt_handle");
    }


    /* Call appropriate interrupt handlers.  Handlers cannot be
     * unregistered, so after the first empty entry all others are
     * also empty.
     */
    for (i=0; i<CONFIG_MAX_DEVICES; i++) {
        if (interrupt_handlers[i].device == NULL)
            break;

        /* If this handler is registered for any of the interrupts
         * that occurred, call it.
         */
        if ((cause & interrupt_handlers[i].irq) != 0)
            interrupt_handlers[i].handler(interrupt_handlers[i].device);
    }


    /* Timer interrupt (HW5) or requested context switch (SW0)
     * Also call scheduler if we're running the idle thread.
     */
    if((cause & (INTERRUPT_CAUSE_SOFTWARE_0 |
                 INTERRUPT_CAUSE_HARDWARE_5)) ||
            scheduler_current_thread[this_cpu] == IDLE_THREAD_TID) {
        scheduler_schedule();

        /* Until we have proper VM we must manually fill
           the TLB with pagetable entries before running code using
           given pagetable. Note that this method limits pagetable
           rows (possible mapping pairs) to 16 and can't be used
           with proper pagetables and VM.

           Note that if you remove this call (which you probably will when
           you implement proper VM), you must manually call _tlb_set_asid
           here. See the implementation of tlb_fill for details on how to do that.
        */
        pagetable_t* pagetable = thread_get_current_thread_entry()->pagetable;

        if(pagetable == NULL)
            return;

        /* Check that the pagetable can fit into TLB. This is needed until
           we have proper VM system, because the whole pagetable must fit
           into TLB. */
        KERNEL_ASSERT(pagetable->valid_count <= (_tlb_get_maxindex()+1));

        _tlb_write(pagetable->entries, 0, pagetable->valid_count);

        /* Set ASID field in Co-Processor 0 to match thread ID so that
           only entries with the ASID of the current thread will match in
           the TLB hardware. */
        _tlb_set_asid(pagetable->ASID);
    }
}