Beispiel #1
0
/*
 * Unmaps all the page table entries and pages and frees the frames for a tid
 */
void unmap_process(L4_ThreadId_t tid_killed) {
  /* Thread number identifying the victim; entries are matched on this. */
  L4_Word_t victim = L4_ThreadNo(tid_killed);

  /* Walk the page table and release every entry owned by the victim:
   * unmap the fpage, return the backing frame, and reset all metadata. */
  for (int idx = 0; idx < numPTE; idx++) {
    if (L4_ThreadNo(page_table[idx].tid) != victim)
      continue;
    L4_UnmapFpage(page_table[idx].tid, page_table[idx].pageNo);
    frame_free(new_low + idx * PAGESIZE);
    page_table[idx].tid = L4_nilthread;
    page_table[idx].referenced = 0;
    page_table[idx].dirty = 0;
    page_table[idx].being_updated = 0;
    page_table[idx].error_in_transfer = 0;
    page_table[idx].pinned = 0;
  }

  /* Return the victim's swap slots to the head of the free list. */
  for (int idx = 0; idx < MAX_SWAP_ENTRIES; idx++) {
    if (L4_ThreadNo(swap_table[idx].tid) != victim)
      continue;
    swap_table[idx].tid = L4_nilthread;
    swap_table[idx].next_free = head_free_swap;
    head_free_swap = idx;
  }

  /* Remove later: flush is believed not to be required here. */
  L4_CacheFlushAll();
}
Beispiel #2
0
/*
 * This function loads the entire elf file into the phsical frames and
 * maps fpages corresponding to virtual address in elf file to the process
 */
/*
 * Loads the entire elf file into physical frames and maps the fpages
 * corresponding to the elf file's virtual addresses to the process.
 *
 * Returns 0 on success, -1 on failure; on failure every resource
 * already attributed to new_tid is released via unmap_process().
 */
int load_code_segment_virtual(char *elfFile,L4_ThreadId_t new_tid) {
  uint32_t min[2];
  uint32_t max[2];
  /* NOTE(review): min/max are uint32_t[2] passed as uint64_t*; the code
   * then reads min[1]/max[1] as the usable 32-bit bound — this assumes a
   * big-endian target, confirm. */
  elf_getMemoryBounds(elfFile, 0, (uint64_t*)min, (uint64_t*)max);
  //Reserve memory between min and max, rounded down to page boundaries
  L4_Word_t lower_address = ((L4_Word_t) min[1] / PAGESIZE) * PAGESIZE;
  L4_Word_t upper_address = ((L4_Word_t) max[1] / PAGESIZE) * PAGESIZE;

  while(lower_address <= upper_address) {
    L4_Word_t frame = frame_alloc();
    if(!frame) {
      //Out of frames: tear down everything handed out so far
      unmap_process(new_tid);
      return -1;
    }
    L4_Fpage_t targetpage = L4_FpageLog2(lower_address,12);
    lower_address += PAGESIZE;
    L4_Set_Rights(&targetpage,L4_FullyAccessible);
    L4_PhysDesc_t phys = L4_PhysDesc(frame, L4_DefaultMemory);
    //Map the frame to the root task, but record the page-table entry
    //against new_tid; the mapping is moved to new_tid after elf loading.
    if (L4_MapFpage(L4_Myself(), targetpage, phys) ) {
      page_table[(frame-new_low)/PAGESIZE].tid = new_tid;
      page_table[(frame-new_low)/PAGESIZE].pinned = 1;
      page_table[(frame-new_low)/PAGESIZE].pageNo = targetpage;
    } else {
      //BUGFIX: this frame was never entered in the page table, so
      //unmap_process() cannot free it — free it here. Also propagate
      //the failure instead of continuing the loop after teardown
      //(the original kept allocating and returned 0 on this path).
      frame_free(frame);
      unmap_process(new_tid);
      return -1;
    }
  }
  //All pages are mapped into the root task; elf_loadFile can now write
  //the segments through those virtual addresses.
  if(elf_loadFile(elfFile,0) == 1) {
    //Elf file loaded successfully: remap every page previously mapped
    //to the root task over to the new tid.
    for(int i=0;i<numPTE;i++) {
      if(L4_ThreadNo(new_tid) == L4_ThreadNo(page_table[i].tid)) {
	L4_UnmapFpage(L4_Myself(),page_table[i].pageNo);
	L4_PhysDesc_t phys = L4_PhysDesc(new_low + i * PAGESIZE, L4_DefaultMemory);
	if(!L4_MapFpage(new_tid, page_table[i].pageNo, phys)) {
	  unmap_process(new_tid);
	  return -1;
	}
      }
    }
  } else {
    //BUGFIX: elf load failed — report failure instead of falling
    //through and returning 0 (success) after tearing the process down.
    unmap_process(new_tid);
    return -1;
  }
  //Remove later
  L4_CacheFlushAll();
  return 0;
}
Beispiel #3
0
/*
 * Iguana server entry point: performs architecture fixups, runs the
 * subsystem init sequence, processes boot info, then enters the server
 * loop. Never returns in normal operation.
 */
int
main(void)
{
    int r;

    DEBUG_PRINT(banner);
#if defined(ARCH_ARM) && ARCH_VER <= 5
    /* Place iguana in L4 ARM vspace #1 */
    (void)L4_SpaceControl(IGUANA_SPACE, L4_SpaceCtrl_resources, IGUANA_CLIST,
                          L4_Nilpage, (1 << 16), NULL);
    /* Cache Flush */
    (void)L4_CacheFlushAll();
#endif
    INIT_PRINT("Processing Boot Info: %p\n", __okl4_bootinfo);

    /* Subsystem initialisation — NOTE(review): this ordering appears
     * deliberate (utcb before spaces/threads, objtable before threads);
     * do not reorder without checking the dependencies. */
    utcb_init();

    mutex_init();
    space_init();
    pd_init();
    objtable_init();
    thread_init();

    /* Execute the boot-info script; non-zero means boot failed. */
    r = bi_execute(__okl4_bootinfo);
    if (r != 0) {
#if defined(IGUANA_DEBUG)
        L4_KDB_Enter("PANIC: Bootinfo did not initialise correctly");
#endif
        /* No recovery path: spin forever rather than continue half-booted. */
        while (1);
    }

    extensions_init();

    /*
     * TODO: We could reclaim memory here if we need to
     */

    /* Now that we are ready to roll, lets start the server */
    INIT_PRINT("iguana_server\n");

    iguana_server_loop();

    /* The server loop is not expected to return. */
    assert(!"Should never reach here");

    return 0;
}
Beispiel #4
0
/*
 * Callback function to nfs_read from swapfile
 * Always replies to the thread
 */
/*
 * Callback function to nfs_read from swapfile.
 * Always replies to the faulting thread once the whole page has arrived.
 *
 * One NFS read covers NFS_READ_SIZE bytes; this callback fires once per
 * chunk and accumulates read_bytes_transferred until a full PAGESIZE has
 * been accounted for, at which point the page is (re)mapped and the
 * destination thread is unblocked.
 */
void pager_read_callback(uintptr_t token,int status, fattr_t *attr, int bytes_read,char *data) {
  /* token carries the pager's bookkeeping for this in-flight page-in. */
  struct page_token *token_val = (struct page_token *) token;
  int pagetableIndex = token_val -> pageIndex;
  int byte_index = token_val -> chunk_index;
  int swapIndex = token_val -> swapIndex;

  if(status != 0) {
    /* Record the failure; the page stays flagged until all chunks land. */
    page_table[pagetableIndex].error_in_transfer = 1;
  } else {
    //Copy the data read to memory
    /* NOTE(review): copies a full NFS_READ_SIZE regardless of bytes_read —
     * assumes a successful read always returns a complete chunk; confirm. */
    char *memstart = (char *) (new_low + pagetableIndex * PAGESIZE + byte_index*NFS_READ_SIZE); 
    memcpy(memstart,data,NFS_READ_SIZE);
  }
    
  /* Counted even on error so the "all chunks done" test below still fires. */
  page_table[pagetableIndex].read_bytes_transferred += NFS_READ_SIZE;

  //Check if all the callbacks have been received
  if(page_table[pagetableIndex].read_bytes_transferred == PAGESIZE) {
    if(page_table[pagetableIndex].error_in_transfer == 1) {
      //The memory in pagetable is inconsistent so the best thing would be to mark the 
      //page table entry as unreferenced and hopefully its evicted soon
      //If this occurs for a free frame its best to free the frame
      //This condition is not required we would always want to free the frame(think and remove)
      if(!token_val -> writeToSwapIssued) {
        frame_free(new_low + pagetableIndex * PAGESIZE);
      }
      //Unmap the page table page whose memory we corrupted
      L4_UnmapFpage(page_table[pagetableIndex].tid,page_table[pagetableIndex].pageNo);

      /* Invalidate the entry so the eviction logic won't trust it. */
      page_table[pagetableIndex].tid = L4_nilthread;
      page_table[pagetableIndex].referenced = 0;
      page_table[pagetableIndex].dirty = 0;

    } else {
      //Free up the swap entry from which we read in
      swap_table[swapIndex].tid = L4_nilthread;
      swap_table[swapIndex].next_free = head_free_swap;
      head_free_swap = swapIndex;
      //Update page table: the frame now belongs to the faulting thread.
      page_table[pagetableIndex].tid = token_val -> destination_tid;
      page_table[pagetableIndex].pageNo = token_val -> destination_page;
      page_table[pagetableIndex].referenced = 1;
      page_table[pagetableIndex].dirty = 0;
      //Unmap the page which was written out (this frame was evicted
      //from another thread before being reused for this page-in).
      if(token_val -> writeToSwapIssued) {
        L4_UnmapFpage(token_val -> source_tid,token_val -> source_page);
      }
      /* Map read-only so a later write faults and sets the dirty state. */
      L4_Set_Rights(&(token_val -> destination_page),L4_Readable);  
      L4_PhysDesc_t phys = L4_PhysDesc(new_low + pagetableIndex * PAGESIZE, L4_DefaultMemory);
      L4_MapFpage(token_val -> destination_tid, token_val -> destination_page, phys);
      L4_CacheFlushAll();
      //Everything went fine
    }
    /* Unblock the faulting thread with an empty reply (sent on both the
     * success and the error path — see function header comment). */
    L4_Msg_t msg;
    L4_MsgClear(&msg);
    L4_MsgLoad(&msg);
    L4_Reply(token_val -> destination_tid);
    //Update the process table size
    update_process_table_size(token_val -> destination_tid,1);
    /* Entry is no longer mid-transfer; clear both flags in one shot. */
    page_table[pagetableIndex].being_updated = page_table[pagetableIndex].error_in_transfer = 0;
  }
  /* Token is heap-allocated per chunk by the issuer; freed on every path. */
  free(token_val);
}
Beispiel #5
0
/* XXX Note, this fuction does not currently handle faults from
 * vmalloc/vmaped'd memory. That should probably be in a separate
 * function anyway.
 */
int
l4_do_page_fault(unsigned long address, long access, struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	int fault, si_code = SEGV_MAPERR;
	siginfo_t info;

	/* If we're in an interrupt context, or have no user context,
	   we must not take the fault.  */
	if (!mm) /* || in_interrupt()) */
		goto bad_area_nosemaphore;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	   we can handle it.  */
 good_area:
	si_code = SEGV_ACCERR;
	if (/* LOAD */ access & 0x4) {
		/* Allow reads even for write/execute-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
			goto bad_area;
	} else if (/* FETCH */ access & 0x1) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	}

 survive:
	/* If for any reason at all we couldn't handle the fault,
	   make sure we exit gracefully rather than endlessly redo
	   the fault.  */

	fault = handle_mm_fault(mm, vma, address, access & 0x2);
	up_read(&mm->mmap_sem);

	switch (fault) {
		case VM_FAULT_MINOR:
			current->min_flt++;
			break;
 		case VM_FAULT_MAJOR:
			current->maj_flt++;
			break;
		case VM_FAULT_SIGBUS:
			goto do_sigbus;
		case VM_FAULT_OOM:
			goto out_of_memory;
#if 0
	/*
	 * Well, it's a good idea to have this here, but apparently
	 * handle_mm_fault() can return all sorts of weird stuff, which
	 * makes it unsuitable to put BUG() here. 	-gl
	 */
	      default:
		BUG();
#endif
	}
	return 0;

	/* Something tried to access memory that isn't in our memory map.
	   Fix it, but check if it's kernel or user first.  */
bad_area:
	up_read(&mm->mmap_sem);
	/* Check if it is at TASK_SIG_BASE */
#ifdef CONFIG_ARCH_ARM
	/*
	 * Binary patching for NPTL
	 *
	 * XXX ??? Better place this thing?
	 */
	if (user_mode(regs) && ((address & PAGE_MASK) == 0xffff0000)) {
#if 0
		printk("Fault at address 0x%lx pc = 0x%lx, "
		    "need rewrite\n", address, L4_MsgWord(&current_regs()->msg, 1));
#endif
		if (address == 0xffff0fe0) {
			L4_Msg_t msg;
			unsigned long pc = L4_MsgWord(&current_regs()->msg, 1);
			unsigned long lr, fpc;
			unsigned long instr, r;
			long offs;

			if (pc != 0xffff0fe0)
				goto bad_area_nosemaphore;

			L4_Copy_regs_to_mrs(task_thread_info(current)->user_tid);
			L4_StoreMRs(0, 16, &msg.msg[0]);
			lr = msg.msg[14];
			fpc = lr - 4;

			L4_CacheFlushAll();
			instr = get_instr(fpc);
			if (instr == -1UL) 
				goto bad_area_nosemaphore;

			if ((instr & 0x0f000000) == 0x0b000000) {
				offs = instr << 8;
				offs = offs >> 6;	/* ASR */

				fpc = (fpc + 8) + offs;
				instr = get_instr(fpc);
				if (instr == -1UL)
					goto bad_area_nosemaphore;

				if ((instr & 0xffffffff) == 0xe3e00a0f) {
					/* mvn r0, 0xf000 */

					/*
					 * Rewrite to load the 
					 * kernel_reserved[0] from the
				 	 * utcb.
					 *
					 * This requires L4 to cooperate
					 * with the ExReg() syscall.
					 */
					/* mov r0, #0xff000000 */
					r = set_instr(fpc, 0xe3a004ff);
					if (r == -1UL)
						goto bad_area_nosemaphore;
					fpc += 4;

					/* ldr r0, [r0, #0xff0] */
					r = set_instr(fpc, 0xe5900ff0);
					if (r == -1UL)
						goto bad_area_nosemaphore;
					fpc += 4;

					/* ldr r0, [r0, #56] */
					r = set_instr(fpc, 0xe5900038);
					if (r == -1UL)
						goto bad_area_nosemaphore;
					fpc += 4;

					/* mov pc, lr */
					r = set_instr(fpc, 0xe1a0f00e);
					if (r == -1UL) 
						goto bad_area_nosemaphore;
					L4_CacheFlushAll();

					msg.msg[0] = current_thread_info()->tp_value;
					msg.msg[15] = lr;
					L4_LoadMRs(0, 16, &msg.msg[0]);
					L4_Copy_mrs_to_regs(
					    task_thread_info(current)->user_tid);
					L4_MsgPutWord(&current_regs()->msg, 1,
					    lr);
					return 0;
				}
			} else if (instr == 0xe240f01f) {