Example #1
File: tlb.c Project: CSU-GH/okl4_3.0
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long shift, base = 0;
#ifdef ARM_PID_RELOC
	struct vm_area_struct *vma;

	/* PID relocation uses 32 MiB slots, so the region base is pid << 25 */
	shift = 25;
	base = ((unsigned long)mm->context.pid << shift);
#else
	shift = TASK_SHIFT;
#endif
#if defined(CONFIG_CELL)
	okl4_unmap_page_size(&mm->context, base, shift);
#elif defined(CONFIG_IGUANA)
	{
		L4_Fpage_t fpage;
		fpage = L4_FpageLog2(base, shift);
		eas_unmap(mm->context.eas, fpage);
	}
#endif
#ifdef ARM_PID_RELOC
	/* Walk through the list of VMAs and flush those
	 * that are outside the PID relocation region
	 */
	vma = mm->mmap;
	while(vma) {
		if (vma->vm_start >= 0x2000000UL)
			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
		vma = vma->vm_next;
	}
#endif
}
Example #2
File: tlb.c Project: CSU-GH/okl4_3.0
/* Flush a page from the user's address space */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;

#ifdef ARM_PID_RELOC
	if (address < 0x2000000) {
		unsigned long offset = (unsigned long)vma->vm_mm->context.pid << 25;

		address += offset;
	}
#endif

	{
#if defined(CONFIG_CELL)
	    okl4_unmap_page(&vma->vm_mm->context, address);
#elif defined(CONFIG_IGUANA)
	    L4_Fpage_t fpage;

	    fpage = L4_FpageLog2(address, PAGE_SHIFT);
	    eas_unmap(vma->vm_mm->context.eas, fpage);
#else
#error
#endif
	}
}
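Examples #1 and #2 (and #9, #10, and #11 below) repeat the same ARM PID relocation check: user addresses below 0x2000000 (32 MiB) are shifted into the task's slot at pid << 25 before the L4 map or unmap call. The helper below only restates that inline pattern; pid_relocate() is a hypothetical name, not a function in tlb.c.

/* Hypothetical helper, not in tlb.c: apply ARM PID relocation to a user address.
 * Addresses in the low 32 MiB window are moved into the task's pid-sized slot. */
static inline unsigned long pid_relocate(unsigned long address, unsigned long pid)
{
	if (address < 0x2000000UL)	/* inside the relocated window */
		address += pid << 25;	/* slot n starts at n * 32 MiB */
	return address;
}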
Example #3
File: tlb.c Project: CSU-GH/okl4_3.0
/* Flush a range of memory from the kernel's virtual address space */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#if 0
	unsigned long base, count;
	L4_Fpage_t fpage;

	count = 0;
	base = start & PAGE_MASK;

	while (1) {
		fpage = L4_FpageLog2(base, PAGE_SHIFT);

		L4_Set_Rights(&fpage, L4_FullyAccessible);  /* To unmap */
		L4_LoadMR(count++, fpage.raw);

		if (count == __L4_NUM_MRS)
		{
			L4_Unmap(count-1);
			count = 0;
		}

		base += PAGE_SIZE;
		if (base >= end)
		{
			if (count)
				L4_Unmap(count-1);
			return;
		}
	}
#endif
}
Example #4
File: pg_drop.c Project: ksandstr/mung
static void handle_fault(L4_Word_t faddr, L4_Word_t fip, L4_MapItem_t *map)
{
	struct drop_param *param = get_ctx();
	L4_MsgTag_t tag = muidl_get_tag();
	int rwx = tag.X.label & 0x000f;
#if 0
	L4_ThreadId_t from = muidl_get_sender();
	diag("drop_pager: pf in %lu:%lu at %#lx, ip %#lx",
		L4_ThreadNo(from), L4_Version(from), faddr, fip);
#endif
	param->log_top = (param->log_top + 1) % LOG_SIZE;
	param->log[param->log_top] = L4_FpageLog2(faddr, 12);
	L4_Set_Rights(&param->log[param->log_top], rwx);

	int dpos = param->log_top - param->keep;
	if(dpos < 0) dpos += LOG_SIZE;
	assert(dpos >= 0 && dpos < LOG_SIZE);
	L4_Fpage_t drop = param->log[dpos];
	if(!L4_IsNilFpage(drop)
		&& L4_Address(drop) != (faddr & ~PAGE_MASK))
	{
#if 0
		diag("flushing %#lx:%#lx (dpos %d)",
			L4_Address(drop), L4_Size(drop), dpos);
#endif
		L4_Set_Rights(&drop, L4_FullyAccessible);
		L4_FlushFpage(drop);
	}

	/* pass it on. */
	L4_LoadBR(0, L4_CompleteAddressSpace.raw);
	L4_LoadMR(0, (L4_MsgTag_t){ .X.label = 0xffe0 | rwx,
		.X.u = 2 }.raw);
Example #5
File: swapper.c Project: gz/aos10
/**
 * Dereferences a page. This just unmaps it in the L4 page table.
 * @param page
 */
static void dereference(page_queue_item* page) {

	if(L4_UnmapFpage(page->tid, L4_FpageLog2(page->virtual_address, PAGESIZE_LOG2)) == FALSE) {
		dprintf(0, "Can't unmap page at 0x%X (error:%d)\n", page->virtual_address, L4_ErrorCode());
	}

	//L4_CacheFlushRange(page->tid, pager_table_lookup(page->tid, page->virtual_address)->address, pager_table_lookup(page->tid, page->virtual_address)->address+PAGESIZE);
}
Example #6
File: swapper.c Project: gz/aos10
/**
 * Checks whether a page has been referenced. This works by querying
 * the L4 page table: if the page is mapped there, it is considered
 * referenced.
 * @param page
 * @return 1 if the page was referenced, 0 otherwise
 */
static L4_Bool_t is_referenced(page_queue_item* page) {
	L4_Fpage_t fpage = L4_FpageLog2(page->virtual_address, PAGESIZE_LOG2);
	L4_PhysDesc_t phys;

	if(L4_GetStatus(page->tid, &fpage, &phys) == FALSE) {
		dprintf(0, "Can't get status for page 0x%X (error:%d)\n", page->virtual_address, L4_ErrorCode());
	}

	return L4_WasReferenced(fpage);

}
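dereference() and is_referenced() above are the building blocks for a second-chance style eviction scan over the resident page queue. The sketch below is not from swapper.c: page_queue_item comes from the examples, while choose_victim(), queue_oldest(), and queue_move_to_back() are assumed placeholders.

/* Hypothetical second-chance victim selection built on the two routines above.
 * queue_oldest() and queue_move_to_back() are assumed placeholders, not project code. */
extern page_queue_item *queue_oldest(void);          /* oldest resident page */
extern void queue_move_to_back(page_queue_item *p);  /* re-queue for another pass */

static page_queue_item *choose_victim(void)
{
    for (;;) {
        page_queue_item *page = queue_oldest();
        if (!is_referenced(page))
            return page;           /* untouched since the last sweep: evict this one */
        dereference(page);         /* unmap it so the next access faults and re-marks it */
        queue_move_to_back(page);  /* second chance */
    }
}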
Example #7
File: pager.c Project: gapry/aos-1
/*
 * This function loads the entire ELF file into physical frames and maps the
 * fpages corresponding to the ELF file's virtual addresses into the process.
 */
int load_code_segment_virtual(char *elfFile, L4_ThreadId_t new_tid) {
  uint32_t min[2];
  uint32_t max[2];
  elf_getMemoryBounds(elfFile, 0, (uint64_t *)min, (uint64_t *)max);
  // Now we need to reserve memory between min and max
  L4_Word_t lower_address = ((L4_Word_t)min[1] / PAGESIZE) * PAGESIZE;
  L4_Word_t upper_address = ((L4_Word_t)max[1] / PAGESIZE) * PAGESIZE;

  while (lower_address <= upper_address) {
    L4_Word_t frame = frame_alloc();
    if (!frame) {
      // Oops, out of frames
      unmap_process(new_tid);
      return -1;
    } else {
      L4_Fpage_t targetpage = L4_FpageLog2(lower_address, 12);
      lower_address += PAGESIZE;
      // Now map the fpage
      L4_Set_Rights(&targetpage, L4_FullyAccessible);
      L4_PhysDesc_t phys = L4_PhysDesc(frame, L4_DefaultMemory);
      // Map the frame to the root task, but record the page table entry under
      // new_tid since the mappings are updated once ELF loading is done
      if (L4_MapFpage(L4_Myself(), targetpage, phys)) {
        page_table[(frame - new_low) / PAGESIZE].tid = new_tid;
        page_table[(frame - new_low) / PAGESIZE].pinned = 1;
        page_table[(frame - new_low) / PAGESIZE].pageNo = targetpage;
      } else {
        unmap_process(new_tid);
      }
    }
  }
  // Now that the pages are mapped, elf_loadFile() should work with the virtual addresses
  if (elf_loadFile(elfFile, 0) == 1) {
    // The ELF file was loaded successfully.
    // Remap the fpages that were previously mapped to Myself to the new tid.
    for (int i = 0; i < numPTE; i++) {
      if (L4_ThreadNo(new_tid) == L4_ThreadNo(page_table[i].tid)) {
        // Remap this page from the root task to the new tid
        L4_UnmapFpage(L4_Myself(), page_table[i].pageNo);
        L4_PhysDesc_t phys = L4_PhysDesc(new_low + i * PAGESIZE, L4_DefaultMemory);
        if (!L4_MapFpage(new_tid, page_table[i].pageNo, phys)) {
          unmap_process(new_tid);
          return -1;
        }
      }
    }
  } else {
    unmap_process(new_tid);
  }
  // Remove later
  L4_CacheFlushAll();
  return 0;
}
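load_code_segment_virtual() indexes the page table as page_table[(frame - new_low) / PAGESIZE], and pager() in Example #13 goes the other way with new_low + inPage * PAGESIZE. Both rely on the layout created by pager_init() in the next example: one entry per physical frame starting at new_low. The helpers below merely restate that correspondence; the names are hypothetical.

/* Hypothetical helpers restating the index arithmetic used above and in pager():
 * one page-table entry per physical frame starting at new_low. */
static inline int frame_to_index(L4_Word_t frame)
{
    return (int)((frame - new_low) / PAGESIZE);
}

static inline L4_Word_t index_to_frame(int i)
{
    return new_low + (L4_Word_t)i * PAGESIZE;
}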
Example #8
File: pager.c Project: gapry/aos-1
/*
 * Init function to initialise the page table entries
 */
L4_Word_t
pager_init(L4_Word_t low, L4_Word_t high)
{
    // The frames below new_low are used to hold the page table entries themselves
    page_table = (sos_PTE *)low;
    // Use a simple algebraic formula to calculate the optimum size of the page
    // table for the amount of memory available.
    // new_low points to the first usable frame; memory from low to new_low is the page table.
    new_low = ((double)high * sizeof(sos_PTE) + PAGESIZE * (double)low) /
              (double)(PAGESIZE + sizeof(sos_PTE));
    // align it to a page boundary
    new_low = (new_low / PAGESIZE) * PAGESIZE + PAGESIZE;
    numPTE = (high - new_low) / PAGESIZE;

    printf("low: %lx new_low: %lx high: %lx numPTE: %d \n", low, new_low, high, numPTE);
    //printf("value of swap memory %p \n",swap_table);
    // initialize the empty page table.
    for (int i = 0; i < numPTE; i++)
    {
        page_table[i].tid = L4_nilthread;
        page_table[i].referenced = 0;
        page_table[i].dirty = 0;
        page_table[i].being_updated = 0;
        page_table[i].error_in_transfer = 0;
        page_table[i].pinned = 0;
    }

    for (int i = 0; i < MAX_SWAP_ENTRIES; i++) {
        swap_table[i].tid = L4_nilthread;
        swap_table[i].offset = PTE_SENTINEL;
        // Initially all entries are free, so each points to the next one in the table
        swap_table[i].next_free = i + 1;
    }
    // add a guard page against stack overflows and let it map to 0 
    L4_Word_t guardPage = 0x7000000;
    L4_PhysDesc_t phys = L4_PhysDesc(0, L4_DefaultMemory);
    L4_Fpage_t targetFpage = L4_FpageLog2(guardPage, 12);
    L4_Set_Rights(&targetFpage, L4_Readable);
    if ( !L4_MapFpage(L4_Myself(), targetFpage, phys) ) {
        sos_print_error(L4_ErrorCode());
        printf(" Can't map guard page\n");
    }
    return new_low;
}
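The sizing formula above comes from requiring that the table in [low, new_low) hold one sos_PTE for every frame in [new_low, high), i.e. new_low - low = ((high - new_low) / PAGESIZE) * sizeof(sos_PTE); solving for new_low gives the expression used in the code. The standalone check below sketches that algebra; the PAGESIZE value and the sos_PTE size are assumptions chosen only for illustration.

#include <assert.h>
#include <stdio.h>

#define PAGESIZE 4096                      /* assumed page size */
typedef struct { char pad[16]; } sos_PTE;  /* stand-in entry; the real layout differs */

int main(void)
{
    unsigned long low = 0x2000000, high = 0x4000000;   /* example memory bounds */

    /* same expression as pager_init(), including the upward page alignment */
    unsigned long new_low = ((double)high * sizeof(sos_PTE) + PAGESIZE * (double)low) /
                            (double)(PAGESIZE + sizeof(sos_PTE));
    new_low = (new_low / PAGESIZE) * PAGESIZE + PAGESIZE;
    unsigned long numPTE = (high - new_low) / PAGESIZE;

    /* the region [low, new_low) must be big enough for one entry per managed frame */
    assert(new_low - low >= numPTE * sizeof(sos_PTE));
    printf("new_low = %#lx, numPTE = %lu\n", new_low, numPTE);
    return 0;
}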
Example #9
File: tlb.c Project: CSU-GH/okl4_3.0
/* Update the user's address space with the new mapping
 * This does not do the L4 map, but the message is loaded
 * and the reply in the syscall loop handles this.
 */
void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep, pte_t pte)
{
	unsigned long phys;

	if (pte_present(pte))
	{
		unsigned long attrib = L4_DefaultMemory;
		L4_Fpage_t fpage;

		if (unlikely(pte_val(pte) & _PAGE_ATTRIBS)) {
			switch (pte_val(pte) & _PAGE_ATTRIBS) {
			case _PAGE_WRITECOMBINE:
			    attrib = L4_IOCombinedMemory; break;
			case _PAGE_WRITETHROUGH:
			    attrib = L4_WriteThroughMemory; break;
			case _PAGE_NOCACHE:
			default:
			    attrib = L4_UncachedMemory; break;
			}
		}

		phys = pte_pfn(pte) << PAGE_SHIFT;

		fpage = (L4_Fpage_t)(L4_FpageLog2((unsigned long)phys, 
		    PAGE_SHIFT).raw + pte_access(pte));

#ifdef ARM_PID_RELOC
		if (address < 0x2000000)
			address += ((unsigned long)vma->vm_mm->context.pid << 25);
#endif

#if defined(CONFIG_IGUANA)
		l4_map_page(&vma->vm_mm->context, fpage, address, attrib);
#elif defined(CONFIG_CELL)
		okl4_map_page(&vma->vm_mm->context, address, phys, 
			pte_access(pte), attrib);
#endif
		*ptep = pte_mkmapped(pte);
	}
}
Example #10
File: tlb.c Project: CSU-GH/okl4_3.0
void
tlb_modify(struct mm_struct *mm,
		 unsigned long address, pte_t *ptep)
{
	unsigned long phys;
	unsigned long attrib = L4_DefaultMemory;
	L4_Fpage_t fpage;
	pte_t pte = *ptep;

	phys = pte_pfn(pte) << PAGE_SHIFT;

	if (unlikely(pte_val(pte) & _PAGE_ATTRIBS)) {
		switch(pte_val(pte) & _PAGE_ATTRIBS) {
			case _PAGE_WRITECOMBINE:
				attrib = L4_IOCombinedMemory;
				break;
			case _PAGE_WRITETHROUGH:
				attrib = L4_WriteThroughMemory;
				break;
			case _PAGE_NOCACHE:
			default:
				attrib = L4_UncachedMemory;
				break;
		}
	}

	fpage = (L4_Fpage_t) (L4_FpageLog2 (
				(unsigned long) phys, PAGE_SHIFT).raw + pte_access(pte));

#ifdef ARM_PID_RELOC
	if (address < 0x2000000)
		address += ((unsigned long)mm->context.pid << 25);
#endif

#if defined(CONFIG_IGUANA)
	l4_map_page(&mm->context, fpage, address, attrib);
#elif defined(CONFIG_CELL)
	okl4_map_page(&mm->context, address, phys, pte_access(pte), attrib);
#endif
}
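Both update_mmu_cache() and tlb_modify() fold the access rights in by adding pte_access(pte) to the raw fpage word. Assuming the usual L4 fpage encoding, in which L4_FpageLog2() leaves the rights bits at zero in the low end of the word, this is equivalent to the more explicit construction sketched below (a restatement, not a change to the project code):

/* Hypothetical helper restating the construction used above: build an fpage
 * for one physical page and set the access rights explicitly instead of
 * adding them to the raw word. Assumes the standard L4 fpage layout where
 * L4_FpageLog2() leaves the rights bits zero. */
static inline L4_Fpage_t make_pte_fpage(unsigned long phys, unsigned int access)
{
	L4_Fpage_t fpage = L4_FpageLog2(phys, PAGE_SHIFT);
	L4_Set_Rights(&fpage, access);	/* same bits that ".raw + access" sets */
	return fpage;
}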
Example #11
File: tlb.c Project: CSU-GH/okl4_3.0
void remove_tlb_pte(pte_t *ptep, unsigned long address)
{
	if (pte_mapped(*ptep)) {
		struct mm_struct * curr_mm = current->mm;

#ifdef ARM_PID_RELOC
		if (address < 0x2000000)
			address += ((unsigned long)curr_mm->context.pid << 25);
#endif
#if defined(CONFIG_CELL)
		okl4_unmap_page(&curr_mm->context, address);
#elif defined(CONFIG_IGUANA)
		{
			L4_Fpage_t fpage;
			fpage = L4_FpageLog2(address, PAGE_SHIFT);
			eas_unmap(curr_mm->context.eas, fpage);
		}
#else
#error
#endif
		*ptep = pte_mkunmapped(*ptep);
	}
}
Example #12
File: clock.c Project: gz/aos10
/**
 * Starts the clock driver. This will map the memory region where the
 * registers are in uncached mode, start the time stamp timer register
 * and enable the interrupts for our time stamp timer and the general
 * purpose timer 0.
 *
 * @return CLOCK_R_OK if the timer is started successfully
 * 		   CLOCK_R_FAIL if the memory region could not be mapped
 */
int start_timer(void) {
	assert(!driver_initialized);

	// initialize variables
	timestamp_irq_tid =  L4_GlobalId(NSLU2_TIMESTAMP_IRQ, 1);
	timer0_irq_tid = L4_GlobalId(NSLU2_TIMER0_IRQ, 1);
	registers_fpage = L4_FpageLog2(NSLU2_OSTS_PHYS_BASE, 12);

	// Set up uncached memory mapping for registers
	L4_Set_Rights(&registers_fpage, L4_FullyAccessible);
	L4_PhysDesc_t phys = L4_PhysDesc(NSLU2_OSTS_PHYS_BASE, L4_UncachedMemory);

	if(L4_MapFpage(L4_Pager(), registers_fpage, phys)) {

		// enable timer0 interrupts
		TIMER0_ONE_SHOT(0);
		TIMER0_STOP();
		(*(L4_Word_t*)OST_STATUS) |= (0x1 << 0);
		int res = L4_AssociateInterrupt(timer0_irq_tid, root_thread_g);
		assert(res);

		// start timestamp timer
		*((L4_Word_t*)OST_TS) = 0x00000000; // reset counter

		// enable timestamp interrupts
		(*(L4_Word_t*)OST_STATUS) |= (0x1 << 2);
		res = L4_AssociateInterrupt(timestamp_irq_tid, root_thread_g);
		assert(res);

		driver_initialized = TRUE;

		return CLOCK_R_OK;
	}
	else {
		return CLOCK_R_FAIL;
	}
}
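One note on the register accesses in start_timer(): the fpage is mapped with L4_UncachedMemory, so stores reach the device, but the pointer casts are not volatile, which in principle lets the compiler reorder or combine them. A minimal volatile accessor is sketched below; ost_set_bits() is a hypothetical name, not part of the driver.

/* Hypothetical volatile accessor for the memory-mapped timer registers.
 * addr is expected to point into the region mapped by registers_fpage. */
static inline void ost_set_bits(L4_Word_t addr, L4_Word_t bits)
{
	volatile L4_Word_t *reg = (volatile L4_Word_t *)addr;
	*reg |= bits;	/* read-modify-write goes straight to the device */
}

/* e.g. ost_set_bits(OST_STATUS, 0x1 << 2) instead of (*(L4_Word_t*)OST_STATUS) |= (0x1 << 2) */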
Example #13
File: pager.c Project: gapry/aos-1
/*
 * Function invoked by roottask on pagefault
 */
int
pager(L4_ThreadId_t tid, L4_Msg_t *msgP)
{
    send = 1;
    // Get the faulting address
    L4_Word_t addr = L4_MsgWord(msgP, 0);
    L4_Word_t physicalAddress = 0;
    L4_Word_t permission = 0;
    L4_MsgTag_t tag;
    // Alignment
    addr = (addr / PAGESIZE)*PAGESIZE;
    tag = L4_MsgMsgTag(msgP);
    L4_Word_t access_type = L4_Label(tag) & 0x07;

    //printf("pager invoked addr=%lx by %lx %lx for access 0x%lx\n", addr,L4_ThreadNo(tid),tid.raw,access_type);

    // Construct fpage IPC message
    L4_Fpage_t targetFpage = L4_FpageLog2(addr, 12);
    
    if (VIRTUAL(addr))
    {
        if (addr >= BASE_CODE_SEGMENT_ADDRESS) {
            // Code segment
            int inPage = isInPage(tid, targetFpage);
            if (inPage == -1) {
                // It should already be in the page table, so this should not happen
                printf("Panic !!! Cannot load the code segment");
            } else {
                physicalAddress = new_low + inPage * PAGESIZE;
                permission = L4_FullyAccessible;
            }
        } else {
            // Heap and stack
            int inPage = isInPage(tid, targetFpage);
            if (inPage == -1)
            {
                // The page is not resident, so check whether it is in swap
                inPage = isInSwap(tid, targetFpage);
                mapAddress(tid, targetFpage, inPage);
                // No mapping is needed here because mapAddress() maps the address itself
                return send;
            } else {
                physicalAddress = new_low + inPage * PAGESIZE;
                targetFpage = page_table[inPage].pageNo;
                page_table[inPage].referenced = 1;
                if (access_type & L4_Writable) {
                    // Set the dirty bit and grant read-write access
                    page_table[inPage].dirty = 1;
                    permission = L4_ReadWriteOnly;
                } else {
                    permission = L4_Readable;
                }
            }
        }
    } else {
        // Physical addresses are mapped 1:1
        physicalAddress = addr;
        if (addr < new_low) {
            // Below new_low: the page table and other addresses under the low range
            permission = L4_FullyAccessible;
        } else {
            // This is the code segment between new_low and high
            permission = L4_Readable;
        }
    }
    
    L4_Set_Rights(&targetFpage,permission);
    L4_PhysDesc_t phys = L4_PhysDesc(physicalAddress, L4_DefaultMemory);

    if ( !L4_MapFpage(tid, targetFpage, phys) ) {
        sos_print_error(L4_ErrorCode());
        printf(" Can't map page at %lx\n", addr);
    }
    return send;
}