error_t vmmngr_initialize(uint32 kernel_pages)
{
	pdirectory* pdir = (pdirectory*)pmmngr_alloc_block();
	if (pdir == 0)
		return ERROR_OCCUR;

	kernel_directory = pdir;
	memset(pdir, 0, sizeof(pdirectory));

	physical_addr phys = 0;		// the page directory structure is assumed to be allocated low in physical memory (< 1MB),
								// so identity map the first 4MB to be sure it stays addressable once paging is enabled

	for (uint32 i = 0; i < 1024; i++, phys += 4096)
		if (vmmngr_map_page(pdir, phys, phys, DEFAULT_FLAGS) != ERROR_OK)
			return ERROR_OCCUR;

	phys = 0x100000;
	virtual_addr virt = 0xC0000000;

	for (uint32 i = 0; i < kernel_pages; i++, virt += 4096, phys += 4096)
		if (vmmngr_map_page(pdir, phys, virt, DEFAULT_FLAGS) != ERROR_OK)
			return ERROR_OCCUR;

	if (vmmngr_switch_directory(pdir, (physical_addr)&pdir->entries) != ERROR_OK)
		return ERROR_OCCUR;

	register_interrupt_handler(14, page_fault);
	register_bottom_interrupt_handler(14, page_fault_bottom);

	return ERROR_OK;
}
// TODO: Consider using the invlpg (invalidate page) instruction. When remapping an already-present page this is needed to flush the stale TLB entry.
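// A minimal sketch of such a TLB-invalidation helper (an assumed
// implementation, not part of the original codebase; x86 with GCC inline
// assembly):
static inline void vmmngr_flush_tlb_entry(virtual_addr addr)
{
	__asm__ volatile("invlpg (%0)" : : "r"(addr) : "memory");
}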
error_t vmmngr_alloc_page_f(virtual_addr base, uint32 flags)
{
	// TODO: cater for memory-mapped IO, where (in the simplest case) an identity map must be done.
	//TODO: fix this function
	physical_addr addr = base;

	if (vmmngr_is_page_present(base))
		addr = vmmngr_get_phys_addr(base);
	else
	{
		if (base < 0xF0000000)		// addresses at or above 0xF0000000 are reserved for memory-mapped IO
		{
			addr = (physical_addr)pmmngr_alloc_block();
			if (addr == 0)
				return ERROR_OCCUR;
		}

		if (!addr)
		{
			set_last_error(EINVAL, VMEM_BAD_ARGUMENT, EO_VMMNGR);
			return ERROR_OCCUR;
		}
	}

	if (vmmngr_map_page(vmmngr_get_directory(), addr, base, flags) != ERROR_OK)
		return ERROR_OCCUR;

	return ERROR_OK;
}
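// Example use (the address and flags are illustrative only): demand-map one page.
//
//   if (vmmngr_alloc_page_f(0xD0000000, DEFAULT_FLAGS) != ERROR_OK)
//       serial_printf("demand allocation failed\n");
//
// Note that for an address at or above 0xF0000000 the same call identity maps
// the page (MMIO) instead of allocating a new physical block.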
void page_fault_alloc_page(uint32 area_flags, virtual_addr address)
{
	uint32 flags = page_fault_calculate_present_flags(area_flags);

	if (CHK_BIT(area_flags, MMAP_IDENTITY_MAP))
		vmmngr_map_page(vmmngr_get_directory(), address, address, flags);
	else
		vmmngr_alloc_page_f(address, flags);
}
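// For orientation, a rough sketch of what page_fault_calculate_present_flags
// (used above) might do, assuming the MMAP_* protection bits translate onto
// the architectural x86 PTE bits below; the sketch name and constants are
// placeholders, the real implementation is codebase-specific:
#define PTE_PRESENT		0x1		// x86 PTE bit 0: page is present
#define PTE_WRITABLE	0x2		// x86 PTE bit 1: page is writable

static uint32 page_fault_calculate_present_flags_sketch(uint32 area_flags)
{
	uint32 flags = PTE_PRESENT;			// the faulting page becomes resident
	if (CHK_BIT(area_flags, MMAP_WRITE))
		flags |= PTE_WRITABLE;			// writable only if the area permits writes
	return flags;
}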
Example #4
void start(uint32_t* modulep, void* physbase, void* physfree)
{
	int i;
	struct smap_t {
		uint64_t base, length;
		uint32_t type;
	} __attribute__((packed)) *smap;

	// initially mark all pages as used
	mm_set(freePage, MEM_SIZE);
 
	while (modulep[0] != 0x9001) modulep += modulep[1] + 2;
	for (smap = (struct smap_t*)(modulep + 2); smap < (struct smap_t*)((char*)modulep + modulep[1] + 2*4); ++smap) {
		if (smap->type == 1 /* memory */ && smap->length != 0) {
			// mark the memory available in the free list
			pmmngr_init_region(smap->base, smap->length);
		}
	}

	/* mark the kernel memory as used: the kernel starts at 0x200000 and ends at 0x220000, */
	/* so we mark memory up to 0x400000 as used (0x220000 would also suffice) */
	pmmngr_uninit_region((uint64_t)0, (uint64_t)0x400000);	// mark all kernel memory as used

	// initialize the page table
	set_page_table(&Page_Table);

	/* map the kernel memory into the page table, one 4KB page at a time */
	uint64_t *addr;
	for (addr = (uint64_t*)physbase; addr <= (uint64_t*)physfree; addr = (uint64_t*)((uint64_t)addr + 4096))
	{
		vmmngr_map_page(addr, (uint64_t*)((uint64_t)addr + KERNEL_VIRTUAL_BASE), &Page_Table);
	}
   
 
	/* map the video memory; 80*25 cells * 2 bytes = 4000 bytes, so it fits in a single 4KB page */
	vmmngr_map_page((uint64_t*)0xb8000, (uint64_t*)VIDEO_VIRTUAL_MEMORY, &Page_Table);
	// printf("Video memory has been mapped into the virtual address space\n");

	/* pass the pml4e value to the cr3 register */
	kernel_pml4e = (uint64_t*)ALIGN_DOWN((uint64_t)Page_Table.root);
	write_cr3(&Page_Table);
	init_video();

/********************************* KERNEL CREATION ********************************/
	struct task_struct *pcb0 = (struct task_struct*)kmalloc(sizeof(struct task_struct));	// kernel
	pcb0->pml4e = (uint64_t)kernel_pml4e;	// kernel's page table
	pcb0->pid = 0;				// the kernel init process gets pid 0
	pcb0->iswait = 0;			// not waiting on anyone
	pcb0->stack = (uint64_t*)stack;		// the stack was already created by the prof :)
	process_count = 0;			// no other process is in the ready queue yet; create one and update this
	sleeping_process_count = 0;		// nobody is sleeping at this point

	// initialize the zombie queue
	for (i = 0; i < 100; i++) {
		zombie_queue[i] = 0;		// no process is a zombie
	}
	foreground_process = 3;			// the process with this pid will be the foreground process

	// put the kernel task in the ready queue
	ready_queue[0] = (uint64_t)pcb0;	// kernel

/*
char fname[] = "bin/hello";
malloc_pcb(fname);

char fname1[] = "bin/world";
malloc_pcb(fname1);

char fname2[] = "bin/proc3";
malloc_pcb(fname2);

char fname3[] = "bin/proc4";
malloc_pcb(fname3);    
*/
/*************************************** change fname here as needed ******************/
	char fname4[] = "bin/printf";
	malloc_pcb(fname4);

	idt_install();
	__asm__("sti");				// enable interrupts

	//init_context_switch();
	asm volatile("mov $0x2b,%ax");		// load the TSS selector (0x2b)...
	asm volatile("ltr %ax");		// ...into the task register
	tarfs_init();
	while (1);				// idle loop
}
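// A plausible sketch of the CR3 reload done by write_cr3 above (assumed
// implementation with a hypothetical name; the real function takes the
// Page_Table struct): loading CR3 points the MMU at the new PML4 and flushes
// the non-global TLB entries.
static inline void load_cr3_sketch(uint64_t pml4e_phys)
{
	__asm__ volatile("mov %0, %%cr3" : : "r"(pml4e_phys) : "memory");
}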
void page_fault_bottom(thread_exception te)
{
	thread_exception_print(&te);
	uint32& addr = te.data[0];
	uint32& code = te.data[1];

	serial_printf("PAGE_FALUT: PROC: %u ADDRESS: %h, THREAD: %u, CODE: %h\n", process_get_current()->id, addr, thread_get_current()->id, code);

	if (process_get_current()->contract_spinlock == 1)
		PANIC("PAge fault spinlock is already reserved\n");

	spinlock_acquire(&process_get_current()->contract_spinlock);
	vm_area* p_area = vm_contract_find_area(&thread_get_current()->parent->memory_contract, addr);

	if (p_area == 0)
	{
		serial_printf("could not find address %h in memory contract", addr);
		PANIC("");		// terminate thread and process with SIGSEGV
	}

	vm_area area = *p_area;
	spinlock_release(&process_get_current()->contract_spinlock);

	// tried to access an inaccessible page
	if ((area.flags & MMAP_PROTECTION) == MMAP_NO_ACCESS)
	{
		serial_printf("address: %h is inaccessible\n", addr);
		PANIC("");
	}

	// tried to write to read-only or inaccessible page
	if (page_fault_error_is_write(code) && (area.flags & MMAP_WRITE) != MMAP_WRITE)
	{
		serial_printf("cannot write to address: %h\n", addr);
		PANIC("");
	}

	// tried to read a write-only or inaccessible page (case unclear, left disabled)
	/*if (!page_fault_error_is_write(code) && CHK_BIT(area.flags, MMAP_READ))
	{
		serial_printf("cannot read from address: %h", addr);
		PANIC("");
	}*/

	// if the page is present then a violation happened (we do not implement swap out/shared anonymous yet)
	if (page_fault_error_is_page_present(code) == true)
	{
		serial_printf("memory violation at address: %h with code: %h\n", addr, code);
		serial_printf("area flags: %h\n", area.flags);
		PANIC("");
	}

	// here we found out that the page is not present, so we need to allocate it properly
	if (CHK_BIT(area.flags, MMAP_PRIVATE))
	{
		if (CHK_BIT(area.flags, MMAP_ALLOC_IMMEDIATE))
		{
			// loop through all addresses and map them
			for (virtual_addr address = area.start_addr; address < area.end_addr; address += 4096)
				//if (CHK_BIT(area.flags, MMAP_ANONYMOUS))	ALLOC_IMMEDIATE works only for anonymous (imposed in mmap)
				page_fault_alloc_page(area.flags, address);
		}
		else
		{
			if (CHK_BIT(area.flags, MMAP_ANONYMOUS))
				page_fault_alloc_page(area.flags, addr & (~0xFFF));
			else
			{
				uint32 flags = page_fault_calculate_present_flags(area.flags);
				vmmngr_alloc_page_f(addr & (~0xFFF), flags);

				uint32 read_start = area.offset + ((addr - area.start_addr) / PAGE_SIZE) * PAGE_SIZE;		// file read start
				uint32 read_size = PAGE_SIZE;		// we read one page at a time (not the whole area as this may not be necessary)

				//if (read_start < area.start_addr + PAGE_SIZE)	// we are reading the first page so subtract offset from read_size
				//	read_size -= area.offset;

				serial_printf("gfd: %u, reading at mem: %h, phys: %h file: %h, size: %u\n", area.fd, addr & (~0xfff), vmmngr_get_phys_addr(addr & (~0xfff)),
					read_start, read_size);

				gfe* entry = gft_get(area.fd);
				if (entry == 0)
				{
					serial_printf("area.fd = %u", area.fd);
					PANIC("page fault gfd entry = 0");
				}

				// read one page of the file (starting at read_start) into the 4KB-aligned fault address
				if (read_file_global(area.fd, read_start, read_size, addr & (~0xFFF), VFS_CAP_READ | VFS_CAP_CACHE) != read_size)
				{
					serial_printf("read fd: %u\n", area.fd);
					PANIC("mmap anonymous file read less bytes than expected");
				}
			}
		}
	}
	else		// MMAP_SHARED
	{
		if (CHK_BIT(area.flags, MMAP_ANONYMOUS))
			PANIC("A shared area cannot be marked as anonymous yet.");
		else
		{
			// for a shared file mapping the destination address is ignored, as data is read only into the page cache
			uint32 read_start = area.offset + ((addr & (~0xfff)) - area.start_addr);
			gfe* entry = gft_get(area.fd);

			if (read_file_global(area.fd, read_start, PAGE_SIZE, -1, VFS_CAP_READ | VFS_CAP_CACHE) != PAGE_SIZE)
				PANIC("mmap shared file failed");

			virtual_addr used_cache = page_cache_get_buffer(area.fd, read_start / PAGE_SIZE);
			//serial_printf("m%h\n", used_cache);

			uint32 flags = page_fault_calculate_present_flags(area.flags);
			vmmngr_map_page(vmmngr_get_directory(), vmmngr_get_phys_addr(used_cache), addr & (~0xfff), flags/*DEFAULT_FLAGS*/);
			//serial_printf("shared mapping fd: %u, cache: %h, phys cache: %h, read: %u, addr: %h\n", area.fd, used_cache, used_cache, read_start, addr);
		}
	}
}
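// For reference, the helpers used above decode the x86 page fault error code,
// whose low bits are architecturally defined (Intel SDM); a minimal sketch,
// assuming uint32 error codes (names suffixed to mark them as assumed):
static inline bool page_fault_error_is_page_present_sketch(uint32 code)
{
	return (code & 0x1) != 0;	// bit 0: fault on a present page (protection violation)
}

static inline bool page_fault_error_is_write_sketch(uint32 code)
{
	return (code & 0x2) != 0;	// bit 1: the faulting access was a write
}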