Example #1
void vm_bootstrap() {
	
	swap_initialized = 0;

	paddr_t ram_hi;
	paddr_t ram_low;

	ram_getsize(&ram_low, &ram_hi);	// get the current size of ram

	int ram_pages = (ram_hi-ram_low)/PAGE_SIZE;	// find out how many pages of ram we have

	int cm_bytes = ram_pages*sizeof(struct cm_entry);	// how many bytes our coremap will need to be

	cm_bytes = ROUNDUP(cm_bytes, PAGE_SIZE);	// round up to a whole number of pages

	int cm_pages = cm_bytes/PAGE_SIZE;	// number of pages we need to steal for the coremap

	ram_stealmem(cm_pages);		// the stolen block starts at ram_low, so the returned address is not needed

	paddr_t ram_start = ram_low + (cm_pages*PAGE_SIZE);	// ram will then start at address right after coremap

	ram_pages -= cm_pages;		// don't want the coremap to map to itself

	coremap_init(ram_pages,ram_start,ram_low);	// initialize the coremap

	vm_has_bootstrapped = 1;	// vm has finished initializing
}
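
Example #1 leaves struct cm_entry and coremap_init() to another file. A minimal sketch of what such a pair might look like, assuming an array-style map (the field names paddr, in_use, and chunk_pages are invented for illustration, not taken from the original):

struct cm_entry {
	paddr_t paddr;		/* physical address of the frame */
	int in_use;		/* 0 = free, 1 = allocated */
	int chunk_pages;	/* length of the allocation starting here */
};

static struct cm_entry *coremap;
static int coremap_npages;

void
coremap_init(int ram_pages, paddr_t ram_start, paddr_t cm_base)
{
	/* The map itself lives in the pages stolen at cm_base. */
	coremap = (struct cm_entry *)PADDR_TO_KVADDR(cm_base);
	coremap_npages = ram_pages;

	for (int i = 0; i < ram_pages; i++) {
		coremap[i].paddr = ram_start + i * PAGE_SIZE;
		coremap[i].in_use = 0;
		coremap[i].chunk_pages = 0;
	}
}

Because the coremap's own pages are excluded (the ram_pages -= cm_pages above), entry i always describes the frame at ram_start + i * PAGE_SIZE.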
Example #2
void vm_bootstrap()
{
	paddr_t firstaddr, lastaddr, freeaddr;
	ram_getsize(&firstaddr, &lastaddr);
	int total_page_num = (lastaddr - firstaddr) / PAGE_SIZE;
	/* Reserve the pages that the coremap's node list will itself occupy;
	   the thresholds appear to be chosen empirically for this entry size. */
	if (total_page_num >= 192 && total_page_num < 445)
		total_page_num -= 1;
	else if (total_page_num >= 445 && total_page_num < 950)
		total_page_num -= 3;
	else if (total_page_num >= 950)
		total_page_num -= 6;
	coremap_list = (struct coremap*)PADDR_TO_KVADDR(firstaddr);
	struct coremap *head = coremap_list ;

	vm_initialized = 0;

	int page_num = 0;
	for (page_num = 1; page_num < total_page_num; page_num++)
	{
		freeaddr = firstaddr + page_num * sizeof(struct coremap);
		coremap_list->next = (struct coremap*)PADDR_TO_KVADDR(freeaddr);
		coremap_list->status = 0x6;
		coremap_list->timestamp = 0;
		coremap_list->as = NULL;
		coremap_list->swap = 0;
		coremap_list = coremap_list->next;
	}
	}
	coremap_list->next = NULL;
	coremap_list->status = 0x10;
	coremap_list->timestamp = 0;
	coremap_list = head;

	freeaddr = firstaddr + page_num * sizeof(struct coremap);	// end of the node array
	paddr_t page_start = freeaddr & PAGE_FRAME;	// round down to a page boundary...
	page_start = page_start + PAGE_SIZE;		// ...then step to the first full page past the coremap


	while (coremap_list->next != NULL)
	{
		coremap_list->pa = page_start;
		coremap_list->va = PADDR_TO_KVADDR(page_start);
		page_start = page_start + PAGE_SIZE;
		coremap_list = coremap_list->next;
	}

	coremap_list = head;
	vm_initialized = 1;
	coremap_lock = lock_create("CoremapLock");
}
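
Example #2 threads its entries into a linked list instead of indexing an array, but the struct coremap it relies on is not shown. One plausible node layout, inferred from the fields the code touches; the comments are guesses rather than the original definitions:

struct coremap {
	struct coremap *next;		/* node for the next physical page */
	int status;			/* flag bits; the 0x6 and 0x10 above are codebase-specific */
	unsigned timestamp;		/* page-replacement bookkeeping */
	struct addrspace *as;		/* owning address space, NULL if unowned */
	int swap;			/* swap slot if the page has been evicted */
	paddr_t pa;			/* physical address of the frame */
	vaddr_t va;			/* kernel virtual alias of pa */
};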
Example #3
void
coremap_init()
{
	paddr_t lo,hi;
	ram_getsize(&lo, &hi);
	num_frames = (hi-lo) / PAGE_SIZE; 

	/*
	 * find out the block of memory required for the coremap
	 * rounded to the next page size
	 */
	int coremap_size = num_frames * sizeof(struct coremap_entry);
	int coremap_pages = DIVROUNDUP(coremap_size, PAGE_SIZE);	// pages needed, rounded up
	coremap_ptr = (struct coremap_entry*) PADDR_TO_KVADDR(ram_stealmem(coremap_pages));

	// finalize the memory pool: ram_getsize() now reports what remains after stealing
	ram_getsize(&lo, &hi);
	base = lo;
	num_frames = (hi - lo) / PAGE_SIZE;

	bzero(coremap_ptr, coremap_size);
}
//--------------------------------------------------------------------
void
vm_bootstrap(void)
{
	paddr_t firstpaddress, lastpaddress;
	
	ram_getsize(&firstpaddress, &lastpaddress);	//ram_getsize in ram.c
	
	// page-align the first address
	firstpaddress = ROUNDUP(firstpaddress, PAGE_SIZE);
		
	total_pages = (lastpaddress - firstpaddress) / PAGE_SIZE;

	// initializing coremap entries
	int i = 0;
	for(i = 0;  i < total_pages; i++)
	{
		// fill in the entry; virtual_address is only the kernel alias, no user mapping yet
		my_coremap[i].physical_address =firstpaddress + i*PAGE_SIZE;
		my_coremap[i].virtual_address =PADDR_TO_KVADDR(firstpaddress + i*PAGE_SIZE);
		my_coremap[i].state = FREE;
		my_coremap[i].num_continuous_pages = 0;
	}
	
	done_vm_bootstrap = 1;
	//kprintf("In vm_bootstrap: Done_vm_bootstrap = %d\n",done_vm_bootstrap);
	
	//splx(spl);//not in code
	return;
		
}
void vm_bootstrap() {

	// get the number of free pages in ram
	paddr_t last_paddr = ram_getsize();
	first_paddr = ram_getfirstfree();

	// calculate the total page_count and coremap size
	page_count = (last_paddr - first_paddr) / PAGE_SIZE; // discarding the last addresses, if any
	size_t coremap_size = (sizeof(struct core_map_entry) * page_count);

	// can't call ram_stealmem() after calling ram_getfirstfree,
	// so steal memory for coremap here and update first_paddr
	if (first_paddr + coremap_size > last_paddr) {
		panic("Unable to allocate space for coremap");
	}

	coremap = (struct core_map_entry*) PADDR_TO_KVADDR(first_paddr);
	first_paddr += coremap_size;

	// align the pages, the first page should start with an address which is a multiple of PAGE_SIZE
	if (first_paddr % PAGE_SIZE != 0) {
		first_paddr += PAGE_SIZE - (first_paddr % PAGE_SIZE);
	}

	// update the page count, may reduce due to space allocation for coremap
	page_count = (last_paddr - first_paddr) / PAGE_SIZE;

	coremap_pages_free = page_count;

	// initialize the coremap
	unsigned i;
	for (i = 0; i < page_count; i++) {

		// update the state
		cm_setEntryUseState(COREMAP(i), false);
		cm_setEntryDirtyState(COREMAP(i), false);

		// let the address space identifier be NULL initially
		cm_setEntryAddrspaceIdent(COREMAP(i), NULL);

		// initial chunk start need not be initialized, will be updated when page is allocated
	}

}
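
This example hides the entry layout behind a COREMAP(i) accessor and cm_set* helpers. A sketch of one way those could be defined, assuming a bit-packed entry; everything beyond the names used above is hypothetical:

struct core_map_entry {
	struct addrspace *as;	/* owning address space, NULL initially */
	unsigned chunk_start;	/* index of the first page of this allocation */
	unsigned in_use:1;	/* frame is allocated */
	unsigned dirty:1;	/* frame has been modified */
};

#define COREMAP(i) (&coremap[(i)])

static void
cm_setEntryUseState(struct core_map_entry *e, bool used)
{
	e->in_use = used ? 1 : 0;
}

static void
cm_setEntryDirtyState(struct core_map_entry *e, bool dirty)
{
	e->dirty = dirty ? 1 : 0;
}

static void
cm_setEntryAddrspaceIdent(struct core_map_entry *e, struct addrspace *as)
{
	e->as = as;
}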
Example #6
void
vm_bootstrap(void)
{
	spinlock_init(&coremap_lock);

	paddr_t fpaddr, lpaddr;
	uint32_t coremap_size;

	lpaddr = ram_getsize();	
	fpaddr = ram_getfirstfree();	

	no_of_physical_pages = (lpaddr - fpaddr) / PAGE_SIZE; // Memory stolen by the kernel during boot (before ram_getfirstfree()) is not counted here.
	
	coremap_size = no_of_physical_pages * sizeof(struct coremap_entry);
	coremap_size = ROUNDUP(coremap_size, PAGE_SIZE);
	KASSERT((coremap_size & PAGE_FRAME) == coremap_size);

	coremap = (struct coremap_entry *)PADDR_TO_KVADDR(fpaddr); // Placing the coremap at first available physical address.

	fpaddr = fpaddr + coremap_size; // Move fpaddr past the coremap to accommodate it.

	no_of_coremap_entries = (lpaddr - fpaddr) / PAGE_SIZE; // The pages holding the coremap itself are not tracked in it.

	free_page_start = fpaddr / PAGE_SIZE; // First free page. This page maps to 0th entry of coremap.

	for (int i = 0; i < no_of_coremap_entries; i++) {
		coremap[i].state = FREE;
		coremap[i].last_page = -1;
		coremap[i].as = NULL;
		coremap[i].cpu = -1;
		coremap[i].pinned = 0;
		coremap[i].page = NULL;
		coremap[i].accessed = 0;
	}

	page_buffer_lock = lock_create("page_buffer_lock");
	clock_hand = 0;

}
Example #7
void
vm_bootstrap(void)
{
	int page_num, coremap_size;
	paddr_t coremapaddr, temp;

	ram_getsize(&firstaddr, &lastaddr);

	page_num = (lastaddr-firstaddr) / PAGE_SIZE;

	freeaddr = firstaddr + page_num * sizeof(struct page);
	freeaddr = ROUNDUP(freeaddr, PAGE_SIZE);	// page-align the end of the coremap

	coremap = (struct page*)PADDR_TO_KVADDR(firstaddr);
	coremapaddr = freeaddr - firstaddr;
	coremap_size = ROUNDUP(coremapaddr, PAGE_SIZE)/PAGE_SIZE;

	pages_in_coremap=page_num;

	for (int i=0;i<page_num;i++){

		if(i<coremap_size){

			coremap[i].state = 1;

		}else{
			coremap[i].state = 0;
			coremap[i].contained=false;
		}

		temp = firstaddr + PAGE_SIZE * i;	// entry i describes the i-th frame starting at firstaddr, so the range stays within lastaddr
		coremap[i].pa = temp;
		coremap[i].va = PADDR_TO_KVADDR(temp);

	}

	beforeVM = false;
}
Example #8
/*
 * Logic: find the first free physical address from which we can start to initialize our coremap.
 * ram_getsize() returns the total RAM size, and ram_getfirstfree() returns the first free physical address.
 * We make sure the memory the coremap itself occupies is never handed out to any process.
 * The "freeAddr" variable gives the physical address from which the coremap starts to manage memory.
 * Memory managed by the coremap = [freeAddr, lastpaddr].
 */
void
vm_bootstrap(void) {
	int i;
	paddr_t freeAddr, temp;
	int coremap_size;

	// Get the total size of the RAM
	lastpaddr = ram_getsize();
	firstpaddr = ram_getfirstfree();   // Get the first free address on RAM

	// Number of pages that can be allocated
	coremap_page_num = (lastpaddr - firstpaddr) / PAGE_SIZE;

	// Calculate the first free address once the coremap has been allocated
	freeAddr = firstpaddr + coremap_page_num * sizeof(struct coremap_entry);
	freeAddr = ROUNDUP(freeAddr, PAGE_SIZE);

	// Place the coremap at the first free physical address
	coremap = (struct coremap_entry *)PADDR_TO_KVADDR(firstpaddr);
	coremap_size = ROUNDUP((freeAddr - firstpaddr), PAGE_SIZE) / PAGE_SIZE;	// in pages

	// Initialise each page's status in the coremap
	for (i = 0; i < coremap_page_num; i++) {
		if (i < coremap_size) {
			coremap[i].state = DIRTY;	// pages occupied by the coremap itself
		} else {
			coremap[i].state = CLEAN;
		}
		temp = firstpaddr + (PAGE_SIZE * i);
		coremap[i].phyAddr = temp;
		coremap[i].allocPageCount = -1;
		coremap[i].va = PADDR_TO_KVADDR(temp);
	}
	// No coremap memory is in use yet
	coremap_used_size = 0;
}
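
To make the sizing arithmetic these examples share concrete, here it is as standalone C with illustrative numbers (the 4 MB of managed RAM and the 20-byte entry are assumptions, not measurements):

#include <stdio.h>

int main(void) {
	unsigned long ram_bytes = 4UL * 1024 * 1024;	/* assumed: 4 MB of managed RAM */
	unsigned long page_size = 4096;
	unsigned long entry_size = 20;			/* assumed sizeof(struct coremap_entry) */

	unsigned long npages = ram_bytes / page_size;			/* 1024 frames */
	unsigned long cm_bytes = npages * entry_size;			/* 20480 bytes */
	unsigned long cm_pages = (cm_bytes + page_size - 1) / page_size;	/* ceiling division: 5 pages */

	printf("%lu frames -> coremap of %lu bytes = %lu page(s)\n",
	       npages, cm_bytes, cm_pages);
	return 0;
}

The ROUNDUP used by Examples #6 through #9 performs the same ceiling division and then multiplies back by PAGE_SIZE.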
Example #9
void
coremap_bootstrap(void)
{
	uint32_t i;
	paddr_t first, last;
	uint32_t npages, coremapsize;

	ram_getsize(&first, &last);

	/* The way ram.c works, these should be page-aligned */
	KASSERT((first & PAGE_FRAME) == first);
	KASSERT((last & PAGE_FRAME) == last);

	npages = (last - first) / PAGE_SIZE;

	DEBUG(DB_VM, "coremap: first: 0x%x, last 0x%x: %u pages\n",
	      first, last, npages);

	/*
	 * The coremap contains one coremap_entry per page.  Because
	 * of the allocation constraints here, it must be rounded up
	 * to a whole number of pages.
	 * 
	 * Note that while we don't need space for coremap entries for
	 * the coremap pages, and could save a few slots that way, the
	 * operating regime of OS/161 is such that we're unlikely to
	 * need more than one page for the coremap, and certainly not
	 * more than two. So for simplicity (and robustness) we'll
	 * avoid the relaxation computations necessary to optimize the
	 * coremap size.
	 */
	coremapsize = npages * sizeof(struct coremap_entry);
	coremapsize = ROUNDUP(coremapsize, PAGE_SIZE);
	KASSERT((coremapsize & PAGE_FRAME) == coremapsize);

	/*
	 * Steal pages for the coremap.
	 */
	coremap = (struct coremap_entry *) PADDR_TO_KVADDR(first);
	first += coremapsize;

	if (first >= last) {
		/* This cannot happen unless coremap_entry gets really huge */
		panic("vm: coremap took up all of physical memory?\n");
	}

	/*
	 * Now, set things up to reflect the range of memory we're
	 * managing. Note that we skip the pages the coremap is using.
	 */
	base_coremap_page = first / PAGE_SIZE;
	num_coremap_entries = (last / PAGE_SIZE) - base_coremap_page;
	num_coremap_kernel = 0;
	num_coremap_user = 0;
	num_coremap_free = num_coremap_entries;

	KASSERT(num_coremap_entries + (coremapsize/PAGE_SIZE) == npages);

	/*
	 * Initialize the coremap entries.
	 */
	for (i=0; i < num_coremap_entries; i++) {
		coremap[i].cm_kernel = 0;
		coremap[i].cm_notlast = 0;
		coremap[i].cm_allocated = 0;
		coremap[i].cm_pinned = 0;
		coremap[i].cm_tlbix = -1;
		coremap[i].cm_cpunum = 0;
		coremap[i].cm_lpage = NULL;
	}

	coremap_pinchan = wchan_create("vmpin");
	coremap_shootchan = wchan_create("tlbshoot");
	if (coremap_pinchan == NULL || coremap_shootchan == NULL) {
		panic("Failed allocating coremap wchans\n");
	}
}	
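
Example #9 initializes seven cm_ fields per entry but never shows the structure. A compact layout consistent with that loop might pack the flags into bitfields; the widths below are guesses, not the original header:

struct coremap_entry {
	struct lpage *cm_lpage;		/* logical page mapped here, or NULL */
	int cm_tlbix:7;			/* TLB slot caching the mapping, -1 if none */
	unsigned cm_cpunum:5;		/* CPU whose TLB cm_tlbix refers to */
	unsigned cm_kernel:1;		/* owned by the kernel */
	unsigned cm_notlast:1;		/* not the last page of its allocation */
	unsigned cm_allocated:1;	/* frame is in use */
	unsigned cm_pinned:1;		/* pinned, e.g. for I/O in progress */
};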
Example #10
void vm_bootstrap(){
    
    core_lock = lock_create("coremap-lock");
    tlb.tlb_lock = lock_create("tlb-lock");
    tlb.next_victim = 0;
    
    struct coremap *entry; //at the end should point to the same memory as pagetable in pt.h
    coremap_size =0;
    //allocating memory
    
    paddr_t firstaddr,lastaddr,freeaddr;
    ram_getsize(&firstaddr,&lastaddr);
    
    coremap_size = (lastaddr - firstaddr)/PAGE_SIZE; // frames the coremap manages; integer division truncates
    
    coremap = (struct coremap *)PADDR_TO_KVADDR(firstaddr); //sets the page array
    
    if(coremap== NULL){
        panic("Can't create page table, no mem\n");
    }
    
    freeaddr = firstaddr + coremap_size * sizeof(struct coremap);
    //kprintf("freeaddr\n");
    if(lastaddr-freeaddr <= 0){
        
        panic("OUT OF MEMORYn");
    }
    // freee addr to lastaddr is the systems main memory
    //the actual init
    
    
    entry = coremap;
    int i;
    for(i =0;i<coremap_size;i++){
        
        if(i<((freeaddr-firstaddr)/PAGE_SIZE)){
            
            coremap->used = 1;
            coremap->flag = 0x1;//fixed
            
        }
        else{
            coremap->used =0;
            coremap->flag =0;
        }
        coremap->pid = -1;
        coremap->paddr = firstaddr+(i*PAGE_SIZE);
        assert((coremap->paddr & PAGE_FRAME) == coremap->paddr); //checks if the paddr is in the frame
        coremap->len = -1;
        
        coremap++;
        
        
    }
    
    coremap = entry;
    
    pt_initialize = 1;
    
    //kprintf("VM BOOTSTRAP COMPLETE: page size: %d, coremap size:%d\n",PAGE_SIZE,coremap_size);
    
}
Example #11
/* Initialization function */
void vm_bootstrap() 
{
	
	/* Get the firstaddr and lastaddr of physical memory.
	It will most definitely be less than actual memory, because
	before the VM bootstraps we have to use getppages (which in turn
	calls stealmem).
	*/
	paddr_t firstaddr, lastaddr;
	ram_getsize(&firstaddr,&lastaddr);
	/* The number of pages (core map entries) will be the size of 
	physical memory (lastaddr *should* not change) divided by PAGE_SIZE
	*/
	page_count = lastaddr / PAGE_SIZE;
	/* Allocate space for the coremap *without* using kmalloc. This 
	solves the chicken-and-egg problem. Simply set the core_map pointer
	to point to the first available address of memory as of this point;
	then increment freeaddr by the size of the coremap (we're effectively 
	replicating stealmem here, but we're not grabbing a whole page) */
	core_map = (struct page*)PADDR_TO_KVADDR(firstaddr);
	paddr_t freeaddr = firstaddr + page_count * sizeof(struct page);
	// kprintf("FirstAddr: %p\n", (void*) firstaddr);
	// kprintf("Freeaddr: %p\n", (void*) freeaddr);
	// kprintf("Size of Core Map: %d\n", page_count * sizeof(struct page));
	// kprintf("Base Addr of core_map: %p\n", &core_map);
	// kprintf("Base Addr of core_map[0]: %p\n", &core_map[0]);
	// kprintf("Base Addr of core_map[127]: %p\n", &core_map[127]);
	
	/* Calculate the number of fixed pages. The number of fixed pages
	will be everything from 0x0 to freeaddr (essentially all the stolen
	memory will be FIXED). This might be a significant amount; up until VM bootstrapping,
	kmalloc will steal memory. This is the only way to solve the chicken-and-egg
	problem of the VM needing kmalloc and kmalloc needing the VM.*/
	size_t num_of_fixed_pages = (freeaddr / PAGE_SIZE);
	if(freeaddr % PAGE_SIZE != 0)
	{
		/*If the stolen memory crosses a page boundary (it probably almost always will),
		mark that partially stolen page as FIXED*/
		num_of_fixed_pages++;
	}
	//Now, mark every (stolen) page from 0 to freeaddr as FIXED...
	for(size_t i = 0; i<num_of_fixed_pages; i++)
	{
		// kprintf("Address of Core Map %d: %p ",i,&core_map[i]);
		// kprintf("PA of Core Map %d:%p\n", i, (void*) (PAGE_SIZE * i));
		core_map[i].pa = i * PAGE_SIZE;
		// core_map[i].va = PADDR_TO_KVADDR(i * PAGE_SIZE);
		core_map[i].state = FIXED;
		core_map[i].as = 0x0;
	}
	/* Mark every available page (from freeaddr + offset into next page,
	if applicable) to lastaddr as FREE*/
	for(size_t i = num_of_fixed_pages; i<page_count; i++)
	{
		core_map[i].pa = i * PAGE_SIZE;	// give free entries their physical address too
		core_map[i].state = FREE;
		core_map[i].as = 0x0;
		core_map[i].va = 0x0;
		free_pages++;
	}
	/* Set VM initialization flag. alloc_kpages and free_kpages
	should behave accordingly now*/
	vm_initialized = true;
	/* Now that the VM is initialized, create a lock */
	spinlock_cleanup(&stealmem_lock);
	spinlock_init(&stealmem_lock);
	core_map_lock = lock_create("coremap_lock");
}
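
All of these bootstraps exist so that a page allocator can later scan the map. As a usage sketch against Example #11's structures, a single-page allocator might look like this; core_map_alloc_one is a hypothetical name, not part of the original:

static paddr_t
core_map_alloc_one(void)
{
	paddr_t pa = 0;		/* frame 0 is always FIXED, so 0 can signal failure */

	lock_acquire(core_map_lock);
	for (size_t i = 0; i < page_count; i++) {
		if (core_map[i].state == FREE) {
			core_map[i].state = FIXED;	/* claim the frame */
			free_pages--;
			pa = core_map[i].pa;
			break;
		}
	}
	lock_release(core_map_lock);
	return pa;
}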