/* * Initialize the coremap and bitmap to describe the page table. We steal memory * from RAM to store these structures. Initialize each of the entry of the page * table with the page address with respect to the base address of the coremap * stored in RAM. */ void init_coremap() { int i; u_int32_t ram_max = mips_ramsize(); u_int32_t ram_user_base = ram_stealmem(0); //size of the coremap in PAGE_SIZE unit coremap_size = (ram_max-ram_user_base-PAGE_SIZE)/PAGE_SIZE; //bitmap to keep track of the availability state of the coremap memory core_memmap = bitmap_create(coremap_size); //allocate the coremap into memory by stealing some memory from RAM coremap = (struct _PTE*)kmalloc(coremap_size * sizeof(struct _PTE)); //base address for the coremap in the ram, it starts from where the swaparea //ended coremap_base = ram_stealmem(0); //set each of the page address for(i = 0; i < coremap_size; i++) { coremap[i].paddr = (coremap_base + (i * PAGE_SIZE)); coremap[i].status = PAGE_FREE; coremap[i].pid = 0; } }
void coremap_init() { int v_size = ( mainbus_ramsize()-(ram_stealmem(0) + PAGE_SIZE) )/PAGE_SIZE; mem_map = bitmap_create(v_size); coremap = (struct page_table_entry*)kmalloc(v_size * sizeof(struct page_table_entry)); coremap_start = ram_stealmem(0); for(int i = 0; i < v_size; i++) { coremap[i].paddr = (coremap_start + (i * PAGE_SIZE)); } coremap_size = v_size; kprintf("COREMAP INIT: %d %d\n",coremap_start,coremap_size); }
void vm_bootstrap() { swap_initialized = 0; paddr_t ram_hi; paddr_t ram_low; ram_getsize(&ram_low, &ram_hi); // get the current size of ram int ram_pages = (ram_hi-ram_low)/PAGE_SIZE; // find out how many pages of ram we have int cm_bytes = ram_pages*sizeof(struct cm_entry); // how many bytes our coremap will need to be while (cm_bytes % PAGE_SIZE != 0) // page align the value cm_bytes++; int cm_pages = cm_bytes/PAGE_SIZE; // number of pages we need to steal for the coremap ram_stealmem(cm_pages); paddr_t ram_start = ram_low + (cm_pages*PAGE_SIZE); // ram will then start at address right after coremap ram_pages -= cm_pages; // don't want the coremap to map to itself coremap_init(ram_pages,ram_start,ram_low); // initialize the coremap vm_has_bootstrapped = 1; // vm has finished initializing }
paddr_t getppages(unsigned long npages) { int spl; paddr_t addr; spl = splhigh(); // coremap not setup if (!coremap_ready){ addr = ram_stealmem(npages); splx(spl); return addr; } // coremap setup already assert(coremap_ready == 1); if (npages == 1){ addr = coremap_alloc_one_page(NULL); splx(spl); return addr; } else { addr = coremap_alloc_multi_page(npages); splx(spl); return addr; } panic("getppages: unexpected return\n"); }
paddr_t getppages(unsigned long npages) { paddr_t addr; if(beforeVM){ spinlock_acquire(&stealmem_lock); addr = ram_stealmem(npages); }else{ spinlock_acquire(&stealmem_lock); unsigned long page_start = 0; unsigned long block_count = npages; unsigned long i; // finding first free n pages for (i=0;i<pages_in_coremap;i++){ if(coremap[i].state==0){ block_count--; if(block_count == 0){ break; } } else{ block_count=npages; page_start=i+1; } } if(i==pages_in_coremap){ spinlock_release(&stealmem_lock); return 0; } else{ for(i=0;i<npages;i++){ coremap[i+page_start].state=1; coremap[i+page_start].contained=true; coremap[i+page_start].partofpage=page_start; } addr = coremap[page_start].pa; } } spinlock_release(&stealmem_lock); return addr; }
/*
 * getppages - thin wrapper over ram_stealmem().
 *
 * NOTE(review): the coremap lock calls are commented out in the
 * original; they are left disabled here to preserve behavior.
 */
static paddr_t getppages(unsigned long npages)
{
	/* lock_acquire(coremap_lock); */
	paddr_t pa = ram_stealmem(npages);
	/* lock_release(coremap_lock); */
	return pa;
}
// get kernel pages before VM bootstrapps paddr_t getppages(unsigned long npages) { paddr_t addr; spinlock_acquire(&stealmem_lock); addr = ram_stealmem(npages); spinlock_release(&stealmem_lock); return addr; }
/*
 * getppages - steal npages of physical memory from RAM.
 *
 * Interrupts are masked around the steal so it is atomic with
 * respect to other allocations on this CPU.
 */
static paddr_t getppages(unsigned long npages)
{
	paddr_t pa;
	int spl;

	spl = splhigh();
	pa = ram_stealmem(npages);
	splx(spl);

	return pa;
}
void coremap_init() { paddr_t lo,hi; ram_getsize(&lo, &hi); num_frames = (hi-lo) / PAGE_SIZE; /* * find out the block of memory required for the coremap * rounded to the next page size */ int coremap_size = num_frames * sizeof(struct coremap_entry); int coremap_end_size = coremap_size / PAGE_SIZE + 1; coremap_ptr = (struct coremap_entry*) PADDR_TO_KVADDR(ram_stealmem(coremap_end_size)); // finalize the memory pool ram_getsize(&lo, &hi); base = lo; coremap_size = num_frames * sizeof(struct coremap_entry); num_frames = (hi - lo) / PAGE_SIZE; bzero(coremap_ptr, coremap_size); }
/*
 * getppages - does most of the work for alloc_kpages.
 *
 * Before the page table is initialized we fall back to
 * ram_stealmem().  Afterwards we take core_lock and first-fit scan
 * the coremap for npages consecutive free frames; the head entry of
 * the run is stamped with the allocation length, a timestamp, and
 * the owning PID, and every frame in the run is marked used.
 *
 * Returns the physical address of the first frame, or 0 if no
 * contiguous block is available (a swap-out policy would go there).
 *
 * Bugs fixed:
 *  - count_pages was incremented and compared without ever being
 *    initialized (undefined behavior; the scan could spuriously
 *    "succeed" with garbage state);
 *  - a zero-page request could index one past the end of the
 *    coremap; such requests now fail immediately.
 */
static paddr_t getppages(unsigned long npages)
{
#if OPT_A3
	/* alloc_kpages can be called before vm_bootstrap; steal raw pages */
	if (pt_initialize != 1) {
		int spl;
		paddr_t addr;
		spl = splhigh();
		addr = ram_stealmem(npages);
		splx(spl);
		return addr;
	}

	if (npages == 0) {
		return 0;
	}

	lock_acquire(core_lock);

	int i, j = 0;
	time_t secs;
	u_int32_t nano;
	unsigned long count_pages = 0;	/* length of the current free run */

	for (i = 0; i < coremap_size; i++) {
		j = i - npages + 1;	/* head of the candidate run */
		if (!(coremap[i].used)) {
			count_pages++;
			if (count_pages == npages) {
				/* record metadata on the head entry */
				gettime(&secs, &nano);
				coremap[j].len = npages;
				coremap[j].secs = secs;
				coremap[j].nano = nano;
				if (curthread != NULL &&
				    curthread->t_process != NULL) {
					coremap[j].pid =
						curthread->t_process->PID;
				}
				break;
			}
		} else {
			count_pages = 0;	/* run broken */
		}
	}

	if (count_pages == npages) {
		/* mark the whole run as used */
		for (j = i - npages + 1; j < (i + 1); j++) {
			coremap[j].used = 1;
		}
		/* the head address must be page aligned */
		assert((coremap[i - npages + 1].paddr & PAGE_FRAME) ==
		       coremap[i - npages + 1].paddr);
		lock_release(core_lock);
		return coremap[i - npages + 1].paddr;
	}

	/*
	 * No contiguous block found; this is where a page-replacement
	 * (swap) policy would run.  For now, report failure.
	 */
	lock_release(core_lock);
	return 0;
#else
	int spl;
	paddr_t addr;
	spl = splhigh();
	addr = ram_stealmem(npages);
	splx(spl);
	return addr;
#endif
}
paddr_t getppages(unsigned long npages) //called from alloc_kpages { /* OLD CODE - this works int spl; paddr_t addr; spl = splhigh(); addr = ram_stealmem(npages); //in ram.c - just moves firstpaddr by that many pages splx(spl); return addr; */ //NEW CODE ///* int spl; spl = splhigh(); paddr_t addr; if(done_vm_bootstrap == 0) { //Cant print anytihg here, cause everything else not set up when enters here addr = ram_stealmem(npages); //in ram.c - just moves firstpaddr by that many pages splx(spl); return addr; } else //(done_vm_bootstrap ==1) { //kprintf("in getppages where done_vm_bootstrap = %d\n",done_vm_bootstrap); //kprintf("npages in getppages= %d\n",npages); int consectuve_pages_found = 0; int i; int starting_page = 0; //looping through all pages to find npages free consecutive pages for (i = 0; i < total_pages; i++) //was will (lastpaddr / PAGE_SIZE) { { if (my_coremap[i].state == FREE)//Found a free page { consectuve_pages_found++; //kprintf("FOUND A FREE PAGE, its page no. is : %d\n and consecutive pages found right now is %d\n",i,consectuve_pages_found); } else //not a free page { consectuve_pages_found = 0; //kprintf("page %d has state %d so cant use\n",i,element_access_ptr->state); } } if(consectuve_pages_found == npages) //found npages consecutive pages { starting_page = i- npages + 1; //kprintf("FOUND %d consecutive pages and starting page is %d so BREAK\n",consectuve_pages_found,starting_page); break; } } //change this later - now doing tricks if(consectuve_pages_found != npages)//did not find npages consecutive pages { //kprintf("only found %d consecutive pages so return back with 0\n",consectuve_pages_found); /* for (i = 0; i <npages; i++) { my_coremap[i].state = FREE; my_coremap[i].num_continuous_pages = 0; } for(i=0;i<npages;i++) { my_coremap[i].state = FIXED; } my_coremap[0].num_continuous_pages = 0; addr = my_coremap[0].physical_address; */ splx(spl); //return 0; return 0; } //found enough consecutive pages for(i=0;i<npages;i++) { //no vitrual 
right now, MAY NEED LATER my_coremap[starting_page + i].state = FIXED; //kprintf("physical address for %dth page being assigned is %u and hex representation is %x\n",i,element_access_ptr->physical_address,element_access_ptr->physical_address); } //finding physical address of first page of the ones chosen addr = my_coremap[starting_page].physical_address; my_coremap[starting_page].num_continuous_pages = npages; //kprintf("Physical address being returned: %u and hex representation is %x\n",addr,addr); //Printing out coremap info for (i = 0; i < total_pages; i++) //set to 51 for now as too many entries if RAM big { //kprintf("page entry: %d physical adress: %u virtual address: %u state: %d\n", i, my_coremap[i].physical_address,my_coremap[i].virtual_address,my_coremap[i].state); } splx(spl); return addr; } //*/ }