Example #1
void
free_kpages(vaddr_t addr)
{
    int i, index;
    int number_of_pages_deallocated = 0;
    paddr_t pa = KVADDR_TO_PADDR(addr);
    index = PADDR_TO_CM(pa);

    spinlock_acquire(&coremap_lock);

    /* Clear every coremap entry in the run; the last_page flag (saved
     * before the entry is wiped) marks where the allocation ends. */
    for (i = index; i < no_of_coremap_entries; i++) {
        //bzero((void *)PADDR_TO_KVADDR(CM_TO_PADDR(i)), PAGE_SIZE);
        int backup_last_page = coremap[i].last_page;

        coremap[i].state = FREE;
        coremap[i].last_page = -1;
        coremap[i].as = NULL;
        coremap[i].vaddr = 0;
        coremap[i].cpu = -1;
        coremap[i].pinned = 0;
        coremap[i].page = NULL;
        coremap[i].accessed = 0;

        number_of_pages_deallocated++;

        /* Stop once the final page of the run has been cleared. */
        if (backup_last_page)
            break;
    }

    coremap_used -= (PAGE_SIZE * number_of_pages_deallocated);

    spinlock_release(&coremap_lock);
}
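
The free path above relies on the allocator having flagged the final page of each run. Below is a minimal sketch of that allocation-side bookkeeping, assuming the same coremap[] fields; first, npages, and the in-use state name are assumptions, not part of the example:

	/* Hypothetical counterpart to free_kpages above: after alloc_kpages
	 * picks a run of npages starting at coremap index first, it must
	 * mark the run's final entry so the free loop knows where to stop. */
	for (i = first; i < first + (int)npages; i++) {
		coremap[i].state = FIXED;	/* in-use state; name assumed */
		coremap[i].last_page = (i == first + (int)npages - 1);
	}
	coremap_used += (PAGE_SIZE * npages);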
Example #2
void
free_kpages(vaddr_t addr)
{
     paddr_t pa = KVADDR_TO_PADDR(addr);

     if (pa < start_addr) {
          /* Nothing to do: this memory predates the coremap, so we
           * have no way to track or reclaim it. */
          (void)addr;
     } else {
          coremap_free(pa);
     }
}
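
The early bail-out here matches the policy in Example #4 below: physical pages handed out before the VM system bootstrapped (presumably via ram_stealmem()) are not tracked by the coremap, so the only safe option is to leak them.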
Example #3
static
void
coremapthread(void *sm, unsigned long num)
{
	struct semaphore *sem = sm;
	uint32_t page;
	uint32_t oldpage = 0;
	uint32_t oldpage2 = 0;
	int i;

	/* Keep two allocations in flight at all times, freeing the older
	 * one each iteration, to exercise the allocator under churn. */
	for (i=0; i<NTRIES; i++) {
		page = alloc_kpages(NPAGES);
		if (page==0) {
			if (sem) {
				kprintf("thread %lu: alloc_kpages failed\n",
					num);
				V(sem);
				return;
			}
			kprintf("alloc_kpages failed; test failed.\n");
			return;
		}
		if (oldpage2) {
			coremap_free(KVADDR_TO_PADDR(oldpage2), true /* iskern */);
		}
		oldpage2 = oldpage;
		oldpage = page;
	}
	if (oldpage2) {
		coremap_free(KVADDR_TO_PADDR(oldpage2), true /* iskern */);
	}
	if (oldpage) {
		coremap_free(KVADDR_TO_PADDR(oldpage), true /* iskern */);
	}
	if (sem) {
		V(sem);
	}
}
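
This thread body follows OS/161's thread_fork() calling convention (a void * plus an unsigned long). Here is a sketch of a driver that could launch it, assuming the OS/161 2.x thread_fork() signature; NTHREADS and the test name are illustrative:

static
void
coremapstress(void)
{
	struct semaphore *sem;
	int i, err;

	sem = sem_create("coremapstress", 0);
	if (sem == NULL) {
		panic("coremapstress: sem_create failed\n");
	}
	for (i = 0; i < NTHREADS; i++) {
		err = thread_fork("coremapstress", NULL,
				  coremapthread, sem, (unsigned long)i);
		if (err) {
			panic("coremapstress: thread_fork failed\n");
		}
	}
	/* Wait for every worker to signal completion. */
	for (i = 0; i < NTHREADS; i++) {
		P(sem);
	}
	sem_destroy(sem);
	kprintf("coremapstress done\n");
}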
Example #4
void coremap_kfree(vaddr_t addr) {
	paddr_t a = KVADDR_TO_PADDR(addr);

	if (a < coremap) {
		return;	// this memory was allocated before the VM bootstrapped, so just leak it
	} else if (a >= coremap && a < ram_start) {
		panic("coremap_kfree tried to deallocate the coremap!!");
	}

	KASSERT((a&PAGE_FRAME)==a);

	int entry = (a-ram_start)/PAGE_SIZE;

	spinlock_acquire(&cm_lock);

	struct cm_entry* cm_begin = (struct cm_entry*)PADDR_TO_KVADDR(coremap);

	int pgs = cm_begin[entry].contig_pages;

	//int before = coremap_kcount();
	
	//kprintf("********************\nFreeing %x kpage(s)...\n",pgs);
	//coremap_dump();
	
	KASSERT(pgs>0);
	
	free_mem_pages += pgs;

	int i;
	for (i=0; i<pgs; i++) {	// mark all pages as not in use
		cm_begin[i+entry].in_use = 0;
		cm_begin[i+entry].is_kern = 0;
		cm_begin[i+entry].contig_pages = 0;
		coremap_clean_page(addr+(i*PAGE_SIZE));
	}

	//coremap_dump();

	//int after = coremap_kcount();

	//KASSERT(before-pgs == after);

	spinlock_release(&cm_lock);
}
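
Note the run-length convention differs from Example #1: rather than flagging the last page, this coremap records the length of the run in its first entry (which is why KASSERT(pgs>0) must hold for the entry being freed). The allocation side is not shown; presumably it does something like the following sketch, where first, npages, and iskern are assumed names:

	for (i = 0; i < (int)npages; i++) {
		cm_begin[first + i].in_use = 1;
		cm_begin[first + i].is_kern = iskern;
		/* only the first entry of the run records its length */
		cm_begin[first + i].contig_pages = (i == 0) ? (int)npages : 0;
	}
	free_mem_pages -= npages;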
Example #5
/*
 * free_kpages
 *
 * Free pages allocated with alloc_kpages.
 * Synchronization: takes coremap_spinlock. Does not block.
 */
void 
free_kpages(vaddr_t addr)
{
	coremap_free(KVADDR_TO_PADDR(addr), true /* iskern */);
}
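
For context, a minimal (illustrative) caller of this interface; alloc_kpages() and free_kpages() are the standard OS/161 entry points, and the buffer use here is an assumption:

	vaddr_t buf;

	buf = alloc_kpages(4);		/* four contiguous kernel pages */
	if (buf == 0) {
		return ENOMEM;		/* out of physical memory */
	}
	/* ... use the pages through the kernel virtual address ... */
	free_kpages(buf);		/* returns all four frames via coremap_free */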
int
as_copy(struct addrspace *old, struct addrspace **ret)
{
    struct addrspace *newas;

    newas = as_create();
    if (newas==NULL) {
        return ENOMEM;
    }

    //	kprintf(" **** inside as copy ****  \n");
    //	spinlock_acquire(newas->as_splock);
    //	spinlock_acquire(old->as_splock);

    if(use_small_lock == true && swapping_started == true)
    {
        lock_acquire(newas->as_lock);
        lock_acquire(old->as_lock);
    }
    else if(use_big_lock == true && swapping_started == true)
        lock_acquire(as_lock);
    struct as_region* r_old = old->as_region_list;
    while(r_old != NULL)
    {
        struct as_region* r_new = (struct as_region*)kmalloc(sizeof(struct as_region));
        if(r_new == NULL)
        {
            if(use_big_lock == true && swapping_started == true)
                lock_release(as_lock);
            else if(use_small_lock == true && swapping_started == true)
            {
                lock_release(old->as_lock);
                lock_release(newas->as_lock);
            }
            //spinlock_release(old->as_splock);
            //spinlock_release(newas->as_splock);
            as_destroy(newas);
            return ENOMEM;
        }

        r_new->region_base = r_old->region_base;
        r_new->region_npages = r_old->region_npages;
        r_new->can_read = r_old->can_read;
        r_new->can_write = r_old->can_write;
        r_new->can_execute = r_old->can_execute;

        int ret = region_list_add_node(&newas->as_region_list,r_new);
        if(ret == -1)
        {
            if(use_big_lock == true && swapping_started == true)
                lock_release(as_lock);
            else if(use_small_lock == true  && swapping_started == true)
            {
                lock_release(old->as_lock);
                lock_release(newas->as_lock);
            }
            //	spinlock_release(old->as_splock);
            //	spinlock_release(newas->as_splock);
            as_destroy(newas);
            return ENOMEM;
        }
        r_old = r_old->next;
    }

    struct page_table_entry* p_old = old->as_page_list;
    while(p_old != NULL)
    {
        struct page_table_entry* p_new = (struct page_table_entry*)kmalloc(sizeof(struct page_table_entry));
        if(p_new == NULL)
        {
            if(use_big_lock == true && swapping_started == true)
                lock_release(as_lock);
            else if(use_small_lock == true && swapping_started == true)
            {
                lock_release(old->as_lock);
                lock_release(newas->as_lock);
            }
            //		spinlock_release(old->as_splock);
            //		spinlock_release(newas->as_splock);
            as_destroy(newas);

            return ENOMEM;
        }
        p_new->vaddr = p_old->vaddr;
        p_new->swap_pos = -1;

        /* If the page is mid-swap, wait for the swap to finish before
         * deciding how to copy it. */
        while(p_old->page_state == SWAPPING)
        {
            thread_yield();
        }

        if(p_old->page_state == MAPPED)
        {
            if(use_page_lock == true && swapping_started == true)
                lock_acquire(coremap[(p_old->paddr)/PAGE_SIZE].page_lock);

            /* Re-check the state now that the page lock (if any) is
             * held: it may have changed while we were acquiring it. */
            if(p_old->page_state == MAPPED)
            {
                paddr_t paddr = get_user_page(p_old->vaddr, false, newas);
                KASSERT(p_old->page_state == MAPPED);
                //	int spl = splhigh();
                if(use_small_lock == true && swapping_started == true)
                {
                    if(lock_do_i_hold(newas->as_lock) == false)
                        lock_acquire(newas->as_lock);
                    if(lock_do_i_hold(old->as_lock) == false)
                        lock_acquire(old->as_lock);
                }
                else if(use_big_lock == true && swapping_started == true)
                {
                    if(lock_do_i_hold(as_lock) == false)
                        lock_acquire(as_lock);
                }
                if(paddr == 0)
                {
                    if(use_big_lock == true && swapping_started == true)
                        lock_release(as_lock);
                    else if(use_small_lock == true && swapping_started == true)
                    {
                        lock_release(old->as_lock);
                        lock_release(newas->as_lock);
                    }
                    //				spinlock_release(old->as_splock);
                    //				spinlock_release(newas->as_splock);
                    as_destroy(newas);
                    return ENOMEM;
                }
                uint32_t old_index = p_old->paddr/PAGE_SIZE;
                KASSERT(coremap[old_index].is_victim == false);
                KASSERT(coremap[paddr/PAGE_SIZE].is_victim == false);
                memmove((void*)PADDR_TO_KVADDR(paddr),
                        (const void *)PADDR_TO_KVADDR(p_old->paddr),
                        PAGE_SIZE);
                /* Both frames are accessed through their kernel virtual
                 * (KSEG0) addresses, as dumbvm does: kernel code cannot
                 * dereference user or raw physical addresses, so the copy
                 * goes through the direct-mapped window. */

                p_new->paddr = paddr;
                p_new->page_state = MAPPED;

                //	splx(spl);

                int ret = page_list_add_node(&newas->as_page_list,p_new);
                if(ret == -1)
                {
                    if(use_big_lock == true && swapping_started == true)
                        lock_release(as_lock);
                    else if(use_small_lock == true && swapping_started == true)
                    {
                        lock_release(old->as_lock);
                        lock_release(newas->as_lock);
                    }
                    //			spinlock_release(old->as_splock);
                    //			spinlock_release(newas->as_splock);
                    as_destroy(newas);
                    return ENOMEM;
                }

                if(use_page_lock == true && swapping_started == true)
                {

                    if(lock_do_i_hold(coremap[paddr/PAGE_SIZE].page_lock) == true)
                        lock_release(coremap[paddr/PAGE_SIZE].page_lock);


                    if(lock_do_i_hold(coremap[(p_old->paddr/PAGE_SIZE)].page_lock) == true)
                        lock_release(coremap[(p_old->paddr/PAGE_SIZE)].page_lock);
                }

            }
        }

        if(p_old->page_state == SWAPPED)
        {
            /* The old page is on disk, so copy it disk -> memory -> disk,
             * the approach suggested in Jinghao Shi's recitation video:
             * vm_bootstrap allocates a one-page (4K) staging buffer into
             * which the data is swapped in, and from which it is written
             * back out to a freshly reserved swap slot. The buffer is a
             * shared resource, so access to it is guarded by a lock. */

            //	kprintf("in as_copy swap code \n");
            //	spinlock_release(old->as_splock);
            //	spinlock_release(newas->as_splock);
            swap_in(p_old->vaddr,old,copy_buffer_vaddr, p_old->swap_pos);
            //	kprintf("completed swap in \n");
            int pos = mark_swap_pos(p_new->vaddr, newas);
            KASSERT(pos != -1);
            int err = write_to_disk(KVADDR_TO_PADDR(copy_buffer_vaddr)/PAGE_SIZE, pos);
            //	kprintf("completed writing to disk \n");
            KASSERT(err == 0);
            //		spinlock_acquire(newas->as_splock);
            //		spinlock_acquire(old->as_splock);
            //	as_zero_region(KVADDR_TO_PADDR(copy_buffer_vaddr),1);
            p_new->page_state = SWAPPED;
            p_new->swap_pos = pos;
            p_new->paddr = 0;


            if(use_page_lock == true && swapping_started == true)
            {

                if(lock_do_i_hold(coremap[(p_old->paddr/PAGE_SIZE)].page_lock) == true)
                    lock_release(coremap[(p_old->paddr/PAGE_SIZE)].page_lock);
            }
        }
        p_old = p_old->next;

    }

    newas->as_heap_start = old->as_heap_start;
    newas->as_heap_end = old->as_heap_end;
    *ret = newas;


    if(use_big_lock == true && swapping_started == true)
        lock_release(as_lock);
    else if(use_small_lock == true && swapping_started == true)
    {
        lock_release(old->as_lock);
        lock_release(newas->as_lock);
    }

//	kprintf("exiting as copy \n");
    //	spinlock_release(old->as_splock);
    //	spinlock_release(newas->as_splock);
    return 0;
}
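
Stripped of the lock juggling, the control flow of as_copy reduces to the following outline (a summary of the code above, not added functionality):

/*
 * as_copy(old, &new):
 *   new = as_create();
 *   for each region in old->as_region_list:
 *       duplicate the as_region record into new;
 *   for each PTE in old->as_page_list:
 *       wait out any in-progress swap (page_state == SWAPPING);
 *       if MAPPED:  get_user_page() a fresh frame, memmove() the old
 *                   frame's contents via PADDR_TO_KVADDR, link the PTE;
 *       if SWAPPED: swap_in() to the shared copy buffer, reserve a slot
 *                   with mark_swap_pos(), write_to_disk(), link the PTE;
 *   copy the heap bounds, *ret = new, return 0 (ENOMEM on any failure).
 */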