void
as_activate(void)
{
    struct addrspace *as;

    as = proc_getas();
    if (as == NULL) {
        /*
         * Kernel thread without an address space; leave the
         * prior address space in place.
         */
        return;
    }

    /* Invalidate all the TLB entries, with interrupts disabled. */
    int i, spl;

    spl = splhigh();

    for (i=0; i<NUM_TLB; i++) {
        tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
    }

    splx(spl);
}
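For context, as_activate() is normally invoked right after a new address space is installed for the current process, roughly as OS/161's runprogram() does. A minimal sketch, assuming the OS/161 2.x as_create()/proc_setas() interface (the helper name below is made up for illustration):

/* Sketch (hypothetical helper): create and install a fresh address space,
 * then activate it, which triggers the TLB flush shown above. */
static int
switch_to_new_addrspace(void)
{
	struct addrspace *as;

	as = as_create();
	if (as == NULL) {
		return ENOMEM;
	}
	proc_setas(as);		/* install as the current process's address space */
	as_activate();		/* flushes the stale TLB entries */
	return 0;
}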
Example #2
/*
 * tlb_invalidate: marks a given tlb entry as invalid.
 *
 * Synchronization: assumes we hold coremap_spinlock. Does not block.
 */
static
void
tlb_invalidate(int tlbix)
{
	uint32_t elo, ehi;
	paddr_t pa;
	unsigned cmix;

	KASSERT(spinlock_do_i_hold(&coremap_spinlock));

	tlb_read(&ehi, &elo, tlbix);
	if (elo & TLBLO_VALID) {
		pa = elo & TLBLO_PPAGE;
		cmix = PADDR_TO_COREMAP(pa);
		KASSERT(cmix < num_coremap_entries);
		KASSERT(coremap[cmix].cm_tlbix == tlbix);
		KASSERT(coremap[cmix].cm_cpunum == curcpu->c_number);
		coremap[cmix].cm_tlbix = -1;
		coremap[cmix].cm_cpunum = 0;
		DEBUG(DB_TLB, "... pa 0x%05lx --> tlb --\n", 
			(unsigned long) COREMAP_TO_PADDR(cmix));
	}

	tlb_write(TLBHI_INVALID(tlbix), TLBLO_INVALID(), tlbix);
	DEBUG(DB_TLB, "... pa ------- <-- tlb %d\n", tlbix);
}
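Because tlb_invalidate() asserts that coremap_spinlock is already held (and that the mapping, if any, belongs to this CPU), callers wrap it roughly like the following minimal sketch; the helper name is made up, while coremap, cm_tlbix and coremap_spinlock come from the snippet above:

/* Sketch (hypothetical helper): evict one physical page's TLB entry while
 * honoring the synchronization contract documented above. Assumes the
 * mapping, if present, is in this CPU's TLB. */
static void
coremap_evict_tlb_entry(unsigned cmix)
{
	spinlock_acquire(&coremap_spinlock);
	if (coremap[cmix].cm_tlbix >= 0) {
		tlb_invalidate(coremap[cmix].cm_tlbix);
	}
	spinlock_release(&coremap_spinlock);
}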
Example #3
void
as_activate(struct addrspace *as)
{
	/* dumbvm-style implementation: invalidate the TLB on activation. */
	int i, spl;

	/*
	 * Remember the previously activated address space so the flush
	 * can be skipped when re-activating the same one.
	 */
	static struct addrspace *old_as = NULL;

	spl = splhigh();
#if OPT_A3
	/* Only flush when switching to a different address space. */
	if (old_as != as) {
		for (i=0; i<NUM_TLB; i++) {
			TLB_Write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
		}
		vmstats_inc(3);	/* bump the TLB-invalidation statistic */
	}
#else
	for (i=0; i<NUM_TLB; i++) {
		TLB_Write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
	}
#endif

	/* remember which address space is now active */
	old_as = as;

	splx(spl);
}
Example #4
void
tlb_invalidate(int slot)
{
    int spl = splhigh();
    u_int32_t elo, ehi;

    TLB_Read(&ehi, &elo, slot);
    if (elo & TLBLO_VALID) {
        TLB_Write(TLBHI_INVALID(slot), TLBLO_INVALID(), slot);
    }
    splx(spl);
}
Example #5
void
vm_tlbshootdown_all(void)
{
	int i, spl;

	/* Disable interrupts on this CPU while invalidating the TLB. */
	spl = splhigh();
	for (i=0; i<NUM_TLB; i++) {
		tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
	}
	splx(spl);
}
Example #6
void vm_flushtlb(void) {
	int i;
	
	// lock the TLB while we mess with it
	spinlock_acquire(&tlb_lock);
	for (i=0; i<NUM_TLB;i++) {
		tlb_write(TLBHI_INVALID(i),TLBLO_INVALID(),i);	// invalidate all TLB entries
	}
	spinlock_release(&tlb_lock);
}
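The tlb_lock used above is assumed to be an ordinary statically initialized spinlock, for example:

/* Sketch: the spinlock protecting the TLB in the snippet above; it could
 * equally be set up with spinlock_init() at boot time. */
static struct spinlock tlb_lock = SPINLOCK_INITIALIZER;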
Example #7
void 
vm_tlbflush(vaddr_t target) 
{
    int spl;
    int index;

    spl = splhigh();
    index = tlb_probe(target & PAGE_FRAME, 0);
    if (index >= 0) {
        tlb_write(TLBHI_INVALID(index), TLBLO_INVALID(), index);
    }
    splx(spl);
}
Example #8
void
vm_tlbflush_all(void)
{
	int i, spl;

	spl = splhigh();

	for (i=0; i<NUM_TLB; i++) {
		tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
	}
	splx(spl);
}
void freePage(struct page* page) {
	if(page->pt_state == PT_STATE_MAPPED) {
		coremap_freeuserpages(page->pt_pagebase * PAGE_SIZE);
		int spl = splhigh();
		int tlbpos = tlb_probe(page->pt_pagebase * PAGE_SIZE, 0);
		if (tlbpos >= 0) {
			tlb_write(TLBHI_INVALID(tlbpos), TLBLO_INVALID(), tlbpos);
		}
		splx(spl);
	} else if(page->pt_state == PT_STATE_SWAPPED) {
		swapfree(page);
	}
}
Example #10
void vm_tlbshootdown_all(void)
{
	int i, spl;

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	for (i=0; i<NUM_TLB; i++) {
		tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
	}

	splx(spl);
}
Example #11
/*Shoot down a TLB entry based on given virtual address*/
void
tlb_shootdown_page_table_entry(vaddr_t va) {
  int i;
  uint32_t ehi, elo;
  KASSERT((va & PAGE_FRAME) == va); // va must be page-aligned
  spinlock_acquire(&stealmem_lock);
  for(i=0; i < NUM_TLB; i++) {
    tlb_read(&ehi, &elo, i);
    if (ehi == va) {
      tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
      break;
    }
  }
  spinlock_release(&stealmem_lock);
}
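The linear scan above works, but since the MIPS TLB can be probed directly, the same shootdown can be written with a single lookup, as Example #7 does. A minimal sketch of that variant (same name and locking convention kept only for comparison):

/* Sketch: single-entry shootdown via tlb_probe() instead of scanning every
 * slot; a negative return value means the address is not cached. */
void
tlb_shootdown_page_table_entry(vaddr_t va)
{
	int index;

	KASSERT((va & PAGE_FRAME) == va);

	spinlock_acquire(&stealmem_lock);
	index = tlb_probe(va, 0);
	if (index >= 0) {
		tlb_write(TLBHI_INVALID(index), TLBLO_INVALID(), index);
	}
	spinlock_release(&stealmem_lock);
}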
Example #12
/* TLB shootdown handling called from interprocessor_interrupt */
void vm_tlbshootdown_all(void)
{
	int spl;
	/* Disable interrupts on this CPU while frobbing the TLB. */
	bool lock = get_coremap_spinlock();
	spl = splhigh();

	/* Shoot down all the TLB entries. */
	for (int i = 0; i < NUM_TLB; i++) {
		tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
	}

	splx(spl);
	release_coremap_spinlock(lock);

	return;
}
Example #13
void vm_tlbshootdown(const struct tlbshootdown *ts)
{
	int i, spl;

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	uint32_t ehi, elo;
	for (i=0; i<NUM_TLB; i++) {
		tlb_read(&ehi, &elo, i);
		if (ehi == ts->ts_vaddr)
		{
			tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
			break;
		}
	}
	splx(spl);
}
static void swaponepagein(int idx, struct addrspace* as) {
	(void) as;
	// 3. clear the page's TLB entry if present
	cm_setEntryDirtyState(COREMAP(idx),true);
	struct page* pg = findPageFromCoreMap(COREMAP(idx), idx);
	int spl = splhigh();
	int tlbpos = tlb_probe(pg->pt_pagebase * PAGE_SIZE, 0);
	if (tlbpos >= 0) {
		tlb_write(TLBHI_INVALID(tlbpos), TLBLO_INVALID(), tlbpos);
	}
	splx(spl);

	int swapPageindex = getOneSwapPage();
	kprintf("Swap in :\tswap= %x,\tpage=%x \n",swapPageindex,pg->pt_virtbase);
	struct iovec iov;
	struct uio kuio;
	iov.iov_kbase = (void*) PADDR_TO_KVADDR(cm_getEntryPaddr(idx));
	iov.iov_len = PAGE_SIZE; // length of the memory space
	kuio.uio_iov = &iov;
	kuio.uio_iovcnt = 1;
	kuio.uio_resid = PAGE_SIZE; // amount to write to the file
	kuio.uio_space = NULL;
	kuio.uio_offset = swap_map[swapPageindex].se_paddr * PAGE_SIZE;
	kuio.uio_segflg = UIO_SYSSPACE;
	kuio.uio_rw = UIO_WRITE;
	// 4. write the page out to disk
	spinlock_release(&coremap_lock);
	int result = VOP_WRITE(swap_vnode, &kuio);
	spinlock_acquire(&coremap_lock);
	if (result) {
		panic("WRITE FAILED!\n");
	}
	cm_setEntryDirtyState(COREMAP(idx),false);

	pg->pt_state = PT_STATE_SWAPPED;
	pg->pt_pagebase = swap_map[swapPageindex].se_paddr;
}
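The swap-in (read-back) path is the mirror image of the write above: the same kernel uio is pointed at the swap vnode, but with UIO_READ. A minimal sketch reusing the names from this snippet; the function name is hypothetical and locking/error handling are abbreviated:

/* Sketch (hypothetical helper): read a swapped-out page back into the
 * physical page at coremap index idx, mirroring the uio setup above. */
static void
swaponepage_readback(struct page* pg, int idx)
{
	struct iovec iov;
	struct uio kuio;

	iov.iov_kbase = (void*) PADDR_TO_KVADDR(cm_getEntryPaddr(idx));
	iov.iov_len = PAGE_SIZE;
	kuio.uio_iov = &iov;
	kuio.uio_iovcnt = 1;
	kuio.uio_resid = PAGE_SIZE;	/* amount to read from the swap file */
	kuio.uio_space = NULL;
	kuio.uio_offset = pg->pt_pagebase * PAGE_SIZE;	/* swap slot recorded at swap-out */
	kuio.uio_segflg = UIO_SYSSPACE;
	kuio.uio_rw = UIO_READ;

	int result = VOP_READ(swap_vnode, &kuio);
	if (result) {
		panic("READ FAILED!\n");
	}

	pg->pt_state = PT_STATE_MAPPED;
	pg->pt_pagebase = cm_getEntryPaddr(idx) / PAGE_SIZE;	/* back to a physical page number */
}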
Example #15
void vm_tlbshootdown(const struct tlbshootdown *ts)
{
	int tlb_entry, spl;

	/* Disable interrupts on this CPU while frobbing the TLB. */
	bool lock = get_coremap_spinlock();
	spl = splhigh();
	// Probe the TLB to see if this particular VA is present.
	tlb_entry = tlb_probe(VA_TO_VPF(ts->ts_vaddr), 0);
	if (tlb_entry < 0) {
		// No entry found, so the shootdown has already succeeded.
		splx(spl);
		release_coremap_spinlock(lock);
		return;
	}

	// Invalidate the particular TLB entry
	tlb_write(TLBHI_INVALID(tlb_entry), TLBLO_INVALID(), tlb_entry);

	splx(spl);
	release_coremap_spinlock(lock);

}
Example #16
void *
sys_sbrk(intptr_t amount, int *err){

    vaddr_t retval = curproc->p_addrspace->heap_end;

    /* Reject absurdly large negative adjustments (here, 1 GB or more). */
    if (amount <= -4096*1024*256) {
        *err = EINVAL;
        return (void *)-1;
    }

    if (curproc->p_addrspace->heap_end + amount < curproc->p_addrspace->heap_start)  {
        *err = EINVAL;
        return (void *)-1;
    }

    if (curproc->p_addrspace->heap_start + amount >= USERSTACK - 1024 * PAGE_SIZE)  {
        *err = ENOMEM;
        return (void *)-1;
    }

    /* The adjustment must be a multiple of 4 bytes */
    if (amount % 4 != 0) {
        *err = EINVAL;
        return (void *)-1;
    }

    int num_pages = (amount < 0) ? (amount * -1)/PAGE_SIZE : amount/PAGE_SIZE;

    if (amount % PAGE_SIZE != 0)
        num_pages++;

    struct page_table *proc_pg_table = curproc->p_addrspace->page_table_entry;
    struct page_table *prev_proc_pg_table;
    struct page_table *free_proc_pg_table;

    if(amount < 0) {
        for (int i = 1; i <= num_pages; i++) {

            // Page table removal: loop through the page table and remove entries accordingly.

            proc_pg_table = curproc->p_addrspace->page_table_entry;
            prev_proc_pg_table = proc_pg_table;

            while(proc_pg_table != NULL) {
                if (proc_pg_table->vpn < USERSTACK - 1024 * PAGE_SIZE &&
                    proc_pg_table->vpn >= (curproc->p_addrspace->heap_end + amount)) {

                    prev_proc_pg_table->next = proc_pg_table->next;

                    free_proc_pg_table = proc_pg_table;
                    kfree((void *)PADDR_TO_KVADDR(free_proc_pg_table->ppn));
                    kfree(free_proc_pg_table);

                    break;
                }

                prev_proc_pg_table = proc_pg_table;
                proc_pg_table = proc_pg_table->next;
            }

        }

        // TLB cleanup code
        int spl;

        spl = splhigh();

        for (int i=0; i < NUM_TLB; i++) {
            tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
        }

        splx(spl);
    }

    curproc->p_addrspace->heap_end += amount;

    if (amount < 0)
        as_activate();

    return (void *)retval;
}
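From user level this syscall is reached through the usual sbrk() wrapper, which returns the previous break on success and (void *)-1 with errno set on failure. A minimal usage sketch (user-level test code, assuming the standard libc sbrk() declaration in unistd.h):

/* Sketch: grow the heap by one page and treat (void *)-1 as failure. */
#include <unistd.h>
#include <stdio.h>
#include <errno.h>

int
main(void)
{
	char *p = sbrk(4096);		/* request one 4 KB page */
	if (p == (void *)-1) {
		printf("sbrk failed: errno %d\n", errno);
		return 1;
	}
	/* sbrk returns the old heap end; the new space begins there. */
	p[0] = 'x';			/* touch the new page so it gets mapped on first fault */
	return 0;
}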