Example #1
/* Allocate a page in a user address space */
static
void
allocate_nonfixed_page(size_t page_num, struct addrspace *as, vaddr_t va, int permissions)
{
	// KASSERT(spinlock_do_i_hold(&stealmem_lock));
	KASSERT(core_map[page_num].state == FREE);
	// Allocate a page
	core_map[page_num].state = LOCKED;
	paddr_t pa = page_num * PAGE_SIZE;
	core_map[page_num].pa = pa;
	core_map[page_num].va = va;
	core_map[page_num].as = as;

	// Get the page table for the virtual address, creating it if it does not already exist.
	struct page_table *pt = pgdir_walk(as, va, true);

	KASSERT(pt != NULL);

	// Update the page table entry to point to the new page.
	size_t pt_index = VA_TO_PT_INDEX(va);
	vaddr_t page_location = PADDR_TO_KVADDR(core_map[page_num].pa);
	pt->table[pt_index] = PAGEVA_TO_PTE(page_location);
	// DEBUG(DB_VM, "VA:%p\n", (void*) va);
	// DEBUG(DB_VM, "PTE:%p\n", (void*) pt->table[pt_index]);
	// DEBUG(DB_VM, "PFN:%p\n", (void*) PTE_TO_PFN(pt->table[pt_index]));
	// Add in the permission bits.
	pt->table[pt_index] |= permissions;

	zero_page(page_num);
	free_pages--;
	// DEBUG(DB_VM, "A:%d\n",free_pages);
}
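
The KASSERT at the top implies the caller has already picked a FREE core-map index before calling allocate_nonfixed_page(). Below is a minimal, stand-alone sketch of that scan; the types and names here (PageState, core_map, NUM_PAGES, find_free_page) are illustrative assumptions modeled on the example above, not the original kernel's definitions.

#include <stdio.h>
#include <stddef.h>

#define NUM_PAGES 8

typedef enum { FREE, LOCKED, FIXED } PageState;

struct core_map_entry {
	PageState state;
};

static struct core_map_entry core_map[NUM_PAGES];

/* Linear scan for the first FREE physical page; returns NUM_PAGES on failure. */
static size_t
find_free_page(void)
{
	for (size_t i = 0; i < NUM_PAGES; i++) {
		if (core_map[i].state == FREE) {
			return i;
		}
	}
	return NUM_PAGES;
}

int
main(void)
{
	core_map[0].state = FIXED;   /* pretend the kernel owns page 0 */
	size_t page = find_free_page();
	if (page == NUM_PAGES) {
		printf("out of memory\n");
	} else {
		printf("would allocate page %zu\n", page);
	}
	return 0;
}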
Example #2
/*
 * Allocates a page off one of our lists.
 *
 * Parameters:
 * - uiFlags = Flags for the page allocation.
 *
 * Returns:
 * INVALID_PAGE if the page could not be allocated, otherwise the index of the allocated page.
 */
static UINT32 allocate_page(UINT32 uiFlags)
{
  UINT32 rc;
  PPAGELIST ppgl = NULL;
  BOOL bZero = FALSE;

  if (uiFlags & PGALLOC_ZERO)
  { /* try zeroed list first, then free (but need to zero afterwards) */
    if (g_pglZeroed.cpg > 0)
      ppgl = &g_pglZeroed;
    else if (g_pglFree.cpg > 0)
    {
      ppgl = &g_pglFree;
      bZero = TRUE;
    }
  }
  else
  { /* try free list first, then zeroed */
    if (g_pglFree.cpg > 0)
      ppgl = &g_pglFree;
    else if (g_pglZeroed.cpg > 0)
      ppgl = &g_pglZeroed;
  }
  /* TODO: apply additional strategy if we don't yet have a page list */

  if (!ppgl)
    return INVALID_PAGE;
  rc = g_pMasterPageDB[ppgl->ndxLast].d.next;  /* take first page on list */
  remove_from_list(ppgl, rc);
  if (bZero)
    zero_page(rc);
  return rc;
}
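
The interesting part of allocate_page() is the list-selection policy: a request for a zeroed page prefers the pre-zeroed list and only falls back to the free list (paying for zero_page() afterwards), while a plain request does the opposite so zeroed pages are saved for callers that need them. Below is a compilable, stand-alone model of just that policy; the simplified list type and globals are assumptions, not the original PPAGELIST machinery.

#include <stdio.h>
#include <stdbool.h>

#define PGALLOC_ZERO 0x1
#define INVALID_PAGE 0xFFFFFFFFu

struct pagelist {
	unsigned pages[16];
	unsigned count;
};

static struct pagelist g_free   = { {4, 5, 6}, 3 };
static struct pagelist g_zeroed = { {7}, 1 };

static unsigned pop_page(struct pagelist *pl)
{
	return pl->pages[--pl->count];
}

static unsigned alloc_page(unsigned flags)
{
	struct pagelist *pl = NULL;
	bool need_zero = false;

	if (flags & PGALLOC_ZERO) {
		if (g_zeroed.count > 0) {
			pl = &g_zeroed;
		} else if (g_free.count > 0) {
			pl = &g_free;
			need_zero = true;   /* free pages still hold stale data */
		}
	} else {
		if (g_free.count > 0) {
			pl = &g_free;
		} else if (g_zeroed.count > 0) {
			pl = &g_zeroed;
		}
	}
	if (!pl) {
		return INVALID_PAGE;
	}
	unsigned page = pop_page(pl);
	if (need_zero) {
		/* zero_page(page) would run here in the real allocator */
	}
	return page;
}

int main(void)
{
	printf("zeroed request -> page %u\n", alloc_page(PGALLOC_ZERO));
	printf("plain request  -> page %u\n", alloc_page(0));
	return 0;
}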
Example #3
/* Allocate a page for use by the kernel */
static
void
allocate_fixed_page(size_t page_num)
{
	// KASSERT(spinlock_do_i_hold(&stealmem_lock));
	KASSERT(core_map[page_num].state == FREE);
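	// Fixed pages are kernel-owned: they get no user virtual address and no address space.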
	core_map[page_num].state = FIXED;
	paddr_t pa = page_num * PAGE_SIZE;
	core_map[page_num].pa = pa;
	core_map[page_num].va = 0x0;
	core_map[page_num].as = NULL;
	zero_page(page_num);
	free_pages--;
	// DEBUG(DB_VM, "AF:%d\n",free_pages);
}
Example #4
int
cpu_pte_manipulate(struct mm_pte_manipulate *data) {
	pte_t					**pdep;
	pte_t					*pdep_l2base;
	pte_t					*ptep;
	pte_t					pte;
	pte_t					orig_pte;
	uintptr_t				l2_vaddr;
	unsigned				bits;
	unsigned				l1bits;
	pte_t					**l1pagetable;
	struct pa_quantum		*pq;
	unsigned				pa_status;
	int						flushed;
	ADDRESS					*adp;
	int						r;
	part_id_t				mpid;
	PROCESS					*prp;

	// Assume alignment has been checked by the caller

	adp = data->adp;
	if(adp == NULL) crash();
	l1pagetable = adp->cpu.pgdir;

	l1bits = 0x1;  // validity bit
	if(data->op & PTE_OP_BAD) {
		bits = PPC800_RPN_CI;
	} else if(data->prot & (PROT_READ|PROT_WRITE|PROT_EXEC)) {
		bits = 0xf1;
		//RUSH3: if PTE_OP_TEMP, mark PTE as accessible from procnto only if possible
		if(data->prot & PROT_WRITE) {
			bits |= (0x2<<PPC800_RPN_PP1_SHIFT) | (0x1<<PPC800_RPN_PP2_SHIFT);
		} else if(data->prot & (PROT_READ|PROT_EXEC)) {
			bits |= 0x3<<PPC800_RPN_PP1_SHIFT;
		}
		if(data->shmem_flags & SHMCTL_HAS_SPECIAL) {
			if(data->special & ~PPC_SPECIAL_MASK) {
				return EINVAL;
			}
			//RUSH1: If PPC_SPECIAL_E/W/M/G is on, should I report an error?
			if((data->special & PPC_SPECIAL_I)) {
				bits |= PPC800_RPN_CI;
			}
		}
		if(data->prot & PROT_NOCACHE) {
			bits |= PPC800_RPN_CI;
			l1bits |= PPC800_TWC_G;
		}
	} else {
		bits = 0;
	}

	r = EOK;
	flushed = 0;
	prp = adp ? object_from_data(adp, address_cookie) : NULL;
	mpid = mempart_getid(prp, sys_memclass_id);
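	// Walk [start, end) one page at a time, allocating second-level page tables on demand.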
	for( ;; ) {
		if(data->start >= data->end) break;
		pdep = &l1pagetable[L1PAGEIDX(data->start)];
		l2_vaddr = (uintptr_t)*pdep;
		if(l2_vaddr == 0) {
			memsize_t  resv = 0;

			if(!(data->op & (PTE_OP_MAP|PTE_OP_PREALLOC|PTE_OP_BAD))) {
				//Move vaddr to next page directory
				data->start = (data->start + PDE_SIZE) & ~(PDE_SIZE - 1);
				if(data->start == 0) data->start = ~0;
				continue;
			}
			if (MEMPART_CHK_and_INCR(mpid, __PAGESIZE, &resv) != EOK) {
				return ENOMEM;
			}
			pq = pa_alloc(__PAGESIZE, __PAGESIZE, 0, 0, &pa_status, restrict_proc, resv);
			if(pq == NULL) {
				MEMPART_UNDO_INCR(mpid, __PAGESIZE, resv);
				return ENOMEM;
			}
			MEMCLASS_PID_USE(prp, mempart_get_classid(mpid), __PAGESIZE);
			pq->flags |= PAQ_FLAG_SYSTEM;
			pq->u.inuse.next = adp->cpu.l2_list;
			adp->cpu.l2_list = pq;
			l2_vaddr = pa_quantum_to_paddr(pq);
			if(pa_status & PAA_STATUS_NOT_ZEROED) {
				zero_page((uint32_t *)l2_vaddr, __PAGESIZE, NULL);
			}
		}
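		// Point the L1 directory entry at the L2 table, tagged with the L1 attribute bits.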
		*pdep = (pte_t *)(l2_vaddr | l1bits);
		if(data->op & PTE_OP_PREALLOC) {
			//Move vaddr to next page directory
			data->start = (data->start + PDE_SIZE) & ~(PDE_SIZE - 1);
			if(data->start == 0) data->start = ~0;
			continue;
		}
		pdep_l2base = PDE_ADDR(*pdep);
		ptep = &(pdep_l2base[L2PAGEIDX(data->start)]);
		orig_pte = *ptep;
		if(data->op & (PTE_OP_MAP|PTE_OP_BAD)) {
			pte = data->paddr | bits;
		} else if(data->op & PTE_OP_UNMAP) {
			pte = 0;
		} else if(orig_pte & (0xfff & ~PPC800_RPN_CI)) {
			// PTE_OP_PROT
			pte = (orig_pte & ~0xfff) | bits;
		} else {
			// We don't change PTE permissions if we haven't mapped the
			// page yet...
			pte = orig_pte;
		}
		*ptep = pte;
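		// A live translation changed: invalidate the stale TLB entry for this address.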
		if((orig_pte != 0) && (pte != orig_pte)) {
			flushed = 1;
			ppc_tlbie(data->start);
		}

		data->start += __PAGESIZE;
		data->paddr += __PAGESIZE;
		if((data->op & PTE_OP_PREEMPT) && KerextNeedPreempt()) {
			r = EINTR;
			break;
		}
	}
	if(flushed) {
		ppc_isync();
	}
	return r;
}
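
The loop above is a classic two-level walk: index the L1 directory, allocate and zero an L2 table on first touch, then fill in the L2 entry. The stand-alone sketch below models only that structure; the sizes, index macros, and the low "valid" bit are assumptions chosen for illustration, and the real code's protection bits, memory-partition accounting, and TLB flushes are omitted.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define L1_ENTRIES   4
#define L2_ENTRIES   4
#define PAGE_SIZE    4096u
/* Index helpers, mirroring L1PAGEIDX()/L2PAGEIDX() in spirit. */
#define L1_INDEX(va) (((va) / PAGE_SIZE) / L2_ENTRIES % L1_ENTRIES)
#define L2_INDEX(va) (((va) / PAGE_SIZE) % L2_ENTRIES)

typedef uint32_t pte_t;

static pte_t *pgdir[L1_ENTRIES];   /* NULL = no L2 table yet */

/* Map one page, allocating (and zeroing) the L2 table on first use. */
static int map_page(uint32_t va, uint32_t pa)
{
	pte_t **pdep = &pgdir[L1_INDEX(va)];
	if (*pdep == NULL) {
		*pdep = calloc(L2_ENTRIES, sizeof(pte_t));   /* calloc zeroes, like zero_page() */
		if (*pdep == NULL) {
			return -1;   /* ENOMEM in the real code */
		}
	}
	(*pdep)[L2_INDEX(va)] = pa | 0x1;   /* low bit models the "valid" flag */
	return 0;
}

int main(void)
{
	if (map_page(0x0000, 0x10000) == 0 &&
	    map_page(0x1000, 0x11000) == 0) {
		printf("pte[0]=%#x pte[1]=%#x\n", pgdir[0][0], pgdir[0][1]);
	}
	return 0;
}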