Example #1
bool IPT_Replace(
        PID outProcessID,
        LPN outPageNumber,
        PID inProcessID,
        LPN inPageNumber,
        MMFI inFrame)
{
    ASSERT_PRINT("Entering:IPT_Replace()\n");
    int line = 0;
    if(!IPT_FindIPTLine(0,outProcessID,outPageNumber, &line))
        return FALSE;

/* YANIV: why do you do this? I couldn't understand it...
    IPT_t_p newIPTLine;
    if(IPT_CreateIPT_t_p(inProcessID,inPageNumber,inFrame,&newIPTLine))
    {
        ASSERT_PRINT("Exiting:IPT_Replace() - Cannot allocate memory for IPT line\n");
        return FALSE;
    }
*/
    IPT_t_p lineToDelete = IPT[line];
    // unlink the victim from its collision chain before freeing it, so the
    // neighbours' prev/next pointers are not left dangling
    if (lineToDelete->prev != NULL)
        lineToDelete->prev->next = lineToDelete->next;
    if (lineToDelete->next != NULL)
        lineToDelete->next->prev = lineToDelete->prev;
    IPT[line] = NULL;
    totalPagesInIPT--; // IPT_Add() below increments the counter for the new entry
    MemoryAddress_t mem;
    mem.pageNumber = inPageNumber;
    mem.processID = inProcessID;
    int HATStartIndex = HAT_PRIVATE_Hash(mem);
    IPT_Add(HATStartIndex, inProcessID, inPageNumber, inFrame);
    free(lineToDelete);
    ASSERT_PRINT("Exiting:IPT_Replace() with return value: TRUE\n");
    return TRUE;
}
Example #2
int IPT_FindEmptyFrame()
{
    ASSERT_PRINT("Entering:IPT_FindEmptyFrame()\n");
    int i = 0;
    bool* frameArry = calloc(SIZE_OF_IPT, sizeof(bool)); // calloc zero-initializes every slot to FALSE
    if (frameArry == NULL)
        return -1;

    // mark every frame that is currently referenced by an IPT entry
    for (i = 0; i < SIZE_OF_IPT; i++)
        if (IPT[i] != NULL && IPT[i]->frame != -1)
            frameArry[IPT[i]->frame] = TRUE;

    // find the first frame that is not marked as used (bounds check comes first)
    i = 0;
    while (i < SIZE_OF_IPT && frameArry[i])
        i++;

    if (i >= SIZE_OF_IPT)
    {
        ASSERT_PRINT("Exiting:IPT_FindEmptyFrame() with return value: FALSE\n");
        i = -1;
    }
    else
        ASSERT_PRINT("Exiting:IPT_FindEmptyFrame() with return value: TRUE, frame = %d\n", i);
    free(frameArry);
    return i;
}
Example #3
bool IPT_Remove(
        int HATPointedIndex,
        PID processID,
        LPN pageNumber)
{
    ASSERT_PRINT("Entering:IPT_Remove()\n");
    int line = -1;
    if (!IPT_FindIPTLine(HATPointedIndex, processID, pageNumber, &line))
    {
        // the entry is not in the IPT.
        return FALSE;
    }
    IPT_t_p toDelete = IPT[line];
    IPT_t_p father = toDelete->prev;
    IPT_t_p son = toDelete->next;
    // unlink the entry from its collision chain; either neighbour may be NULL
    if (father != NULL)
        father->next = son;
    if (son != NULL)
        son->prev = father;
    IPT[line] = NULL;
    free(toDelete);
    totalPagesInIPT--;
    ASSERT_PRINT("Exiting:IPT_Remove() with return value: TRUE\n");
    return TRUE;
}
Example #4
/*!	Called by UnmapPage() after performing the architecture specific part.
	Looks up the page, updates its flags, removes the page-area mapping, and
	requeues the page, if necessary.
*/
void
VMTranslationMap::PageUnmapped(VMArea* area, page_num_t pageNumber,
	bool accessed, bool modified, bool updatePageQueue)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		recursive_lock_unlock(&fLock);
		return;
	}

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR
		", accessed: %d, modified: %d", pageNumber, accessed, modified);

	// transfer the accessed/dirty flags to the page
	page->accessed |= accessed;
	page->modified |= modified;

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR ", accessed: %d, modified: %d", page,
			pageNumber, accessed, modified);
	} else
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped()) {
		atomic_add(&gMappedPagesCount, -1);

		if (updatePageQueue) {
			if (page->Cache()->temporary)
				vm_page_set_state(page, PAGE_STATE_INACTIVE);
			else if (page->modified)
				vm_page_set_state(page, PAGE_STATE_MODIFIED);
			else
				vm_page_set_state(page, PAGE_STATE_CACHED);
		}
	}

	if (mapping != NULL) {
		bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
		object_cache_free(gPageMappingsObjectCache, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY
				| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
	}
}
Example #5
bool QUEUES_Init() {
    BufferSize = 0x200;

    PROCESSES_mutex = calloc(2, sizeof (sem_t*));
    if (PROCESSES_mutex == NULL)
        return FALSE;

    PROCESSES_mutex[0] = calloc(MaxNumOfProcesses, sizeof (sem_t));
    PROCESSES_mutex[1] = calloc(MaxNumOfProcesses, sizeof (sem_t));
    PROCESSES_empty = calloc(MaxNumOfProcesses, sizeof (sem_t));
    PROCESSES_full = calloc(MaxNumOfProcesses, sizeof (sem_t));
    if (PROCESSES_mutex[0] == NULL || PROCESSES_mutex[1] == NULL
            || PROCESSES_empty == NULL || PROCESSES_full == NULL) {
        ASSERT_PRINT("Error while allocating process semaphore arrays\n");
        return FALSE;
    }


    int i;
    for (i = 0; i < MaxNumOfProcesses; i++) {
        sem_init(&PROCESSES_mutex[0][i], 0, 1); // Controls access to critical section
        sem_init(&PROCESSES_empty[i], 0, BufferSize); // counts number of empty buffer slots
        sem_init(&PROCESSES_full[i], 0, 0); // counts number of full buffer slots
    }

    for (i = 0; i < MaxNumOfProcesses; i++)
        sem_init(&PROCESSES_mutex[1][i], 0, 0);

    sem_init(&PRM_mutex, 0, 1); // Controls access to critical section
    sem_init(&PRM_empty, 0, BufferSize); // counts number of empty buffer slots
    sem_init(&PRM_full, 0, 0); // counts number of full buffer slots


    //init process queue - mailing box
    ProcessQueues = calloc(MaxNumOfProcesses, sizeof (Queue_t_p));
    if (ProcessQueues == 0) {
        ASSERT_PRINT("Error While creating ProcessQueues\n");
        return FALSE;
    }
    for (i = 0; i < MaxNumOfProcesses; i++) {
        ProcessQueues[i] = malloc(sizeof (Queue_t));
        if (ProcessQueues[i] == NULL) {
            ASSERT_PRINT("Error While creating ProcessQueues[%d]\n", i);
            return FALSE;
        }
        ProcessQueues[i]->head = NULL;
    }

    PRMQueue = malloc(sizeof (Queue_t));
    if (PRMQueue == NULL) {
        ASSERT_PRINT("Error While creating PRMQueue\n");
        return FALSE;
    }

    PRMQueue->head = NULL;

    ASSERT_PRINT("Exiting:QUEUES_Init\n");
    return TRUE;
}
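QUEUES_Init() sets up the classic bounded-buffer semaphore triple per process (mutex, empty, full). The following is a minimal sketch of how a producer and a consumer would drive those semaphores; Message_t, enqueue_message() and dequeue_message() are hypothetical stand-ins for the project's own queue payload and insertion/removal helpers, not part of the original code.

#include <semaphore.h>

/* Hypothetical producer: post one message into process pid's mailbox. */
void Mailbox_Send(PID pid, Message_t msg)
{
    sem_wait(&PROCESSES_empty[pid]);    // wait for a free buffer slot
    sem_wait(&PROCESSES_mutex[0][pid]); // enter the critical section
    enqueue_message(ProcessQueues[pid], msg);
    sem_post(&PROCESSES_mutex[0][pid]); // leave the critical section
    sem_post(&PROCESSES_full[pid]);     // signal one more full slot
}

/* Hypothetical consumer: block until a message is available. */
Message_t Mailbox_Receive(PID pid)
{
    sem_wait(&PROCESSES_full[pid]);     // wait for a full slot
    sem_wait(&PROCESSES_mutex[0][pid]);
    Message_t msg = dequeue_message(ProcessQueues[pid]);
    sem_post(&PROCESSES_mutex[0][pid]);
    sem_post(&PROCESSES_empty[pid]);    // free the slot again
    return msg;
}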
Example #6
PCB_t_p PCB_GetByProcessID(PID id)
{
    ASSERT_PRINT("Entering:PCB_GetByProcessID(%d)\n",id);
    int i=0;
    for(i=0; i<MaxNumOfProcesses; i++)
        if(PCBArray[i].processID == id)
        {
            ASSERT_PRINT("Exiting:PCB_GetByProcessID(%d)->%d\n",id,i);
            return &PCBArray[i];
        }
    ASSERT_PRINT("Exiting:PCB_GetByProcessID(%d)->NULL\n",id);
    return NULL;
}
Example #7
int IPT_FindLineByFrame(MMFI frame)
{
    ASSERT_PRINT("Entering:IPT_FindLineByFrame()\n");
    int i = 0;
    for (i = 0; i < SIZE_OF_IPT; i++)
        if (IPT[i] != NULL && IPT[i]->frame == frame)
        {
            ASSERT_PRINT("Exiting:IPT_FindLineByFrame() with return value: TRUE, line = %d\n", i);
            return i;
        }
    ASSERT_PRINT("Exiting:IPT_FindLineByFrame() with return value: FALSE\n");
    return -1;
}
Example #8
bool IPT_Init()
{
    ASSERT_PRINT("Entering:IPT_Init()\n");
    IPT = calloc(SIZE_OF_IPT, sizeof (IPT_t_p)); // IPT is an array of pointers to IPT lines
    if (IPT == NULL)
        return FALSE;
    // calloc already zero-initializes every slot, so no explicit NULL loop is needed
    totalPagesInIPT = 0;
    ASSERT_PRINT("Exiting:IPT_Init()\n");
    return TRUE;
}
Example #9
int VertexInfo::intersection_find_next_facet(Plane iplane, int facet_id)
{
	DEBUG_START;
	
	if (auto polyhedron = parentPolyhedron.lock())
	{
		int sgn_curr = polyhedron->signum(
				polyhedron->vertices[indFacets[2 * numFacets]], iplane);
		for (int i = 0; i < numFacets; ++i)
		{
			int sgn_prev = sgn_curr;
			sgn_curr = polyhedron->signum(
					polyhedron->vertices[indFacets[i + numFacets + 1]],
					iplane);
			if (sgn_curr != sgn_prev)
			{
				if (indFacets[i] != facet_id)
				{
					DEBUG_END;
					return indFacets[i];
				}
			}
		}
	}
	else
	{
		ASSERT_PRINT(0, "parentPolyhedron expired!");
		DEBUG_END;
	}
	return -1;
}
Example #10
status_t
X86VMTranslationMap64Bit::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
	uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("X86VMTranslationMap64Bit::Map(%#" B_PRIxADDR ", %#" B_PRIxPHYSADDR
		")\n", virtualAddress, physicalAddress);

	ThreadCPUPinner pinner(thread_get_current_thread());

	// Look up the page table for the virtual address, allocating new tables
	// if required. Shouldn't fail.
	uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
		fPagingStructures->VirtualPML4(), virtualAddress, fIsKernelMap,
		true, reservation, fPageMapper, fMapCount);
	ASSERT(entry != NULL);

	// The entry should not already exist.
	ASSERT_PRINT((*entry & X86_64_PTE_PRESENT) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
		virtualAddress, *entry);

	// Fill in the table entry.
	X86PagingMethod64Bit::PutPageTableEntryInTable(entry, physicalAddress,
		attributes, memoryType, fIsKernelMap);

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return 0;
}
Example #11
bool IPT_FindFrame(
        int HATPointedIndex,
        PID processID,
        LPN pageNumber,
        OUT MMFI *frame)
{
    ASSERT_PRINT("Entering:IPT_FindFramPRM_ReplaceMMFrameWithDiskFramee()\n");
    int line = -1;
    if (IPT_FindIPTLine(HATPointedIndex,processID,pageNumber,&line))
    {
        *frame = IPT[line]->frame;
        ASSERT_PRINT("Exiting:IPT_FindFrame() with return value: TRUE, frame=%d\n", *frame);
        return TRUE;
    }
    ASSERT_PRINT("Exiting:IPT_FindFrame() with return value: FALSE\n");
    return FALSE;
}
Example #12
bool PCB_Free()
{
    ASSERT_PRINT("Entering:PCB_Free\n");
    if(PCBArray==NULL)
        return TRUE;
    free(PCBArray);
    PCBArray = NULL; // avoid a dangling pointer if PCB_Free() is called again
    ASSERT_PRINT("Exiting:PCB_Free\n");
    return TRUE;
}
Example #13
int PCB_GetFreeProcessID()
{
    ASSERT_PRINT("Entering:PCB_GetFreeProcessID\n");
    int i=0;
    for(i=0; i<MaxNumOfProcesses; i++)
        if(PCBArray[i].active == FALSE)
            return i;
    return -1;
}
Example #14
PCB_t_p PCB_AllocateProcess(PID id,int start,int end)
{
    ASSERT_PRINT("Entering:PCB_AllocateProcess(%d,%d,%d)\n",id,start,end);
    PCBArray[id].end=end;
    PCBArray[id].processID = id;
    PCBArray[id].start = start;
    PCBArray[id].active = TRUE;
    return &PCBArray[id];
}
Example #15
IPT_t_p IPT_CreateIPT_t_p(
        PID processID,
        LPN pageNumber,
        MMFI frame)
{
    ASSERT_PRINT("Entering:IPT_CreateIPPRM_ReplaceMMFrameWithDiskFrameT_t_p()\n");
    IPT_t_p newIPTLine;
    if (!(newIPTLine = malloc(sizeof (IPT_t))))
        return FALSE;

    (newIPTLine)->dirtyBit = 0;
    (newIPTLine)->frame = frame;
    (newIPTLine)->next = NULL;
    (newIPTLine)->pageNumber = pageNumber;
    (newIPTLine)->processID = processID;
    (newIPTLine)->referenceBit = 0;
    ASSERT_PRINT("Exiting:IPT_CreateIPT_t_p()\n");
    return newIPTLine;
}
Example #16
/*!	Called by ClearAccessedAndModified() after performing the architecture
	specific part.
	Looks up the page and removes the page-area mapping.
*/
void
VMTranslationMap::UnaccessedPageUnmapped(VMArea* area, page_num_t pageNumber)
{
	if (area->cache_type == CACHE_TYPE_DEVICE) {
		recursive_lock_unlock(&fLock);
		return;
	}

	// get the page
	vm_page* page = vm_lookup_page(pageNumber);
	ASSERT_PRINT(page != NULL, "page number: %#" B_PRIxPHYSADDR, pageNumber);

	// remove the mapping object/decrement the wired_count of the page
	vm_page_mapping* mapping = NULL;
	if (area->wiring == B_NO_LOCK) {
		vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
		while ((mapping = iterator.Next()) != NULL) {
			if (mapping->area == area) {
				area->mappings.Remove(mapping);
				page->mappings.Remove(mapping);
				break;
			}
		}

		ASSERT_PRINT(mapping != NULL, "page: %p, page number: %#"
			B_PRIxPHYSADDR, page, pageNumber);
	} else
		page->DecrementWiredCount();

	recursive_lock_unlock(&fLock);

	if (!page->IsMapped())
		atomic_add(&gMappedPagesCount, -1);

	if (mapping != NULL) {
		object_cache_free(gPageMappingsObjectCache, mapping,
			CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
			// Since this is called by the page daemon, we never want to lock
			// the kernel address space.
	}
}
Example #17
bool PCB_Init()
{
    ASSERT_PRINT("Entering:PCB_Init\n");
    PCBArray = calloc(MaxNumOfProcesses,sizeof(PCB_t));

    if(PCBArray==NULL)
        return FALSE;
    int i=0;
    for(i=0; i<MaxNumOfProcesses; i++)
        PCBArray[i].active = FALSE;
    return TRUE;
}
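Taken together, the PCB helpers above form a small lifecycle: initialize the table, pick a free slot, allocate it, look it up, and release the table. The following is a minimal sketch of that call order, assuming the declarations above are in scope; the start/end address range passed to PCB_AllocateProcess() is made up purely for illustration.

/* Hypothetical driver exercising the PCB lifecycle shown above. */
bool PCB_Demo(void)
{
    if (!PCB_Init())
        return FALSE;

    int id = PCB_GetFreeProcessID();   // -1 when every PCB slot is active
    if (id == -1)
        return FALSE;

    PCB_t_p pcb = PCB_AllocateProcess(id, /*start*/ 0, /*end*/ 1023); // illustrative range
    if (pcb == NULL || PCB_GetByProcessID(id) != pcb)
        return FALSE;

    return PCB_Free();
}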
Example #18
bool IPT_Add(
        int HATPointedIndex,
        PID processID,
        LPN pageNumber,
        MMFI frame)
{
    ASSERT_PRINT("Entering:IPT_Add()\n");
    IPT_t_p newIPTLine;
    newIPTLine = IPT_CreateIPT_t_p(processID, pageNumber, frame);
    IPT_t_p pointer = IPT[HATPointedIndex];
    if (pointer == NULL) //the field was never invoked. 
    {
        newIPTLine->prev = 0;
        newIPTLine->next = 0;
        IPT[HATPointedIndex] = newIPTLine;
        HAT[HATPointedIndex] = newIPTLine;
        totalPagesInIPT++;
        return TRUE;
    }

    bool foundFrame = FALSE;
    int iterations = 0;
    int temp = HATPointedIndex;
    while (IPT[temp] != NULL && iterations <= SIZE_OF_IPT) {
        INDEX_INC(temp);
        iterations++;
    }
    if (iterations > SIZE_OF_IPT) {
        return FALSE;
    } else
        foundFrame = TRUE;
    newIPTLine->next = pointer;
    pointer->prev = newIPTLine;
    newIPTLine->prev = 0;
    IPT[temp] = newIPTLine;
    HAT[HATPointedIndex] = newIPTLine;
    totalPagesInIPT++;
    ASSERT_PRINT("Exiting:IPT_Add()\n");
    return TRUE;
}
Example #19
bool IPT_FindIPTLine(
        int HATPointedIndex,
        PID processID,
        LPN pageNumber,
        OUT int *line)
{
    ASSERT_PRINT("Entering:IPT_FindIPTLine()\n");
    int iterations = 0;
    while (IPT[HATPointedIndex] != 0 && iterations <= SIZE_OF_IPT)
    {
        if (IPT[HATPointedIndex]->processID == processID && IPT[HATPointedIndex]->pageNumber == pageNumber)
        {
            *line = HATPointedIndex;
            ASSERT_PRINT("Exiting:IPT_FindIPTLine() with return value: TRUE\n");
            return TRUE;
        }
        INDEX_INC(HATPointedIndex);
        iterations++;
    }
    //the page is not in the IPT, i.e. not in the MM
    ASSERT_PRINT("Exiting:IPT_FindIPTLine() with return value: FALSE\n");
    return FALSE;

}
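The IPT/HAT routines shown in the examples above combine into a simple map-a-page flow: initialize the table, pick a free frame, hash the (process, page) pair to a HAT index, add the mapping, and later look it up or remove it. The following is a minimal sketch of that flow, assuming the module's globals and typedefs (PID, LPN, MMFI, MemoryAddress_t, HAT_PRIVATE_Hash) are in scope exactly as used by IPT_Replace() in Example #1; it is an illustration, not part of the original code.

/* Hypothetical sketch: place (pid, page) into the inverted page table and read it back. */
bool IPT_DemoMapPage(PID pid, LPN page)
{
    if (!IPT_Init())
        return FALSE;

    int frame = IPT_FindEmptyFrame();     // -1 when no free frame exists
    if (frame == -1)
        return FALSE;

    MemoryAddress_t mem;
    mem.processID = pid;
    mem.pageNumber = page;
    int hatIndex = HAT_PRIVATE_Hash(mem); // same hashing as IPT_Replace()

    if (!IPT_Add(hatIndex, pid, page, frame))
        return FALSE;

    MMFI found;
    if (!IPT_FindFrame(hatIndex, pid, page, &found) || found != frame)
        return FALSE;

    return IPT_Remove(hatIndex, pid, page);
}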
Example #20
status_t
ARMVMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);

/*
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
*/
	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	// check to see if a page table exists for this range
	uint32 index = VADDR_TO_PDENT(va);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		phys_addr_t pgtable;
		vm_page *page;

		// we need to allocate a pgtable
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);

		// put it in the pgdir
		ARMPagingMethod32Bit::PutPageTableInPageDir(&pd[index], pgtable,
			attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

		// update any other page directories, if it maps kernel space
		if (index >= FIRST_KERNEL_PGDIR_ENT
			&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
			ARMPagingStructures32Bit::UpdateAllPageDirs(index, pd[index]);
		}

		fMapCount++;
	}

	// now, fill in the pentry
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	ASSERT_PRINT((pt[index] & ARM_PTE_TYPE_MASK) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[index]);

	ARMPagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
		memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return 0;
}
Example #21
status_t
M68KVMTranslationMap040::Map(addr_t va, phys_addr_t pa, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("M68KVMTranslationMap040::Map: entry pa 0x%lx va 0x%lx\n", pa, va);

/*
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
*/
	page_root_entry *pr = fPagingStructures->pgroot_virt;
	page_directory_entry *pd;
	page_table_entry *pt;
	addr_t pd_pg, pt_pg;
	uint32 rindex, dindex, pindex;


	// check to see if a page directory exists for this range
	rindex = VADDR_TO_PRENT(va);
	if (PRE_TYPE(pr[rindex]) != DT_ROOT) {
		phys_addr_t pgdir;
		vm_page *page;
		uint32 i;

		// we need to allocate a pgdir group
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgdir = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("::Map: asked for free page for pgdir. 0x%lx\n", pgdir);

		// for each pgdir on the allocated page:
		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
			uint32 aindex = rindex & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
			page_root_entry *apr = &pr[aindex + i];

			// put in the pgroot
			M68KPagingMethod040::PutPageDirInPageRoot(apr, pgdir, attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

			// update any other page roots, if it maps kernel space
			//XXX: suboptimal, should batch them
			if ((aindex+i) >= FIRST_KERNEL_PGDIR_ENT && (aindex+i)
					< (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS))
				M68KPagingStructures040::UpdateAllPageDirs((aindex+i),
					pr[aindex+i]);

			pgdir += SIZ_DIRTBL;
		}
		fMapCount++;
	}
	// now, fill in the pentry
	//XXX: is this required?
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pd = (page_directory_entry*)MapperGetPageTableAt(
		PRE_TO_PA(pr[rindex]));

	//pinner.Unlock();

	// we want the table at rindex, not at rindex%(tbl/page)
	//pd += (rindex % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;

	// check to see if a page table exists for this range
	dindex = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[dindex]) != DT_DIR) {
		phys_addr_t pgtable;
		vm_page *page;
		uint32 i;

		// we need to allocate a pgtable group
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("::Map: asked for free page for pgtable. 0x%lx\n", pgtable);

		// for each pgtable on the allocated page:
		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
			uint32 aindex = dindex & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
			page_directory_entry *apd = &pd[aindex + i];

			// put in the pgdir
			M68KPagingMethod040::PutPageTableInPageDir(apd, pgtable, attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

			// no need to update other page directories for kernel space;
			// the root-level already point to us.

			pgtable += SIZ_PAGETBL;
		}

#warning M68K: really mean map_count++ ??
		fMapCount++;
	}

	// now, fill in the pentry
	//ThreadCPUPinner pinner(thread);

	pt = (page_table_entry*)MapperGetPageTableAt(PDE_TO_PA(pd[dindex]));
	// we want the table at rindex, not at rindex%(tbl/page)
	//pt += (dindex % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

	pindex = VADDR_TO_PTENT(va);

	ASSERT_PRINT((PTE_TYPE(pt[pindex]) != DT_INVALID) == 0,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[pindex]);

	M68KPagingMethod040::PutPageTableEntryInTable(&pt[pindex], pa, attributes,
		memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return B_OK;
}