Code example #1
status_t
M68KVMTranslationMap040::Unmap(addr_t start, addr_t end)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("M68KVMTranslationMap040::Unmap: asked to free pages 0x%lx to 0x%lx\n", start, end);

	page_root_entry *pr = fPagingStructures->pgroot_virt;
	page_directory_entry *pd;
	page_table_entry *pt;
	int index;

	do {
		index = VADDR_TO_PRENT(start);
		if (PRE_TYPE(pr[index]) != DT_ROOT) {
			// no pagedir here, move the start up to access the next page
			// dir group
			start = ROUNDUP(start + 1, kPageDirAlignment);
			continue;
		}

		Thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		pd = (page_directory_entry*)MapperGetPageTableAt(
			PRE_TO_PA(pr[index]));
		// we want the table at rindex, not at rindex%(tbl/page)
		//pd += (index % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;


		index = VADDR_TO_PDENT(start);
		if (PDE_TYPE(pd[index]) != DT_DIR) {
			// no page table here, move the start up to access the next page
			// table group
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		pt = (page_table_entry*)MapperGetPageTableAt(
			PDE_TO_PA(pd[index]));
		// we want the table at rindex, not at rindex%(tbl/page)
		//pt += (index % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

		for (index = VADDR_TO_PTENT(start);
				(index < NUM_PAGEENT_PER_TBL) && (start < end);
				index++, start += B_PAGE_SIZE) {
			if (PTE_TYPE(pt[index]) != DT_PAGE
				&& PTE_TYPE(pt[index]) != DT_INDIRECT) {
				// page mapping not valid
				continue;
			}

			TRACE("::Unmap: removing page 0x%lx\n", start);

			page_table_entry oldEntry
				= M68KPagingMethod040::ClearPageTableEntry(&pt[index]);
			fMapCount--;

			if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then could the entry have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
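		// Note: "start" wraps to 0 if the round-up or the final increment runs
		// past the top of the address space; the loop condition below catches
		// that and terminates.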
	} while (start != 0 && start < end);

	return B_OK;
}
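For orientation: the walk above follows the 68040 three-level translation tree (page root, page directory, page table) via the VADDR_TO_PRENT/PDENT/PTENT macros from the M68K paging headers. A rough sketch of the index split, assuming the classic 68040 layout for 4 KB pages (7 root bits, 7 directory bits and 6 table bits over a 12-bit offset); the SKETCH_* names are made up here and Haiku's real macros are authoritative:

/* Illustrative only: assumed 68040 index split for 4 KB pages (7/7/6/12). */
#define SKETCH_VADDR_TO_PRENT(va)	(((va) >> 25) & 0x7f)	/* bits 31..25 */
#define SKETCH_VADDR_TO_PDENT(va)	(((va) >> 18) & 0x7f)	/* bits 24..18 */
#define SKETCH_VADDR_TO_PTENT(va)	(((va) >> 12) & 0x3f)	/* bits 17..12 */

With that split a single root entry spans 32 MB and a single directory entry 256 KB, which is what the kPageDirAlignment and kPageTableAlignment round-ups above skip over when a level is missing.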
Code example #2
bool
M68KVMTranslationMap040::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_root_entry* pr = fPagingStructures->pgroot_virt;

	TRACE("M68KVMTranslationMap040::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

#if 0
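	// NOTE: everything up to the matching #endif is compiled out and still
	// assumes a two-level (pd/pt) walk ("pd" is not even declared here); until
	// it is ported to the 040 three-level layout this method always returns
	// false.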
	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & M68K_PDE_PRESENT) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)MapperGetPageTableAt(
		pd[index] & M68K_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);

	// perform the deed
	page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = pt[index];
			if ((oldEntry & M68K_PTE_PRESENT) == 0) {
				// page mapping not valid
				return false;
			}

			if (oldEntry & M68K_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = M68KPagingMethod040::ClearPageTableEntryFlags(
					&pt[index], M68K_PTE_ACCESSED | M68K_PTE_DIRTY);
				break;
			}

			// page hasn't been accessed -- unmap it
			if (M68KPagingMethod040::TestAndSetPageTableEntry(&pt[index], 0,
					oldEntry) == oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
		oldEntry = M68KPagingMethod040::ClearPageTableEntryFlags(&pt[index],
			M68K_PTE_ACCESSED | M68K_PTE_DIRTY);
	}

	pinner.Unlock();

	_modified = (oldEntry & M68K_PTE_DIRTY) != 0;

	if ((oldEntry & M68K_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the accessed
		// flag was set, since only then could the entry have been in any TLB.
		InvalidatePage(address);

		Flush();

		return true;
	}

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & M68K_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

#endif
	return false;
}
Code example #3
status_t
M68KVMTranslationMap040::Map(addr_t va, phys_addr_t pa, uint32 attributes,
	uint32 memoryType, vm_page_reservation* reservation)
{
	TRACE("M68KVMTranslationMap040::Map: entry pa 0x%lx va 0x%lx\n", pa, va);

/*
	dprintf("pgdir at 0x%x\n", pgdir);
	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
*/
	page_root_entry *pr = fPagingStructures->pgroot_virt;
	page_directory_entry *pd;
	page_table_entry *pt;
	addr_t pd_pg, pt_pg;
	uint32 rindex, dindex, pindex;


	// check to see if a page directory exists for this range
	rindex = VADDR_TO_PRENT(va);
	if (PRE_TYPE(pr[rindex]) != DT_ROOT) {
		phys_addr_t pgdir;
		vm_page *page;
		uint32 i;

		// we need to allocate a pgdir group
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgdir = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("::Map: asked for free page for pgdir. 0x%lx\n", pgdir);

		// for each pgdir on the allocated page:
		for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
			uint32 aindex = rindex & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
			page_root_entry *apr = &pr[aindex + i];

			// put in the pgroot
			M68KPagingMethod040::PutPageDirInPageRoot(apr, pgdir, attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

			// update any other page roots, if it maps kernel space
			//XXX: suboptimal, should batch them
			if ((aindex+i) >= FIRST_KERNEL_PGDIR_ENT && (aindex+i)
					< (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS))
				M68KPagingStructures040::UpdateAllPageDirs((aindex+i),
					pr[aindex+i]);

			pgdir += SIZ_DIRTBL;
		}
		fMapCount++;
	}
	// now, fill in the pentry
	//XXX: is this required?
	Thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	pd = (page_directory_entry*)MapperGetPageTableAt(
		PRE_TO_PA(pr[rindex]));

	//pinner.Unlock();

	// we want the table at rindex, not at rindex%(tbl/page)
	//pd += (rindex % NUM_DIRTBL_PER_PAGE) * NUM_DIRENT_PER_TBL;

	// check to see if a page table exists for this range
	dindex = VADDR_TO_PDENT(va);
	if (PDE_TYPE(pd[dindex]) != DT_DIR) {
		phys_addr_t pgtable;
		vm_page *page;
		uint32 i;

		// we need to allocate a pgtable group
		page = vm_page_allocate_page(reservation,
			PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

		DEBUG_PAGE_ACCESS_END(page);

		pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

		TRACE("::Map: asked for free page for pgtable. 0x%lx\n", pgtable);

		// for each pgtable on the allocated page:
		for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
			uint32 aindex = dindex & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
			page_directory_entry *apd = &pd[aindex + i];

			// put in the pgdir
			M68KPagingMethod040::PutPageTableInPageDir(apd, pgtable, attributes
				| ((attributes & B_USER_PROTECTION) != 0
						? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

			// no need to update other page directories for kernel space;
			// the root level already points to us.

			pgtable += SIZ_PAGETBL;
		}

#warning M68K: really mean map_count++ ??
		fMapCount++;
	}

	// now, fill in the pentry
	//ThreadCPUPinner pinner(thread);

	pt = (page_table_entry*)MapperGetPageTableAt(PDE_TO_PA(pd[dindex]));
	// we want the table at rindex, not at rindex%(tbl/page)
	//pt += (dindex % NUM_PAGETBL_PER_PAGE) * NUM_PAGEENT_PER_TBL;

	pindex = VADDR_TO_PTENT(va);

	ASSERT_PRINT(PTE_TYPE(pt[pindex]) == DT_INVALID,
		"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
		pt[pindex]);

	M68KPagingMethod040::PutPageTableEntryInTable(&pt[pindex], pa, attributes,
		memoryType, fIsKernelMap);

	pinner.Unlock();

	// Note: We don't need to invalidate the TLB for this address, as previously
	// the entry was not present and the TLB doesn't cache those entries.

	fMapCount++;

	return B_OK;
}
Code example #4
status_t
ARMVMTranslationMap32Bit::Protect(addr_t start, addr_t end, uint32 attributes,
                                  uint32 memoryType)
{
    start = ROUNDDOWN(start, B_PAGE_SIZE);
    if (start >= end)
        return B_OK;

    TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
          attributes);
#if 0 //IRA
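    // NOTE: the body below is compiled out, so Protect() is currently a no-op
    // that just returns B_OK.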
    // compute protection flags
    uint32 newProtectionFlags = 0;
    if ((attributes & B_USER_PROTECTION) != 0) {
        newProtectionFlags = ARM_PTE_USER;
        if ((attributes & B_WRITE_AREA) != 0)
            newProtectionFlags |= ARM_PTE_WRITABLE;
    } else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
        newProtectionFlags = ARM_PTE_WRITABLE;

    page_directory_entry *pd = fPagingStructures->pgdir_virt;

    do {
        int index = VADDR_TO_PDENT(start);
        if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
            // no page table here, move the start up to access the next page
            // table
            start = ROUNDUP(start + 1, kPageTableAlignment);
            continue;
        }

        Thread* thread = thread_get_current_thread();
        ThreadCPUPinner pinner(thread);

        page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
                                   pd[index] & ARM_PDE_ADDRESS_MASK);

        for (index = VADDR_TO_PTENT(start); index < 256 && start < end;
                index++, start += B_PAGE_SIZE) {
            page_table_entry entry = pt[index];
            if ((entry & ARM_PTE_PRESENT) == 0) {
                // page mapping not valid
                continue;
            }

            TRACE("protect_tmap: protect page 0x%lx\n", start);

            // set the new protection flags -- we want to do that atomically,
            // without changing the accessed or dirty flag
            page_table_entry oldEntry;
            while (true) {
                oldEntry = ARMPagingMethod32Bit::TestAndSetPageTableEntry(
                               &pt[index],
                               (entry & ~(ARM_PTE_PROTECTION_MASK
                                          | ARM_PTE_MEMORY_TYPE_MASK))
                               | newProtectionFlags
                               | ARMPagingMethod32Bit::MemoryTypeToPageTableEntryFlags(
                                   memoryType),
                               entry);
                if (oldEntry == entry)
                    break;
                entry = oldEntry;
            }

            if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
                // Note that we only need to invalidate the address if the
                // accessed flag was set, since only then could the entry have
                // been in any TLB.
                InvalidatePage(start);
            }
        }
    } while (start != 0 && start < end);
#endif
    return B_OK;
}
Code example #5
void
ARMVMTranslationMap32Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
                                    bool ignoreTopCachePageFlags)
{
    if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
        ARMVMTranslationMap32Bit::UnmapPages(area, area->Base(), area->Size(),
                                             true);
        return;
    }

    bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
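    // The hardware entries (cleared below) may only be skipped for pages of
    // the area's own top cache, and then only when the whole address space is
    // being deleted and the caller ignores the top cache's page flags.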

    page_directory_entry* pd = fPagingStructures->pgdir_virt;

    RecursiveLocker locker(fLock);

    VMAreaMappings mappings;
    mappings.MoveFrom(&area->mappings);

    for (VMAreaMappings::Iterator it = mappings.GetIterator();
            vm_page_mapping* mapping = it.Next();) {
        vm_page* page = mapping->page;
        page->mappings.Remove(mapping);

        VMCache* cache = page->Cache();

        bool pageFullyUnmapped = false;
        if (!page->IsMapped()) {
            atomic_add(&gMappedPagesCount, -1);
            pageFullyUnmapped = true;
        }

        if (unmapPages || cache != area->cache) {
            addr_t address = area->Base()
                             + ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

            int index = VADDR_TO_PDENT(address);
            if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
                panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
                      "has no page dir entry", page, area, address);
                continue;
            }

            ThreadCPUPinner pinner(thread_get_current_thread());

            page_table_entry* pt
                = (page_table_entry*)fPageMapper->GetPageTableAt(
                      pd[index] & ARM_PDE_ADDRESS_MASK);
            page_table_entry oldEntry
                = ARMPagingMethod32Bit::ClearPageTableEntry(
                      &pt[VADDR_TO_PTENT(address)]);

            pinner.Unlock();

            if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
                panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
                      "has no page table entry", page, area, address);
                continue;
            }

            // transfer the accessed/dirty flags to the page and invalidate
            // the mapping, if necessary
            if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
                page->accessed = true;

                if (!deletingAddressSpace)
                    InvalidatePage(address);
            }

            if (true /*(oldEntry & ARM_PTE_DIRTY) != 0*/)
                page->modified = true;

            if (pageFullyUnmapped) {
                DEBUG_PAGE_ACCESS_START(page);

                if (cache->temporary)
                    vm_page_set_state(page, PAGE_STATE_INACTIVE);
                else if (page->modified)
                    vm_page_set_state(page, PAGE_STATE_MODIFIED);
                else
                    vm_page_set_state(page, PAGE_STATE_CACHED);

                DEBUG_PAGE_ACCESS_END(page);
            }
        }

        fMapCount--;
    }

    Flush();
    // flush explicitly, since we directly use the lock

    locker.Unlock();

    bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
    uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
                       | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
    while (vm_page_mapping* mapping = mappings.RemoveHead())
        object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}
Code example #6
void
ARMVMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
                                     bool updatePageQueue)
{
    if (size == 0)
        return;

    addr_t start = base;
    addr_t end = base + size - 1;

    TRACE("ARMVMTranslationMap32Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
          B_PRIxADDR ")\n", area, start, end);

    page_directory_entry* pd = fPagingStructures->pgdir_virt;

    VMAreaMappings queue;

    RecursiveLocker locker(fLock);

    do {
        int index = VADDR_TO_PDENT(start);
        if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
            // no page table here, move the start up to access the next page
            // table
            start = ROUNDUP(start + 1, kPageTableAlignment);
            continue;
        }

        Thread* thread = thread_get_current_thread();
        ThreadCPUPinner pinner(thread);

        page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
                                   pd[index] & ARM_PDE_ADDRESS_MASK);

        for (index = VADDR_TO_PTENT(start); (index < 256) && (start < end);
                index++, start += B_PAGE_SIZE) {
            page_table_entry oldEntry
                = ARMPagingMethod32Bit::ClearPageTableEntry(&pt[index]);
            if ((oldEntry & ARM_PTE_TYPE_MASK) == 0)
                continue;

            fMapCount--;

            if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
                // Note that we only need to invalidate the address if the
                // accessed flag was set, since only then could the entry have
                // been in any TLB.
                InvalidatePage(start);
            }

            if (area->cache_type != CACHE_TYPE_DEVICE) {
                // get the page
                vm_page* page = vm_lookup_page(
                                    (oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
                ASSERT(page != NULL);

                DEBUG_PAGE_ACCESS_START(page);

                // transfer the accessed/dirty flags to the page
                if (/*(oldEntry & ARM_PTE_ACCESSED) != 0*/ true) // XXX IRA
                    page->accessed = true;
                if (/*(oldEntry & ARM_PTE_DIRTY) != 0 */ true)
                    page->modified = true;

                // remove the mapping object/decrement the wired_count of the
                // page
                if (area->wiring == B_NO_LOCK) {
                    vm_page_mapping* mapping = NULL;
                    vm_page_mappings::Iterator iterator
                        = page->mappings.GetIterator();
                    while ((mapping = iterator.Next()) != NULL) {
                        if (mapping->area == area)
                            break;
                    }

                    ASSERT(mapping != NULL);

                    area->mappings.Remove(mapping);
                    page->mappings.Remove(mapping);
                    queue.Add(mapping);
                } else
                    page->DecrementWiredCount();

                if (!page->IsMapped()) {
                    atomic_add(&gMappedPagesCount, -1);

                    if (updatePageQueue) {
                        if (page->Cache()->temporary)
                            vm_page_set_state(page, PAGE_STATE_INACTIVE);
                        else if (page->modified)
                            vm_page_set_state(page, PAGE_STATE_MODIFIED);
                        else
                            vm_page_set_state(page, PAGE_STATE_CACHED);
                    }
                }

                DEBUG_PAGE_ACCESS_END(page);
            }
        }

        Flush();
        // flush explicitly, since we directly use the lock
    } while (start != 0 && start < end);

    // TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
    // really critical here, as in all cases this method is used, the unmapped
    // area range is unmapped for good (resized/cut) and the pages will likely
    // be freed.

    locker.Unlock();

    // free removed mappings
    bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
    uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
                       | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
    while (vm_page_mapping* mapping = queue.RemoveHead())
        object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}
Code example #7
/*!	Caller must have locked the cache of the page to be unmapped.
	This object shouldn't be locked.
*/
status_t
ARMVMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
                                    bool updatePageQueue)
{
    ASSERT(address % B_PAGE_SIZE == 0);

    page_directory_entry* pd = fPagingStructures->pgdir_virt;

    TRACE("ARMVMTranslationMap32Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);

    RecursiveLocker locker(fLock);

    int index = VADDR_TO_PDENT(address);
    if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
        return B_ENTRY_NOT_FOUND;

    ThreadCPUPinner pinner(thread_get_current_thread());

    page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
                               pd[index] & ARM_PDE_ADDRESS_MASK);

    index = VADDR_TO_PTENT(address);
    page_table_entry oldEntry = ARMPagingMethod32Bit::ClearPageTableEntry(
                                    &pt[index]);

    pinner.Unlock();

    if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
        // page mapping not valid
        return B_ENTRY_NOT_FOUND;
    }

    fMapCount--;


    if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
        // Note that we only need to invalidate the address if the accessed
        // flag was set, since only then could the entry have been in any TLB.
        InvalidatePage(address);
        Flush();

        // NOTE: Between clearing the page table entry and Flush() other
        // processors (actually even this processor with another thread of the
        // same team) could still access the page in question via their cached
        // entry. We can obviously lose a modified flag in this case, with the
        // effect that the page looks unmodified (and might thus be recycled),
        // but is actually modified.
        // In most cases this is harmless, but for vm_remove_all_page_mappings()
        // this is actually a problem.
        // Interestingly FreeBSD seems to ignore this problem as well
        // (cf. pmap_remove_all()), unless I've missed something.
    }

    locker.Detach();
    // PageUnmapped() will unlock for us

    PageUnmapped(area, (oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
                 true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/, true /*(oldEntry & ARM_PTE_DIRTY) != 0*/,
                 updatePageQueue);

    return B_OK;
}
Code example #8
status_t
ARMVMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
                              uint32 memoryType, vm_page_reservation* reservation)
{
    TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);

    /*
    	dprintf("pgdir at 0x%x\n", pgdir);
    	dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
    	dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
    	dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
    	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
    	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
    */
    page_directory_entry* pd = fPagingStructures->pgdir_virt;

    // check to see if a page table exists for this range
    uint32 index = VADDR_TO_PDENT(va);
    if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
        phys_addr_t pgtable;
        vm_page *page;

        // we need to allocate a pgtable
        page = vm_page_allocate_page(reservation,
                                     PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);

        DEBUG_PAGE_ACCESS_END(page);

        pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;

        TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);

        // put it in the pgdir
        ARMPagingMethod32Bit::PutPageTableInPageDir(&pd[index], pgtable,
                attributes
                | ((attributes & B_USER_PROTECTION) != 0
                   ? B_WRITE_AREA : B_KERNEL_WRITE_AREA));

        // update any other page directories, if it maps kernel space
        if (index >= FIRST_KERNEL_PGDIR_ENT
                && index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
            ARMPagingStructures32Bit::UpdateAllPageDirs(index, pd[index]);
        }

        fMapCount++;
    }

    // now, fill in the pentry
    Thread* thread = thread_get_current_thread();
    ThreadCPUPinner pinner(thread);

    page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
                               pd[index] & ARM_PDE_ADDRESS_MASK);
    index = VADDR_TO_PTENT(va);

    ASSERT_PRINT((pt[index] & ARM_PTE_TYPE_MASK) == 0,
                 "virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
                 pt[index]);

    ARMPagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
            memoryType, fIsKernelMap);

    pinner.Unlock();

    // Note: We don't need to invalidate the TLB for this address, as previously
    // the entry was not present and the TLB doesn't cache those entries.

    fMapCount++;

    return B_OK;
}
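For context, callers normally do not invoke Map() bare: they first reserve physical pages for any page tables the call may have to allocate, and they hold the translation map's lock around the call. Below is a minimal sketch of that pattern, assuming the usual Haiku VM helpers (MaxPagesNeededToMap(), vm_page_reserve_pages()/vm_page_unreserve_pages(), Lock()/Unlock()); treat the exact signatures as assumptions to be checked against vm_page.h and VMTranslationMap.h.

// Sketch only: signatures assumed, not verified against the headers.
static status_t
map_one_page_sketch(VMTranslationMap* map, addr_t va, phys_addr_t pa)
{
    vm_page_reservation reservation;

    // reserve enough pages for the page tables Map() might have to allocate
    vm_page_reserve_pages(&reservation,
        map->MaxPagesNeededToMap(va, va + B_PAGE_SIZE - 1), VM_PRIORITY_SYSTEM);

    map->Lock();
    status_t status = map->Map(va, pa,
        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0 /* default memory type */,
        &reservation);
    map->Unlock();

    vm_page_unreserve_pages(&reservation);
    return status;
}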