static uintptr_t alloc_pgtable(void) { irqstate_t flags; uintptr_t paddr; FAR uint32_t *l2table; #ifndef CONFIG_ARCH_PGPOOL_MAPPING uint32_t l1save; #endif /* Allocate one physical page for the L2 page table */ paddr = mm_pgalloc(1); if (paddr) { DEBUGASSERT(MM_ISALIGNED(paddr)); flags = irqsave(); #ifdef CONFIG_ARCH_PGPOOL_MAPPING /* Get the virtual address corresponding to the physical page address */ l2table = (FAR uint32_t *)arm_pgvaddr(paddr); #else /* Temporarily map the page into the virtual address space */ l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE); mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS); l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK)); #endif /* Initialize the page table */ memset(l2table, 0, MM_PGSIZE); /* Make sure that the initialized L2 table is flushed to physical * memory. */ arch_flush_dcache((uintptr_t)l2table, (uintptr_t)l2table + MM_PGSIZE); #ifndef CONFIG_ARCH_PGPOOL_MAPPING /* Restore the scratch section page table entry */ mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save); #endif irqrestore(flags); } return paddr; }
/****************************************************************************
 * Name: up_addrenv_initdata
 *
 * Description:
 *   Zero the reserved ARCH_DATA_RESERVE_SIZE bytes at the very beginning
 *   of the .bss/.data user memory region.  The physical page holding that
 *   region is found by reading the first entry of the given L2 page table.
 *
 * Input Parameters:
 *   l2table - Physical address of the L2 page table for the .bss/.data
 *             region (must be non-zero).
 *
 * Returned Value:
 *   Always OK.
 *
 * NOTE(review): this function uses enter/leave_critical_section() while
 * the sibling functions in this file use irqsave()/irqrestore() — confirm
 * which interface this port should standardize on.
 *
 ****************************************************************************/

static int up_addrenv_initdata(uintptr_t l2table)
{
  irqstate_t flags;
  FAR uint32_t *virtptr;
  uintptr_t paddr;
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
  uint32_t l1save;
#endif

  DEBUGASSERT(l2table);
  flags = enter_critical_section();

#ifdef CONFIG_ARCH_PGPOOL_MAPPING
  /* Get the virtual address corresponding to the physical page table
   * address.
   */

  virtptr = (FAR uint32_t *)arm_pgvaddr(l2table);
#else
  /* Temporarily map the page table into the scratch virtual address
   * space, saving the current L1 entry so it can be restored below.
   */

  l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
  mmu_l1_setentry(l2table & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
  virtptr = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (l2table & SECTION_MASK));
#endif

  /* Invalidate the D-Cache so that we read the first L2 entry from
   * physical memory, not from a stale cache line.
   */

  arch_invalidate_dcache((uintptr_t)virtptr,
                         (uintptr_t)virtptr + sizeof(uint32_t));

  /* Get the physical address of the first page of .bss/.data from the
   * first small-page entry of the L2 table.
   */

  paddr = (uintptr_t)(*virtptr) & PTE_SMALL_PADDR_MASK;
  DEBUGASSERT(paddr);

#ifdef CONFIG_ARCH_PGPOOL_MAPPING
  /* Get the virtual address corresponding to the physical page address */

  virtptr = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
  /* Re-point the scratch mapping at the data page itself (l1save still
   * holds the original entry, so no second save is needed).
   */

  mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
  virtptr = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
#endif

  /* Finally, after all of that, we can initialize the tiny region at
   * the beginning of .bss/.data by setting it to zero.
   */

  memset(virtptr, 0, ARCH_DATA_RESERVE_SIZE);

  /* Make sure that the initialized data is flushed to physical memory. */

  arch_flush_dcache((uintptr_t)virtptr,
                    (uintptr_t)virtptr + ARCH_DATA_RESERVE_SIZE);

#ifndef CONFIG_ARCH_PGPOOL_MAPPING
  /* Restore the scratch section L1 page table entry */

  mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif

  leave_critical_section(flags);
  return OK;
}
/****************************************************************************
 * Name: arm_addrenv_destroy_region
 *
 * Description:
 *   Tear down one memory region of an address environment: unhook each
 *   1Mb section's L2 page table from the L1 table, return every mapped
 *   page to the page allocator, and free the L2 page tables themselves.
 *
 * Input Parameters:
 *   list    - Array of L2 page table physical addresses, one entry per
 *             1Mb section (entries may be zero if never allocated).
 *   listlen - Number of entries in 'list'.
 *   vaddr   - Base virtual address of the region.
 *
 ****************************************************************************/

void arm_addrenv_destroy_region(FAR uintptr_t **list, unsigned int listlen,
                                uintptr_t vaddr)
{
  irqstate_t flags;
  uintptr_t paddr;
  FAR uint32_t *l2table;
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
  uint32_t l1save;
#endif
  int i;
  int j;

  bvdbg("listlen=%d vaddr=%08lx\n", listlen, (unsigned long)vaddr);

  /* FIX: the increment previously advanced both 'list' and 'i' while the
   * body indexed list[i], skipping every other entry and reading past the
   * end of the array.  Only 'i' is advanced now.
   */

  for (i = 0; i < listlen; vaddr += SECTION_SIZE, i++)
    {
      /* Unhook the L2 page table from the L1 page table */

      mmu_l1_clrentry(vaddr);

      /* Has this page table been allocated? */

      paddr = (uintptr_t)list[i];
      if (paddr != 0)
        {
          flags = irqsave();

#ifdef CONFIG_ARCH_PGPOOL_MAPPING
          /* Get the virtual address corresponding to the physical page
           * address.
           */

          l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
          /* Temporarily map the page into the virtual address space */

          l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
          mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
                          MMU_MEMFLAGS);
          l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE |
                                     (paddr & SECTION_MASK));
#endif

          /* Return the allocated pages to the page allocator */

          for (j = 0; j < ENTRIES_PER_L2TABLE; j++)
            {
              paddr = *l2table++;
              if (paddr != 0)
                {
                  paddr &= PTE_SMALL_PADDR_MASK;
                  mm_pgfree(paddr, 1);
                }
            }

#ifndef CONFIG_ARCH_PGPOOL_MAPPING
          /* Restore the scratch section L1 page table entry */

          mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
          irqrestore(flags);

          /* And free the L2 page table itself */

          mm_pgfree((uintptr_t)list[i], 1);
        }
    }
}
/****************************************************************************
 * Name: pgalloc
 *
 * Description:
 *   Extend the user heap region by 'npages' pages, backing each new page
 *   of virtual address space with freshly allocated physical memory and
 *   entering it into the appropriate L2 page table.  Used to implement
 *   the user-space sbrk().
 *
 * Input Parameters:
 *   brkaddr - The current heap break address (zero means no heap has yet
 *             been allocated).
 *   npages  - Number of pages to add to the heap.
 *
 * Returned Value:
 *   The new heap break address on success; zero on failure (page table
 *   lookup failure or physical page exhaustion).
 *
 ****************************************************************************/

uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages)
{
  FAR struct tcb_s *tcb = sched_self();
  FAR struct task_group_s *group;
  FAR uint32_t *l2table;
  irqstate_t flags;
  uintptr_t paddr;
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
  uint32_t l1save;
#endif
  unsigned int index;

  DEBUGASSERT(tcb && tcb->group);
  group = tcb->group;

  /* The current implementation only supports extending the user heap
   * region as part of the implementation of user sbrk().  This function
   * needs to be expanded to also handle (1) extending the user stack
   * space and (2) extending the kernel memory regions as well.
   */

  DEBUGASSERT((group->tg_flags & GROUP_FLAG_ADDRENV) != 0);

  /* brkaddr = 0 means that no heap has yet been allocated */

  if (brkaddr == 0)
    {
      brkaddr = CONFIG_ARCH_HEAP_VBASE;
    }

  DEBUGASSERT(brkaddr >= CONFIG_ARCH_HEAP_VBASE && brkaddr < ARCH_HEAP_VEND);
  DEBUGASSERT(MM_ISALIGNED(brkaddr));

  for (; npages > 0; npages--)
    {
      /* Get the physical address of the level 2 page table */

      paddr = get_pgtable(&group->addrenv, brkaddr);
      if (paddr == 0)
        {
          return 0;
        }

      flags = irqsave();

#ifdef CONFIG_ARCH_PGPOOL_MAPPING
      /* Get the virtual address corresponding to the physical page
       * address.
       */

      l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
      /* Temporarily map the level 2 page table into the "scratch" virtual
       * address space
       */

      l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
      mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
      l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
#endif

      /* Back up L2 entry with physical memory */

      paddr = mm_pgalloc(1);
      if (paddr == 0)
        {
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
          mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
          irqrestore(flags);
          return 0;
        }

      /* The table divides a 1Mb address space up into 256 entries, each
       * corresponding to 4Kb of address space.  The page table index is
       * related to the offset from the beginning of 1Mb region.
       */

      index = (brkaddr & 0x000ff000) >> 12;

      /* Map the heap virtual address to this physical address (note the
       * original comment said ".text region"; this is heap mapping)
       */

      DEBUGASSERT(l2table[index] == 0);
      l2table[index] = paddr | MMU_L2_UDATAFLAGS;
      brkaddr += MM_PGSIZE;

      /* Make sure that the modified L2 table is flushed to physical
       * memory.
       */

      arch_flush_dcache((uintptr_t)&l2table[index],
                        (uintptr_t)&l2table[index] + sizeof(uint32_t));

#ifndef CONFIG_ARCH_PGPOOL_MAPPING
      /* Restore the scratch L1 page table entry */

      mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
      irqrestore(flags);
    }

  return brkaddr;
}
/****************************************************************************
 * Name: arm_addrenv_create_region
 *
 * Description:
 *   Back a virtual memory region with physical pages:  Allocate one L2
 *   page table per 1Mb section, populate it with freshly allocated data
 *   pages, and record each L2 table's physical address in 'list' so that
 *   arm_addrenv_destroy_region() can later release everything.  (The
 *   mapping does nothing until the L2 tables are hooked into the L1
 *   page table.)
 *
 * Input Parameters:
 *   list       - Receives the physical address of each allocated L2 page
 *                table, one consecutive entry per 1Mb section.
 *   listlen    - Number of entries available in 'list'.
 *   vaddr      - Base virtual address of the region.
 *   regionsize - Size of the region in bytes.
 *   mmuflags   - MMU flags for the small-page mappings.
 *
 * Returned Value:
 *   The number of pages backing the region on success; a negated errno
 *   value (-E2BIG, -ENOMEM) on failure.  On failure the entries already
 *   recorded in 'list' are left for the caller to clean up.
 *
 ****************************************************************************/

int arm_addrenv_create_region(FAR uintptr_t **list, unsigned int listlen,
                              uintptr_t vaddr, size_t regionsize,
                              uint32_t mmuflags)
{
  irqstate_t flags;
  uintptr_t paddr;
  FAR uint32_t *l2table;
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
  uint32_t l1save;
#endif
  size_t nmapped;
  unsigned int npages;
  unsigned int i;
  unsigned int j;

  bvdbg("listlen=%d vaddr=%08lx regionsize=%ld, mmuflags=%08x\n",
        listlen, (unsigned long)vaddr, (unsigned long)regionsize,
        (unsigned int)mmuflags);

  /* Verify that we are configured with enough virtual address space to
   * support this memory region.
   *
   * npages pages corresponds to (npages << MM_PGSHIFT) bytes
   * listlen sections corresponds to (listlen << 20) bytes
   */

  npages = MM_NPAGES(regionsize);
  if (npages > (listlen << (20 - MM_PGSHIFT)))
    {
      bdbg("ERROR: npages=%u listlen=%u\n", npages, listlen);
      return -E2BIG;
    }

  /* Back the allocation up with physical pages and set up the level 2
   * mapping (which of course does nothing until the L2 page table is
   * hooked into the L1 page table).
   */

  nmapped = 0;
  for (i = 0; i < npages; i += ENTRIES_PER_L2TABLE)
    {
      /* Allocate one physical page for the L2 page table */

      paddr = mm_pgalloc(1);
      if (!paddr)
        {
          return -ENOMEM;
        }

      DEBUGASSERT(MM_ISALIGNED(paddr));

      /* Record the L2 table in the next consecutive slot of 'list'.
       * FIX: 'i' strides by ENTRIES_PER_L2TABLE, so indexing with 'i'
       * directly (as before) would overrun 'list' for any region larger
       * than one 1Mb section and would not match the dense indexing used
       * by arm_addrenv_destroy_region().  The size check above guarantees
       * i / ENTRIES_PER_L2TABLE < listlen.
       */

      list[i / ENTRIES_PER_L2TABLE] = (FAR uintptr_t *)paddr;
      flags = irqsave();

#ifdef CONFIG_ARCH_PGPOOL_MAPPING
      /* Get the virtual address corresponding to the physical page
       * address.
       */

      l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
      /* Temporarily map the page into the virtual address space */

      l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
      mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
                      MMU_MEMFLAGS);
      l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE |
                                 (paddr & SECTION_MASK));
#endif

      /* Initialize the page table */

      memset(l2table, 0, ENTRIES_PER_L2TABLE * sizeof(uint32_t));

      /* Back up L2 entries with physical memory */

      for (j = 0; j < ENTRIES_PER_L2TABLE && nmapped < regionsize; j++)
        {
          /* Allocate one physical page for region data */

          paddr = mm_pgalloc(1);
          if (!paddr)
            {
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
              mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
              irqrestore(flags);
              return -ENOMEM;
            }

          /* Map the region virtual address to this physical address */

          set_l2_entry(l2table, paddr, vaddr, mmuflags);
          nmapped += MM_PGSIZE;
          vaddr   += MM_PGSIZE;
        }

      /* Make sure that the initialized L2 table is flushed to physical
       * memory.
       */

      arch_flush_dcache((uintptr_t)l2table,
                        (uintptr_t)l2table +
                        ENTRIES_PER_L2TABLE * sizeof(uint32_t));

#ifndef CONFIG_ARCH_PGPOOL_MAPPING
      /* Restore the scratch section L1 page table entry */

      mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
      irqrestore(flags);
    }

  return npages;
}