void shm_destroy(int shmid)
{
  FAR struct shm_region_s *region = &g_shminfo.si_region[shmid];
  int i;

  /* Free all of the allocated physical pages */

  for (i = 0; i < CONFIG_ARCH_SHM_NPAGES && region->sr_pages[i] != 0; i++)
    {
      mm_pgfree(region->sr_pages[i], 1);
    }

  /* Reset the region entry to its initial state */

  sem_destroy(&region->sr_sem);
  memset(region, 0, sizeof(struct shm_region_s));
}
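To place shm_destroy() in context, here is a minimal sketch of the kind of shmctl(IPC_RMID) path that might invoke it. Only shm_destroy() itself comes from the code above; the si_sem lock, the sr_nattach count, and the SRFLAG_UNLINKED deferred-destruction flag are assumptions made for illustration, not the verbatim NuttX shmctl() implementation.

/* Hypothetical caller sketch: remove a region immediately if nothing is
 * attached, otherwise mark it for destruction on the last detach.  The
 * names si_sem, sr_nattach, sr_flags, and SRFLAG_UNLINKED are assumptions
 * for illustration.
 */

static int shm_rmid_sketch(int shmid)
{
  FAR struct shm_region_s *region = &g_shminfo.si_region[shmid];
  int ret;

  /* Serialize access to the region table */

  ret = sem_wait(&g_shminfo.si_sem);
  if (ret < 0)
    {
      return -errno;
    }

  if (region->sr_nattach > 0)
    {
      /* Processes are still attached: defer the actual destruction
       * until the last detach drops the attach count to zero.
       */

      region->sr_flags |= SRFLAG_UNLINKED;
    }
  else
    {
      /* No attachments: free the pages and reset the entry now */

      shm_destroy(shmid);
    }

  sem_post(&g_shminfo.si_sem);
  return OK;
}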
void arm_addrenv_destroy_region(FAR uintptr_t **list, unsigned int listlen,
                                uintptr_t vaddr)
{
  irqstate_t flags;
  uintptr_t paddr;
  FAR uint32_t *l2table;
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
  uint32_t l1save;
#endif
  int i;
  int j;

  bvdbg("listlen=%d vaddr=%08lx\n", listlen, (unsigned long)vaddr);

  for (i = 0; i < listlen; vaddr += SECTION_SIZE, i++)
    {
      /* Unhook the L2 page table from the L1 page table */

      mmu_l1_clrentry(vaddr);

      /* Has this page table been allocated? */

      paddr = (uintptr_t)list[i];
      if (paddr != 0)
        {
          flags = irqsave();

#ifdef CONFIG_ARCH_PGPOOL_MAPPING
          /* Get the virtual address corresponding to the physical page
           * address.
           */

          l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
          /* Temporarily map the page into the virtual address space */

          l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
          mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
                          MMU_MEMFLAGS);
          l2table = (FAR uint32_t *)
            (ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
#endif

          /* Return the allocated pages to the page allocator */

          for (j = 0; j < ENTRIES_PER_L2TABLE; j++)
            {
              paddr = *l2table++;
              if (paddr != 0)
                {
                  paddr &= PTE_SMALL_PADDR_MASK;
                  mm_pgfree(paddr, 1);
                }
            }

#ifndef CONFIG_ARCH_PGPOOL_MAPPING
          /* Restore the scratch section L1 page table entry */

          mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
          irqrestore(flags);

          /* And free the L2 page table itself */

          mm_pgfree((uintptr_t)list[i], 1);
        }
    }
}
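For the bigger picture, below is a sketch of how a full address-environment teardown might call arm_addrenv_destroy_region() once per region. The group_addrenv_t members and the ARCH_*_NSECTS / CONFIG_ARCH_*_VBASE constants follow the usual NuttX ARMv7-A naming conventions, but this exact wrapper is an assumption, not the verbatim up_addrenv_destroy() implementation.

/* Hypothetical wrapper sketch: destroy every region of a task group's
 * address environment.  Each call below clears the region's L1 entries,
 * frees the mapped user pages, and frees the L2 page tables.
 */

int addrenv_destroy_sketch(FAR group_addrenv_t *addrenv)
{
  DEBUGASSERT(addrenv != NULL);

  arm_addrenv_destroy_region(addrenv->text, ARCH_TEXT_NSECTS,
                             CONFIG_ARCH_TEXT_VBASE);
  arm_addrenv_destroy_region(addrenv->data, ARCH_DATA_NSECTS,
                             CONFIG_ARCH_DATA_VBASE);
#ifdef CONFIG_BUILD_KERNEL
  arm_addrenv_destroy_region(addrenv->heap, ARCH_HEAP_NSECTS,
                             CONFIG_ARCH_HEAP_VBASE);
#endif

  /* Wipe the structure so stale page table pointers cannot be reused */

  memset(addrenv, 0, sizeof(group_addrenv_t));
  return OK;
}

Note that in the non-CONFIG_ARCH_PGPOOL_MAPPING path of arm_addrenv_destroy_region() above, the irqsave()/irqrestore() pair protects the temporary ARCH_SCRATCH_VBASE mapping: the scratch L1 entry is shared state, so an interrupt-driven remap between mmu_l1_setentry() and mmu_l1_restore() would corrupt the walk.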