int up_addrenv_ustackselect(FAR const struct tcb_s *tcb)
{
  uintptr_t vaddr;
  uintptr_t paddr;
  int i;

  DEBUGASSERT(tcb);

  for (vaddr = CONFIG_ARCH_STACK_VBASE, i = 0;
       i < ARCH_STACK_NSECTS;
       vaddr += SECTION_SIZE, i++)
    {
      /* Set (or clear) the new page table entry */

      paddr = (uintptr_t)tcb->xcp.ustack[i];
      if (paddr)
        {
          mmu_l1_setentry(paddr, vaddr, MMU_L1_PGTABFLAGS);
        }
      else
        {
          mmu_l1_clrentry(vaddr);
        }
    }

  return OK;
}

static void sam_vectormapping(void)
{
  uint32_t vector_paddr = SAM_VECTOR_PADDR & PTE_SMALL_PADDR_MASK;
  uint32_t vector_vaddr = SAM_VECTOR_VADDR & PTE_SMALL_PADDR_MASK;
  uint32_t vector_size  = (uint32_t)&_vector_end - (uint32_t)&_vector_start;
  uint32_t end_paddr    = SAM_VECTOR_PADDR + vector_size;

  /* REVISIT:  Cannot really assert in this context */

  DEBUGASSERT(vector_size <= VECTOR_TABLE_SIZE);

  /* We want to keep our interrupt vectors and interrupt-related logic in
   * zero-wait state internal SRAM (ISRAM).  The SAMA5 has 128KB of ISRAM
   * positioned at physical address 0x0030:0000; we need to map this to
   * 0xffff:0000.
   */

  while (vector_paddr < end_paddr)
    {
      mmu_l2_setentry(VECTOR_L2_VBASE,  vector_paddr, vector_vaddr,
                      MMU_L2_VECTORFLAGS);
      vector_paddr += 4096;
      vector_vaddr += 4096;
    }

  /* Now set the level 1 descriptor to refer to the level 2 page table. */

  mmu_l1_setentry(VECTOR_L2_PBASE & PMD_PTE_PADDR_MASK,
                  SAM_VECTOR_VADDR & PMD_PTE_PADDR_MASK,
                  MMU_L1_VECTORFLAGS);
}
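
A minimal illustration (my own sketch, not part of the original file) of the arithmetic behind the mapping above: in the ARMv7-A short-descriptor scheme one L1 entry spans 1MB and one small-page L2 entry spans 4KB, so the vector region occupies vector_size / 4096 consecutive L2 entries inside the single 1MB section that contains 0xffff:0000. The function name and local variables below are illustrative only.

static void vector_index_example(void)
{
  uint32_t vaddr = 0xffff0000;                     /* SAM_VECTOR_VADDR */

  /* One L1 entry covers 1MB: index = vaddr >> 20 */

  uint32_t l1index = vaddr >> 20;                  /* 0xfff, the last of 4096 L1 entries */

  /* One small-page L2 entry covers 4KB: index = (vaddr & 0x000ff000) >> 12 */

  uint32_t l2index = (vaddr & 0x000ff000) >> 12;   /* 0xf0, i.e. entry 240 of 256 */

  (void)l1index;
  (void)l2index;
}
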
Example #3
static uintptr_t alloc_pgtable(void)
{
  irqstate_t flags;
  uintptr_t paddr;
  FAR uint32_t *l2table;
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
  uint32_t l1save;
#endif

  /* Allocate one physical page for the L2 page table */

  paddr = mm_pgalloc(1);
  if (paddr)
    {
      DEBUGASSERT(MM_ISALIGNED(paddr));

      flags = irqsave();

#ifdef CONFIG_ARCH_PGPOOL_MAPPING
      /* Get the virtual address corresponding to the physical page address */

      l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
      /* Temporarily map the page into the virtual address space */

      l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
      mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
      l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
#endif

      /* Initialize the page table */

      memset(l2table, 0, MM_PGSIZE);

      /* Make sure that the initialized L2 table is flushed to physical
       * memory.
       */

      arch_flush_dcache((uintptr_t)l2table,
                        (uintptr_t)l2table + MM_PGSIZE);

#ifndef CONFIG_ARCH_PGPOOL_MAPPING
      /* Restore the scratch section page table entry */

      mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
      irqrestore(flags);
    }

  return paddr;
}
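
A hedged usage sketch, not taken from the original sources: one way a caller could combine alloc_pgtable() with the heap slots of the address environment, creating an L2 table on demand for a heap virtual address. The helper name get_heap_pgtable_sketch() and its exact bookkeeping are assumptions; the real NuttX helper may differ in detail.

static uintptr_t get_heap_pgtable_sketch(FAR group_addrenv_t *addrenv,
                                         uintptr_t vaddr)
{
  /* Index of the 1MB section within the heap region (hypothetical layout) */

  unsigned int hpndx = (vaddr - CONFIG_ARCH_HEAP_VBASE) >> 20;
  uintptr_t paddr = (uintptr_t)addrenv->heap[hpndx];

  if (paddr == 0)
    {
      /* No L2 table yet: allocate one and hook it into the L1 page table */

      paddr = alloc_pgtable();
      if (paddr != 0)
        {
          addrenv->heap[hpndx] = (FAR uintptr_t *)paddr;
          mmu_l1_setentry(paddr, vaddr & ~SECTION_MASK, MMU_L1_PGTABFLAGS);
        }
    }

  return paddr;
}
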
Example #4
int up_addrenv_select(FAR const group_addrenv_t *addrenv,
                      FAR save_addrenv_t *oldenv)
{
  uintptr_t vaddr;
  uintptr_t paddr;
  int i;

  DEBUGASSERT(addrenv);

  for (vaddr = CONFIG_ARCH_TEXT_VBASE, i = 0;
       i < ARCH_TEXT_NSECTS;
       vaddr += SECTION_SIZE, i++)
    {
      /* Save the old L1 page table entry */

      if (oldenv)
        {
          oldenv->text[i] = mmu_l1_getentry(vaddr);
        }

      /* Set (or clear) the new page table entry */

      paddr = (uintptr_t)addrenv->text[i];
      if (paddr)
        {
          mmu_l1_setentry(paddr, vaddr, MMU_L1_PGTABFLAGS);
        }
      else
        {
          mmu_l1_clrentry(vaddr);
        }
    }

  for (vaddr = CONFIG_ARCH_DATA_VBASE, i = 0;
       i < ARCH_DATA_NSECTS;
       vaddr += SECTION_SIZE, i++)
    {
      /* Save the old L1 page table entry */

      if (oldenv)
        {
          oldenv->data[i] = mmu_l1_getentry(vaddr);
        }

      /* Set (or clear) the new page table entry */

      paddr = (uintptr_t)addrenv->data[i];
      if (paddr)
        {
          mmu_l1_setentry(paddr, vaddr, MMU_L1_PGTABFLAGS);
        }
      else
        {
          mmu_l1_clrentry(vaddr);
        }
    }

#ifdef CONFIG_BUILD_KERNEL
  for (vaddr = CONFIG_ARCH_HEAP_VBASE, i = 0;
       i < ARCH_HEAP_NSECTS;
       vaddr += SECTION_SIZE, i++)
    {
      /* Save the old L1 page table entry */

      if (oldenv)
        {
          oldenv->heap[i] = mmu_l1_getentry(vaddr);
        }

      /* Set (or clear) the new page table entry */

      paddr = (uintptr_t)addrenv->heap[i];
      if (paddr)
        {
          mmu_l1_setentry(paddr, vaddr, MMU_L1_PGTABFLAGS);
        }
      else
        {
          mmu_l1_clrentry(vaddr);
        }
    }

#ifdef CONFIG_MM_SHM
  for (vaddr = CONFIG_ARCH_SHM_VBASE, i = 0;
       i < ARCH_SHM_NSECTS;
       vaddr += SECTION_SIZE, i++)
    {
      /* Save the old L1 page table entry */

      if (oldenv)
        {
          oldenv->shm[i] = mmu_l1_getentry(vaddr);
        }

      /* Set (or clear) the new page table entry */

      paddr = (uintptr_t)addrenv->shm[i];
      if (paddr)
        {
          mmu_l1_setentry(paddr, vaddr, MMU_L1_PGTABFLAGS);
        }
      else
        {
          mmu_l1_clrentry(vaddr);
        }
    }

#endif
#endif

  return OK;
}
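
A minimal sketch (assumption, not the original implementation) of the inverse operation: restoring the L1 entries captured in save_addrenv_t above. Only the .text and .data regions are shown, and the function name is illustrative; NuttX's own restore routine may differ.

static int addrenv_restore_sketch(FAR const save_addrenv_t *oldenv)
{
  uintptr_t vaddr;
  int i;

  DEBUGASSERT(oldenv);

  for (vaddr = CONFIG_ARCH_TEXT_VBASE, i = 0;
       i < ARCH_TEXT_NSECTS;
       vaddr += SECTION_SIZE, i++)
    {
      /* Restore the saved L1 page table entry for the .text region */

      mmu_l1_restore(vaddr, oldenv->text[i]);
    }

  for (vaddr = CONFIG_ARCH_DATA_VBASE, i = 0;
       i < ARCH_DATA_NSECTS;
       vaddr += SECTION_SIZE, i++)
    {
      /* Restore the saved L1 page table entry for the .bss/.data region */

      mmu_l1_restore(vaddr, oldenv->data[i]);
    }

  return OK;
}
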
Example #5
static int up_addrenv_initdata(uintptr_t l2table)
{
  irqstate_t flags;
  FAR uint32_t *virtptr;
  uintptr_t paddr;
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
  uint32_t l1save;
#endif

  DEBUGASSERT(l2table);
  flags = enter_critical_section();

#ifdef CONFIG_ARCH_PGPOOL_MAPPING
  /* Get the virtual address corresponding to the physical page table address */

  virtptr = (FAR uint32_t *)arm_pgvaddr(l2table);
#else
  /* Temporarily map the page into the virtual address space */

  l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
  mmu_l1_setentry(l2table & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
  virtptr = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (l2table & SECTION_MASK));
#endif

  /* Invalidate D-Cache so that we read from the physical memory */

  arch_invalidate_dcache((uintptr_t)virtptr,
                         (uintptr_t)virtptr + sizeof(uint32_t));

  /* Get the physical address of the first page of .bss/.data */

  paddr = (uintptr_t)(*virtptr) & PTE_SMALL_PADDR_MASK;
  DEBUGASSERT(paddr);

#ifdef CONFIG_ARCH_PGPOOL_MAPPING
  /* Get the virtual address corresponding to the physical page address */

  virtptr = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
  /* Temporarily map the page into the virtual address space */

  mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
  virtptr = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
#endif

  /* Finally, after all of that, we can initialize the tiny region at
   * the beginning of .bss/.data by setting it to zero.
   */

  memset(virtptr, 0, ARCH_DATA_RESERVE_SIZE);

  /* Make sure that the initialized data is flushed to physical memory. */

  arch_flush_dcache((uintptr_t)virtptr,
                    (uintptr_t)virtptr + ARCH_DATA_RESERVE_SIZE);

#ifndef CONFIG_ARCH_PGPOOL_MAPPING
  /* Restore the scratch section L1 page table entry */

  mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
  leave_critical_section(flags);
  return OK;
}
Example #6
uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages)
{
  FAR struct tcb_s *tcb = sched_self();
  FAR struct task_group_s *group;
  FAR uint32_t *l2table;
  irqstate_t flags;
  uintptr_t paddr;
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
  uint32_t l1save;
#endif
  unsigned int index;

  DEBUGASSERT(tcb && tcb->group);
  group = tcb->group;

  /* The current implementation only supports extending the user heap
   * region as part of the implementation of user sbrk().  This function
   * needs to be expanded to also handle (1) extending the user stack
   * space and (2) extending the kernel memory regions as well.
   */

  DEBUGASSERT((group->tg_flags & GROUP_FLAG_ADDRENV) != 0);

  /* brkaddr = 0 means that no heap has yet been allocated */

  if (brkaddr == 0)
    {
      brkaddr = CONFIG_ARCH_HEAP_VBASE;
    }

  DEBUGASSERT(brkaddr >= CONFIG_ARCH_HEAP_VBASE && brkaddr < ARCH_HEAP_VEND);
  DEBUGASSERT(MM_ISALIGNED(brkaddr));

  for (; npages > 0; npages--)
    {
      /* Get the physical address of the level 2 page table */

      paddr = get_pgtable(&group->addrenv, brkaddr);
      if (paddr == 0)
        {
          return 0;
        }

      flags = irqsave();

#ifdef CONFIG_ARCH_PGPOOL_MAPPING
      /* Get the virtual address corresponding to the physical page address */

      l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
      /* Temporarily map the level 2 page table into the "scratch" virtual
       * address space
       */

      l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
      mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
      l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
#endif

      /* Back up L2 entry with physical memory */

      paddr = mm_pgalloc(1);
      if (paddr == 0)
        {
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
          mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
          irqrestore(flags);
          return 0;
        }

      /* The table divides a 1MB address space up into 256 entries, each
       * corresponding to 4KB of address space.  The page table index is
       * related to the offset from the beginning of the 1MB region.
       */

      index = (brkaddr & 0x000ff000) >> 12;
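
      /* Worked example with illustrative values (not from the original
       * code): if CONFIG_ARCH_HEAP_VBASE were 0x80100000 and brkaddr were
       * 0x80123000, the offset into the current 1MB section would be
       * 0x23000, so index = 0x23000 >> 12 = 0x23 (decimal 35), i.e. the
       * 36th of the 256 L2 entries.
       */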

      /* Map the heap region virtual address to this physical address */

      DEBUGASSERT(l2table[index] == 0);
      l2table[index] = paddr | MMU_L2_UDATAFLAGS;
      brkaddr += MM_PGSIZE;

      /* Make sure that the modified L2 table is flushed to physical
       * memory.
       */

      arch_flush_dcache((uintptr_t)&l2table[index],
                        (uintptr_t)&l2table[index] + sizeof(uint32_t));

#ifndef CONFIG_ARCH_PGPOOL_MAPPING
      /* Restore the scratch L1 page table entry */

      mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
      irqrestore(flags);
    }

  return brkaddr;
}
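
A hypothetical usage sketch of pgalloc(), roughly what a kernel-mode sbrk() would do to grow the user heap by one page. The wrapper name and error handling are illustrative only.

static uintptr_t sbrk_sketch(uintptr_t brkaddr)
{
  /* Try to extend the heap by one page; brkaddr == 0 means no heap exists yet */

  uintptr_t newbrk = pgalloc(brkaddr, 1);
  if (newbrk != 0)
    {
      brkaddr = newbrk;    /* Success: the heap now ends at newbrk */
    }

  return brkaddr;          /* On failure the old break address is kept */
}
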
void arm_addrenv_destroy_region(FAR uintptr_t **list, unsigned int listlen,
                                uintptr_t vaddr)
{
  irqstate_t flags;
  uintptr_t paddr;
  FAR uint32_t *l2table;
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
  uint32_t l1save;
#endif
  int i;
  int j;

  bvdbg("listlen=%d vaddr=%08lx\n", listlen, (unsigned long)vaddr);

  for (i = 0; i < listlen; vaddr += SECTION_SIZE, i++)
    {
      /* Unhook the L2 page table from the L1 page table */

      mmu_l1_clrentry(vaddr);

      /* Has this page table been allocated? */

      paddr = (uintptr_t)list[i];
      if (paddr != 0)
        {
          flags = irqsave();

#ifdef CONFIG_ARCH_PGPOOL_MAPPING
          /* Get the virtual address corresponding to the physical page address */

          l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
          /* Temporarily map the page into the virtual address space */

          l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
          mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
          l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
#endif

          /* Return the allocated pages to the page allocator */

          for (j = 0; j < ENTRIES_PER_L2TABLE; j++)
            {
              paddr = *l2table++;
              if (paddr != 0)
                {
                  paddr &= PTE_SMALL_PADDR_MASK;
                  mm_pgfree(paddr, 1);
                }
            }

#ifndef CONFIG_ARCH_PGPOOL_MAPPING
          /* Restore the scratch section L1 page table entry */

          mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
          irqrestore(flags);

          /* And free the L2 page table itself */

          mm_pgfree((uintptr_t)list[i], 1);
        }
    }
}

int arm_addrenv_create_region(FAR uintptr_t **list, unsigned int listlen,
                              uintptr_t vaddr, size_t regionsize,
                              uint32_t mmuflags)
{
  irqstate_t flags;
  uintptr_t paddr;
  FAR uint32_t *l2table;
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
  uint32_t l1save;
#endif
  size_t nmapped;
  unsigned int npages;
  unsigned int i;
  unsigned int j;

  bvdbg("listlen=%d vaddr=%08lx regionsize=%ld, mmuflags=%08x\n",
        listlen, (unsigned long)vaddr, (unsigned long)regionsize,
        (unsigned int)mmuflags);

  /* Verify that we are configured with enough virtual address space to
   * support this memory region.
   *
   *   npages pages correspond to (npages << MM_PGSHIFT) bytes
   *   listlen sections correspond to (listlen << 20) bytes
   */

  npages = MM_NPAGES(regionsize);
  if (npages > (listlen << (20 - MM_PGSHIFT)))
    {
      bdbg("ERROR: npages=%u listlen=%u\n", npages, listlen);
      return -E2BIG;
    }
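
  /* Worked example (assuming MM_PGSHIFT == 12, i.e. 4KB pages): each list
   * entry describes one 1MB section and therefore covers
   * 1 << (20 - 12) = 256 pages.  A 68KB region needs npages = 17, so
   * listlen = 1 is sufficient; a 2MB region (npages = 512) would require
   * listlen >= 2.
   */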

  /* Back the allocation up with physical pages and set up the level 2
   * mapping (which of course does nothing until the L2 page table is
   * hooked into the L1 page table).
   */

  nmapped = 0;
  for (i = 0; i < npages; i += ENTRIES_PER_L2TABLE)
    {
      /* Allocate one physical page for the L2 page table */

      paddr = mm_pgalloc(1);
      if (!paddr)
        {
          return -ENOMEM;
        }

      DEBUGASSERT(MM_ISALIGNED(paddr));
      list[i / ENTRIES_PER_L2TABLE] = (FAR uintptr_t *)paddr;

      flags = irqsave();

#ifdef CONFIG_ARCH_PGPOOL_MAPPING
      /* Get the virtual address corresponding to the physical page address */

      l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
      /* Temporarily map the page into the virtual address space */

      l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
      mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
      l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
#endif

      /* Initialize the page table */

      memset(l2table, 0, ENTRIES_PER_L2TABLE * sizeof(uint32_t));

      /* Back up L2 entries with physical memory */

      for (j = 0; j < ENTRIES_PER_L2TABLE && nmapped < regionsize; j++)
        {
          /* Allocate one physical page for region data */

          paddr = mm_pgalloc(1);
          if (!paddr)
            {
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
              mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
              irqrestore(flags);
              return -ENOMEM;
            }

          /* Map the region's virtual address to this physical address */

          set_l2_entry(l2table, paddr, vaddr, mmuflags);
          nmapped += MM_PGSIZE;
          vaddr   += MM_PGSIZE;
        }

      /* Make sure that the initialized L2 table is flushed to physical
       * memory.
       */

      arch_flush_dcache((uintptr_t)l2table,
                        (uintptr_t)l2table +
                        ENTRIES_PER_L2TABLE * sizeof(uint32_t));

#ifndef CONFIG_ARCH_PGPOOL_MAPPING
      /* Restore the scratch section L1 page table entry */

      mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
      irqrestore(flags);
    }

  return npages;
}
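
A hedged usage sketch showing how the two helpers above pair up, roughly along the lines of what an address-environment creation routine would do for the user .bss/.data region. The wrapper name and the datasize parameter are illustrative; error handling in the real code is more involved.

static int create_data_region_sketch(FAR group_addrenv_t *addrenv,
                                     size_t datasize)
{
  int ret;

  /* Create and back the user .bss/.data region with physical pages */

  ret = arm_addrenv_create_region(addrenv->data, ARCH_DATA_NSECTS,
                                  CONFIG_ARCH_DATA_VBASE,
                                  datasize + ARCH_DATA_RESERVE_SIZE,
                                  MMU_L2_UDATAFLAGS);
  if (ret < 0)
    {
      /* On failure, release anything that was partially mapped */

      arm_addrenv_destroy_region(addrenv->data, ARCH_DATA_NSECTS,
                                 CONFIG_ARCH_DATA_VBASE);
    }

  return ret;
}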