Example #1
FAR void *mm_sbrk(FAR struct mm_heap_s *heap, intptr_t incr,
                  uintptr_t maxbreak)
{
  uintptr_t brkaddr;
  uintptr_t allocbase;
  unsigned int pgincr;
  size_t bytesize;
  int err;

  DEBUGASSERT(incr >= 0);
  if (incr < 0)
    {
      err = ENOSYS;
      goto errout;
    }

  /* Get the current break address (NOTE: assumes region 0).  If
   * the memory manager is uninitialized, mm_brkaddr() will return
   * zero.
   */

  brkaddr = (uintptr_t)mm_brkaddr(heap, 0);
  if (incr > 0)
    {
      /* Convert the increment to multiples of the page size */

      pgincr = MM_NPAGES(incr);

      /* Check if this increment would exceed the maximum break value */

      if ((brkaddr > 0) && ((maxbreak - brkaddr) < (pgincr << MM_PGSHIFT)))
        {
          err = ENOMEM;
          goto errout;
        }

      /* Allocate the requested number of pages and map them to the
       * break address.  If we provide a zero brkaddr to pgalloc(), it
       * will create the first block in the correct virtual address
       * space and return the start address of that block.
       */

      allocbase = pgalloc(brkaddr, pgincr);
      if (allocbase == 0)
        {
          err = EAGAIN;
          goto errout;
        }

      /* Has the heap been initialized?  brkaddr will be zero if the
       * memory manager has not yet been initialized.
       */

      bytesize = pgincr << MM_PGSHIFT;
      if (brkaddr == 0)
        {
          /* No... then initialize it now */

          mm_initialize(heap, (FAR void *)allocbase, bytesize);
        }
      else
        {
          /* Yes... then extend the heap (region 0) */

          mm_extend(heap, (FAR void *)allocbase, bytesize, 0);
        }
    }

  return (FAR void *)brkaddr;

errout:
  set_errno(err);
  return (FAR void *)-1;
}
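
A minimal caller sketch (hypothetical, not from the source): a user-facing
sbrk() could delegate to mm_sbrk() along these lines.  USR_HEAP stands in
for the user heap state and ARCH_HEAP_VEND for the configured end of the
heap's virtual address range; both names are assumptions here.

FAR void *sbrk(intptr_t incr)
{
  /* Grow the user heap (region 0) by incr bytes, never past the
   * configured maximum break address.  On failure mm_sbrk() sets errno
   * and returns (FAR void *)-1, which is passed through unchanged.
   */

  return mm_sbrk(USR_HEAP, incr, ARCH_HEAP_VEND);
}
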
Example #2
int arm_addrenv_create_region(FAR uintptr_t **list, unsigned int listlen,
                              uintptr_t vaddr, size_t regionsize,
                              uint32_t mmuflags)
{
  irqstate_t flags;
  uintptr_t paddr;
  FAR uint32_t *l2table;
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
  uint32_t l1save;
#endif
  size_t nmapped;
  unsigned int npages;
  unsigned int i;
  unsigned int j;

  bvdbg("listlen=%d vaddr=%08lx regionsize=%ld, mmuflags=%08x\n",
        listlen, (unsigned long)vaddr, (unsigned long)regionsize,
        (unsigned int)mmuflags);

  /* Verify that we are configured with enough virtual address space to
   * support this memory region.
   *
   *   npages pages correspond to (npages << MM_PGSHIFT) bytes
   *   listlen sections correspond to (listlen << 20) bytes
   */

  npages = MM_NPAGES(regionsize);
  if (npages > (listlen << (20 - MM_PGSHIFT)))
    {
      bdbg("ERROR: npages=%u listlen=%u\n", npages, listlen);
      return -E2BIG;
    }
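
  /* Worked example (illustrative, assuming 4KB pages so that MM_PGSHIFT
   * is 12): one 1MB section covers 1 << (20 - 12) = 256 pages, so a 2MB
   * region (npages = 512) passes this check only when listlen >= 2.
   */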

  /* Back the allocation up with physical pages and set up the level 2
   * mapping (which of course does nothing until the L2 page table is
   * hooked into the L1 page table).
   */

  nmapped = 0;
  for (i = 0; i < npages; i += ENTRIES_PER_L2TABLE)
    {
      /* Allocate one physical page for the L2 page table */

      paddr = mm_pgalloc(1);
      if (!paddr)
        {
          return -ENOMEM;
        }

      DEBUGASSERT(MM_ISALIGNED(paddr));

      /* Save one L2 table pointer per section (i counts pages, and each
       * L2 table maps ENTRIES_PER_L2TABLE pages).
       */

      list[i / ENTRIES_PER_L2TABLE] = (FAR uintptr_t *)paddr;

      flags = irqsave();

#ifdef CONFIG_ARCH_PGPOOL_MAPPING
      /* Get the virtual address corresponding to the physical page address */

      l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
      /* Temporarily map the page into the virtual address space */

      l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
      mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
      l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
#endif

      /* Initialize the page table */

      memset(l2table, 0, ENTRIES_PER_L2TABLE * sizeof(uint32_t));

      /* Back up L2 entries with physical memory */

      for (j = 0; j < ENTRIES_PER_L2TABLE && nmapped < regionsize; j++)
        {
          /* Allocate one physical page for region data */

          paddr = mm_pgalloc(1);
          if (!paddr)
            {
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
              mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
              irqrestore(flags);
              return -ENOMEM;
            }

          /* Map the region virtual address to this physical address */

          set_l2_entry(l2table, paddr, vaddr, mmuflags);
          nmapped += MM_PGSIZE;
          vaddr   += MM_PGSIZE;
        }

      /* Make sure that the initialized L2 table is flushed to physical
       * memory.
       */

      arch_flush_dcache((uintptr_t)l2table,
                        (uintptr_t)l2table +
                        ENTRIES_PER_L2TABLE * sizeof(uint32_t));

#ifndef CONFIG_ARCH_PGPOOL_MAPPING
      /* Restore the scratch section L1 page table entry */

      mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
      irqrestore(flags);
    }

  return npages;
}
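
A minimal caller sketch (hypothetical names throughout): an address
environment creator could use arm_addrenv_create_region() to back a new
task's .text region along these lines.  addrenv_t, ARCH_TEXT_NSECTS,
CONFIG_ARCH_TEXT_VBASE, and MMU_L2_UTEXTFLAGS are assumptions standing in
for the platform's address-environment type, section count, region base,
and user-text MMU flags.

int addrenv_create_text(FAR addrenv_t *addrenv, size_t textsize)
{
  int ret;

  /* Commit physical pages for textsize bytes starting at the .text
   * virtual base.  The return value is the number of pages mapped on
   * success or a negated errno value on failure.
   */

  ret = arm_addrenv_create_region(addrenv->text, ARCH_TEXT_NSECTS,
                                  CONFIG_ARCH_TEXT_VBASE, textsize,
                                  MMU_L2_UTEXTFLAGS);
  return ret < 0 ? ret : OK;
}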