Example 1
FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
                      size_t size)
{
  FAR struct mm_allocnode_s *node;
  size_t rawchunk;
  size_t alignedchunk;
  size_t mask = (size_t)(alignment - 1);
  size_t allocsize;

  /* If the requested alignment is less than or equal to the natural
   * alignment of malloc, then just let malloc do the work.
   */

  if (alignment <= MM_MIN_CHUNK)
    {
      return mm_malloc(heap, size);
    }

  /* Adjust the size to account for (1) the size of the allocated node,
   * (2) to make sure that it is an even multiple of our granule size, and
   * (3) to include the alignment amount.
   *
   * Notice that we increase the allocation size by twice the requested
   * alignment.  We do this so that there will be at least two valid
   * alignment points within the allocated memory.
   *
   * NOTE:  These are sizes given to malloc and not chunk sizes. They do
   * not include SIZEOF_MM_ALLOCNODE.
   */

  size      = MM_ALIGN_UP(size);   /* Make multiples of our granule size */
  allocsize = size + 2*alignment;  /* Add double full alignment size */

  /* Then malloc that size */

  rawchunk = (size_t)mm_malloc(heap, allocsize);
  if (rawchunk == 0)
    {
      return NULL;
    }

  /* We need to hold the MM semaphore while we muck with the chunks and
   * nodelist.
   */

  mm_takesemaphore(heap);

  /* Get the node associated with the allocation. */

  node = (FAR struct mm_allocnode_s *)(rawchunk - SIZEOF_MM_ALLOCNODE);

  /* Find the aligned subregion */

  alignedchunk = (rawchunk + mask) & ~mask;

  /* Check if there is free space at the beginning of the aligned chunk */

  if (alignedchunk != rawchunk)
    {
      FAR struct mm_allocnode_s *newnode;
      FAR struct mm_allocnode_s *next;
      size_t precedingsize;

      /* Get the next node after the allocation. */

      next = (FAR struct mm_allocnode_s *)((FAR char *)node + node->size);

      /* Make sure that there is space to convert the preceding mm_allocnode_s
       * into an mm_freenode_s.  I think that this should always be true.
       */

      DEBUGASSERT(alignedchunk >= rawchunk + 8);

      newnode = (FAR struct mm_allocnode_s *)(alignedchunk - SIZEOF_MM_ALLOCNODE);

      /* Preceding size is full size of the new 'node,' including
       * SIZEOF_MM_ALLOCNODE
       */

      precedingsize = (size_t)newnode - (size_t)node;

      /* If we were unlucky, then the alignedchunk can lie in such a position
       * that precedingsize < SIZEOF_MM_FREENODE.  We can't let that happen
       * because we are going to cast 'node' to struct mm_freenode_s below.
       * This is why we allocated memory large enough to support two
       * alignment points.  In this case, we will simply use the second
       * alignment point.
       */

      if (precedingsize < SIZEOF_MM_FREENODE)
        {
          alignedchunk += alignment;
          newnode       = (FAR struct mm_allocnode_s *)(alignedchunk - SIZEOF_MM_ALLOCNODE);
          precedingsize = (size_t)newnode - (size_t)node;
        }

      /* Set up the size of the new node */

      newnode->size = (size_t)next - (size_t)newnode;
      newnode->preceding = precedingsize | MM_ALLOC_BIT;

      /* Reduce the size of the original chunk and mark it not allocated. */

      node->size = precedingsize;
      node->preceding &= ~MM_ALLOC_BIT;

      /* Fix the preceding size of the next node */

      next->preceding = newnode->size | (next->preceding & MM_ALLOC_BIT);

      /* Convert the newnode chunk size back into malloc-compatible size by
       * subtracting the header size SIZEOF_MM_ALLOCNODE.
       */

      allocsize = newnode->size - SIZEOF_MM_ALLOCNODE;

      /* Add the original, newly freed node to the free nodelist */

      mm_addfreechunk(heap, (FAR struct mm_freenode_s *)node);

      /* Replace the original node with the newly allocated,
       * aligned node.
       */

      node = newnode;
    }

  /* Check if there is free space at the end of the aligned chunk */

  if (allocsize > size)
    {
      /* Shrink the chunk by that much -- remember, mm_shrinkchunk wants
       * internal chunk sizes that include SIZEOF_MM_ALLOCNODE, and not the
       * malloc-compatible sizes that we have.
       */

      mm_shrinkchunk(heap, node, size + SIZEOF_MM_ALLOCNODE);
    }

  mm_givesemaphore(heap);
  return (FAR void *)alignedchunk;
}
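
The two key pieces of arithmetic above are the mask-based round-up, alignedchunk = (rawchunk + mask) & ~mask, and the size + 2*alignment over-allocation that guarantees a second usable alignment point whenever the first one leaves too small a gap in front of the aligned address. The standalone sketch below is not part of NuttX; the addresses and the TOY_SIZEOF_FREENODE stand-in are made-up values used only to show the same arithmetic in isolation.

#include <stdio.h>
#include <stdint.h>

#define TOY_SIZEOF_FREENODE 16u    /* stand-in for SIZEOF_MM_FREENODE */

int main(void)
{
  uintptr_t rawchunk  = 0x20001038;  /* pretend address from mm_malloc() */
  uintptr_t alignment = 64;          /* requested power-of-two alignment */
  uintptr_t mask      = alignment - 1;

  /* Round the raw address up to the next alignment boundary */

  uintptr_t alignedchunk = (rawchunk + mask) & ~mask;

  printf("raw=%#lx aligned=%#lx gap=%lu\n",
         (unsigned long)rawchunk, (unsigned long)alignedchunk,
         (unsigned long)(alignedchunk - rawchunk));

  /* If the gap in front of the aligned address is too small to carry a
   * free node, step to the second alignment point.  Because mm_memalign()
   * over-allocates by 2*alignment, that point is still inside the
   * allocation.
   */

  if (alignedchunk != rawchunk &&
      alignedchunk - rawchunk < TOY_SIZEOF_FREENODE)
    {
      alignedchunk += alignment;
    }

  printf("final aligned address: %#lx\n", (unsigned long)alignedchunk);
  return 0;
}
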
Example 2
void free(FAR void *mem)
{
  FAR struct mm_freenode_s *node;
  FAR struct mm_freenode_s *prev;
  FAR struct mm_freenode_s *next;

  mvdbg("Freeing %p\n", mem);

  /* Protect against attempts to free a NULL reference */

  if (!mem)
    {
      return;
    }

  /* We need to hold the MM semaphore while we muck with the
   * nodelist.
   */

  mm_takesemaphore();

  /* Map the memory chunk into a free node */

  node = (FAR struct mm_freenode_s *)((char*)mem - SIZEOF_MM_ALLOCNODE);
  node->preceding &= ~MM_ALLOC_BIT;

  /* Check if the following node is free and, if so, merge it */

  next = (FAR struct mm_freenode_s *)((char*)node + node->size);
  if ((next->preceding & MM_ALLOC_BIT) == 0)
    {
      FAR struct mm_allocnode_s *andbeyond;

      /* Get the node following the next node (which will
       * become the new next node). We know that we can never
       * index past the tail chunk because it is always allocated.
       */

      andbeyond = (FAR struct mm_allocnode_s*)((char*)next + next->size);

      /* Remove the next node.  There must be a predecessor,
       * but there may not be a successor node.
       */

      DEBUGASSERT(next->blink);
      next->blink->flink = next->flink;
      if (next->flink)
        {
          next->flink->blink = next->blink;
        }

      /* Then merge the two chunks */

      node->size          += next->size;
      andbeyond->preceding =  node->size | (andbeyond->preceding & MM_ALLOC_BIT);
      next                 = (FAR struct mm_freenode_s *)andbeyond;
    }

  /* Check if the preceding node is also free and, if so, merge
   * it with this node
   */

  prev = (FAR struct mm_freenode_s *)((char*)node - node->preceding);
  if ((prev->preceding & MM_ALLOC_BIT) == 0)
    {
      /* Remove the node.  There must be a predecessor, but there may
       * not be a successor node.
       */

      DEBUGASSERT(prev->blink);
      prev->blink->flink = prev->flink;
      if (prev->flink)
        {
          prev->flink->blink = prev->blink;
        }

      /* Then merge the two chunks */

      prev->size     += node->size;
      next->preceding = prev->size | (next->preceding & MM_ALLOC_BIT);
      node            = prev;
    }

  /* Add the merged node to the nodelist */

  mm_addfreechunk(node);
  mm_givesemaphore();
}
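
The merging in free() works because each chunk header records both its own size (the distance to the next physical chunk) and the size of the chunk in front of it in 'preceding', with MM_ALLOC_BIT packed into the low bit. The toy model below is only an illustration of that encoding, not the real struct mm_allocnode_s layout; it lays three chunks out in a flat buffer and performs the forward merge the same way free() does above.

#include <stdio.h>
#include <stdint.h>

#define TOY_ALLOC_BIT 0x1u

struct toy_node
{
  uint32_t preceding;   /* size of the previous physical chunk (+ flag) */
  uint32_t size;        /* size of this chunk, including this header    */
};

int main(void)
{
  static union
  {
    uint32_t align;
    uint8_t  bytes[96];                                  /* 96-byte toy heap */
  } heap;

  struct toy_node *a = (struct toy_node *)&heap.bytes[0];   /* allocated    */
  struct toy_node *b = (struct toy_node *)&heap.bytes[32];  /* being freed  */
  struct toy_node *c = (struct toy_node *)&heap.bytes[64];  /* already free */

  a->size = 32; a->preceding = 0  | TOY_ALLOC_BIT;
  b->size = 32; b->preceding = 32 | TOY_ALLOC_BIT;
  c->size = 32; c->preceding = 32;                      /* bit clear: free  */

  /* "Freeing" B: clear its allocated flag, then step forward by B's own
   * size to find the next physical chunk.
   */

  b->preceding &= ~TOY_ALLOC_BIT;

  struct toy_node *next = (struct toy_node *)((uint8_t *)b + b->size);

  /* The next chunk is free, so absorb it -- the same forward merge that
   * free() performs above.
   */

  if ((next->preceding & TOY_ALLOC_BIT) == 0)
    {
      b->size += next->size;
    }

  printf("chunk B now spans %u bytes\n", (unsigned int)b->size);
  return 0;
}
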
Example 3
int mm_mallinfo(FAR struct mm_heap_s *heap, FAR struct mallinfo *info)
{
  struct mm_allocnode_s *node;
  size_t mxordblk = 0;  /* Size of the largest non-inuse chunk */
  int    ordblks  = 0;  /* Number of non-inuse chunks */
  size_t uordblks = 0;  /* Total allocated space */
  size_t fordblks = 0;  /* Total non-inuse space */
#if CONFIG_MM_REGIONS > 1
  int region;
#else
# define region 0
#endif

  DEBUGASSERT(info);

  /* Visit each region */

#if CONFIG_MM_REGIONS > 1
  for (region = 0; region < heap->mm_nregions; region++)
#endif
    {
      /* Visit each node in the region
       * Retake the semaphore for each region to reduce latencies
       */

      mm_takesemaphore(heap);

      for (node = heap->mm_heapstart[region];
           node < heap->mm_heapend[region];
           node = (struct mm_allocnode_s *)((char*)node + node->size))
        {
          mvdbg("region=%d node=%p size=%p preceding=%p (%c)\n",
                region, node, node->size, (node->preceding & ~MM_ALLOC_BIT),
                (node->preceding & MM_ALLOC_BIT) ? 'A' : 'F');

          /* Check if the node corresponds to an allocated memory chunk */

          if ((node->preceding & MM_ALLOC_BIT) != 0)
            {
              uordblks += node->size;
            }
          else
            {
              ordblks++;
              fordblks += node->size;
              if (node->size > mxordblk)
                {
                  mxordblk = node->size;
                }
            }
        }

      mm_givesemaphore(heap);

      mvdbg("region=%d node=%p heapend=%p\n", region, node, heap->mm_heapend[region]);
      DEBUGASSERT(node == heap->mm_heapend[region]);
      uordblks += SIZEOF_MM_ALLOCNODE; /* account for the tail node */
    }
#undef region

  DEBUGASSERT(uordblks + fordblks == heap->mm_heapsize);

  info->arena    = heap->mm_heapsize;
  info->ordblks  = ordblks;
  info->mxordblk = mxordblk;
  info->uordblks = uordblks;
  info->fordblks = fordblks;
  return OK;
}
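
A hedged usage sketch follows: show_heap_usage() is a hypothetical helper, the heap is assumed to have been initialized elsewhere, and the %d formatting assumes the conventional int-typed struct mallinfo fields. It only reads the fields that mm_mallinfo() fills in above; the DEBUGASSERT in mm_mallinfo() is what guarantees that used plus free adds up to the arena.

void show_heap_usage(FAR struct mm_heap_s *heap)
{
  struct mallinfo info;

  if (mm_mallinfo(heap, &info) == OK)
    {
      /* arena is the total heap size; uordblks + fordblks account for it */

      mvdbg("arena=%d used=%d free=%d largest_free=%d free_chunks=%d\n",
            info.arena, info.uordblks, info.fordblks,
            info.mxordblk, info.ordblks);
    }
}
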
Example 4
FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
                     size_t size)
{
  FAR struct mm_allocnode_s *oldnode;
  FAR struct mm_freenode_s  *prev;
  FAR struct mm_freenode_s  *next;
  size_t oldsize;
  size_t prevsize = 0;
  size_t nextsize = 0;
  FAR void *newmem;

  /* If oldmem is NULL, then realloc is equivalent to malloc */

  if (!oldmem)
    {
      return mm_malloc(heap, size);
    }

  /* If size is zero, then realloc is equivalent to free */

  if (size < 1)
    {
      mm_free(heap, oldmem);
      return NULL;
    }

  /* Adjust the size to account for (1) the size of the allocated node and
   * (2) to make sure that it is an even multiple of our granule size.
   */

  size = MM_ALIGN_UP(size + SIZEOF_MM_ALLOCNODE);

  /* Map the memory chunk into an allocated node structure */

  oldnode = (FAR struct mm_allocnode_s *)((FAR char*)oldmem - SIZEOF_MM_ALLOCNODE);

  /* We need to hold the MM semaphore while we muck with the nodelist. */

  mm_takesemaphore(heap);

  /* Check if this is a request to reduce the size of the allocation. */

  oldsize = oldnode->size;
  if (size <= oldsize)
    {
      /* Handle the special case where we are not going to change the size
       * of the allocation.
       */

      if (size < oldsize)
        {
          mm_shrinkchunk(heap, oldnode, size);
        }

      /* Then return the original address */

      mm_givesemaphore(heap);
      return oldmem;
    }

  /* This is a request to increase the size of the allocation.  Get the
   * available sizes before and after the oldnode so that we can make the
   * best decision.
   */

  next = (FAR struct mm_freenode_s *)((FAR char*)oldnode + oldnode->size);
  if ((next->preceding & MM_ALLOC_BIT) == 0)
    {
      nextsize = next->size;
    }

  prev = (FAR struct mm_freenode_s *)((FAR char*)oldnode - (oldnode->preceding & ~MM_ALLOC_BIT));
  if ((prev->preceding & MM_ALLOC_BIT) == 0)
    {
      prevsize = prev->size;
    }

  /* Now, check if we can extend the current allocation or not */

  if (nextsize + prevsize + oldsize >= size)
    {
      size_t needed   = size - oldsize;
      size_t takeprev = 0;
      size_t takenext = 0;

      /* Check if we can extend into the previous chunk and if the
       * previous chunk is smaller than the next chunk.
       */

      if (prevsize > 0 && (nextsize >= prevsize || nextsize < 1))
        {
          /* Can we get everything we need from the previous chunk? */

          if (needed > prevsize)
            {
              /* No, take the whole previous chunk and get the
               * rest that we need from the next chunk.
               */

              takeprev = prevsize;
              takenext = needed - prevsize;
            }
          else
            {
              /* Yes, take what we need from the previous chunk */

              takeprev = needed;
              takenext = 0;
            }

          needed = 0;
        }

      /* Check if we can extend into the next chunk and if we still need
       * more memory.
       */

      if (nextsize > 0 && needed)
        {
          /* Can we get everything we need from the next chunk? */

          if (needed > nextsize)
            {
              /* No, take the whole next chunk and get the rest that we
               * need from the previous chunk.
               */

              takeprev = needed - nextsize;
              takenext = nextsize;
            }
          else
            {
              /* Yes, take what we need from the next chunk */

              takeprev = 0;
              takenext = needed;
            }
        }

      /* Extend into the previous free chunk */

      newmem = oldmem;
      if (takeprev)
        {
          FAR struct mm_allocnode_s *newnode;

          /* Remove the previous node.  There must be a predecessor, but
           * there may not be a successor node.
           */

          DEBUGASSERT(prev->blink);
          prev->blink->flink = prev->flink;
          if (prev->flink)
            {
              prev->flink->blink = prev->blink;
            }

          /* Extend the node into the previous free chunk */

          newnode = (FAR struct mm_allocnode_s *)((FAR char*)oldnode - takeprev);

          /* Did we consume the entire preceding chunk? */

          if (takeprev < prevsize)
            {
              /* No.. just take what we need from the previous chunk and put
               * it back into the free list
               */

              prev->size        -= takeprev;
              newnode->size      = oldsize + takeprev;
              newnode->preceding = prev->size | MM_ALLOC_BIT;
              next->preceding    = newnode->size | (next->preceding & MM_ALLOC_BIT);

              /* Return the previous free node to the nodelist (with the new size) */

              mm_addfreechunk(heap, prev);
            }
          else
            {
              /* Yes.. update its size (newnode->preceding is already set) */

              newnode->size      += oldsize;
              newnode->preceding |= MM_ALLOC_BIT;
              next->preceding     = newnode->size | (next->preceding & MM_ALLOC_BIT);
            }

          /* Now we want to return newnode */

          oldnode = newnode;
          oldsize = newnode->size;

          /* Now we have to move the user contents 'down' in memory.
           * memcpy should be safe for this.
           */

          newmem = (FAR void*)((FAR char*)newnode + SIZEOF_MM_ALLOCNODE);
          memcpy(newmem, oldmem, oldsize - SIZEOF_MM_ALLOCNODE);
        }

      /* Extend into the next free chunk */

      if (takenext)
        {
          FAR struct mm_freenode_s *newnode;
          FAR struct mm_allocnode_s *andbeyond;

          /* Get the chunk following the next node (which could be the tail
           * chunk)
           */

          andbeyond = (FAR struct mm_allocnode_s*)((char*)next + nextsize);

          /* Remove the next node.  There must be a predecessor, but there
           * may not be a successor node.
           */

          DEBUGASSERT(next->blink);
          next->blink->flink = next->flink;
          if (next->flink)
            {
              next->flink->blink = next->blink;
            }

          /* Extend the node into the next chunk */

          oldnode->size = oldsize + takenext;
          newnode       = (FAR struct mm_freenode_s *)((char*)oldnode + oldnode->size);

          /* Did we consume the entire next chunk? */

          if (takenext < nextsize)
            {
              /* No, take what we need from the next chunk and return it to
               * the free nodelist.
               */

              newnode->size        = nextsize - takenext;
              newnode->preceding   = oldnode->size;
              andbeyond->preceding = newnode->size | (andbeyond->preceding & MM_ALLOC_BIT);

              /* Add the new free node to the nodelist (with the new size) */

              mm_addfreechunk(heap, newnode);
            }
          else
            {
              /* Yes, just update some pointers. */

              andbeyond->preceding = oldnode->size | (andbeyond->preceding & MM_ALLOC_BIT);
            }
        }

      mm_givesemaphore(heap);
      return newmem;
    }

  /* The current chunk cannot be extended.  Just allocate a new chunk and copy */

  else
    {
      /* Allocate a new block.  On failure, realloc must return NULL but
       * leave the original memory in place.
       */

      mm_givesemaphore(heap);
      newmem = (FAR void*)mm_malloc(heap, size);
      if (newmem)
        {
          memcpy(newmem, oldmem, oldsize);
          mm_free(heap, oldmem);
        }

      return newmem;
    }
}
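
The grow-in-place path above reduces to deciding how many bytes to take from the free chunk in front of the allocation and how many from the one behind it. The split_needed() helper below is hypothetical, not part of NuttX; it reproduces only that selection logic, which appears to prefer the previous chunk when it is the smaller (or only) free neighbour.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical helper mirroring the takeprev/takenext selection in
 * mm_realloc() above.
 */

static void split_needed(size_t needed, size_t prevsize, size_t nextsize,
                         size_t *takeprev, size_t *takenext)
{
  *takeprev = 0;
  *takenext = 0;

  /* Prefer the previous chunk when it exists and is no larger than the
   * next one (or when there is no next free chunk at all).
   */

  if (prevsize > 0 && (nextsize >= prevsize || nextsize < 1))
    {
      if (needed > prevsize)
        {
          *takeprev = prevsize;
          *takenext = needed - prevsize;
        }
      else
        {
          *takeprev = needed;
        }

      needed = 0;
    }

  /* Otherwise take from the next chunk, falling back to the previous one
   * for anything the next chunk cannot cover.
   */

  if (nextsize > 0 && needed)
    {
      if (needed > nextsize)
        {
          *takeprev = needed - nextsize;
          *takenext = nextsize;
        }
      else
        {
          *takenext = needed;
        }
    }
}

int main(void)
{
  size_t takeprev;
  size_t takenext;

  /* Need 48 more bytes; 32 free in front, 64 free behind */

  split_needed(48, 32, 64, &takeprev, &takenext);
  printf("takeprev=%zu takenext=%zu\n", takeprev, takenext);
  return 0;
}
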
Example 5
FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
{
  FAR struct mm_freenode_s *node;
  void *ret = NULL;
  int ndx;

  /* Handle bad sizes */

  if (size <= 0)
    {
      return NULL;
    }

  /* Adjust the size to account for (1) the size of the allocated node and
   * (2) to make sure that it is an even multiple of our granule size.
   */

  size = MM_ALIGN_UP(size + SIZEOF_MM_ALLOCNODE);

  /* We need to hold the MM semaphore while we muck with the nodelist. */

  mm_takesemaphore(heap);

  /* Get the location in the node list to start the search. Special case
   * really big allocations.
   */

  if (size >= MM_MAX_CHUNK)
    {
      ndx = MM_NNODES-1;
    }
  else
    {
      /* Convert the request size into a nodelist index */

      ndx = mm_size2ndx(size);
    }

  /* Search for a large enough chunk in the list of nodes. This list is
   * ordered by size, but will have occasional zero sized nodes as we visit
   * other mm_nodelist[] entries.
   */

  for (node = heap->mm_nodelist[ndx].flink;
       node && node->size < size;
       node = node->flink);

  /* If we found a node with non-zero size, then this is the one to use.
   * Since the list is ordered, we know that it must be the best fitting
   * chunk available.
   */

  if (node)
    {
      FAR struct mm_freenode_s *remainder;
      FAR struct mm_freenode_s *next;
      size_t remaining;

      /* Remove the node.  There must be a predecessor, but there may not be
       * a successor node.
       */

      DEBUGASSERT(node->blink);
      node->blink->flink = node->flink;
      if (node->flink)
        {
          node->flink->blink = node->blink;
        }

      /* Check if we have to split the free node into one of the allocated
       * size and another smaller freenode.  In some cases, the remaining
       * bytes can be smaller (they may be SIZEOF_MM_ALLOCNODE).  In that
       * case, we will just carry the few wasted bytes at the end of the
       * allocation.
       */

      remaining = node->size - size;
      if (remaining >= SIZEOF_MM_FREENODE)
        {
          /* Get a pointer to the next node in physical memory */

          next = (FAR struct mm_freenode_s*)(((char*)node) + node->size);

          /* Create the remainder node */

          remainder = (FAR struct mm_freenode_s*)(((char*)node) + size);
          remainder->size = remaining;
          remainder->preceding = size;

          /* Adjust the size of the node under consideration */

          node->size = size;

          /* Adjust the 'preceding' size of the (old) next node, preserving
           * the allocated flag.
           */

          next->preceding = remaining | (next->preceding & MM_ALLOC_BIT);

          /* Add the remainder back into the nodelist */

          mm_addfreechunk(heap, remainder);
        }

      /* Handle the case of an exact size match */

      node->preceding |= MM_ALLOC_BIT;
      ret = (void*)((char*)node + SIZEOF_MM_ALLOCNODE);
    }

  mm_givesemaphore(heap);

  /* If CONFIG_DEBUG_MM is defined, then output the result of the allocation
   * to the SYSLOG.
   */

#ifdef CONFIG_DEBUG_MM
  if (!ret)
    {
      mdbg("Allocation failed, size %d\n", size);
    }
  else
    {
      mvdbg("Allocated %p, size %d\n", ret, size);
    }
#endif

  return ret;
}
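
The search loop above walks a size-ordered singly linked free list, passing over the zero-sized mm_nodelist[] head entries, and stops at the first chunk that is large enough, which is therefore the best fit. The toy list below is illustrative only (the node type and sizes are made up) and just replays that loop.

#include <stdio.h>
#include <stddef.h>

/* Toy free node -- just enough to replay the ordered-list walk above.
 * The zero-sized entries stand in for the mm_nodelist[] head nodes that
 * the search passes through on its way to larger buckets.
 */

struct toy_freenode
{
  size_t size;
  struct toy_freenode *flink;
};

int main(void)
{
  struct toy_freenode n3 = { 256, NULL };
  struct toy_freenode h2 = { 0,   &n3  };   /* next bucket's head: size 0 */
  struct toy_freenode n2 = { 96,  &h2  };
  struct toy_freenode n1 = { 48,  &n2  };
  struct toy_freenode h1 = { 0,   &n1  };   /* starting bucket's head     */

  size_t size = 128;                        /* adjusted request size      */
  struct toy_freenode *node;

  /* Walk forward until a chunk at least as large as the request is found.
   * Because the list is ordered by size, the first hit is the best fit.
   */

  for (node = h1.flink; node && node->size < size; node = node->flink);

  if (node)
    {
      printf("best fit: %zu bytes for a request of %zu\n", node->size, size);
    }

  return 0;
}
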
Example 6
void kmm_givesemaphore(void)
{
  mm_givesemaphore(&g_kmmheap);
}
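
This wrapper simply re-exports mm_givesemaphore() for the kernel heap g_kmmheap. A hedged usage sketch: the companion kmm_takesemaphore() wrapper and the helper below are assumptions for illustration (same pattern, same heap); together the two wrappers bracket any code that walks or edits the kernel heap's nodelist.

/* Hypothetical example: bracket a critical section on the kernel heap.
 * kmm_takesemaphore() is assumed to be the take-side twin of the wrapper
 * above.
 */

void with_kernel_heap_locked(void (*operation)(void))
{
  kmm_takesemaphore();   /* take the kernel heap semaphore          */
  operation();           /* caller-supplied work on the kernel heap */
  kmm_givesemaphore();   /* release it again                        */
}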