Example 1
/**
* @fn                   :tc_umm_heap_random_malloc
* @brief                :Allocate memory through malloc with random size
* @scenario             :Allocate memory through malloc\n
*                        with random size
* @API's covered        :malloc, free
* @passcase             :When malloc function returns non null memory and memory is null after free.
* @failcase             :When malloc function returns null memory or memory is not null after free.
* @Preconditions        :NA
*/
static void tc_umm_heap_random_malloc(struct tcb_s *st_tcb)
{
	int *mem_ptr[ALLOC_FREE_TIMES] = { NULL };
	int allocated[ALLOC_FREE_TIMES] = { 0 };
	int alloc_cnt;
	int alloc_tc_cnt;
	int allocated_size = 0;

	srand(time(NULL));

	for (alloc_tc_cnt = 0; alloc_tc_cnt < TEST_TIMES; alloc_tc_cnt++) {
		allocated_size = 0;
		for (alloc_cnt = 0; alloc_cnt < ALLOC_FREE_TIMES; alloc_cnt++) {
			allocated[alloc_cnt] = rand();
			mem_ptr[alloc_cnt] = (int *)malloc(allocated[alloc_cnt]);
			TC_ASSERT_NOT_NULL("malloc", mem_ptr[alloc_cnt]);
		}
		for (alloc_cnt = 0; alloc_cnt < ALLOC_FREE_TIMES; alloc_cnt++) {
			/* We requested 'allocated[alloc_cnt]' bytes, but the heap actually
			   consumed MM_ALIGN_UP(allocated[alloc_cnt] + SIZEOF_MM_ALLOCNODE)
			   bytes because of the chunk header and alignment */
			allocated_size += MM_ALIGN_UP(allocated[alloc_cnt] + SIZEOF_MM_ALLOCNODE);
		}
		TC_ASSERT_EQ_ERROR_CLEANUP("malloc", st_tcb->curr_alloc_size, allocated_size, get_errno(), mem_deallocate_func(mem_ptr, ALLOC_FREE_TIMES));
		mem_deallocate_func(mem_ptr, ALLOC_FREE_TIMES);
		TC_ASSERT_EQ("random_malloc", st_tcb->curr_alloc_size, ALL_FREE);
	}
	TC_SUCCESS_RESULT();
}
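
The assertion on curr_alloc_size relies on the chunk accounting rule used throughout this page: a request of n bytes consumes MM_ALIGN_UP(n + SIZEOF_MM_ALLOCNODE) bytes of heap. Below is a minimal, self-contained sketch of that arithmetic; the granule and header sizes are hypothetical stand-ins for the platform values.

#include <stdio.h>

/* Hypothetical values; the real ones come from the platform's mm headers. */
#define MM_MIN_SHIFT        4
#define MM_GRAN_MASK        ((1 << MM_MIN_SHIFT) - 1)              /* 15 */
#define MM_ALIGN_UP(a)      (((a) + MM_GRAN_MASK) & ~MM_GRAN_MASK)
#define SIZEOF_MM_ALLOCNODE 8

int main(void)
{
	size_t request = 100;

	/* 100 + 8 = 108, rounded up to the next multiple of 16 -> 112 */
	printf("request=%zu consumes %zu heap bytes\n",
	       request, (size_t)MM_ALIGN_UP(request + SIZEOF_MM_ALLOCNODE));
	return 0;
}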
Example 2
File: k_mm.c Project: wosayttn/aos
kstat_t krhino_add_mm_region(k_mm_head *mmhead, void *addr, size_t len)
{
    void *orig_addr;
    k_mm_region_info_t *region;
    k_mm_list_t        *firstblk, *nextblk;

    NULL_PARA_CHK(mmhead);
    NULL_PARA_CHK(addr);

    orig_addr = addr;
    addr = (void *) MM_ALIGN_UP((size_t)addr);
    len -= (size_t)addr - (size_t)orig_addr;
    len = MM_ALIGN_DOWN(len);

    if ( !len || len < sizeof(k_mm_region_info_t) + MMLIST_HEAD_SIZE * 3 + MM_MIN_SIZE) {
        return RHINO_MM_POOL_SIZE_ERR;
    }

    memset(addr, 0, len);

    MM_CRITICAL_ENTER(mmhead);

    firstblk = init_mm_region(addr, len);
    nextblk  = MM_GET_NEXT_BLK(firstblk);

    /* Insert the new region into the linked list of regions */
    region = (k_mm_region_info_t *)firstblk->mbinfo.buffer;
    region->next = mmhead->regioninfo;
    mmhead->regioninfo = region;

#if (RHINO_CONFIG_MM_DEBUG > 0u)
    nextblk->dye   = RHINO_MM_CORRUPT_DYE;
    nextblk->owner = 0;
#endif

#if (K_MM_STATISTIC > 0)
    /* keep "used_size" not changed.
       change "used_size" here then k_mm_free will decrease it. */
    mmhead->used_size += MM_GET_BLK_SIZE(nextblk);
#endif

    MM_CRITICAL_EXIT(mmhead);

    /*mark nextblk as free*/
    k_mm_free(mmhead, nextblk->mbinfo.buffer);

    return RHINO_SUCCESS;
}
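
A hedged usage sketch for the function above: registering a spare SRAM bank with an already initialised heap. The buffer name and size are invented for illustration; only krhino_add_mm_region itself comes from the listing.

/* Assumes "k_mm.h" and <stdint.h> are included. */
static uint8_t extra_region[8 * 1024];   /* hypothetical second SRAM bank */

void heap_extend_example(k_mm_head *mmhead)
{
    kstat_t ret = krhino_add_mm_region(mmhead, extra_region, sizeof(extra_region));

    if (ret != RHINO_SUCCESS) {
        /* The region was too small (or NULL); the heap is left unchanged. */
    }
}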
Example 3
File: k_mm.c Project: wosayttn/aos
/* init a region, which contains 3 mmblks:
   -----------------------------------------------------------------------------
   | k_mm_list_t | k_mm_region_info_t | k_mm_list_t | free space | k_mm_list_t |
   -----------------------------------------------------------------------------

   "regionaddr" and "len" are aligned by the caller */
RHINO_INLINE k_mm_list_t *init_mm_region(void *regionaddr, size_t len)
{
    k_mm_list_t        *midblk, *lastblk, *firstblk;
    k_mm_region_info_t *region;

    /* "regionaddr" and "len" is aligned by caller */

    /*first mmblk for region info*/
    firstblk = (k_mm_list_t *) regionaddr;
    firstblk->prev  = NULL;
    firstblk->buf_size = MM_ALIGN_UP(sizeof(k_mm_region_info_t))
                         | RHINO_MM_ALLOCED | RHINO_MM_PREVALLOCED;
#if (RHINO_CONFIG_MM_DEBUG > 0u)
    firstblk->dye   = RHINO_MM_CORRUPT_DYE;
    firstblk->owner = 0;
#endif

    /*last mmblk for stop merge */
    lastblk = (k_mm_list_t *)((char *)regionaddr + len - MMLIST_HEAD_SIZE);

    /*middle mmblk for heap use */
    midblk = MM_GET_NEXT_BLK(firstblk);
    midblk->buf_size = ((char *)lastblk - (char *)midblk->mbinfo.buffer)
                       | RHINO_MM_ALLOCED | RHINO_MM_PREVALLOCED;
    midblk->mbinfo.free_ptr.prev = midblk->mbinfo.free_ptr.next = 0;

    /*last mmblk for stop merge */
    lastblk->prev   = midblk;
    /* set alloced, can't be merged */
    lastblk->buf_size = 0 | RHINO_MM_ALLOCED | RHINO_MM_PREVFREE;
#if (RHINO_CONFIG_MM_DEBUG > 0u)
    lastblk->dye    = RHINO_MM_CORRUPT_DYE;
    lastblk->owner  = 0;
#endif

    region = (k_mm_region_info_t *)firstblk->mbinfo.buffer;
    region->next = 0;
    region->end = lastblk;

    return firstblk;
}
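
MM_GET_NEXT_BLK is not defined in this listing, but the way midblk->buf_size is computed above (the distance from the buffer to lastblk) implies plain pointer arithmetic: the next block starts at the current buffer plus its payload size. A plausible sketch, assuming the status flags occupy the low bits of buf_size; the real macros live in k_mm.h and may differ in detail.

/* Sketch only, not the AliOS definitions. */
#define MM_BLK_STAT_MASK     ((size_t)(RHINO_MM_FREE | RHINO_MM_PREVFREE))
#define MM_GET_BUF_SIZE(blk) ((blk)->buf_size & ~MM_BLK_STAT_MASK)
#define MM_GET_NEXT_BLK(blk) \
    ((k_mm_list_t *)((char *)(blk)->mbinfo.buffer + MM_GET_BUF_SIZE(blk)))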
Example 4
static void do_mallocs(void **mem, const int *size, const int *seq, int n)
{
  int i;
  int j;

  for (i = 0; i < n; i++)
    {
      j = seq[i];
      if (!mem[j])
        {
          printf("(%d)Allocating %d bytes\n", i,  size[j]);

          mem[j] = malloc(size[j]);
          printf("(%d)Memory allocated at %p\n", i, mem[j]);

          if (mem[j] == NULL)
            {
              int allocsize = MM_ALIGN_UP(size[j] + SIZEOF_MM_ALLOCNODE);

              fprintf(stderr, "(%d)malloc failed for allocsize=%d\n", i, allocsize);
              if (allocsize > alloc_info.mxordblk)
                {
                   fprintf(stderr, "   Normal, largest free block is only %ld\n", alloc_info.mxordblk);
                }
              else
                {
                  fprintf(stderr, "   ERROR largest free block is %ld\n", alloc_info.mxordblk);
                  exit(1);
                }
            }
          else
            {
              memset(mem[j], 0xaa, size[j]);
            }

          mm_showmallinfo();
        }
    }
}
Example 5
static void do_reallocs(void **mem, const int *oldsize, const int *newsize, const int *seq, int n)
{
  int i;
  int j;

  for (i = 0; i < n; i++)
    {
      j = seq[i];
      printf("(%d)Re-allocating at %p from %d to %d bytes\n",
             i, mem[j], oldsize[j], newsize[j]);

      mem[j] = realloc(mem[j], newsize[j]);
      printf("(%d)Memory re-allocated at %p\n", i, mem[j]);

      if (mem[j] == NULL)
        {
          int allocsize = MM_ALIGN_UP(newsize[j] + SIZEOF_MM_ALLOCNODE);

          fprintf(stderr, "(%d)realloc failed for allocsize=%d\n", i, allocsize);
          if (allocsize > alloc_info.mxordblk)
            {
              fprintf(stderr, "   Normal, largest free block is only %ld\n", alloc_info.mxordblk);
            }
          else
            {
              fprintf(stderr, "   ERROR largest free block is %ld\n", alloc_info.mxordblk);
              exit(1);
            }
        }
      else
        {
          memset(mem[j], 0x55, newsize[j]);
        }

      mm_showmallinfo();
    }
}
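
A hedged driver sketch showing how do_mallocs() and do_reallocs() are typically exercised together; the size tables and the visit order below are invented for illustration (the real NuttX example carries its own tables and also frees the blocks afterwards).

#define NTEST 4

static const int alloc_sizes[NTEST]   = { 1024, 32, 4096, 80 };
static const int realloc_sizes[NTEST] = { 18, 3088, 96, 67 };
static const int random_order[NTEST]  = { 2, 0, 3, 1 };

static void *mem_ptrs[NTEST] = { NULL };

void run_mm_example(void)
{
  do_mallocs(mem_ptrs, alloc_sizes, random_order, NTEST);
  do_reallocs(mem_ptrs, alloc_sizes, realloc_sizes, random_order, NTEST);
}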
Example 6
FAR void *mm_memalign(FAR struct mm_heap_s *heap, size_t alignment,
                      size_t size)
{
  FAR struct mm_allocnode_s *node;
  size_t rawchunk;
  size_t alignedchunk;
  size_t mask = (size_t)(alignment - 1);
  size_t allocsize;

  /* If the requested alignment is less than or equal to the natural alignment
   * of malloc, then just let malloc do the work.
   */

  if (alignment <= MM_MIN_CHUNK)
    {
      return mm_malloc(heap, size);
    }

  /* Adjust the size to account for (1) the size of the allocated node, (2)
   * to make sure that it is an even multiple of our granule size, and to
   * include the alignment amount.
   *
   * Notice that we increase the allocation size by twice the requested
   * alignment.  We do this so that there will be at least two valid
   * alignment points within the allocated memory.
   *
   * NOTE:  These are sizes given to malloc and not chunk sizes. They do
   * not include SIZEOF_MM_ALLOCNODE.
   */

  size      = MM_ALIGN_UP(size);   /* Make multiples of our granule size */
  allocsize = size + 2*alignment;  /* Add double full alignment size */

  /* Then malloc that size */

  rawchunk = (size_t)mm_malloc(heap, allocsize);
  if (rawchunk == 0)
    {
      return NULL;
    }

  /* We need to hold the MM semaphore while we muck with the chunks and
   * nodelist.
   */

  mm_takesemaphore(heap);

  /* Get the node associated with the allocation and the next node after
   * the allocation.
   */

  node = (FAR struct mm_allocnode_s *)(rawchunk - SIZEOF_MM_ALLOCNODE);

  /* Find the aligned subregion */

  alignedchunk = (rawchunk + mask) & ~mask;

  /* Check if there is free space at the beginning of the aligned chunk */

  if (alignedchunk != rawchunk)
    {
      FAR struct mm_allocnode_s *newnode;
      FAR struct mm_allocnode_s *next;
      size_t precedingsize;

      /* Get the next node after the allocation. */

      next = (FAR struct mm_allocnode_s *)((FAR char *)node + node->size);

      /* Make sure that there is space to convert the preceding mm_allocnode_s
       * into an mm_freenode_s.  I think that this should always be true
       */

      DEBUGASSERT(alignedchunk >= rawchunk + 8);

      newnode = (FAR struct mm_allocnode_s *)(alignedchunk - SIZEOF_MM_ALLOCNODE);

      /* Preceding size is full size of the new 'node,' including
       * SIZEOF_MM_ALLOCNODE
       */

      precedingsize = (size_t)newnode - (size_t)node;

      /* If we were unlucky, then the alignedchunk can lie in such a position
       * that precedingsize < SIZEOF_MM_FREENODE.  We can't let that happen
       * because we are going to cast 'node' to struct mm_freenode_s below.
       * This is why we allocated memory large enough to support two
       * alignment points.  In this case, we will simply use the second
       * alignment point.
       */

      if (precedingsize < SIZEOF_MM_FREENODE)
        {
          alignedchunk += alignment;
          newnode       = (FAR struct mm_allocnode_s *)(alignedchunk - SIZEOF_MM_ALLOCNODE);
          precedingsize = (size_t)newnode - (size_t)node;
        }

      /* Set up the size of the new node */

      newnode->size = (size_t)next - (size_t)newnode;
      newnode->preceding = precedingsize | MM_ALLOC_BIT;

      /* Reduce the size of the original chunk and mark it not allocated. */

      node->size = precedingsize;
      node->preceding &= ~MM_ALLOC_BIT;

      /* Fix the preceding size of the next node */

      next->preceding = newnode->size | (next->preceding & MM_ALLOC_BIT);

      /* Convert the newnode chunk size back into malloc-compatible size by
       * subtracting the header size SIZEOF_MM_ALLOCNODE.
       */

      allocsize = newnode->size - SIZEOF_MM_ALLOCNODE;

      /* Add the original, newly freed node to the free nodelist */

      mm_addfreechunk(heap, (FAR struct mm_freenode_s *)node);

      /* Replace the original node with the new, properly aligned node */

      node = newnode;
    }

  /* Check if there is free space at the end of the aligned chunk */

  if (allocsize > size)
    {
      /* Shrink the chunk by that much -- remember, mm_shrinkchunk wants
       * internal chunk sizes that include SIZEOF_MM_ALLOCNODE, and not the
       * malloc-compatible sizes that we have.
       */

      mm_shrinkchunk(heap, node, size + SIZEOF_MM_ALLOCNODE);
    }

  mm_givesemaphore(heap);
  return (FAR void *)alignedchunk;
}
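
The aligned sub-chunk is located with the usual power-of-two round-up used above: mask = alignment - 1, then (rawchunk + mask) & ~mask. A small numeric sketch with made-up addresses:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  uintptr_t rawchunk  = 0x20001234;   /* hypothetical mm_malloc() result */
  uintptr_t alignment = 64;           /* must be a power of two */
  uintptr_t mask      = alignment - 1;
  uintptr_t aligned   = (rawchunk + mask) & ~mask;

  /* 0x20001234 rounds up to 0x20001240, the next 64-byte boundary. */
  printf("raw=0x%lx aligned=0x%lx\n",
         (unsigned long)rawchunk, (unsigned long)aligned);
  return 0;
}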
Example 7
FAR void *mm_realloc(FAR struct mm_heap_s *heap, FAR void *oldmem,
                     size_t size)
{
  FAR struct mm_allocnode_s *oldnode;
  FAR struct mm_freenode_s  *prev;
  FAR struct mm_freenode_s  *next;
  size_t oldsize;
  size_t prevsize = 0;
  size_t nextsize = 0;
  FAR void *newmem;

  /* If oldmem is NULL, then realloc is equivalent to malloc */

  if (!oldmem)
    {
      return mm_malloc(heap, size);
    }

  /* If size is zero, then realloc is equivalent to free */

  if (size < 1)
    {
      mm_free(heap, oldmem);
      return NULL;
    }

  /* Adjust the size to account for (1) the size of the allocated node and
   * (2) to make sure that it is an even multiple of our granule size.
   */

  size = MM_ALIGN_UP(size + SIZEOF_MM_ALLOCNODE);

  /* Map the memory chunk into an allocated node structure */

  oldnode = (FAR struct mm_allocnode_s *)((FAR char*)oldmem - SIZEOF_MM_ALLOCNODE);

  /* We need to hold the MM semaphore while we muck with the nodelist. */

  mm_takesemaphore(heap);

  /* Check if this is a request to reduce the size of the allocation. */

  oldsize = oldnode->size;
  if (size <= oldsize)
    {
      /* Handle the special case where we are not going to change the size
       * of the allocation.
       */

      if (size < oldsize)
        {
          mm_shrinkchunk(heap, oldnode, size);
        }

      /* Then return the original address */

      mm_givesemaphore(heap);
      return oldmem;
    }

  /* This is a request to increase the size of the allocation.  Get the
   * available sizes before and after the oldnode so that we can make the
   * best decision
   */

  next = (FAR struct mm_freenode_s *)((FAR char*)oldnode + oldnode->size);
  if ((next->preceding & MM_ALLOC_BIT) == 0)
    {
      nextsize = next->size;
    }

  prev = (FAR struct mm_freenode_s *)((FAR char*)oldnode - (oldnode->preceding & ~MM_ALLOC_BIT));
  if ((prev->preceding & MM_ALLOC_BIT) == 0)
    {
      prevsize = prev->size;
    }

  /* Now, check if we can extend the current allocation or not */

  if (nextsize + prevsize + oldsize >= size)
    {
      size_t needed   = size - oldsize;
      size_t takeprev = 0;
      size_t takenext = 0;

      /* Check if we can extend into the previous chunk and if the
       * previous chunk is smaller than the next chunk.
       */

      if (prevsize > 0 && (nextsize >= prevsize || nextsize < 1))
        {
          /* Can we get everything we need from the previous chunk? */

          if (needed > prevsize)
            {
              /* No, take the whole previous chunk and get the
               * rest that we need from the next chunk.
               */

              takeprev = prevsize;
              takenext = needed - prevsize;
            }
          else
            {
              /* Yes, take what we need from the previous chunk */

              takeprev = needed;
              takenext = 0;
            }

          needed = 0;
        }

      /* Check if we can extend into the next chunk and if we still need
       * more memory.
       */

      if (nextsize > 0 && needed)
        {
          /* Can we get everything we need from the next chunk? */

          if (needed > nextsize)
            {
              /* No, take the whole next chunk and get the rest that we
               * need from the previous chunk.
               */

              takeprev = needed - nextsize;
              takenext = nextsize;
            }
          else
            {
              /* Yes, take what we need from the next chunk */

              takeprev = 0;
              takenext = needed;
            }
        }

      /* Extend into the previous free chunk */

      newmem = oldmem;
      if (takeprev)
        {
          FAR struct mm_allocnode_s *newnode;

          /* Remove the previous node.  There must be a predecessor, but
           * there may not be a successor node.
           */

          DEBUGASSERT(prev->blink);
          prev->blink->flink = prev->flink;
          if (prev->flink)
            {
              prev->flink->blink = prev->blink;
            }

          /* Extend the node into the previous free chunk */

          newnode = (FAR struct mm_allocnode_s *)((FAR char*)oldnode - takeprev);

          /* Did we consume the entire preceding chunk? */

          if (takeprev < prevsize)
            {
              /* No.. just take what we need from the previous chunk and put
               * it back into the free list
               */

              prev->size        -= takeprev;
              newnode->size      = oldsize + takeprev;
              newnode->preceding = prev->size | MM_ALLOC_BIT;
              next->preceding    = newnode->size | (next->preceding & MM_ALLOC_BIT);

              /* Return the previous free node to the nodelist (with the new size) */

              mm_addfreechunk(heap, prev);
            }
          else
            {
              /* Yes.. update its size (newnode->preceding is already set) */

              newnode->size      += oldsize;
              newnode->preceding |= MM_ALLOC_BIT;
              next->preceding     = newnode->size | (next->preceding & MM_ALLOC_BIT);
            }

          /* Now we want to return newnode */

          oldnode = newnode;
          oldsize = newnode->size;

          /* Now we have to move the user contents 'down' in memory.  memcpy
           * should be safe for this.
           */

          newmem = (FAR void*)((FAR char*)newnode + SIZEOF_MM_ALLOCNODE);
          memcpy(newmem, oldmem, oldsize - SIZEOF_MM_ALLOCNODE);
        }

      /* Extend into the next free chunk */

      if (takenext)
        {
          FAR struct mm_freenode_s *newnode;
          FAR struct mm_allocnode_s *andbeyond;

          /* Get the chunk following the next node (which could be the tail
           * chunk)
           */

          andbeyond = (FAR struct mm_allocnode_s*)((char*)next + nextsize);

          /* Remove the next node.  There must be a predecessor, but there
           * may not be a successor node.
           */

          DEBUGASSERT(next->blink);
          next->blink->flink = next->flink;
          if (next->flink)
            {
              next->flink->blink = next->blink;
            }

          /* Extend the node into the next chunk */

          oldnode->size = oldsize + takenext;
          newnode       = (FAR struct mm_freenode_s *)((char*)oldnode + oldnode->size);

          /* Did we consume the entire next chunk? */

          if (takenext < nextsize)
            {
              /* No, take what we need from the next chunk and return it to
               * the free nodelist.
               */

              newnode->size        = nextsize - takenext;
              newnode->preceding   = oldnode->size;
              andbeyond->preceding = newnode->size | (andbeyond->preceding & MM_ALLOC_BIT);

              /* Add the new free node to the nodelist (with the new size) */

              mm_addfreechunk(heap, newnode);
            }
          else
            {
              /* Yes, just update some pointers. */

              andbeyond->preceding = oldnode->size | (andbeyond->preceding & MM_ALLOC_BIT);
            }
        }

      mm_givesemaphore(heap);
      return newmem;
    }

  /* The current chunk cannot be extended.  Just allocate a new chunk and copy */

  else
    {
      /* Allocate a new block.  On failure, realloc must return NULL but
       * leave the original memory in place.
       */

      mm_givesemaphore(heap);
      newmem = (FAR void*)mm_malloc(heap, size);
      if (newmem)
        {
          memcpy(newmem, oldmem, oldsize - SIZEOF_MM_ALLOCNODE);
          mm_free(heap, oldmem);
        }

      return newmem;
    }
}
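
One concrete outcome of the takeprev/takenext selection above, restated as a stand-alone sketch with invented sizes: growing a 64-byte chunk by 48 bytes when the previous free neighbour holds 32 bytes and the next holds 96. Because the previous chunk is the smaller one, it is consumed whole and the remainder comes from the next chunk.

#include <assert.h>
#include <stddef.h>

int main(void)
{
  size_t needed = 48, prevsize = 32, nextsize = 96;
  size_t takeprev = 0, takenext = 0;

  /* Same branch structure as mm_realloc() above, minus the list surgery. */
  if (prevsize > 0 && (nextsize >= prevsize || nextsize < 1))
    {
      takeprev = (needed > prevsize) ? prevsize : needed;
      takenext = (needed > prevsize) ? needed - prevsize : 0;
      needed   = 0;
    }

  if (nextsize > 0 && needed)          /* skipped here: needed is already 0 */
    {
      takeprev = (needed > nextsize) ? needed - nextsize : 0;
      takenext = (needed > nextsize) ? nextsize : needed;
    }

  /* The whole previous chunk (32 bytes) plus 16 bytes of the next chunk. */
  assert(takeprev == 32 && takenext == 16);
  return 0;
}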
Example 8
void mm_addregion(FAR void *heapstart, size_t heapsize)
{
  FAR struct mm_freenode_s *node;
  uintptr_t heapbase;
  uintptr_t heapend;
#if CONFIG_MM_REGIONS > 1
  int IDX = g_nregions;
#else
# define IDX 0
#endif

  /* If the MCU handles wide addresses but the memory manager
   * is configured for a small heap, then verify that the caller
   * is not doing something crazy.
   */

#if defined(CONFIG_MM_SMALL) && !defined(CONFIG_SMALL_MEMORY)
  DEBUGASSERT(heapsize <= MMSIZE_MAX+1);
#endif

  /* Adjust the provided heap start and size so that they are
   * both aligned with the MM_MIN_CHUNK size.
   */

  heapbase = MM_ALIGN_UP((uintptr_t)heapstart);
  heapend  = MM_ALIGN_DOWN((uintptr_t)heapstart + (uintptr_t)heapsize);
  heapsize = heapend - heapbase;

  mlldbg("Region %d: base=%p size=%u\n", IDX+1, heapstart, heapsize);

  /* Add the size of this region to the total size of the heap */

  g_heapsize += heapsize;

  /* Create two "allocated" guard nodes at the beginning and end of
   * the heap.  These only serve to keep us from allocating outside
   * of the heap.
   * 
   * And create one free node between the guard nodes that contains
   * all available memory.
   */

  g_heapstart[IDX]            = (FAR struct mm_allocnode_s *)heapbase;
  g_heapstart[IDX]->size      = SIZEOF_MM_ALLOCNODE;
  g_heapstart[IDX]->preceding = MM_ALLOC_BIT;

  node                        = (FAR struct mm_freenode_s *)(heapbase + SIZEOF_MM_ALLOCNODE);
  node->size                  = heapsize - 2*SIZEOF_MM_ALLOCNODE;
  node->preceding             = SIZEOF_MM_ALLOCNODE;

  g_heapend[IDX]              = (FAR struct mm_allocnode_s *)(heapend - SIZEOF_MM_ALLOCNODE);
  g_heapend[IDX]->size        = SIZEOF_MM_ALLOCNODE;
  g_heapend[IDX]->preceding   = node->size | MM_ALLOC_BIT;

#undef IDX

#if CONFIG_MM_REGIONS > 1
  g_nregions++;
#endif

  /* Add the single, large free node to the nodelist */

  mm_addfreechunk(node);
}
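
A hedged usage sketch: adding a second RAM bank as a heap region during board bring-up. The buffer name and size are invented; CONFIG_MM_REGIONS must be greater than 1 for more than one region to be accepted.

/* Assumes <stdint.h> is included. */
static uint8_t g_sram2_heap[16 * 1024];   /* hypothetical second SRAM bank */

void board_add_sram2_heap(void)
{
  mm_addregion(g_sram2_heap, sizeof(g_sram2_heap));
}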
Example 9
FAR void *mm_malloc(FAR struct mm_heap_s *heap, size_t size)
{
  FAR struct mm_freenode_s *node;
  void *ret = NULL;
  int ndx;

  /* Handle bad sizes */

  if (size <= 0)
    {
      return NULL;
    }

  /* Adjust the size to account for (1) the size of the allocated node and
   * (2) to make sure that it is an even multiple of our granule size.
   */

  size = MM_ALIGN_UP(size + SIZEOF_MM_ALLOCNODE);

  /* We need to hold the MM semaphore while we muck with the nodelist. */

  mm_takesemaphore(heap);

  /* Get the location in the node list to start the search. Special case
   * really big allocations
   */

  if (size >= MM_MAX_CHUNK)
    {
      ndx = MM_NNODES-1;
    }
  else
    {
      /* Convert the request size into a nodelist index */

      ndx = mm_size2ndx(size);
    }

  /* Search for a large enough chunk in the list of nodes. This list is
   * ordered by size, but will have occasional zero sized nodes as we visit
   * other mm_nodelist[] entries.
   */

  for (node = heap->mm_nodelist[ndx].flink;
       node && node->size < size;
       node = node->flink);

  /* If we found a node with non-zero size, then this is one to use. Since
   * the list is ordered, we know that it must be the best-fitting chunk
   * available.
   */

  if (node)
    {
      FAR struct mm_freenode_s *remainder;
      FAR struct mm_freenode_s *next;
      size_t remaining;

      /* Remove the node.  There must be a predecessor, but there may not be
       * a successor node.
       */

      DEBUGASSERT(node->blink);
      node->blink->flink = node->flink;
      if (node->flink)
        {
          node->flink->blink = node->blink;
        }

      /* Check if we have to split the free node into one of the allocated
       * size and another smaller freenode.  In some cases, the remaining
       * bytes can be smaller (they may be SIZEOF_MM_ALLOCNODE).  In that
       * case, we will just carry the few wasted bytes at the end of the
       * allocation.
       */

      remaining = node->size - size;
      if (remaining >= SIZEOF_MM_FREENODE)
        {
          /* Get a pointer to the next node in physical memory */

          next = (FAR struct mm_freenode_s*)(((char*)node) + node->size);

          /* Create the remainder node */

          remainder = (FAR struct mm_freenode_s*)(((char*)node) + size);
          remainder->size = remaining;
          remainder->preceding = size;

          /* Adjust the size of the node under consideration */

          node->size = size;

          /* Adjust the 'preceding' size of the (old) next node, preserving
           * the allocated flag.
           */

          next->preceding = remaining | (next->preceding & MM_ALLOC_BIT);

          /* Add the remainder back into the nodelist */

          mm_addfreechunk(heap, remainder);
        }

      /* Handle the case of an exact size match */

      node->preceding |= MM_ALLOC_BIT;
      ret = (void*)((char*)node + SIZEOF_MM_ALLOCNODE);
    }

  mm_givesemaphore(heap);

  /* If CONFIG_DEBUG_MM is defined, then output the result of the allocation
   * to the SYSLOG.
   */

#ifdef CONFIG_DEBUG_MM
  if (!ret)
    {
      mdbg("Allocation failed, size %d\n", size);
    }
  else
    {
      mvdbg("Allocated %p, size %d\n", ret, size);
    }
#endif

  return ret;
}
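
mm_size2ndx() is not part of this listing; below is a plausible sketch of the mapping it performs, assuming MM_MIN_SHIFT is log2 of the smallest chunk size, so each nodelist bucket covers one power-of-two size range.

/* Sketch only; the real helper lives in the NuttX mm sources.
 * MM_MIN_SHIFT = log2(MM_MIN_CHUNK) is assumed to come from the mm headers. */
static int mm_size2ndx_sketch(size_t size)
{
  int ndx = 0;

  size >>= MM_MIN_SHIFT;        /* normalise to units of the minimum chunk */
  while (size > 1)
    {
      ndx++;
      size >>= 1;
    }

  return ndx;
}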
Example 10
File: k_mm.c Project: wosayttn/aos
void *k_mm_realloc(k_mm_head *mmhead, void *oldmem, size_t new_size)
{
    void        *ptr_aux = NULL;
    uint32_t     cpsize;
    k_mm_list_t *this_b, *split_b, *next_b;
    size_t       old_size, split_size;
    size_t       req_size = 0;

    (void)req_size;

    if (oldmem == NULL) {
        if (new_size > 0) {
            return (void *) k_mm_alloc(mmhead, new_size);
        } else {
            return NULL;
        }
    } else if (new_size == 0) {
        k_mm_free(mmhead, oldmem);
        return NULL;
    }

    req_size =  new_size;

    MM_CRITICAL_ENTER(mmhead);

#if (RHINO_CONFIG_MM_BLK > 0)
    /* begin of the mmblk case: oldmem came from the fixed-size block pool */
    if (krhino_mblk_check(mmhead->fix_pool, oldmem)) {
        /*it's fixed size memory block*/
        if (new_size <= RHINO_CONFIG_MM_BLK_SIZE) {
            ptr_aux = oldmem;
            MM_CRITICAL_EXIT(mmhead);
        } else {
            MM_CRITICAL_EXIT(mmhead);
            ptr_aux  = k_mm_alloc(mmhead, new_size);
            if (ptr_aux) {
                memcpy(ptr_aux, oldmem, RHINO_CONFIG_MM_BLK_SIZE);
                k_mm_smallblk_free(mmhead, oldmem);
            }
        }
        return ptr_aux;
    }
    /*end of mmblk case*/
#endif

    /* check whether there is a free block behind oldmem */
    this_b   = MM_GET_THIS_BLK(oldmem);
    old_size = MM_GET_BUF_SIZE(this_b);
    next_b   = MM_GET_NEXT_BLK(this_b);
    new_size = MM_ALIGN_UP(new_size);
    new_size = new_size < MM_MIN_SIZE ? MM_MIN_SIZE : new_size;

    if (new_size <= old_size) {
        /* shrink blk */
        stats_removesize(mmhead, MM_GET_BLK_SIZE(this_b));
        if (next_b->buf_size & RHINO_MM_FREE) {
            /* merge next free */
            k_mm_freelist_delete(mmhead, next_b);
            old_size += MM_GET_BLK_SIZE(next_b);
            next_b = MM_GET_NEXT_BLK(next_b);
        }
        if (old_size >= new_size + MMLIST_HEAD_SIZE + MM_MIN_SIZE) {
            /* split blk */
            split_size = old_size - new_size - MMLIST_HEAD_SIZE;

            this_b->buf_size = new_size | (this_b->buf_size & RHINO_MM_PRESTAT_MASK);
            split_b = MM_GET_NEXT_BLK(this_b);

            split_b->prev  = this_b;
            split_b->buf_size = split_size | RHINO_MM_FREE | RHINO_MM_PREVALLOCED;
#if (RHINO_CONFIG_MM_DEBUG > 0u)
            split_b->dye   = RHINO_MM_FREE_DYE;
            split_b->owner = 0;
#endif
            next_b->prev = split_b;
            next_b->buf_size |= RHINO_MM_PREVFREE;
            k_mm_freelist_insert(mmhead, split_b);
        }
        stats_addsize(mmhead, MM_GET_BLK_SIZE(this_b), req_size);
        ptr_aux = (void *)this_b->mbinfo.buffer;
    } else if ((next_b->buf_size & RHINO_MM_FREE)) {
        /* enlarge blk */
        if (new_size <= (old_size + MM_GET_BLK_SIZE(next_b))) {
            stats_removesize(mmhead, MM_GET_BLK_SIZE(this_b));

            /* delete next blk from freelist */
            k_mm_freelist_delete(mmhead, next_b);

            /* enlarge this blk */
            this_b->buf_size += MM_GET_BLK_SIZE(next_b);

            next_b = MM_GET_NEXT_BLK(this_b);
            next_b->prev = this_b;
            next_b->buf_size &= ~RHINO_MM_PREVFREE;

            if (MM_GET_BUF_SIZE(this_b) >= new_size + MMLIST_HEAD_SIZE + MM_MIN_SIZE) {
                /* split blk */
                split_size = MM_GET_BUF_SIZE(this_b) - new_size - MMLIST_HEAD_SIZE;

                this_b->buf_size = new_size | (this_b->buf_size & RHINO_MM_PRESTAT_MASK);
                split_b = MM_GET_NEXT_BLK(this_b);

                split_b->prev = this_b;
                split_b->buf_size = split_size | RHINO_MM_FREE | RHINO_MM_PREVALLOCED;
#if (RHINO_CONFIG_MM_DEBUG > 0u)
                split_b->dye   = RHINO_MM_FREE_DYE;
                split_b->owner = 0;
#endif
                next_b->prev = split_b;
                next_b->buf_size |= RHINO_MM_PREVFREE;
                k_mm_freelist_insert(mmhead, split_b);
            }
            stats_addsize(mmhead, MM_GET_BLK_SIZE(this_b), req_size);
            ptr_aux = (void *)this_b->mbinfo.buffer;
        }
    }

    if (ptr_aux) {

#if (RHINO_CONFIG_MM_DEBUG > 0u)
        this_b->dye   = RHINO_MM_CORRUPT_DYE;
#endif

        MM_CRITICAL_EXIT(mmhead);
        return ptr_aux;
    }

    MM_CRITICAL_EXIT(mmhead);

    /* re alloc blk */
    ptr_aux = k_mm_alloc(mmhead, new_size);
    if (!ptr_aux) {
        return NULL;
    }

    cpsize = (MM_GET_BUF_SIZE(this_b) > new_size) ? new_size : MM_GET_BUF_SIZE(this_b);

    memcpy(ptr_aux, oldmem, cpsize);
    k_mm_free(mmhead, oldmem);

    return ptr_aux;
}
Example 11
File: k_mm.c Project: wosayttn/aos
void *k_mm_alloc(k_mm_head *mmhead, size_t size)
{
    void        *retptr;
    k_mm_list_t *get_b, *new_b, *next_b;
    int32_t      level;
    size_t       left_size;
    size_t       req_size = size;
#if (RHINO_CONFIG_MM_BLK > 0)
    mblk_pool_t *mm_pool;
#endif

    (void)req_size;

    if (!mmhead) {
        return NULL;
    }

    if (size == 0) {
        return NULL;
    }

    MM_CRITICAL_ENTER(mmhead);

#if (RHINO_CONFIG_MM_BLK > 0)
    /* little blk, try to get from mm_pool */
    if (mmhead->fix_pool != NULL) {
        mm_pool = (mblk_pool_t *)mmhead->fix_pool;
        if (size <= RHINO_CONFIG_MM_BLK_SIZE && mm_pool->blk_avail > 0) {
            retptr =  k_mm_smallblk_alloc(mmhead, size);
            if (retptr) {
                MM_CRITICAL_EXIT(mmhead);
                return retptr;
            }
        }
    }
#endif

    retptr = NULL;

    size = MM_ALIGN_UP(size);
    size = size < MM_MIN_SIZE ? MM_MIN_SIZE : size;

    if ((level = size_to_level(size)) == -1) {
        goto ALLOCEXIT;
    }

    /* try to find in higher level */
    get_b = find_up_level(mmhead, level);
    if (get_b == NULL) {
        /* try to find in same level */
        get_b = mmhead->freelist[level];
        while ( get_b != NULL ) {
            if ( MM_GET_BUF_SIZE(get_b) >= size ) {
                break;
            }
            get_b = get_b->mbinfo.free_ptr.next;
        }

        if ( get_b == NULL ) {
            /* no available free block found */
            goto ALLOCEXIT;
        }
    }
    k_mm_freelist_delete(mmhead, get_b);

    next_b = MM_GET_NEXT_BLK(get_b);

    /* Should the block be split? */
    if (MM_GET_BUF_SIZE(get_b) >= size + MMLIST_HEAD_SIZE + MM_MIN_SIZE) {
        left_size = MM_GET_BUF_SIZE(get_b) - size - MMLIST_HEAD_SIZE;

        get_b->buf_size = size | (get_b->buf_size & RHINO_MM_PRESTAT_MASK);
        new_b = MM_GET_NEXT_BLK(get_b);

        new_b->prev = get_b;
        new_b->buf_size = left_size | RHINO_MM_FREE | RHINO_MM_PREVALLOCED;
#if (RHINO_CONFIG_MM_DEBUG > 0u)
        new_b->dye   = RHINO_MM_FREE_DYE;
        new_b->owner = 0;
#endif
        next_b->prev = new_b;
        k_mm_freelist_insert(mmhead, new_b);
    } else {
        next_b->buf_size &= (~RHINO_MM_PREVFREE);
    }
    get_b->buf_size &= (~RHINO_MM_FREE);       /* Now it's used */

#if (RHINO_CONFIG_MM_DEBUG > 0u)
    get_b->dye   = RHINO_MM_CORRUPT_DYE;
    get_b->owner = 0;
#endif
    retptr = (void *)get_b->mbinfo.buffer;
    if (retptr != NULL) {
        stats_addsize(mmhead, MM_GET_BLK_SIZE(get_b), req_size);
    }

ALLOCEXIT:

    MM_CRITICAL_EXIT(mmhead);

    return retptr ;
}
Example 12
File: k_mm.c Project: wosayttn/aos
kstat_t krhino_init_mm_head(k_mm_head **ppmmhead, void *addr, size_t len )
{
    k_mm_list_t *nextblk;
    k_mm_list_t *firstblk;
    k_mm_head   *pmmhead;
    void        *orig_addr;
#if (RHINO_CONFIG_MM_BLK > 0)
    mblk_pool_t *mmblk_pool;
    kstat_t      stat;
#endif

    NULL_PARA_CHK(ppmmhead);
    NULL_PARA_CHK(addr);

    /* check parameters: addr and len need to be aligned.
       1.  the length needs at least RHINO_CONFIG_MM_TLF_BLK_SIZE for the fixed-size memory block pool
       2.  and also at least 1K left over for user allocations
    */
    orig_addr = addr;
    addr = (void *) MM_ALIGN_UP((size_t)addr);
    len -= (size_t)addr - (size_t)orig_addr;
    len = MM_ALIGN_DOWN(len);

    if ( len == 0
         || len < MIN_FREE_MEMORY_SIZE + RHINO_CONFIG_MM_TLF_BLK_SIZE
         || len > MM_MAX_SIZE) {
        return RHINO_MM_POOL_SIZE_ERR;
    }

    pmmhead = (k_mm_head *)addr;

    /* Zeroing the memory head */
    memset(pmmhead, 0, sizeof(k_mm_head));
#if (RHINO_CONFIG_MM_REGION_MUTEX > 0)
    krhino_mutex_create(&pmmhead->mm_mutex, "mm_mutex");
#else
    krhino_spin_lock_init(&pmmhead->mm_lock);
#endif

    firstblk = init_mm_region((void *)((size_t)addr + MM_ALIGN_UP(sizeof(k_mm_head))),
                              MM_ALIGN_DOWN(len - sizeof(k_mm_head)));


    pmmhead->regioninfo = (k_mm_region_info_t *)firstblk->mbinfo.buffer;

    nextblk = MM_GET_NEXT_BLK(firstblk);

    *ppmmhead = pmmhead;

    /* mark it as free and set it in the bitmap */
#if (RHINO_CONFIG_MM_DEBUG > 0u)
    nextblk->dye   = RHINO_MM_CORRUPT_DYE;
    nextblk->owner = 0;
#endif

    /* release free blk */
    k_mm_free(pmmhead, nextblk->mbinfo.buffer);

    /* after the free, we need to access mmhead and nextblk again */

#if (K_MM_STATISTIC > 0)
    pmmhead->free_size = MM_GET_BUF_SIZE(nextblk);
    pmmhead->used_size = len - MM_GET_BUF_SIZE(nextblk);
    pmmhead->maxused_size = pmmhead->used_size;
#endif
    /* default no fixblk */
    pmmhead->fix_pool = NULL;

#if (RHINO_CONFIG_MM_BLK > 0)
    /* note: stats_addsize inside */
    mmblk_pool = k_mm_alloc(pmmhead,
                            RHINO_CONFIG_MM_TLF_BLK_SIZE + MM_ALIGN_UP(sizeof(mblk_pool_t)));
    if (mmblk_pool) {
        stat = krhino_mblk_pool_init(mmblk_pool, "fixed_mm_blk",
                                     (void *)((size_t)mmblk_pool + MM_ALIGN_UP(sizeof(mblk_pool_t))),
                                     RHINO_CONFIG_MM_BLK_SIZE, RHINO_CONFIG_MM_TLF_BLK_SIZE);
        if (stat == RHINO_SUCCESS) {
            pmmhead->fix_pool = mmblk_pool;
#if (K_MM_STATISTIC > 0)
            stats_removesize(pmmhead, RHINO_CONFIG_MM_TLF_BLK_SIZE);
#endif
        } else {
            /* note: stats_removesize inside */
            k_mm_free(pmmhead, mmblk_pool);
        }
#if (K_MM_STATISTIC > 0)
        pmmhead->maxused_size = pmmhead->used_size;
#endif
    }
#endif

    return RHINO_SUCCESS;
}
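
A hedged end-to-end usage sketch for the initialiser above: carving a heap out of a static buffer and exercising k_mm_alloc()/k_mm_free() on it. The buffer size is invented and error handling is reduced to the minimum.

/* Assumes "k_mm.h" and <stdint.h> are included. */
static uint8_t heap_buf[32 * 1024];   /* hypothetical heap backing store */

void mm_init_example(void)
{
    k_mm_head *mmhead = NULL;
    void      *p;

    if (krhino_init_mm_head(&mmhead, heap_buf, sizeof(heap_buf)) != RHINO_SUCCESS) {
        return;
    }

    p = k_mm_alloc(mmhead, 200);
    if (p != NULL) {
        k_mm_free(mmhead, p);
    }
}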