Пример #1
0
/* Walk every allocation recorded in the current data-stack frame and verify
 * that the sentry bytes written after each allocation are still intact,
 * panicking on any corruption. DEBUG-build consistency check run when a
 * frame is popped. */
static void t_pop_verify(void)
{
	struct stack_block *block;
	unsigned char *p;
	size_t pos, max_pos, used_size, alloc_size;

	/* start at the block/offset where this frame began */
	block = current_frame_block->block[frame_pos];
	pos = block->size - current_frame_block->block_space_used[frame_pos];
	while (block != NULL) {
		used_size = block->size - block->left;
		p = STACK_BLOCK_DATA(block);
		while (pos < used_size) {
			/* each allocation is prefixed with its stored size */
			alloc_size = *(size_t *)(p + pos);
			if (used_size - pos < alloc_size)
				i_panic("data stack: saved alloc size broken");
			pos += MEM_ALIGN(sizeof(alloc_size));
			/* sentry bytes sit between the end of the user data
			   and the aligned end of the allocation */
			max_pos = pos + MEM_ALIGN(alloc_size + SENTRY_COUNT);
			pos += alloc_size;

			for (; pos < max_pos; pos++) {
				if (p[pos] != CLEAR_CHR)
					i_panic("data stack: buffer overflow");
			}
		}

		/* if we had used t_buffer_get(), the rest of the buffer
		   may not contain CLEAR_CHRs. but we've already checked all
		   the allocations, so there's no need to check them anyway. */
		block = block->next;
		pos = 0;
	}
}
/**
 * Initializes the pbuf module.
 *
 * A large part of memory is allocated for holding the pool of pbufs.
 * The size of the individual pbufs in the pool is given by the size
 * parameter, and the number of pbufs in the pool by the num parameter.
 *
 * After the memory has been allocated, the pbufs are set up. The
 * ->next pointer in each pbuf is set up to point to the next pbuf in
 * the pool.
 *
 * NOTE(review): assumes PBUF_POOL_SIZE > 0 — with an empty pool, q would
 * stay NULL and q->next below would dereference NULL.
 */
void
pbuf_init(void)
{
  struct pbuf *p, *q = NULL;
  u16_t i;

  /* the raw pool storage may be unaligned; align the first pbuf header */
  pbuf_pool = (struct pbuf *)MEM_ALIGN(pbuf_pool_memory);

#if PBUF_STATS
  lwip_stats.pbuf.avail = PBUF_POOL_SIZE;
#endif /* PBUF_STATS */

  /* Set up ->next pointers to link the pbufs of the pool together */
  p = pbuf_pool;

  for(i = 0; i < PBUF_POOL_SIZE; ++i) {
    /* each pool entry is one struct pbuf header followed by its buffer */
    p->next = (struct pbuf *)((u8_t *)p + PBUF_POOL_BUFSIZE + sizeof(struct pbuf));
    p->len = p->tot_len = PBUF_POOL_BUFSIZE;
    /* payload starts at the first aligned address past the header */
    p->payload = MEM_ALIGN((void *)((u8_t *)p + sizeof(struct pbuf)));
    p->flags = PBUF_FLAG_POOL;
    q = p;
    p = p->next;
  }

  /* The ->next pointer of last pbuf is NULL to indicate that there
     are no more pbufs in the pool */
  q->next = NULL;

#if !SYS_LIGHTWEIGHT_PROT
  pbuf_pool_alloc_lock = 0;
  pbuf_pool_free_lock = 0;
  pbuf_pool_free_sem = sys_sem_new(1);
#endif
}
Пример #3
0
/*-----------------------------------------------------------------------------------*/
/**
 * Pop one entry from the free list of pool `type` and return a pointer to
 * its payload, which starts at the first MEM_ALIGNMENT-aligned address past
 * the struct memp header. Returns NULL when the pool is exhausted.
 * NOTE(review): no locking here — presumably callers serialize access.
 */
void *
memp_malloc(memp_t type)
{
    struct memp *memp;

    ASSERT("memp_malloc: type < MEMP_MAX", type < MEMP_MAX);

    memp = memp_tab[type];

    if(memp != NULL)
    {
        /* unlink the head entry from the free list */
        memp_tab[type] = memp->next;
        memp->next = NULL;
#ifdef MEMP_STATS
        ++stats.memp[type].used;
        if(stats.memp[type].used > stats.memp[type].max) {
            stats.memp[type].max = stats.memp[type].used;
        }
#endif /* MEMP_STATS */
        ASSERT("memp_malloc: memp properly aligned",
                ((uint32_t)MEM_ALIGN((uint8_t *)memp + sizeof(struct memp)) % MEM_ALIGNMENT) == 0);

        /* payload begins after the header, rounded up to MEM_ALIGNMENT */
        return MEM_ALIGN((uint8_t *)memp + sizeof(struct memp));
    } else {
        DEBUGF(MEMP_DEBUG, ("memp_malloc: out of memory in pool %d\n", type));
#ifdef MEMP_STATS
        ++stats.memp[type].err;
#endif /* MEMP_STATS */
        return NULL;
    }
}
Пример #4
0
/* Allocate `size` bytes from the block pool, aligned to ALIGN_SIZE.
 * Scans the block chain for a block with enough room, growing the pool with
 * a fresh block when none fits. Returns NULL on allocation failure. */
void* malloc(u32 size)
{
    mem_block_t* block = pool.current;
    while (block != NULL)
    {
        u8* mem = MEM_ALIGN(block->current, ALIGN_SIZE);
        if ((u32)(block->end - mem) >= size)
            break;

        /* BUG FIX: advance along the chain from the block being examined;
           the old `block = pool.current->next` re-read the same block every
           iteration and looped forever once the second block was full. */
        block = block->next;
    }

    if (block == NULL)
    {
        block = alloc_block(size);
        /* BUG FIX: alloc_block() can fail; don't dereference NULL below. */
        if (block == NULL)
            return NULL;
        pool.last->next = block;
        pool.last = block;
    }

    u8* mem = MEM_ALIGN(block->current, ALIGN_SIZE);
    if ((u32)(block->end - mem) >= size)
    {
        /* BUG FIX: bump `current` past the *aligned* start; advancing the
           old (unaligned) current by size let the next allocation overlap
           the tail of this one by the alignment padding. */
        block->current = mem + size;
        return mem;
    }

    return NULL;
}
Пример #5
0
/* Forget the last t_buffer_get() reservation. In DEBUG builds, also verify
 * the sentry bytes placed after the reservation and, unless preserve_data
 * is set, re-poison the start of the reserved area so stale data is
 * noticed. preserve_data is unused in non-DEBUG builds (ATTR_UNUSED). */
static void data_stack_last_buffer_reset(bool preserve_data ATTR_UNUSED)
{
	if (last_buffer_block != NULL) {
#ifdef DEBUG
		unsigned char *p;
		unsigned int i;

		/* p points just past the reserved buffer: used space +
		   aligned size prefix + aligned buffer size */
		p = STACK_BLOCK_DATA(current_block) +
			(current_block->size - current_block->left) +
			MEM_ALIGN(sizeof(size_t)) + MEM_ALIGN(last_buffer_size);
#endif
		/* reset t_buffer_get() mark - not really needed but makes it
		   easier to notice if t_malloc()/t_push()/t_pop() is called
		   between t_buffer_get() and t_buffer_alloc().
		   do this before we get to i_panic() to avoid recursive
		   panics. */
		last_buffer_block = NULL;

#ifdef DEBUG
		/* sentry bytes must be untouched or the caller overflowed */
		for (i = 0; i < SENTRY_COUNT; i++) {
			if (p[i] != CLEAR_CHR)
				i_panic("t_buffer_get(): buffer overflow");
		}

		if (!preserve_data) {
			p = STACK_BLOCK_DATA(current_block) +
				(current_block->size - current_block->left);
			memset(p, CLEAR_CHR, SENTRY_COUNT);
		}
#endif
	}
}
Пример #6
0
/* Re-reserve Size bytes (rounded up to the allocation alignment) at the top
   of the current stack frame by pushing HeapStackTop back up. */
void HeapUnpopStack(int Size)
{
#ifdef DEBUG_HEAP
    printf("HeapUnpopStack(%ld) at 0x%lx\n", (unsigned long)MEM_ALIGN(Size), (unsigned long)HeapStackTop);
#endif
    char *Top = (char *)HeapStackTop;
    Top += MEM_ALIGN(Size);
    HeapStackTop = (void *)Top;
}
Пример #7
0
/* Allocate a struct Value together with DataSize bytes of payload in a
   single chunk (heap or stack depending on OnHeap); the payload area starts
   at the first aligned address after the Value header and is reachable via
   NewValue->Val. */
struct Value *VariableAllocValueAndData(struct ParseState *Parser, int DataSize, int IsLValue, struct Value *LValueFrom, int OnHeap)
{
    const int HeaderSize = MEM_ALIGN(sizeof(struct Value));
    struct Value *NewValue = VariableAlloc(Parser, HeaderSize + DataSize, OnHeap);

    /* the data area immediately follows the aligned header */
    NewValue->Val = (union AnyValue *)((char *)NewValue + HeaderSize);
    NewValue->LValueFrom = LValueFrom;
    NewValue->IsLValue = IsLValue;
    NewValue->ValOnHeap = OnHeap;
    NewValue->ValOnStack = !OnHeap;
    return NewValue;
}
/**
 * Thread-safe memp_malloc(): pop one entry from pool `type` while holding
 * either SYS_ARCH protection or the module mutex, and return a pointer to
 * the aligned payload past the struct memp header. Returns NULL (after
 * bumping the error counter when MEMP_STATS) if the pool is empty.
 */
void *
memp_malloc(memp_t type)
{
  struct memp *memp;
  void *mem;
#if SYS_LIGHTWEIGHT_PROT
  SYS_ARCH_DECL_PROTECT(old_level);
#endif
 
  LWIP_ASSERT("memp_malloc: type < MEMP_MAX", type < MEMP_MAX);

#if SYS_LIGHTWEIGHT_PROT
  SYS_ARCH_PROTECT(old_level);
#else /* SYS_LIGHTWEIGHT_PROT */  
  sys_sem_wait(mutex);
#endif /* SYS_LIGHTWEIGHT_PROT */  

  memp = memp_tab[type];
  
  if (memp != NULL) {    
    /* unlink the head entry from the free list */
    memp_tab[type] = memp->next;    
    memp->next = NULL;
#if MEMP_STATS
    ++lwip_stats.memp[type].used;
    if (lwip_stats.memp[type].used > lwip_stats.memp[type].max) {
      lwip_stats.memp[type].max = lwip_stats.memp[type].used;
    }
#endif /* MEMP_STATS */
    /* release the lock before the (possibly slow) assert/return path */
#if SYS_LIGHTWEIGHT_PROT
    SYS_ARCH_UNPROTECT(old_level);
#else /* SYS_LIGHTWEIGHT_PROT */
    sys_sem_signal(mutex);
#endif /* SYS_LIGHTWEIGHT_PROT */  
    LWIP_ASSERT("memp_malloc: memp properly aligned",
     ((mem_ptr_t)MEM_ALIGN((u8_t *)memp + sizeof(struct memp)) % MEM_ALIGNMENT) == 0);

    /* payload begins after the header, rounded up to MEM_ALIGNMENT */
    mem = MEM_ALIGN((u8_t *)memp + sizeof(struct memp));
    return mem;
  } else {
    LWIP_DEBUGF(MEMP_DEBUG | 2, ("memp_malloc: out of memory in pool %d\n", type));
#if MEMP_STATS
    ++lwip_stats.memp[type].err;
#endif /* MEMP_STATS */
#if SYS_LIGHTWEIGHT_PROT
  SYS_ARCH_UNPROTECT(old_level);
#else /* SYS_LIGHTWEIGHT_PROT */
  sys_sem_signal(mutex);
#endif /* SYS_LIGHTWEIGHT_PROT */  
    return NULL;
  }
}
Пример #9
0
/* Emit RTL for an inline block copy of LENGTH bytes from SRC to DEST using
   loads/stores of the widest unit ALIGNMENT permits (1, 2 or 4 bytes).
   All full-width chunks are loaded into fresh pseudo registers first and
   stored afterwards; any leftover tail bytes are handed to
   move_by_pieces with the best alignment known for both operands. */
static void
lm32_block_move_inline (rtx dest, rtx src, HOST_WIDE_INT length, HOST_WIDE_INT alignment)
{
    HOST_WIDE_INT offset, delta;
    unsigned HOST_WIDE_INT bits;
    int i;
    enum machine_mode mode;
    rtx *regs;

    /* Work out how many bits to move at a time.  */
    switch (alignment)
    {
    case 1:
        bits = 8;
        break;
    case 2:
        bits = 16;
        break;
    case 4:
        bits = 32;
        break;
    default:
        abort ();
    }

    mode = mode_for_size (bits, MODE_INT, 0);
    delta = bits / BITS_PER_UNIT;

    /* Allocate a buffer for the temporary registers.  */
    regs = alloca (sizeof (rtx) * length / delta);

    /* Load as many BITS-sized chunks as possible.  */
    for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
    {
        regs[i] = gen_reg_rtx (mode);
        emit_move_insn (regs[i], adjust_address (src, mode, offset));
    }

    /* Copy the chunks to the destination.  */
    for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
        emit_move_insn (adjust_address (dest, mode, offset), regs[i]);

    /* Mop up any left-over bytes.  */
    if (offset < length)
    {
        src = adjust_address (src, BLKmode, offset);
        dest = adjust_address (dest, BLKmode, offset);
        move_by_pieces (dest, src, length - offset,
                        MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
    }
}
Пример #10
0
/* Reserve Size bytes (rounded up to the allocation alignment) at the top of
 * the current stack frame and zero them. Returns NULL when growing the
 * stack would collide with HeapBottom. */
void *HeapAllocStack(int Size)
{
    char *OldTop = (char *)HeapStackTop;
    char *NewTop = OldTop + MEM_ALIGN(Size);
#ifdef DEBUG_HEAP
    printf("HeapAllocStack(%ld) at 0x%lx\n", (unsigned long)MEM_ALIGN(Size), (unsigned long)HeapStackTop);
#endif
    /* out of stack space? */
    if (NewTop > (char *)HeapBottom)
        return NULL;

    HeapStackTop = (void *)NewTop;
    memset((void *)OldTop, '\0', Size);
    return OldTop;
}
Пример #11
0
/*-----------------------------------------------------------------------------------*/
/* Initialize the pbuf pool: link PBUF_POOL_SIZE fixed-size pbufs, laid out
 * back-to-back in pbuf_pool_memory, into a singly linked free list.
 * NOTE(review): q is uninitialized if PBUF_POOL_SIZE were 0 — assumes a
 * non-empty pool. */
void
pbuf_init(void)
{
  struct pbuf *p, *q;
  uint8_t i;

  pbuf_pool = (struct pbuf *)&pbuf_pool_memory[0];
  ASSERT("pbuf_init: pool aligned", (long)pbuf_pool % MEM_ALIGNMENT == 0);

#ifdef PBUF_STATS
  stats.pbuf.avail = PBUF_POOL_SIZE;
#endif /* PBUF_STATS */

  /* Set up ->next pointers to link the pbufs of the pool together. */
  p = pbuf_pool;

  for(i = 0; i < PBUF_POOL_SIZE; ++i) {
    /* each pool entry is one struct pbuf header followed by its buffer */
    p->next = (struct pbuf *)((uint8_t *)p + PBUF_POOL_BUFSIZE + sizeof(struct pbuf));
    p->len = p->tot_len = PBUF_POOL_BUFSIZE;
    /* payload starts at the first aligned address past the header */
    p->payload = MEM_ALIGN((void *)((uint8_t *)p + sizeof(struct pbuf)));
    q = p;
    p = p->next;
  }

  /* The ->next pointer of last pbuf is NULL to indicate that there
     are no more pbufs in the pool. */
  q->next = NULL;

  pbuf_pool_alloc_lock = 0;
  pbuf_pool_free_lock = 0;
  pbuf_pool_free_sem = sys_sem_new(1);

}
Пример #12
0
/* internal do-anything v[s][n]scanf() formatting system with input from strings or FILE * */
int StdioBaseScanf(struct ParseState *Parser, FILE *Stream, char *StrIn, char *Format, struct StdVararg *Args)
{
    /* ThisArg starts at the first vararg slot; each slot is an aligned
       struct Value header followed by its data, so stepping by
       MEM_ALIGN(header + data size) moves to the next argument. */
    struct Value *ThisArg = Args->Param[0];
    int ArgCount = 0;
    /* collected destination pointers, handed verbatim to [f]scanf below */
    void *ScanfArg[MAX_SCANF_ARGS];
    
    if (Args->NumArgs > MAX_SCANF_ARGS)
        ProgramFail(Parser, "too many arguments to scanf() - %d max", MAX_SCANF_ARGS);
    
    for (ArgCount = 0; ArgCount < Args->NumArgs; ArgCount++)
    {
        ThisArg = (struct Value *)((char *)ThisArg + MEM_ALIGN(sizeof(struct Value) + TypeStackSizeValue(ThisArg)));
        
        /* scanf targets must be addresses: accept pointers and arrays only */
        if (ThisArg->Typ->Base == TypePointer) 
            ScanfArg[ArgCount] = ThisArg->Val->Pointer;
        
        else if (ThisArg->Typ->Base == TypeArray)
            ScanfArg[ArgCount] = &ThisArg->Val->ArrayMem[0];
        
        else
            ProgramFail(Parser, "non-pointer argument to scanf() - argument %d after format", ArgCount+1);
    }
    
    /* always pass 10 slots; unused ones are ignored by the format string
       (assumes MAX_SCANF_ARGS == 10 — TODO confirm) */
    if (Stream != NULL)
        return fscanf(Stream, Format, ScanfArg[0], ScanfArg[1], ScanfArg[2], ScanfArg[3], ScanfArg[4], ScanfArg[5], ScanfArg[6], ScanfArg[7], ScanfArg[8], ScanfArg[9]);
    else
        return sscanf(StrIn, Format, ScanfArg[0], ScanfArg[1], ScanfArg[2], ScanfArg[3], ScanfArg[4], ScanfArg[5], ScanfArg[6], ScanfArg[7], ScanfArg[8], ScanfArg[9]);
}
Пример #13
0
/* Try to grow (or shrink) the most recent data-stack allocation in place.
 * Succeeds only when `mem` is the last allocation in the current block and
 * the block has room for the aligned new size; returns FALSE otherwise so
 * the caller can fall back to a copy. */
bool t_try_realloc(void *mem, size_t size)
{
	size_t last_alloc_size;

	if (unlikely(size == 0 || size > SSIZE_T_MAX))
		i_panic("Trying to allocate %"PRIuSIZE_T" bytes", size);

	last_alloc_size = current_frame_block->last_alloc_size[frame_pos];

	/* see if we're trying to grow the memory we allocated last */
	if (STACK_BLOCK_DATA(current_block) +
	    (current_block->size - current_block->left -
	     last_alloc_size) == mem) {
		/* yeah, see if we have space to grow */
		size = MEM_ALIGN(size);
		if (current_block->left >= size - last_alloc_size) {
			/* just shrink the available size */
			current_block->left -= size - last_alloc_size;
			current_frame_block->last_alloc_size[frame_pos] = size;
			return TRUE;
		}
	}

	return FALSE;
}
Пример #14
0
/* Debug dump of every header in pool `m`, walking the store front to back.
 * Prints each block/hole and stops early on the first header whose pool
 * pointer, magic value or size field looks corrupted. */
void mem_dump(mem_pool_t *m){
    mheader_t *h;

    //mutex_lock (&m->lock);
    debug_printf ("\npool $%x:", m);
    mheader_t* limit = (mheader_t*)((size_t)m->store+m->size);
    for (h=(mheader_t*)m->store; h<limit; h=SUCC(h)) {
        /* header must belong to this pool */
        if (h->pool != m){
            debug_printf ("bad block $%x[$%x]:$%hx on pool[$%x]\n"
                    , h, h->size
                    , h->magic, h->pool);
            break;
        }

        if (h->magic == MEMORY_BLOCK_MAGIC)
            debug_printf ("$%x[$%x] ", h, h->size);
        else if (h->magic == MEMORY_HOLE_MAGIC)
            debug_printf ("$%x[$%x]:->$%x\n", h, h->size, NEXT(h));
        else {
            debug_printf ("$%x[$%x]:bad magic %2s=$%hx\n", h, h->size, &h->magic, (int)(h->magic));
            break;
        }
        /* size must be aligned, at least a minimal header, and in-bounds */
        if ( (h->size != MEM_ALIGN(h->size))
             || (h->size < (MEM_HSIZE+ SIZEOF_ALIGN))
             || ( h->size > ((size_t)limit - (size_t)h) )
           )
        {
            debug_printf ("bad block $%x size $%x\n", h, h->size);
            break;
        }
    }
    //mutex_unlock (&m->lock);
}
Пример #15
0
/* Assert-based consistency check of pool `m`: walk every header in the
 * store and verify its pool back-pointer, magic value (block or hole) and
 * size (aligned, minimal, in-bounds). Halts via assert2 on corruption. */
void mem_validate(mem_pool_t *m){
    mheader_t *h;
    //mutex_lock (&m->lock);
    mheader_t* limit = (mheader_t*)((size_t)m->store+m->size);
    for (h=(mheader_t*)m->store; h<limit; h=SUCC(h)) {
        assert2( (h->pool == m)
                , "bad block $%x[$%x]:$%hx on pool[$%x]\n"
                , h, h->size
                , h->magic, h->pool
        );

        assert2( (h->magic == MEMORY_BLOCK_MAGIC) || (h->magic == MEMORY_HOLE_MAGIC)
                , "$%x[$%x]:bad magic %2s=$%hx\n"
                , h, h->size, &h->magic, (int)(h->magic)
                );
        assert2( ( (h->size == MEM_ALIGN(h->size))
                && (h->size >= (MEM_HSIZE+ SIZEOF_ALIGN))
                && (h->size <= ((size_t)limit - (size_t)h) )
                  )
                , "bad block $%x size $%x\n"
                , h, h->size
                );
    }
    //mutex_unlock (&m->lock);
}
Пример #16
0
/* Staged fatal-condition test for the memory pool: each stage performs one
 * p_malloc() call that must panic (size 0, size past SSIZE_T_MAX, size too
 * large to satisfy). The harness re-enters with the next stage number after
 * each expected panic; falling out of the switch ends the test. */
enum fatal_test_state fatal_mempool(int stage)
{
	static pool_t pool;

	switch(stage) {
	case 0: /* forbidden size */
		test_begin("fatal_mempool");
		pool = pool_alloconly_create(MEMPOOL_GROWING"fatal", 1);
		(void)p_malloc(pool, 0);
		return FATAL_TEST_FAILURE;

	case 1: /* logically impossible size */
		(void)p_malloc(pool, SSIZE_T_MAX + 1ULL);
		return FATAL_TEST_FAILURE;

	case 2: /* physically impossible size */
		(void)p_malloc(pool, SSIZE_T_MAX - (size_t)MEM_ALIGN(1));
		return FATAL_TEST_FAILURE;

	/* Continue with other tests as follows:
	case 3:
		something_fatal();
		return FATAL_TEST_FAILURE;
	*/
	}

	/* Either our tests have finished, or the test suite has got confused. */
	pool_unref(&pool);
	test_end();
	return FATAL_TEST_FINISHED;
}
Пример #17
0
/*-----------------------------------------------------------------------------------*/
/*-----------------------------------------------------------------------------------*/
/* Carve memp_memory into per-type free lists: for each pool type, link
 * memp_num[i] entries of MEM_ALIGN_SIZE(memp_sizes[i] + header) bytes into
 * memp_tab[i]. Types with no entries get a NULL list. */
void
lwbt_memp_init(void)
{
  struct memp *m, *memp;
  u16_t i, j;
  u16_t size;

  memp = (struct memp *)&memp_memory[0];
  for(i = 0; i < MEMP_LWBT_MAX; ++i) {
    /* per-entry stride: element size plus header, rounded up */
    size = MEM_ALIGN_SIZE(memp_sizes[i] + sizeof(struct memp));
    if(memp_num[i] > 0) {
      memp_tab[i] = memp;
      m = memp;
      
      /* after the loop, memp trails m by one entry, i.e. it points at the
         last entry created; m points at the start of the next region */
      for(j = 0; j < memp_num[i]; ++j) {
	m->next = (struct memp *)MEM_ALIGN((u8_t *)m + size);
	memp = m;
	m = m->next;
      }
      memp->next = NULL;
      memp = m;
    } else {
      memp_tab[i] = NULL;
    }
  }
}
Пример #18
0
/**
 * Allocate a block of memory from pool `m` without clearing it — the
 * returned memory may contain garbage. First-fit search over the hole
 * list; a hole large enough to leave a usable remainder is split, otherwise
 * it is consumed whole. Returns 0 when no hole fits. Thread-safe via the
 * pool mutex.
 */
void *mem_alloc_dirty (mem_pool_t *m, size_t required)
{
	mheader_t *h, **hprev, *newh;

        /* All allocations need to be several bytes larger than the
         * amount requested by our caller.  They also need to be large enough
         * that they can contain a "mheader_t" and any magic values used in
         * debugging (for when the block gets freed and becomes an isolated
         * hole). */
	if (required < SIZEOF_POINTER)
		required = SIZEOF_POINTER;
	required = MEM_ALIGN (required + MEM_HSIZE);

	mutex_lock (&m->lock);

	/* Scan the list of all available memory holes and find the first
	 * one that meets our requirement. */
	h = (mheader_t*) m->free_list;
	hprev = (mheader_t**) (void*) &m->free_list;
	while (h) {
	    mem_validate_hole(m, h);
        	if (h->size >= required)
        		break;

		hprev = &NEXT(h);
		h = NEXT(h);
	}

        /* Did we find any space available? */
        if (! h) {
		mutex_unlock (&m->lock);
		/*debug_printf ("mem_alloc failed, size=%d bytes\n", required);*/
		return 0;
	}

        /* Remove a chunk of space and, if we can, release any of what's left
	 * as a new hole.  If we can't release any then allocate more than was
	 * requested and remove this hole from the hole list. */
	if (h->size >= required + MEM_HSIZE + 2*SIZEOF_POINTER) {
		/* split: the remainder becomes a new hole after `required` */
		newh = (mheader_t*) ((size_t)h + required);
		newh->pool = h->pool;
		newh->size = h->size - required;
#if MEM_DEBUG
        newh->magic = MEMORY_HOLE_MAGIC;
#endif
        NEXT(newh) = NEXT(h);
		h->size = required;
	} else {
		/* too small to split: hand out the whole hole */
	    newh = NEXT(h);
	}
    /* unlink h (and link the remainder, if any) into the hole list */
    *hprev = newh;
#if MEM_DEBUG
	h->magic = MEMORY_BLOCK_MAGIC;
#endif
	m->free_size -= h->size;
	mutex_unlock (&m->lock);
	/*debug_printf ("mem %d bytes returned 0x%x\n", h->size, h+1);*/
	return BLOCK_OF(h);
}
/* Generated predicate (from i386 predicates.md): true for a MEM operand of
   MODE whose recorded alignment is below the mode's natural alignment.
   The #line directive maps diagnostics back to the .md source — keep the
   body byte-identical to the generator's output. */
int
misaligned_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return ((GET_CODE (op) == MEM) && (
#line 1134 "../.././gcc/config/i386/predicates.md"
(MEM_ALIGN (op) < GET_MODE_ALIGNMENT (mode)))) && (
(mode == VOIDmode || GET_MODE (op) == mode));
}
Пример #20
0
/* Data-stack t_buffer_get() tests: (1) a buffer can be grown in place while
 * it is the last reservation, and committing it consumes the block;
 * (2) a t_malloc() between two t_buffer_get() calls invalidates the first
 * reservation; (3) re-reserving past the block end moves the buffer but
 * preserves previously written bytes. */
static void test_ds_buffers(void)
{
	test_begin("data-stack buffer growth");
	T_BEGIN {
		size_t i;
		unsigned char *p;
		size_t left = t_get_bytes_available();
		while (left < 10000) {
			t_malloc_no0(left); /* force a new block */
			left = t_get_bytes_available();
		}
		left -= 64; /* make room for the sentry if DEBUG */
		p = t_buffer_get(1);
		p[0] = 1;
		for (i = 2; i <= left; i++) {
			/* grow it */
			unsigned char *p2 = t_buffer_get(i);
			test_assert_idx(p == p2, i);
			p[i-1] = i;
			test_assert_idx(p[i-2] == (unsigned char)(i-1), i);
		}
		/* now fix it permanently */
		t_buffer_alloc_last_full();
		test_assert(t_get_bytes_available() < 64 + MEM_ALIGN(1));
	} T_END;
	test_end();

	test_begin("data-stack buffer interruption");
	T_BEGIN {
		void *b = t_buffer_get(1000);
		void *a = t_malloc_no0(1);
		void *b2 = t_buffer_get(1001);
		test_assert(a == b); /* expected, not guaranteed */
		test_assert(b2 != b);
	} T_END;
	test_end();

	test_begin("data-stack buffer with reallocs");
	T_BEGIN {
		size_t bigleft = t_get_bytes_available();
		size_t i;
		for (i = 1; i < bigleft-64; i += i_rand()%32) T_BEGIN {
			unsigned char *p, *p2;
			size_t left;
			t_malloc_no0(i);
			left = t_get_bytes_available();
			/* The most useful idx for the assert is 'left' */
			test_assert_idx(left <= bigleft-i, left);
			p = t_buffer_get(left/2);
			p[0] = 'Z'; p[left/2 - 1] = 'Z';
			p2 = t_buffer_get(left + left/2);
			test_assert_idx(p != p2, left);
			test_assert_idx(p[0] == 'Z', left);
			test_assert_idx(p[left/2 -1] == 'Z', left);
		} T_END;
	} T_END;
	test_end();
}
Пример #21
0
/* Initialize an mblock allocator: store the aligned element size, derive
 * alloc_elements_once (given, or enough blocks to fill ~1 MB), optionally
 * create the lock, and reset all chain heads. Returns 0 on success or an
 * errno value (EINVAL for a bad element size, lock-init failure code). */
int fast_mblock_init_ex(struct fast_mblock_man *mblock, const int element_size,
		const int alloc_elements_once, fast_mblock_alloc_init_func init_func,
        const bool need_lock)
{
	int result;

	if (element_size <= 0)
	{
		logError("file: "__FILE__", line: %d, " \
			"invalid block size: %d", \
			__LINE__, element_size);
		return EINVAL;
	}

	mblock->element_size = MEM_ALIGN(element_size);
	if (alloc_elements_once > 0)
	{
		mblock->alloc_elements_once = alloc_elements_once;
	}
	else
	{
		/* default: allocate roughly 1 MB worth of nodes per batch */
		int block_size;
		block_size = MEM_ALIGN(sizeof(struct fast_mblock_node) \
			+ mblock->element_size);
		mblock->alloc_elements_once = (1024 * 1024) / block_size;
	}

	if (need_lock && (result=init_pthread_lock(&(mblock->lock))) != 0)
	{
		logError("file: "__FILE__", line: %d, " \
			"init_pthread_lock fail, errno: %d, error info: %s", \
			__LINE__, result, STRERROR(result));
		return result;
	}

    mblock->alloc_init_func = init_func;
	mblock->malloc_chain_head = NULL;
	mblock->free_chain_head = NULL;
	mblock->delay_free_chain.head = NULL;
	mblock->delay_free_chain.tail = NULL;
    mblock->total_count = 0;
    mblock->need_lock = need_lock;

	return 0;
}
Пример #22
0
/* Begin a new stack frame: save a link to the previous frame at the current
   top of stack, make that location the new frame base, and advance the top
   past the saved link. */
void HeapPushStackFrame()
{
    void **FrameLink = (void **)HeapStackTop;
#ifdef DEBUG_HEAP
    printf("Adding stack frame at 0x%lx\n", (unsigned long)HeapStackTop);
#endif
    *FrameLink = StackFrame;
    StackFrame = (void *)FrameLink;
    HeapStackTop = (void *)((char *)FrameLink + MEM_ALIGN(sizeof(ALIGN_TYPE)));
}
/* Generated predicate (from i386 predicates.md, see #line below): true when
   OP is known to be 32-bit aligned — registers/immediates trivially, MEMs by
   recorded alignment or by decomposing the address and checking each
   component. Body kept byte-identical to the generator's output. */
static inline int
aligned_operand_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
#line 825 "../.././gcc/config/i386/predicates.md"
{
  struct ix86_address parts;
  int ok;

  /* Registers and immediate operands are always "aligned".  */
  if (GET_CODE (op) != MEM)
    return 1;

  /* All patterns using aligned_operand on memory operands ends up
     in promoting memory operand to 64bit and thus causing memory mismatch.  */
  if (TARGET_MEMORY_MISMATCH_STALL && !optimize_size)
    return 0;

  /* Don't even try to do any aligned optimizations with volatiles.  */
  if (MEM_VOLATILE_P (op))
    return 0;

  if (MEM_ALIGN (op) >= 32)
    return 1;

  op = XEXP (op, 0);

  /* Pushes and pops are only valid on the stack pointer.  */
  if (GET_CODE (op) == PRE_DEC
      || GET_CODE (op) == POST_INC)
    return 1;

  /* Decode the address.  */
  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);

  /* Look for some component that isn't known to be aligned.  */
  if (parts.index)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.index)) * parts.scale < 32)
	return 0;
    }
  if (parts.base)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.base)) < 32)
	return 0;
    }
  if (parts.disp)
    {
      if (!CONST_INT_P (parts.disp)
	  || (INTVAL (parts.disp) & 3) != 0)
	return 0;
    }

  /* Didn't find one -- this must be an aligned address.  */
  return 1;
}
Пример #24
0
/* Return how many bytes can still be allocated from the current data-stack
 * block, after subtracting the worst-case alignment overhead (plus sentry
 * and size-prefix bookkeeping in DEBUG builds). If less than the overhead
 * remains, the raw remainder is returned unchanged. */
size_t t_get_bytes_available(void)
{
#ifndef DEBUG
	const unsigned int overhead = MEM_ALIGN_SIZE-1;
#else
	const unsigned int overhead = MEM_ALIGN_SIZE-1 + SENTRY_COUNT +
		MEM_ALIGN(sizeof(size_t));
#endif
	size_t left = current_block->left;

	if (left < overhead)
		return left;
	return left - overhead;
}
Пример #25
0
/* Free some dynamically allocated memory. Accepts NULL (no-op). The block
 * either shrinks the heap (if it sits at HeapBottom), goes into a
 * size-indexed free-list bucket, or onto the big-block free list. */
void HeapFreeMem(void *Mem)
{
#ifdef USE_MALLOC_HEAP
    free(Mem);
#else
    struct AllocNode *MemNode;
    int Bucket;

#ifdef DEBUG_HEAP
    printf("HeapFreeMem(0x%lx)\n", (unsigned long)Mem);
#endif
    /* BUG FIX: the NULL check must come first — the old code computed
       MemNode and read MemNode->Size in the asserts before checking Mem,
       which is undefined behaviour for HeapFreeMem(NULL). */
    if (Mem == NULL)
        return;

    /* the size header lives just below the payload the caller sees */
    MemNode = (struct AllocNode *)((char *)Mem - MEM_ALIGN(sizeof(MemNode->Size)));
    Bucket = MemNode->Size >> 2;

    assert((unsigned long)Mem >= (unsigned long)&HeapMemory[0] && (unsigned char *)Mem - &HeapMemory[0] < HEAP_SIZE);
    assert(MemNode->Size < HEAP_SIZE && MemNode->Size > 0);

    if ((void *)MemNode == HeapBottom)
    { 
        /* pop it off the bottom of the heap, reducing the heap size */
#ifdef DEBUG_HEAP
        printf("freeing %d from bottom of heap\n", MemNode->Size);
#endif
        HeapBottom = (void *)((char *)HeapBottom + MemNode->Size);
#ifdef DEBUG_HEAP
        ShowBigList();
#endif
    }
    else if (Bucket < FREELIST_BUCKETS)
    { 
        /* we can fit it in a bucket */
#ifdef DEBUG_HEAP
        printf("freeing %d to bucket\n", MemNode->Size);
#endif
        assert(FreeListBucket[Bucket] == NULL || ((unsigned long)FreeListBucket[Bucket] >= (unsigned long)&HeapMemory[0] && (unsigned char *)FreeListBucket[Bucket] - &HeapMemory[0] < HEAP_SIZE));
        *(struct AllocNode **)MemNode = FreeListBucket[Bucket];
        FreeListBucket[Bucket] = (struct AllocNode *)MemNode;
    }
    else
    { 
        /* put it in the big memory freelist */
#ifdef DEBUG_HEAP
        printf("freeing %lx:%d to freelist\n", (unsigned long)Mem, MemNode->Size);
#endif
        assert(FreeListBig == NULL || ((unsigned long)FreeListBig >= (unsigned long)&HeapMemory[0] && (unsigned char *)FreeListBig - &HeapMemory[0] < HEAP_SIZE));
        MemNode->NextFree = FreeListBig;
        FreeListBig = MemNode;
#ifdef DEBUG_HEAP
        ShowBigList();
#endif
    }
#endif
}
Пример #26
0
/* Fragment IP packet `p` to fit netif->mtu and send each fragment through
 * netif->output(). A single reusable REF pbuf (backed by the file-scope
 * `buf` buffer — not visible here, TODO confirm) carries each fragment:
 * the IP header is copied in, the fragment-offset/MF fields are updated,
 * payload is copied from `p`, and the checksum recomputed per fragment. */
s8_t uip_ipfrag(struct uip_pbuf *p,struct uip_netif *netif,struct uip_ip_addr *ipaddr)
{
	struct uip_pbuf *rambuf;
	struct uip_pbuf *header;
	struct uip_ip_hdr *iphdr;
	u16_t left,cop,ofo,omf,last,tmp;
	u16_t mtu = netif->mtu;
	u16_t poff = UIP_IP_HLEN;
	u16_t nfb = 0;

	rambuf = uip_pbuf_alloc(UIP_PBUF_LINK,0,UIP_PBUF_REF);
	rambuf->tot_len = rambuf->len = mtu;
	rambuf->payload = MEM_ALIGN(buf);

	iphdr = rambuf->payload;
	UIP_MEMCPY(iphdr,p->payload,UIP_IP_HLEN);

	/* preserve any offset/MF already present (re-fragmentation) */
	tmp = ntohs(UIP_IPH_OFFSET(iphdr));
	ofo = tmp&UIP_IP_OFFMASK;
	omf = tmp&UIP_IP_MF;

	left = p->tot_len - UIP_IP_HLEN;
	while(left) {
		last = (left<=(mtu-UIP_IP_HLEN));

		/* nfb is 0 on the first pass, so ofo starts unchanged */
		ofo += nfb;
		tmp = omf|(UIP_IP_OFFMASK&ofo);

		if(!last) tmp |= UIP_IP_MF;
		UIP_IPH_OFFSET_SET(iphdr,htons(tmp));

		/* whole 8-byte units per fragment; only the last may be short */
		nfb = (mtu - UIP_IP_HLEN)/8;
		cop = last?left:nfb*8;

		p = uip_copyfrom_pbuf(p,&poff,(u8_t*)iphdr+UIP_IP_HLEN,cop);

		UIP_IPH_LEN_SET(iphdr,htons(cop+UIP_IP_HLEN));
		UIP_IPH_CHKSUM_SET(iphdr,0);
		UIP_IPH_CHKSUM_SET(iphdr,uip_ipchksum(iphdr,UIP_IP_HLEN));

		if(last) uip_pbuf_realloc(rambuf,left+UIP_IP_HLEN);

		/* chain a link-layer header pbuf in front and transmit */
		header = uip_pbuf_alloc(UIP_PBUF_LINK,0,UIP_PBUF_RAM);
		uip_pbuf_chain(header,rambuf);
		netif->output(netif,header,ipaddr);
		uip_pbuf_free(header);

		left -= cop;
	}
	uip_pbuf_free(rambuf);
	return UIP_ERR_OK;
}
Пример #27
0
/* Release Size bytes (rounded up to the allocation alignment) from the top
 * of the stack. Returns FALSE without changing anything if that would pop
 * below the start of heap memory; otherwise lowers HeapStackTop and, when
 * Addr is given, asserts the new top matches it. */
int HeapPopStack(void *Addr, int Size)
{
    int Aligned = MEM_ALIGN(Size);

    /* refuse to pop more than is currently on the stack */
    if (((char *)HeapStackTop - (char *)&HeapMemory[0]) < Aligned)
        return FALSE;

#ifdef DEBUG_HEAP
    printf("HeapPopStack(0x%lx, %ld) back to 0x%lx\n", (unsigned long)Addr, (unsigned long)MEM_ALIGN(Size), (unsigned long)HeapStackTop - Aligned);
#endif
    HeapStackTop = (void *)((char *)HeapStackTop - Aligned);
    assert(Addr == NULL || HeapStackTop == Addr);

    return TRUE;
}
Пример #28
0
/* Assert-based sanity check of an allocated block given its payload pointer
 * `p` (NULL is a no-op): header alignment, valid pool pointer, header and
 * extent inside the pool's store, and the allocated-block magic. */
void mem_validate_block(void *p){
    mheader_t *h;
    if (! p)
        return;

    /* Make the header pointer. */
    h = H_OF( p );
    assert((size_t)h == MEM_ALIGN(h));
    mem_pool_t *m = h->pool;
    assert(uos_valid_memory_address(m));
    assert( (size_t)h >= (size_t)m->store );
    assert( ((size_t)h + h->size) <= ((size_t)m->store)+m->size );
    assert(h->magic == MEMORY_BLOCK_MAGIC);
           //, "mem block $%x have bad magic\n", p);
}
Пример #29
0
/* Sanity-check a free-list hole header `h` belonging to pool `m`: bounds
 * within the pool's store and alignment are asserted; a wrong magic or a
 * mismatched pool pointer is reported and halts the system. */
void mem_validate_hole(mem_pool_t *m, mheader_t *h){
    assert( (size_t)h >= (size_t)m->store );
    assert( ((size_t)h + h->size) <= (((size_t)m->store)+m->size) );
    assert((size_t)h == MEM_ALIGN(h));

    if (h->magic != MEMORY_HOLE_MAGIC) {
        debug_printf ("mem: bad hole magic at 0x%x\n", h);
        debug_printf ("     size=%d, pool=%p\n", h->size, h->pool);
        uos_halt(1);
    }
    if (h->pool != m) {
        debug_printf ("mem: incorect pool pointer=%p, must be %p\n",
            h->pool, m);
        uos_halt(1);
    }
}
Пример #30
0
/* Allocate a fresh pool block large enough for `size` payload bytes plus
 * the mem_block_t header, rounded up to BLOCK_SIZE. Returns NULL when the
 * page allocator fails. */
static mem_block_t* alloc_block(u32 size)
{
    u32 minsize = size + sizeof(mem_block_t);
    minsize = MEM_ALIGN(minsize, BLOCK_SIZE);

    /* BUG FIX: round the page count up — plain division truncated to zero
       pages for any request below one page unit. (4094 also looked like a
       typo for 4096, i.e. 4 MB units — TODO confirm intended granularity.) */
    u32 pagenum = (minsize + (4096 * 1024) - 1) / (4096 * 1024);
    mem_block_t* block = (mem_block_t*)alloc_pages(pagenum);
    if (block == NULL)
        return NULL;

    block->size = minsize;
    /* BUG FIX: byte-based pointer arithmetic; `block + sizeof(mem_block_t)`
       scaled the offset by sizeof(mem_block_t) and pointed far past the
       header (likewise for `block + minsize`). */
    block->current = (u8*)block + sizeof(mem_block_t);
    block->end = (u8*)block + minsize;
    block->next = NULL;

    return block;
}