Example 1
/* Visualize the chunk as being partitioned into blocks of 256 bytes from the
   highest address of the chunk, downwards.  The beginning of each block tells
   us the size of the previous block, up to the actual size of the requested
   memory.  Our magic byte is right at the end of the requested size, so we
   must reach it with this iteration, otherwise we have witnessed a memory
   corruption.  */
static size_t
malloc_check_get_size (mchunkptr p)
{
  size_t size;
  unsigned char c;
  unsigned char magic = MAGICBYTE (p);

  assert (using_malloc_checking == 1);

  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
       (c = ((unsigned char *) p)[size]) != magic;
       size -= c)
    {
      if (c <= 0 || size < (c + 2 * SIZE_SZ))
        {
          malloc_printerr (check_action, "malloc_check_get_size: memory corruption",
                           chunk2mem (p));
          return 0;
        }
    }

  /* chunk2mem size.  */
  return size - 2 * SIZE_SZ;
}
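
The block-walk scheme described in the comment above can also be shown in isolation. The following sketch is not the glibc implementation: it uses a fixed guard byte in a plain buffer (glibc derives MAGICBYTE from the chunk address) and ignores the theoretical case where a stored step value happens to equal the guard byte.

#include <assert.h>
#include <stddef.h>
#include <string.h>

#define GUARD 0xA5u   /* stand-in for MAGICBYTE(p); fixed here for simplicity */

/* Place the guard byte right after the 'req' requested bytes, then write
   back-pointers (each at most 0xFF) from the top of the buffer down to it. */
static void mark_request(unsigned char *buf, size_t total, size_t req)
{
  size_t off = total - 1;

  buf[req] = GUARD;
  while (off > req) {
    size_t step = (off - req < 0xFF) ? (off - req) : 0xFF;
    buf[off] = (unsigned char) step;   /* distance to the next mark below */
    off -= step;
  }
}

/* Recover 'req' by walking down from the top, mirroring the loop in
   malloc_check_get_size(); returns (size_t)-1 on corruption. */
static size_t recover_request(const unsigned char *buf, size_t total)
{
  size_t off = total - 1;
  unsigned char c;

  while ((c = buf[off]) != GUARD) {
    if (c == 0 || c > off)
      return (size_t) -1;              /* corrupted marks */
    off -= c;
  }
  return off;                          /* index of the guard == requested size */
}

int main(void)
{
  unsigned char buf[1024];

  memset(buf, 0, sizeof buf);
  mark_request(buf, sizeof buf, 700);
  assert(recover_request(buf, sizeof buf) == 700);
  return 0;
}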
Example 2
static Void_t*
pos_int_malloc(char *name, mstate av, size_t bytes)
{
	INTERNAL_SIZE_T nb;
	unsigned int idx;
	mbinptr bin;

	mchunkptr victim;
	INTERNAL_SIZE_T size;
	int victim_index;

	mchunkptr remainder;
	unsigned long remainder_size;

	unsigned int block;
	unsigned int bit;
	unsigned int map;

	mchunkptr fwd;
	mchunkptr bck;

	//const char *errstr = NULL;

	size_t pagemask  = PAGESIZE - 1;
	
	// Round the request up to a multiple of 16 bytes
	checked_request2size(bytes, nb);
	#if MALLOC_DEBUG == 1 
	printf("before fastbin\n") ; 
	#endif
	// 1. fast bin (<=144)
/*	if ((unsigned long)(nb) <= (unsigned long)(get_max_fast())) {
		idx = fastbin_index(nb);
	//	printf("idx = %d\n" , idx) ; 	
		mfastbinptr* fb = &fastbin(av, idx);
		victim = *fb;

		if (victim != 0) {

		if (fastbin_index (chunksize (victim)) != idx) {
			errstr = "malloc(): memory corruption (fast)";
errout:
			malloc_printerr (check_action, errstr, chunk2mem (victim));
		}

#if CONSISTENCY == 1
			POS_WRITE_VAUE(name, (unsigned long *)fb, (unsigned long)victim->fd);
#else
			*fb = victim->fd;
#endif

			void *p = chunk2mem(victim);
			return p;
		}
	}
*/
	// 2. small bin (<=1008)
	if (in_smallbin_range(nb)) {
	#if MALLOC_DEBUG == 1
	printf("inside smallbin if\n") ; 	
	#endif 	

		idx = smallbin_index(nb);
		bin = bin_at(av,idx);

		victim = last(bin);
		if ( victim != bin && victim != NULL ) {

			bck = victim->bk;

/*			if (bck->fd != victim) {
				errstr = "malloc(): smallbin double linked list corrupted";
				goto errout;
			}*/

#if CONSISTENCY == 1
			set_inuse_bit_at_offset_log(name, victim, nb);
			POS_WRITE_VAUE(name, (unsigned long *)&bin->bk, (unsigned long)bck);
			POS_WRITE_VAUE(name, (unsigned long *)&bin->fd, (unsigned long)bin);
#else
			set_inuse_bit_at_offset(victim, nb);
			bin->bk = bck;
			bck->fd = bin;
#endif

			void *p = chunk2mem(victim);
			return p;
		}
	}
	else {
		idx = largebin_index(nb);

		if (have_fastchunks(av)) {
			pos_malloc_consolidate(name, av);
		}
	}

	#if MALLOC_DEBUG==1
	printf("before unsorted bin\n") ; 	
	#endif 
	for(;;) {
		int iters = 0;
		// 3. unsorted bin
		while ((victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
			bck = victim->bk;
			
			/*if (victim->size <= 2 * SIZE_SZ || victim->size > av->system_mem)
				malloc_printerr (check_action, "malloc(): memory corruption", chunk2mem (victim));*/
			size = chunksize(victim);

			if (in_smallbin_range(nb) &&
			   bck == unsorted_chunks(av) &&
			   victim == av->last_remainder &&
			   (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {

				remainder_size = size - nb;
				remainder = chunk_at_offset(victim, nb);
#if CONSISTENCY == 1
				POS_WRITE_VAUE(name, (unsigned long *)&unsorted_chunks(av)->bk, (unsigned long)remainder);
				POS_WRITE_VAUE(name, (unsigned long *)&unsorted_chunks(av)->fd, (unsigned long)remainder);
				POS_WRITE_VAUE(name, (unsigned long *)&av->last_remainder, (unsigned long)remainder);
#else
				unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
				av->last_remainder = remainder;
#endif
				remainder->bk = remainder->fd = unsorted_chunks(av);
				if (!in_smallbin_range(remainder_size)) {
					remainder->fd_nextsize = NULL;
					remainder->bk_nextsize = NULL;
				}

// Remainder doesn't need logging...
				if (chunk_is_last(victim))
					set_head(remainder, remainder_size | LAST_CHUNK | PREV_INUSE);
				else
					set_head(remainder, remainder_size | PREV_INUSE);

				// set PREV_INUSE flag..
#if CONSISTENCY == 1
				if (chunk_is_first(victim)) {
					set_head_log(name, victim, nb | FIRST_CHUNK | PREV_INUSE);
				} else {
					set_head_log(name, victim, nb | PREV_INUSE);
				}
				
				set_foot_log(name, remainder, remainder_size);
#else
				if (chunk_is_first(victim))
					set_head(victim, nb | FIRST_CHUNK | PREV_INUSE);
				else
					set_head(victim, nb | PREV_INUSE);

				set_foot(remainder, remainder_size);
#endif

				void *p = chunk2mem(victim);
				return p;
			}

#if CONSISTENCY == 1
			POS_WRITE_VAUE(name, (unsigned long *)&unsorted_chunks(av)->bk, (unsigned long)bck);
			POS_WRITE_VAUE(name, (unsigned long *)&bck->fd, (unsigned long)unsorted_chunks(av));
#else
			unsorted_chunks(av)->bk = bck;
			bck->fd = unsorted_chunks(av);
#endif

			if (size == nb) {
#if CONSISTENCY == 1
				set_inuse_bit_at_offset_log(name, victim, size);
#else
				set_inuse_bit_at_offset(victim, size);
#endif

				void *p = chunk2mem(victim);
				return p;
			}
			if (in_smallbin_range(size)) {
				victim_index = smallbin_index(size);
				bck = bin_at(av, victim_index);
				fwd = bck->fd;
			}
			else {
				victim_index = largebin_index(size);
				bck = bin_at(av, victim_index);
				fwd = bck->fd;

				if (fwd != bck) {
					size |= PREV_INUSE; //In order not to use chunksize()
					if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
						fwd = bck;
						bck = bck->bk;

// The current victim came from the unsorted bin, where fd_nextsize is not used, so we don't log it (and we don't log fd_nextsize below either).
						victim->fd_nextsize = fwd->fd;
						victim->bk_nextsize = fwd->fd->bk_nextsize;
#if CONSISTENCY == 1
						POS_WRITE_VAUE(name, (unsigned long *)&fwd->fd->bk_nextsize, (unsigned long)victim);
						POS_WRITE_VAUE(name, (unsigned long *)&victim->bk_nextsize->fd_nextsize, (unsigned long)victim);
#else
						fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
#endif
					}
					else {
						while ((unsigned long) size < fwd->size) {
							fwd = fwd->fd_nextsize;
						}

						if ((unsigned long) size == (unsigned long) fwd->size)
							fwd = fwd->fd;
						else {
							victim->fd_nextsize = fwd;
							victim->bk_nextsize = fwd->bk_nextsize;
#if CONSISTENCY == 1
							POS_WRITE_VAUE(name, (unsigned long *)&fwd->bk_nextsize, (unsigned long)victim);
							POS_WRITE_VAUE(name, (unsigned long *)&victim->bk_nextsize->fd_nextsize, (unsigned long)victim);
#else
							fwd->bk_nextsize = victim;
							victim->bk_nextsize->fd_nextsize = victim;
#endif
						}
						bck = fwd->bk;
					}
				} 
				else
					victim->fd_nextsize = victim->bk_nextsize = victim;
			}

#if CONSISTENCY == 1
			mark_bin_log(name, av, victim_index);
			POS_WRITE_VAUE(name, (unsigned long *)&victim->bk, (unsigned long)bck);
			POS_WRITE_VAUE(name, (unsigned long *)&victim->fd, (unsigned long)fwd);
			POS_WRITE_VAUE(name, (unsigned long *)&fwd->bk, (unsigned long)victim);
			POS_WRITE_VAUE(name, (unsigned long *)&bck->fd, (unsigned long)victim);
#else
			mark_bin(av, victim_index);
			victim->bk = bck;
			victim->fd = fwd;
			fwd->bk = victim;
			bck->fd = victim;
#endif
#define MAX_ITERS	10000
			if (++iters >= MAX_ITERS)
				break;
		}
		#if MALLOC_DEBUG == 1
		printf("before large bin\n") ;
		#endif 
		// 4. large bin (>= 1024)
		if (!in_smallbin_range(nb)) {
			bin = bin_at(av, idx);
#if MALLOC_DEBUG
			printf(" bin = [%p]\n " ,bin ) ; 	
#endif
			if ((victim = first(bin)) != bin &&
			   (unsigned long)(victim->size) >= (unsigned long)(nb)) {

				victim = victim->bk_nextsize;
				while (((unsigned long)(size = chunksize(victim)) < (unsigned long)(nb)))
					victim = victim->bk_nextsize;

				//if (victim != last(bin) && victim->size == victim->fd->size)
				if (victim != last(bin) && chunksize(victim) == chunksize(victim->fd))
					victim = victim->fd;

				remainder_size = size - nb;
#if CONSISTENCY == 1
				unlink_log(name, victim, bck, fwd);
#else
				unlink(victim, bck, fwd);
#endif

				if (remainder_size < MINSIZE)  {
#if CONSISTENCY == 1
					set_inuse_bit_at_offset_log(name, victim, size);
#else
					set_inuse_bit_at_offset(victim, size);
#endif
				}
				else {
					remainder = chunk_at_offset(victim, nb);

#if CONSISTENCY == 1
					insert_to_unsorted_log(name, av, remainder, bck, fwd, remainder_size);
#else
					insert_to_unsorted(av, remainder, bck, fwd, remainder_size);
#endif

// Remainder doesn't need logging...
					if (chunk_is_last(victim))
						set_head(remainder, remainder_size | LAST_CHUNK | PREV_INUSE);
					else
						set_head(remainder, remainder_size | PREV_INUSE);

					// set PREV_INUSE flag..
#if CONSISTENCY == 1
					if (chunk_is_first(victim)) {
						set_head_log(name, victim, nb | FIRST_CHUNK | PREV_INUSE);
					} else {
						set_head_log(name, victim, nb | PREV_INUSE);
					}
					
					set_foot_log(name, remainder, remainder_size);
#else
					if (chunk_is_first(victim))
						set_head(victim, nb | FIRST_CHUNK | PREV_INUSE);
					else
						set_head(victim, nb | PREV_INUSE);

					set_foot(remainder, remainder_size);
#endif
				}

				void *p = chunk2mem(victim);
				return p;
			}
			
		}
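		// 5. binmap scan: find the next non-empty bin at or above idx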
		++idx;
		bin = bin_at(av,idx);
		block = idx2block(idx);
		map = av->binmap[block];
	 	bit = idx2bit(idx);
#if MALLOC_DEBUG == 1
		printf("DDD\n") ;
#endif
		for (;;) {
			if (bit > map || bit == 0) {
				do {
					if (++block >= BINMAPSIZE){
						goto new_alloc;
					}
				} while ( (map = av->binmap[block]) == 0);

				bin = bin_at(av, (block << BINMAPSHIFT));
				bit = 1;
			}
			while ((bit & map) == 0) {
				bin = next_bin(bin);
				bit <<= 1;
			}
#if MALLOC_DEBUG == 1
			printf("before victim\n") ; 
#endif 
			victim = last(bin);

			if (victim == bin) {
#if MALLOC_DEBUG == 1
				printf("victim == bin\n") ; 
#endif 
#if CONSISTENCY == 1
				POS_WRITE_VAUE(name, (unsigned long *)&av->binmap[block], (unsigned long)(map &~bit));
#else
				av->binmap[block] = map &= ~bit;
#endif
				bin = next_bin(bin);
				bit <<= 1;
			}
			else {
#if MALLOC_DEBUG == 1
				printf("victim != bin\n") ; 
#endif
				size = chunksize(victim);

				remainder_size = size - nb;

#if CONSISTENCY == 1
				unlink_log(name, victim, bck, fwd);
#else
				unlink(victim, bck, fwd);
#endif
#if MALLOC_DEBUG == 1
				printf("unlink\n") ; 
#endif 

				if (remainder_size < MINSIZE) {
#if CONSISTENCY == 1
					set_inuse_bit_at_offset_log(name, victim, size);
#else
#if MALLOC_DEBUG == 1
					printf("D\n") ;
					printf("victim : %p\n", victim) ; 	
					printf("size: %d\n" ,size) ; 
#endif
					set_inuse_bit_at_offset(victim, size);
#endif
				}
				else {
#if MALLOC_DEBUG == 1
					printf("remainder\n") ; 	
#endif
					remainder = chunk_at_offset(victim, nb);

#if CONSISTENCY == 1
					insert_to_unsorted_log(name, av, remainder, bck, fwd, remainder_size);
#else
#if MALLOC_DEBUG == 1
					printf("av=%p\n",av) ; 	
					printf("%p %p %p\n", remainder , bck , fwd);
					printf("%d\n" , remainder_size) ;
#endif 
					insert_to_unsorted(av, remainder, bck, fwd, remainder_size);
#endif

#if MALLOC_DEBUG == 1
					printf("%p %p %p\n", remainder , bck , fwd);

					printf("s\n") ; 
#endif
					if (in_smallbin_range(nb)) {
#if CONSISTENCY == 1
						POS_WRITE_VAUE(name, (unsigned long *)&av->last_remainder, (unsigned long)remainder);
#else
						av->last_remainder = remainder;
#endif
					}
#if MALLOC_DEBUG == 1
				printf("s\n") ; 
#endif 
					if (chunk_is_last(victim))
						set_head(remainder, remainder_size | LAST_CHUNK | PREV_INUSE);
					else
						set_head(remainder, remainder_size | PREV_INUSE);

					// set PREV_INUSE flag..
#if CONSISTENCY == 1
					if (chunk_is_first(victim)) {
						set_head_log(name, victim, nb | FIRST_CHUNK | PREV_INUSE);
					} else {
						set_head_log(name, victim, nb | PREV_INUSE);
					}
					
#if MALLOC_DEBUG == 1
				printf("s\n") ; 
#endif 
					set_foot_log(name, remainder, remainder_size);
#else
					if (chunk_is_first(victim))
						set_head(victim, nb | FIRST_CHUNK | PREV_INUSE);
					else
						set_head(victim, nb | PREV_INUSE);

					set_foot(remainder, remainder_size);
#endif
				}
#if MALLOC_DEBUG == 1 
				printf("s\n") ; 
#endif
				void *p = chunk2mem(victim);
				return p;
			}
		}
new_alloc:
		#if MALLOC_DEBUG == 1
		printf("before newallocation\n");
		#endif 
		// 6. new allocation
		size = (nb + MINSIZE +2*SIZE_SZ + pagemask) & ~pagemask;
		size += DEFAULT_PAD;
		//char* mm = (char*)(SEG_ALLOC(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
		char *mm = (char *)pos_seg_alloc(name, size);
		
		if (mm != (char *)0)
			memset(mm, 0, size);
		#if MALLOC_DEBUG == 1
		printf(" mm = %p\n" , mm) ; 	
		#endif 
	
#if CONSISTENCY == 1
		pos_log_insert_malloc_free(name, (unsigned long)mm, size);
#endif
		//if (mm != MAP_FAILED) {
		if (mm != (char *)0) {
#if CONSISTENCY == 1
			POS_WRITE_VAUE(name, (unsigned long *)&av->system_mem, (unsigned long)(av->system_mem+size));
#else
			av->system_mem += size;
#endif

		
//			printf("D!\n") ;
			mchunkptr p;

			p = (mchunkptr)mm;

			remainder_size = size - nb - 2*SIZE_SZ;
			remainder = chunk_at_offset(p, nb);

#if CONSISTENCY == 1
			insert_to_unsorted_log(name, av, remainder, bck, fwd, remainder_size);
#else
			insert_to_unsorted(av, remainder, bck, fwd, remainder_size);
#endif

			/*if (in_smallbin_range(nb))
				av->last_remainder = remainder;*/

			// set PREV_INUSE flag..
//#if CONSISTENCY == 1
//#elseif
			set_head(p, nb | FIRST_CHUNK | PREV_INUSE);
			set_head(remainder, remainder_size | LAST_CHUNK | PREV_INUSE);

			set_foot(remainder, remainder_size);
			clear_inuse_bit_at_offset(remainder, remainder_size);
//#endif

			//return p;
			return chunk2mem(p);
		} 
		else
			return 0;
  
	}
}
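
The binmap scan in step 5 above (the block/bit loop) can be illustrated on its own. In the sketch below the constants and helpers are local stand-ins with assumed values (32 bins per map word, 4 words), so it shows only the search; the real loop also re-checks the bin and clears stale binmap bits, which is omitted here.

#include <stdio.h>

#define BINMAPSHIFT  5
#define BITSPERMAP   (1U << BINMAPSHIFT)   /* 32 bins per map word (assumed) */
#define BINMAPSIZE   4                     /* 128 bins total (assumed)       */

#define idx2block(i) ((i) >> BINMAPSHIFT)
#define idx2bit(i)   (1U << ((i) & (BITSPERMAP - 1)))

/* Return the index of the first bin >= start whose binmap bit is set,
   or -1 if no such bin exists. */
static int next_marked_bin(const unsigned int binmap[BINMAPSIZE], unsigned int start)
{
  unsigned int block = idx2block(start);
  unsigned int map   = binmap[block];
  unsigned int bit   = idx2bit(start);
  unsigned int idx   = start;

  /* No usable bit at or above 'bit' in this word: advance to the next
     non-zero word, as the do/while over av->binmap[block] does above. */
  if (bit > map || bit == 0) {
    do {
      if (++block >= BINMAPSIZE)
        return -1;                     /* no larger bin available */
    } while ((map = binmap[block]) == 0);
    idx = block << BINMAPSHIFT;
    bit = 1;
  }
  /* Walk forward inside the word until we land on a set bit. */
  while ((bit & map) == 0) {
    bit <<= 1;
    ++idx;
  }
  return (int) idx;
}

int main(void)
{
  /* Word 2 has bits 4 and 11 set, i.e. bins 68 and 75 are marked. */
  unsigned int binmap[BINMAPSIZE] = { 0, 0, 0x00000810, 0 };

  printf("%d\n", next_marked_bin(binmap, 40));   /* prints 68 */
  printf("%d\n", next_marked_bin(binmap, 70));   /* prints 75 */
  return 0;
}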
Example 3
////////////////////////////////////////
// WARNING: pos_realloc has errors! FIX UP!
////////////////////////////////////////
Void_t*
pos_int_realloc(char *name, mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
	     INTERNAL_SIZE_T nb)
{
	mchunkptr newp;				/* chunk to return */
	INTERNAL_SIZE_T newsize;		/* its size */
	Void_t* newmem;				/* corresponding user mem */

	mchunkptr next;				/* next contiguous chunk after oldp */

	mchunkptr remainder;			/* extra space at end of newp */
	unsigned long remainder_size;	/* its size */

	mchunkptr bck;				/* misc temp for linking */
	mchunkptr fwd;				/* misc temp for linking */

	unsigned long copysize;		/* bytes to copy */
	unsigned int ncopies;			/* INTERNAL_SIZE_T words to copy */
	INTERNAL_SIZE_T* s;			/* copy source */
	INTERNAL_SIZE_T* d;			/* copy destination */

	const char *errstr = NULL;


	/* oldmem size */
	/*if (oldp->size <= 2 * SIZE_SZ || oldsize >= av->system_mem) {
		errstr = "realloc(): invalid old size";
errout:
		malloc_printerr (check_action, errstr, chunk2mem(oldp));
		return NULL;
	}*/

	next = chunk_at_offset(oldp, oldsize);
	INTERNAL_SIZE_T nextsize = chunksize(next);
	/*if (next->size <= 2 * SIZE_SZ || nextsize >= av->system_mem) {
		errstr = "realloc(): invalid next size";
			goto errout;
	}*/

	// Requested size does not exceed the old size
	if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
		/* already big enough; split below */
		newp = oldp;
		newsize = oldsize;
	}

	// Requested size is larger than the old size
	else {

		/* Try to expand forward into next chunk;  split off remainder below */
		if (!inuse(next) &&
			(unsigned long)(newsize = oldsize + nextsize) >= (unsigned long)(nb)) {
			newp = oldp;
			unlink(next, bck, fwd);
		}

		/* allocate, copy, free */
		else {
			newmem = pos_int_malloc(name, av, nb - MALLOC_ALIGN_MASK);
			if (newmem == 0)
				return 0; /* propagate failure */

			newp = mem2chunk(newmem);
			newsize = chunksize(newp);

			/*
			  Avoid copy if newp is next chunk after oldp.
			*/
			if (newp == next) {
				newsize += oldsize;
				newp = oldp;
			}
			else {
				/*
				  Unroll copy of <= 36 bytes (72 if 8byte sizes)
				  We know that contents have an odd number of
				  INTERNAL_SIZE_T-sized words; minimally 3.
				*/

				copysize = oldsize - SIZE_SZ;
				s = (INTERNAL_SIZE_T*)(chunk2mem(oldp));
				d = (INTERNAL_SIZE_T*)(newmem);
				ncopies = copysize / sizeof(INTERNAL_SIZE_T);

				if (ncopies > 9)
					memcpy(d, s, copysize);
				else {
					*(d+0) = *(s+0);
					*(d+1) = *(s+1);
					*(d+2) = *(s+2);
					if (ncopies > 4) {
						*(d+3) = *(s+3);
						*(d+4) = *(s+4);
						if (ncopies > 6) {
							*(d+5) = *(s+5);
							*(d+6) = *(s+6);
							if (ncopies > 8) {
								*(d+7) = *(s+7);
								*(d+8) = *(s+8);
							}
						}
					}
				}

				pos_int_free(name, av, oldp, 1);

				return chunk2mem(newp);
			}
		}
	}

	/* If possible, free extra space in old or extended chunk */

	remainder_size = newsize - nb;

	if (remainder_size < MINSIZE) { /* not enough extra to split off */
		set_head_size(newp, newsize);
		set_inuse_bit_at_offset(newp, newsize);
	}
	else { /* split remainder */
		remainder = chunk_at_offset(newp, nb);

		if (chunk_is_last(newp))
			set_head(remainder, remainder_size | LAST_CHUNK | PREV_INUSE);
		else
			set_head(remainder, remainder_size | PREV_INUSE);

		// set PREV_INUSE flag..
		if (chunk_is_first(newp))
			set_head(newp, nb | FIRST_CHUNK | PREV_INUSE);
		else
			set_head(newp, nb | PREV_INUSE);
		
		//set_head_size(newp, nb);
		//set_head(remainder, remainder_size | PREV_INUSE |(av != &main_arena ? NON_MAIN_ARENA : 0));

		/* Mark remainder as inuse so free() won't complain */
		set_inuse_bit_at_offset(remainder, remainder_size);
		pos_int_free(name, av, remainder, 1);
	}

	return chunk2mem(newp);
}
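
The unrolled copy in the allocate-copy-free path above can be factored into a standalone helper to make its branch structure easier to follow. The sketch below assumes word-aligned buffers and a byte count that is a multiple of the word size, mirroring the thresholds used by pos_int_realloc; the guarantee of at least three words comes from the minimum chunk size, as the original comment notes.

#include <stddef.h>
#include <string.h>

/* Copy 'nbytes' (a multiple of sizeof(size_t)) between word-aligned buffers,
   using the same unrolling thresholds as the realloc code above. */
static void copy_words(size_t *d, const size_t *s, size_t nbytes)
{
  size_t ncopies = nbytes / sizeof(size_t);

  if (ncopies > 9) {                 /* large copy: defer to memcpy */
    memcpy(d, s, nbytes);
    return;
  }
  /* Small copy: the caller guarantees at least 3 words, so the first three
     assignments are unconditional and the rest widen in steps of two. */
  d[0] = s[0]; d[1] = s[1]; d[2] = s[2];
  if (ncopies > 4) {
    d[3] = s[3]; d[4] = s[4];
    if (ncopies > 6) {
      d[5] = s[5]; d[6] = s[6];
      if (ncopies > 8) {
        d[7] = s[7]; d[8] = s[8];
      }
    }
  }
}

int main(void)
{
  size_t src[9] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
  size_t dst[9] = { 0 };

  copy_words(dst, src, sizeof src);  /* ncopies == 9: fully unrolled path */
  return dst[8] == 9 ? 0 : 1;
}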
Example 4
/* ------------------------------ realloc ------------------------------ */
void* ulibc_realloc(void* oldmem, size_t bytes)
{
    mstate av;

    size_t  nb;              /* padded request size */

    mchunkptr        oldp;            /* chunk corresponding to oldmem */
    size_t  oldsize;         /* its size */

    mchunkptr        newp;            /* chunk to return */
    size_t  newsize;         /* its size */
    void*          newmem;          /* corresponding user mem */

    mchunkptr        next;            /* next contiguous chunk after oldp */

    mchunkptr        remainder;       /* extra space at end of newp */
    unsigned long     remainder_size;  /* its size */

    mchunkptr        bck;             /* misc temp for linking */
    mchunkptr        fwd;             /* misc temp for linking */

    unsigned long     copysize;        /* bytes to copy */
    unsigned int     ncopies;         /* size_t words to copy */
    size_t* s;               /* copy source */
    size_t* d;               /* copy destination */

    void *retval;

    /* Check for special cases.  */
    if (! oldmem)
	return ulibc_malloc(bytes);
    if (! bytes) {
	ulibc_free (oldmem);
	return NULL;
    }

    av = get_malloc_state();
    checked_request2size(bytes, nb);

    oldp    = mem2chunk(oldmem);
    oldsize = chunksize(oldp);

    check_inuse_chunk(oldp);

    if (!chunk_is_mmapped(oldp)) {

	if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
	    /* already big enough; split below */
	    newp = oldp;
	    newsize = oldsize;
	}

	else {
	    next = chunk_at_offset(oldp, oldsize);

	    /* Try to expand forward into top */
	    if (next == av->top &&
		    (unsigned long)(newsize = oldsize + chunksize(next)) >=
		    (unsigned long)(nb + MINSIZE)) {
		set_head_size(oldp, nb);
		av->top = chunk_at_offset(oldp, nb);
		set_head(av->top, (newsize - nb) | PREV_INUSE);
		retval = chunk2mem(oldp);
		goto DONE;
	    }

	    /* Try to expand forward into next chunk;  split off remainder below */
	    else if (next != av->top &&
		    !inuse(next) &&
		    (unsigned long)(newsize = oldsize + chunksize(next)) >=
		    (unsigned long)(nb)) {
		newp = oldp;
		unlink(next, bck, fwd);
	    }

	    /* allocate, copy, free */
	    else {
		newmem = malloc(nb - MALLOC_ALIGN_MASK);
		if (newmem == 0) {
		    retval = 0; /* propagate failure */
		    goto DONE;
		}

		newp = mem2chunk(newmem);
		newsize = chunksize(newp);

		/*
		   Avoid copy if newp is next chunk after oldp.
		   */
		if (newp == next) {
		    newsize += oldsize;
		    newp = oldp;
		}
		else {
		    /*
		       Unroll copy of <= 36 bytes (72 if 8byte sizes)
		       We know that contents have an odd number of
		       size_t-sized words; minimally 3.
		       */

		    copysize = oldsize - (sizeof(size_t));
		    s = (size_t*)(oldmem);
		    d = (size_t*)(newmem);
		    ncopies = copysize / sizeof(size_t);
		    assert(ncopies >= 3);

		    if (ncopies > 9)
			memcpy(d, s, copysize);

		    else {
			*(d+0) = *(s+0);
			*(d+1) = *(s+1);
			*(d+2) = *(s+2);
			if (ncopies > 4) {
			    *(d+3) = *(s+3);
			    *(d+4) = *(s+4);
			    if (ncopies > 6) {
				*(d+5) = *(s+5);
				*(d+6) = *(s+6);
				if (ncopies > 8) {
				    *(d+7) = *(s+7);
				    *(d+8) = *(s+8);
				}
			    }
			}
		    }

		    ulibc_free(oldmem);
		    check_inuse_chunk(newp);
		    retval = chunk2mem(newp);
		    goto DONE;
		}
	    }
	}

	/* If possible, free extra space in old or extended chunk */

	assert((unsigned long)(newsize) >= (unsigned long)(nb));

	remainder_size = newsize - nb;

	if (remainder_size < MINSIZE) { /* not enough extra to split off */
	    set_head_size(newp, newsize);
	    set_inuse_bit_at_offset(newp, newsize);
	}
	else { /* split remainder */
	    remainder = chunk_at_offset(newp, nb);
	    set_head_size(newp, nb);
	    set_head(remainder, remainder_size | PREV_INUSE);
	    /* Mark remainder as inuse so free() won't complain */
	    set_inuse_bit_at_offset(remainder, remainder_size);
	    ulibc_free(chunk2mem(remainder));
	}

	check_inuse_chunk(newp);
	retval = chunk2mem(newp);
	goto DONE;
    }

    /*
       Handle mmap cases
       */

    else {
	size_t offset = oldp->prev_size;
	size_t pagemask = av->pagesize - 1;
	char *cp;
	unsigned long  sum;

	/* Note the extra (sizeof(size_t)) overhead */
	newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;

	/* don't need to remap if still within same page */
	if (oldsize == newsize - offset) {
	    retval = oldmem;
	    goto DONE;
	}

	cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);

	if (cp != (char*)MORECORE_FAILURE) {

	    newp = (mchunkptr)(cp + offset);
	    set_head(newp, (newsize - offset)|IS_MMAPPED);

	    assert(aligned_OK(chunk2mem(newp)));
	    assert((newp->prev_size == offset));

	    /* update statistics */
	    sum = av->mmapped_mem += newsize - oldsize;
	    if (sum > (unsigned long)(av->max_mmapped_mem))
		av->max_mmapped_mem = sum;
	    sum += av->sbrked_mem;
	    if (sum > (unsigned long)(av->max_total_mem))
		av->max_total_mem = sum;

	    retval = chunk2mem(newp);
	    goto DONE;
	}

	/* Note the extra (sizeof(size_t)) overhead. */
	if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
	    newmem = oldmem; /* do nothing */
	else {
	    /* Must alloc, copy, free. */
	    newmem = malloc(nb - MALLOC_ALIGN_MASK);
	    if (newmem != 0) {
		memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
		ulibc_free(oldmem);
	    }
	}
	retval = newmem;
    }

 DONE:
    return retval;
}
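
The mmap branch above rounds sizes up to a page boundary with the (x + pagemask) & ~pagemask idiom, which works because the page size is a power of two. A minimal sketch, with the page size hard-coded to 4096 purely for the example (ulibc_realloc reads it from av->pagesize):

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL   /* assumed; the allocator uses av->pagesize */

/* Round n up to the next multiple of the page size. */
static size_t round_up_to_page(size_t n)
{
  size_t pagemask = PAGE_SIZE - 1;
  return (n + pagemask) & ~pagemask;
}

int main(void)
{
  assert(round_up_to_page(1) == PAGE_SIZE);
  assert(round_up_to_page(PAGE_SIZE) == PAGE_SIZE);
  assert(round_up_to_page(PAGE_SIZE + 1) == 2 * PAGE_SIZE);
  return 0;
}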
Example 5
static void*
realloc_check(void* oldmem, size_t bytes, const void *caller)
{
  INTERNAL_SIZE_T nb;
  void* newmem = 0;
  unsigned char *magic_p;

  if (bytes+1 == 0) {
    __set_errno (ENOMEM);
    return NULL;
  }
  if (oldmem == 0) return malloc_check(bytes, NULL);
  if (bytes == 0) {
    free_check (oldmem, NULL);
    return NULL;
  }
  (void)mutex_lock(&main_arena.mutex);
  const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
  (void)mutex_unlock(&main_arena.mutex);
  if(!oldp) {
    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
    return malloc_check(bytes, NULL);
  }
  const INTERNAL_SIZE_T oldsize = chunksize(oldp);

  checked_request2size(bytes+1, nb);
  (void)mutex_lock(&main_arena.mutex);

  if (chunk_is_mmapped(oldp)) {
#if HAVE_MREMAP
    mchunkptr newp = mremap_chunk(oldp, nb);
    if(newp)
      newmem = chunk2mem(newp);
    else
#endif
    {
      /* Note the extra SIZE_SZ overhead. */
      if(oldsize - SIZE_SZ >= nb)
	newmem = oldmem; /* do nothing */
      else {
	/* Must alloc, copy, free. */
	if (top_check() >= 0)
	  newmem = _int_malloc(&main_arena, bytes+1);
	if (newmem) {
	  MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
	  munmap_chunk(oldp);
	}
      }
    }
  } else {
    if (top_check() >= 0) {
      INTERNAL_SIZE_T nb;
      checked_request2size(bytes + 1, nb);
      newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
    }
  }

  /* mem2chunk_check changed the magic byte in the old chunk.
     If newmem is NULL, then the old chunk will still be used though,
     so we need to invert that change here.  */
  if (newmem == NULL) *magic_p ^= 0xFF;

  (void)mutex_unlock(&main_arena.mutex);

  return mem2mem_check(newmem, bytes);
}
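
The checking layer reserves one extra byte past the user data for its magic byte, which is why the function begins with the bytes+1 == 0 test: a request of SIZE_MAX would wrap to zero after padding. A minimal sketch of that guard, using hypothetical names:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* Pad a user request with one guard byte, rejecting the request if the
   addition would wrap around; mirrors the test at the top of realloc_check. */
static int pad_request_with_guard(size_t bytes, size_t *padded)
{
  if (bytes + 1 == 0) {        /* bytes == SIZE_MAX: padding would overflow */
    errno = ENOMEM;
    return -1;
  }
  *padded = bytes + 1;         /* room for the trailing magic byte */
  return 0;
}

int main(void)
{
  size_t padded;

  if (pad_request_with_guard(SIZE_MAX, &padded) == 0)
    return 1;                  /* should have been rejected */
  return pad_request_with_guard(42, &padded) == 0 && padded == 43 ? 0 : 1;
}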
Example 6
// This function is identical to mspace_malloc, except that PREACTION is
// replaced with 0 and POSTACTION is removed.
void* mspace_malloc_lockless(mspace msp, size_t bytes)
{
  mstate ms = (mstate)msp;
  if (!ok_magic(ms)) {
    USAGE_ERROR_ACTION(ms,ms);
    return 0;
  }
    if (!0){//PREACTION(ms)) {
    void* mem;
    size_t nb;
    if (bytes <= MAX_SMALL_REQUEST) {
      bindex_t idx;
      binmap_t smallbits;
      nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
      idx = small_index(nb);
      smallbits = ms->smallmap >> idx;

      if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
        mchunkptr b, p;
        idx += ~smallbits & 1;       /* Uses next bin if idx empty */
        b = smallbin_at(ms, idx);
        p = b->fd;
        assert(chunksize(p) == small_index2size(idx));
        unlink_first_small_chunk(ms, b, p, idx);
        set_inuse_and_pinuse(ms, p, small_index2size(idx));
        mem = chunk2mem(p);
        check_malloced_chunk(ms, mem, nb);
        goto postaction;
      }

      else if (nb > ms->dvsize) {
        if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
          mchunkptr b, p, r;
          size_t rsize;
          bindex_t i;
          binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
          binmap_t leastbit = least_bit(leftbits);
          compute_bit2idx(leastbit, i);
          b = smallbin_at(ms, i);
          p = b->fd;
          assert(chunksize(p) == small_index2size(i));
          unlink_first_small_chunk(ms, b, p, i);
          rsize = small_index2size(i) - nb;
          /* Fit here cannot be remainderless if 4byte sizes */
          if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
            set_inuse_and_pinuse(ms, p, small_index2size(i));
          else {
            set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
            r = chunk_plus_offset(p, nb);
            set_size_and_pinuse_of_free_chunk(r, rsize);
            replace_dv(ms, r, rsize);
          }
          mem = chunk2mem(p);
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }

        else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
          check_malloced_chunk(ms, mem, nb);
          goto postaction;
        }
      }
    }
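
The smallmap test at the top of the small-request path is compact: after shifting the bin bitmap right by idx, bit 0 says bin idx is non-empty and bit 1 says bin idx+1 is non-empty, and idx += ~smallbits & 1 bumps the index exactly when only the next bin can serve the request. A standalone sketch with local stand-in names:

#include <assert.h>

/* Given the small-bin bitmap and the index derived from the request size,
   return the bin actually used for a remainderless fit.  The caller has
   already checked that (smallmap >> idx) & 0x3 is non-zero. */
static unsigned pick_small_bin(unsigned smallmap, unsigned idx)
{
  unsigned smallbits = smallmap >> idx;

  assert((smallbits & 0x3U) != 0);
  return idx + (~smallbits & 1U);   /* use idx+1 iff bin idx is empty */
}

int main(void)
{
  assert(pick_small_bin(0x04, 2) == 2);   /* bin 2 non-empty: use it          */
  assert(pick_small_bin(0x08, 2) == 3);   /* only bin 3 non-empty: use it     */
  assert(pick_small_bin(0x0C, 2) == 2);   /* both non-empty: prefer exact fit */
  return 0;
}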
Example 7
void* malloc(size_t       bytes)
{
  size_t       nb  = request2size(bytes);  /* padded request size */
  mbinptr      b   = size2bin(nb);         /* corresponding bin */
  mchunkptr    hd  = &(b->hd);             /* head of its list */
  mchunkptr    p   = hd->fd;               /* chunk traverser */

  UPDATE_STATS((requested_mem+=bytes, ++n_malloc_bins));

  /* Try a (near) exact match in own bin */
  /* clean out unusable but consolidatable chunks in bin while traversing */

  while (p != hd)
  {
    UPDATE_STATS(++n_malloc_chunks);
    if (p->size >= nb)
      goto found;
    else    /* try to consolidate; same code as malloc_find_space */
    {
      mchunkptr nextp = p->fd;       /* save, in case of relinks */
      int consolidated = 0;          /* only unlink/relink if consolidated */
      
      mchunkptr t;

      while (!inuse(t = prev_chunk(p))) /* consolidate backward */
      {
        if (!consolidated) { consolidated = 1; unlink(p); }
        if (t == nextp) nextp = t->fd;
        unlink(t);
        set_size(t, t->size + p->size);
        p = t;
        UPDATE_STATS (++n_consol);
      }
      
      while (!inuse(t = next_chunk(p))) /* consolidate forward */
      {
        if (!consolidated) { consolidated = 1; unlink(p); }
        if (t == nextp) nextp = t->fd;
        unlink(t);
        set_size(p, p->size + t->size);
        UPDATE_STATS (++n_consol);
      }
      
      if (consolidated)
      {
        if (p->size >= nb)
        {
          /* make it safe to unlink again below */
          UPDATE_STATS(++n_avail);
          p->fd = p->bk = p;
          goto found;
        }
        else
          consollink(p);
      }

      p = nextp;

    }
  }

  b->dirty = 0; /* true if got here */

  /*  Scan bigger bins for a victim */

  while (++b <= malloc_maxbin)
  {
    UPDATE_STATS(++n_malloc_bins);
    if ((p = b->hd.bk) != &(b->hd))    /* no need to check size */
      goto found;
  }

  /* Consolidate or sbrk */

  p = malloc_find_space(nb);

  if (p == 0) return 0; /* allocation failure */

 found:   /* Use what we found */

  unlink(p);
  split(p, nb); 
  UPDATE_STATS(do_malloc_stats(p));
  return chunk2mem(p);
}
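
The consolidation loops above rely on boundary tags: each chunk knows its own size and the size of the chunk just below it, so prev_chunk and next_chunk reduce to pointer arithmetic. The sketch below uses a hypothetical, simplified layout (no flag bits packed into the size word), so it illustrates only the navigation, not this allocator's exact chunk format.

#include <assert.h>
#include <stddef.h>

/* Hypothetical simplified chunk header for illustration only. */
struct chunk {
  size_t prev_size;   /* size of the chunk immediately below this one */
  size_t size;        /* size of this chunk, header included          */
};

static struct chunk *next_chunk(struct chunk *p)
{
  return (struct chunk *) ((char *) p + p->size);
}

static struct chunk *prev_chunk(struct chunk *p)
{
  return (struct chunk *) ((char *) p - p->prev_size);
}

int main(void)
{
  /* Carve three adjacent 64-byte "chunks" out of one aligned buffer. */
  static size_t heap[48];
  struct chunk *a = (struct chunk *) heap;
  struct chunk *b = (struct chunk *) ((char *) heap + 64);
  struct chunk *c = (struct chunk *) ((char *) heap + 128);

  a->prev_size = 0;  a->size = 64;
  b->prev_size = 64; b->size = 64;
  c->prev_size = 64; c->size = 64;

  assert(next_chunk(a) == b && next_chunk(b) == c);
  assert(prev_chunk(c) == b && prev_chunk(b) == a);
  return 0;
}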
Example 8
void* memalign(size_t       alignment, size_t       bytes)
{
  mchunkptr p;
  size_t       nb = request2size(bytes);

  /* find an alignment that both we and the user can live with: */
  /* least common multiple guarantees mutual happiness */
  size_t       align = lcm(alignment, MALLOC_MIN_OVERHEAD);
  size_t       mask = align - 1;

  /* call malloc with worst case padding to hit alignment; */
  /* we will give back extra */

  size_t       req = nb + align + MINSIZE;
  void* m = malloc(req);

  if (m == 0) return m;

  p = mem2chunk(m);

  /* keep statistics on track */

  UPDATE_STATS(--n_mallocs);
  UPDATE_STATS(malloced_mem -= p->size);
  UPDATE_STATS(requested_mem -= req);
  UPDATE_STATS(requested_mem += bytes);

  if (((int)(m) & (mask)) != 0) /* misaligned */
  {

    /* find an aligned spot inside chunk */

    mchunkptr ap = (mchunkptr)(( ((int)(m) + mask) & -align) - SIZE_SZ);

    size_t       gap = (size_t      )(ap) - (size_t      )(p);
    size_t       room;

    /* we need to give back leading space in a chunk of at least MINSIZE */

    if (gap < MINSIZE)
    {
      /* This works since align >= MINSIZE */
      /* and we've malloc'd enough total room */

      ap = (mchunkptr)( (int)(ap) + align );
      gap += align;    
    }

    if (gap + nb > p->size) /* can't happen unless chunk sizes corrupted */
      malloc_user_error();

    room = p->size - gap;

    /* give back leader */
    set_size(p, gap);
    consollink(p);

    /* use the rest */
    p = ap;
    set_size(p, room);
  }

  /* also give back spare room at the end */

  split(p, nb); 
  UPDATE_STATS(do_malloc_stats(p));
  return chunk2mem(p);

}
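
The heart of the misaligned branch above is rounding the returned pointer up to the requested alignment before stepping back by SIZE_SZ to fabricate a chunk header. The rounding is the usual power-of-two trick; the sketch below uses uintptr_t instead of the int cast in the original (which is not portable to 64-bit targets) and assumes align is a power of two, as the lcm with MALLOC_MIN_OVERHEAD is in practice.

#include <assert.h>
#include <stdint.h>

/* Round p up to the next multiple of align; align must be a power of two,
   so (p + mask) & ~mask is equivalent to the original's & -align. */
static uintptr_t align_up(uintptr_t p, uintptr_t align)
{
  uintptr_t mask = align - 1;
  return (p + mask) & ~mask;
}

int main(void)
{
  assert(align_up(0x1001, 0x10) == 0x1010);   /* misaligned: bumped up      */
  assert(align_up(0x1000, 0x10) == 0x1000);   /* already aligned: unchanged */
  return 0;
}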
Example 9
static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  __libc_lock_init (a->mutex);

  __libc_lock_lock (list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  __libc_lock_unlock (list_lock);

  __libc_lock_lock (free_list_lock);
  detach_arena (replaced_arena);
  __libc_lock_unlock (free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */

  __libc_lock_lock (a->mutex);

  return a;
}
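
The only arithmetic in the arena setup above is the fix-up that makes the top chunk's user pointer properly aligned. A standalone sketch, with SIZE_SZ and MALLOC_ALIGNMENT hard-coded to typical 64-bit values (8 and 16); in glibc they come from the build configuration:

#include <assert.h>
#include <stdint.h>

#define SIZE_SZ           8u     /* assumed: sizeof(INTERNAL_SIZE_T) on LP64 */
#define MALLOC_ALIGNMENT  16u    /* assumed: 2 * SIZE_SZ                     */
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)

/* chunk2mem: the user pointer sits 2*SIZE_SZ past the start of the chunk. */
static uintptr_t chunk2mem_addr(uintptr_t chunk)
{
  return chunk + 2 * SIZE_SZ;
}

/* Advance ptr just enough that the user pointer of a chunk placed at ptr
   is MALLOC_ALIGNMENT-aligned, mirroring the misalign fix-up above. */
static uintptr_t align_top(uintptr_t ptr)
{
  uintptr_t misalign = chunk2mem_addr(ptr) & MALLOC_ALIGN_MASK;

  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  return ptr;
}

int main(void)
{
  uintptr_t p = align_top(0x100004);                      /* misaligned start   */
  assert((chunk2mem_addr(p) & MALLOC_ALIGN_MASK) == 0);   /* user ptr aligned   */
  return 0;
}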