Example #1
File: arena.c Project: Xilinx/eglibc
static int
internal_function
heap_trim(heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO(dl_pagesize);
  mchunkptr top_chunk = top(ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while(top_chunk == chunk_at_offset(heap, sizeof(*heap))) {
    prev_heap = heap->prev;
    prev_size = prev_heap->size - (MINSIZE-2*SIZE_SZ);
    p = chunk_at_offset(prev_heap, prev_size);
    /* fencepost must be properly aligned.  */
    misalign = ((long) p) & MALLOC_ALIGN_MASK;
    p = chunk_at_offset(prev_heap, prev_size - misalign);
    assert(p->size == (0|PREV_INUSE)); /* must be fencepost */
    p = prev_chunk(p);
    new_size = chunksize(p) + (MINSIZE-2*SIZE_SZ) + misalign;
    assert(new_size>0 && new_size<(long)(2*MINSIZE));
    if(!prev_inuse(p))
      new_size += p->prev_size;
    assert(new_size>0 && new_size<HEAP_MAX_SIZE);
    if(new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
      break;
    ar_ptr->system_mem -= heap->size;
    arena_mem -= heap->size;
    delete_heap(heap);
    heap = prev_heap;
    if(!prev_inuse(p)) { /* consolidate backward */
      p = prev_chunk(p);
      unlink(p, bck, fwd);
    }
    assert(((unsigned long)((char*)p + new_size) & (pagesz-1)) == 0);
    assert( ((char*)p + new_size) == ((char*)heap + heap->size) );
    top(ar_ptr) = top_chunk = p;
    set_head(top_chunk, new_size | PREV_INUSE);
    /*check_chunk(ar_ptr, top_chunk);*/
  }
  top_size = chunksize(top_chunk);
  extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1);
  if(extra < (long)pagesz)
    return 0;
  /* Try to shrink. */
  if(shrink_heap(heap, extra) != 0)
    return 0;
  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head(top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
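
The shrink amount above comes from a mask trick: extra = (top_size - pad - MINSIZE - 1) & ~(pagesz - 1) rounds down to a whole number of pages while leaving at least pad + MINSIZE bytes in the top chunk. A minimal standalone sketch of that arithmetic; page_floor and all the sizes below are illustrative, not taken from the project:

#include <assert.h>
#include <stdio.h>

/* Round x down to a multiple of the (power-of-two) page size. */
static unsigned long page_floor(unsigned long x, unsigned long pagesz)
{
    assert((pagesz & (pagesz - 1)) == 0);   /* must be a power of two */
    return x & ~(pagesz - 1);
}

int main(void)
{
    unsigned long pagesz = 4096, MINSIZE = 32, pad = 128;
    unsigned long top_size = 3 * 4096 + 500;   /* hypothetical top chunk size */

    /* Same computation heap_trim uses to size the shrink request. */
    long extra = (long)page_floor(top_size - pad - MINSIZE - 1, pagesz);
    printf("top=%lu -> release %ld bytes, keep %ld\n",
           top_size, extra, (long)top_size - extra);
    return 0;
}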
Example #2
File: pos-malloc.c Project: kunulee/f-stm
// Check unsafe pointers
void pos_check_unsafe_pointer(char *name)
{
    struct malloc_state * av;
    struct pos_name_entry *name_entry;
    mchunkptr chunk_ptr;
    struct seg_info *head;
    int i = 1;

    av = (struct malloc_state *)pos_lookup_mstate(name);
    if(av == NULL) {
        printf("Memory not allocated\n");
        return;	// av is used below; bail out on lookup failure
    }

    name_entry = pos_lookup_name_entry(name);
    name_entry->seg_head = (struct seg_info *)malloc(sizeof(struct seg_info) * 1024);
    memset(name_entry->seg_head, 0, sizeof(struct seg_info) * 1024);

    // Look up seg_info via system call
    syscall(306, name, name_entry->seg_head);
    head = name_entry->seg_head;

    // First Segment
    chunk_ptr = chunk_at_offset(av, sizeof(struct malloc_state));
    pos_check_unsafe_segment(name, av, head, chunk_ptr);

    // Other Segment
    while(head[i].addr != 0)
    {
        pos_check_unsafe_segment(name, av, head, (Void_t *)head[i].addr);
        i++;
    }
}
Example #3
File: pos-malloc.c Project: kunulee/f-stm
//static void 
void
pos_int_free(char *name, mstate av, mchunkptr p, int flag)
{
	INTERNAL_SIZE_T size;
	mfastbinptr* fb;
	mchunkptr prevchunk;
	INTERNAL_SIZE_T prevsize;
	mchunkptr nextchunk;
	INTERNAL_SIZE_T nextsize;
	int nextinuse;
	mchunkptr bck;
	mchunkptr fwd;

	//const char *errstr = NULL;


	size = chunksize(p);
	/*if ((uintptr_t) p > (uintptr_t) -size || misaligned_chunk (p)) {
		errstr = "free(): invalid pointer";
errout:
		//malloc_printerr (check_action, errstr, chunk2mem(p));
		return;
	}*/
	/*if (size < MINSIZE) {
		errstr = "free(): invalid size";
		goto errout;
	}*/

	//check_inuse_chunk(av, p);


	// fastbin
	if (flag==1 && (unsigned long)(size) <= (unsigned long)(get_max_fast ())) {
		/*if (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
		   || chunksize (chunk_at_offset (p, size)) >= av->system_mem) {
			errstr = "free(): invalid next size (fast)";
			goto errout;
		}*/

#if CONSISTENCY == 1
		set_fastchunks_log(name, av);
#else
		set_fastchunks(av);
#endif
		fb = &fastbin(av, fastbin_index(size));

		if (*fb == p) {
			//errstr = "double free or corruption (fasttop)";
			//goto errout;
			return ;
		}

#if CONSISTENCY == 1
		POS_WRITE_VAUE(name, (unsigned long *)&p->fd, (unsigned long)*fb);
		POS_WRITE_VAUE(name, (unsigned long *)fb, (unsigned long)p);
#else
		p->fd = *fb;
		*fb = p;
#endif

		return ;
	}

	// 1. First chunk
	if (chunk_is_first(p)) {

		nextchunk = next_chunk(p);
		nextsize = chunksize(nextchunk);

		// 1-1. (free F), free L
		if (chunk_is_last(nextchunk) && !inuse(nextchunk)) {

			//if (av < p && p < (char *)(av+PAGESIZE)){
			if ((char*)av+sizeof(struct malloc_state) == (char*)p) {
#if CONSISTENCY == 1
				insert_to_unsorted_log(name, av, p, bck, fwd, size);

				set_foot_log(name, p, size);
				clear_inuse_bit_at_offset_log(name, p, size);
#else
				insert_to_unsorted(av, p, bck, fwd, size);

				set_foot(p, size);
				clear_inuse_bit_at_offset(p, size);
#endif

				goto out;
			}
			else {
#if CONSISTENCY == 1
				unlink_log(name, nextchunk, bck, fwd);
				size = size + nextsize + 2*SIZE_SZ;

				pos_log_insert_malloc_free(name, (unsigned long)p, size);
				//pos_seg_free(name, (void *)p, size); // Delayed pos_seg_free
				POS_WRITE_VAUE(name, (unsigned long *)&av->system_mem, (unsigned long)(av->system_mem-size));
#else
				unlink(nextchunk, bck, fwd);
				size = size + nextsize + 2*SIZE_SZ;
				/*if (size%PAGESIZE != 0) {
					errstr = "free(): unmmap size is not page size";
					goto errout;
				}*/
				//FREE((char*)p, size);
				pos_seg_free(name, (void *)p, size);
				av->system_mem -= size;
#endif

				goto out;
			}

		}

		// 1-3. (free F), free M
		else if (!inuse(nextchunk)) {
#if CONSISTENCY == 1
			unlink_log(name, nextchunk, bck, fwd);
			size += nextsize;

			insert_to_unsorted_log(name, av, p, bck, fwd, size);

			set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
			set_foot_log(name, p, size);
#else
			unlink(nextchunk, bck, fwd);
			size += nextsize;

			insert_to_unsorted(av, p, bck, fwd, size);

			set_head(p, size | FIRST_CHUNK | PREV_INUSE);
			set_foot(p, size);
#endif

			goto out;
		}

		// 1-2. (free F), inuse L & 1-4. (free F), inuse M
		else {
#if CONSISTENCY == 1
			insert_to_unsorted_log(name, av, p, bck, fwd, size);

			set_foot_log(name, p, size);
			clear_inuse_bit_at_offset_log(name, p, size);
#else
			insert_to_unsorted(av, p, bck, fwd, size);

			set_foot(p, size);
			clear_inuse_bit_at_offset(p, size);
#endif

			goto out;
		}

	}

	// 2. Last chunk
	else if (chunk_is_last(p)) {

		if (!prev_inuse(p)) {
			prevchunk = prev_chunk(p);
			prevsize = chunksize(prevchunk);

			// 2-1. free F, (free L)
			if (chunk_is_first(prevchunk)) {

				//if (av < prevchunk && prevchunk < av+PAGESIZE){
				if((char*)av+sizeof(struct malloc_state) == (char*)prevchunk) {
#if CONSISTENCY == 1
					insert_to_unsorted_log(name, av, p, bck, fwd, size);
  
					set_foot_log(name, p, size);
					clear_inuse_bit_at_offset_log(name, p, size);
#else
					insert_to_unsorted(av, p, bck, fwd, size);
  
					set_foot(p, size);
					clear_inuse_bit_at_offset(p, size);
#endif

					goto out;
				}
				else {
#if CONSISTENCY == 1
					unlink_log(name, prevchunk, bck, fwd);
					size = prevsize+size+2*SIZE_SZ;
					//pos_seg_free(name, (void *)p, size);
					pos_log_insert_malloc_free(name, (unsigned long)p, size);
					POS_WRITE_VAUE(name, (unsigned long *)&av->system_mem, (unsigned long)(av->system_mem-size));
#else
					unlink(prevchunk, bck, fwd);
					size = prevsize+size+2*SIZE_SZ;
					/*if (size%PAGESIZE != 0) {
						errstr = "free(): unmmap size is not page size";
						goto errout;
					}*/
					//FREE((char*)p, size);
					pos_seg_free(name, (void *)p, size);
					av->system_mem -= size;
#endif

					goto out;
				}

			}

			// 2-3. free M, (free L)
			else {
#if CONSISTENCY == 1
				unlink_log(name, prevchunk, bck, fwd);
				size += prevsize;
				p = chunk_at_offset(p, -((long) prevsize));
  
				insert_to_unsorted_log(name, av, p, bck, fwd, size);

				set_head_log(name, p, size | LAST_CHUNK | PREV_INUSE);
				set_foot_log(name, p, size);
				clear_inuse_bit_at_offset_log(name, p, size);
#else
				unlink(prevchunk, bck, fwd);
				size += prevsize;
				p = chunk_at_offset(p, -((long) prevsize));
  
				insert_to_unsorted(av, p, bck, fwd, size);

				set_head(p, size | LAST_CHUNK | PREV_INUSE);
				set_foot(p, size);
				clear_inuse_bit_at_offset(p, size);
#endif
				goto out;
			}

		}

		// 2-2. inuse F, (free L) & 2-4. inuse M, (free L)
		else {
#if CONSISTENCY == 1
			insert_to_unsorted_log(name, av, p, bck, fwd, size);
  
			set_foot_log(name, p, size);
			clear_inuse_bit_at_offset_log(name, p, size);
#else
			insert_to_unsorted(av, p, bck, fwd, size);
  
			set_foot(p, size);
			clear_inuse_bit_at_offset(p, size);
#endif

			goto out;
		}

	}

	// 3. Middle chunk
	else {

		nextchunk = next_chunk(p);
		nextsize = chunksize(nextchunk);

		if (!prev_inuse(p)) {
			prevchunk = prev_chunk(p);
			prevsize = chunksize(prevchunk);

			// 3-1. free F, (free M), free L
			if (chunk_is_first(prevchunk) && chunk_is_last(nextchunk) && !inuse(nextchunk) ) {
	
				//if (av < prevchunk && prevchunk < av+PAGESIZE){
				if((char*)av+sizeof(struct malloc_state) == (char*)prevchunk) {
#if CONSISTENCY == 1
					unlink_log(name, prevchunk, bck, fwd);
					size += prevsize;
					p = chunk_at_offset(p, -((long) prevsize));

					insert_to_unsorted_log(name, av, p, bck, fwd, size);
  
					set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
					set_foot_log(name, p, size);
					clear_inuse_bit_at_offset_log(name, p, size);
#else
					unlink(prevchunk, bck, fwd);
					size += prevsize;
					p = chunk_at_offset(p, -((long) prevsize));

					insert_to_unsorted(av, p, bck, fwd, size);
  
					set_head(p, size | FIRST_CHUNK | PREV_INUSE);
					set_foot(p, size);
					clear_inuse_bit_at_offset(p, size);
#endif
  
					goto out;
				}

				else {
#if CONSISTENCY == 1
					unlink_log(name, prevchunk, bck, fwd);
					unlink_log(name, nextchunk, bck, fwd);
					p = chunk_at_offset(p, -((long) prevsize));
					size = prevsize+size+nextsize+2*SIZE_SZ;

					pos_log_insert_malloc_free(name, (unsigned long)p, size);
					//pos_seg_free(name, (void *)p, size);
					POS_WRITE_VAUE(name, (unsigned long *)&av->system_mem, (unsigned long)(av->system_mem-size));
#else
					unlink(prevchunk, bck, fwd);
					unlink(nextchunk, bck, fwd);
					p = chunk_at_offset(p, -((long) prevsize));
					size = prevsize+size+nextsize+2*SIZE_SZ;
					/*if (size%PAGESIZE != 0) {
						errstr = "free(): unmmap size is not page size";
						goto errout;
					}*/
					//FREE((char*)p, size);
					pos_seg_free(name, (void *)p, size);
					av->system_mem -= size;
#endif
  
					goto out;
				}
			}

#if CONSISTENCY == 1
			unlink_log(name, prevchunk, bck, fwd);
#else
			unlink(prevchunk, bck, fwd);
#endif
			size += prevsize;
			p = chunk_at_offset(p, -((long) prevsize));

			if (chunk_is_first(prevchunk)) {
#if CONSISTENCY == 1
				set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
#else
				set_head(p, size | FIRST_CHUNK | PREV_INUSE);
				//set_foot(p, size);
				//clear_inuse_bit_at_offset(p, size);
#endif
			}

		}

		nextinuse = inuse_bit_at_offset(nextchunk, nextsize);

		if (!nextinuse) {
#if CONSISTENCY == 1
			unlink_log(name, nextchunk, bck, fwd);
#else
			unlink(nextchunk, bck, fwd);
#endif
			size += nextsize;
		}

#if CONSISTENCY == 1
		insert_to_unsorted_log(name, av, p, bck, fwd, size);

		if (chunk_is_first(p)) {
			set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
		} else if (chunk_is_last(nextchunk)&&!nextinuse) {
			set_head_log(name, p, size | LAST_CHUNK | PREV_INUSE);
		} else {
			set_head_log(name, p, size | PREV_INUSE);
		}
		set_foot_log(name, p, size);
		clear_inuse_bit_at_offset_log(name, p, size);
#else
		//else
		//clear_inuse_bit_at_offset(nextchunk, 0);

		insert_to_unsorted(av, p, bck, fwd, size);

		if (chunk_is_first(p)) {
			set_head(p, size | FIRST_CHUNK | PREV_INUSE);
		} else if (chunk_is_last(nextchunk)&&!nextinuse) {
			set_head(p, size | LAST_CHUNK | PREV_INUSE);
		} else {
			set_head(p, size | PREV_INUSE);
		}
		set_foot(p, size);
		clear_inuse_bit_at_offset(p, size);

		//check_free_chunk(av, p);
#endif
	}

out: 
	if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD && have_fastchunks(av)) {
		pos_malloc_consolidate(name, av);
	}
}
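
The fastbin branch at the top of pos_int_free is a LIFO push onto a singly linked list threaded through fd, guarded by a cheap double-free check that rejects a chunk already sitting at the bin top. A minimal sketch of the same structure, with a hypothetical fastnode type standing in for the real mchunkptr:

#include <stdio.h>

struct fastnode { struct fastnode *fd; };

/* Push p onto the bin; reject it if it is already the bin top,
   mirroring the "double free or corruption (fasttop)" check. */
static int fastbin_push(struct fastnode **fb, struct fastnode *p)
{
    if (*fb == p)
        return -1;          /* immediate double free detected */
    p->fd = *fb;
    *fb = p;
    return 0;
}

int main(void)
{
    struct fastnode a, b, *bin = NULL;
    fastbin_push(&bin, &a);
    fastbin_push(&bin, &b);
    printf("push a again: %d\n", fastbin_push(&bin, &a)); /* 0: a is not top */
    printf("push b again: %d\n", fastbin_push(&bin, &b)); /* -1: caught */
    return 0;
}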
Example #4
File: pos-malloc.c Project: kunulee/f-stm
//static void 
void
pos_malloc_init_state(char *name, mstate av)
{
	mchunkptr	first_chunk;
	unsigned long	first_size;
	mchunkptr	last_chunk;
	unsigned long	last_size;

	mchunkptr       bck;
	mchunkptr       fwd;

	int	i;
	mbinptr	bin;


	// init mutex key
	pthread_mutex_init(&av->mutex, NULL);
	// initialize malloc_state
#if CONSISTENCY == 1
	set_init_key_log(name, av);
#else
	set_init_key(av);
#endif

// The code below doesn't need logging.

	for (i=1; i < NBINS; i++) {
		bin = bin_at(av,i);
		bin->fd = bin->bk = bin;
	}

	//set_max_fast(DEFAULT_MXFAST);
	clear_fastchunks(av);
	for (i=0; i< NFASTBINS ; i++) {
		av->fastbinsY[i] = 0;
	}

	// first chunk
	first_chunk = chunk_at_offset(av, sizeof(struct malloc_state));
	first_size = (PAGESIZE - sizeof(struct malloc_state) - 2*SIZE_SZ)/2;	// 956
//#if CONSISTENCY == 1
	//first_size = (128*1024-1)*4096 + 960; //536867776
	//first_size = request2size(first_size); // 536867792
//#else
	first_size = 960;
//#endif
//insert_to_unsorted(av, first_chunk, bck, fwd, first_size);

	set_head(first_chunk, first_size | FIRST_CHUNK | PREV_INUSE);
	set_foot(first_chunk, first_size);
	clear_inuse_bit_at_offset(first_chunk, first_size);

	// last_chunk
	last_chunk = chunk_at_offset(first_chunk, first_size);
	//last_size = first_size;
//#if CONSISTENCY == 1
	////last_size = (128*1024)*4096 + 944; // 536867760
	//last_size = (256*1024)*4096 - first_size - 2*SIZE_SZ;
	//last_size = request2size(last_size); // 536874032
//#else
//	last_size = 988;
	last_size = 1800;
//#endif
	insert_to_unsorted(av, last_chunk, bck, fwd, last_size);

	set_head(last_chunk, last_size | LAST_CHUNK | PREV_INUSE);
	set_foot(last_chunk, last_size);
	clear_inuse_bit_at_offset(last_chunk, last_size);

	av->last_remainder = 0;
	for (i=0; i<BINMAPSIZE; i++) {
		av->binmap[i] = 0;
	}
	av->system_mem = PAGESIZE;

	av->prime_obj = NULL;
}
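
The loop over bin_at(av, i) above makes each bin header point at itself, which is how an empty circular doubly linked list is represented: a bin is empty exactly when bin->fd == bin. A small sketch under that assumption, with a hypothetical binnode type rather than the real mbinptr:

#include <stdio.h>

struct binnode { struct binnode *fd, *bk; };

static void bin_init(struct binnode *bin)  { bin->fd = bin->bk = bin; }
static int  bin_empty(struct binnode *bin) { return bin->fd == bin; }

/* Insert p at the front of the circular list headed by bin. */
static void bin_push_front(struct binnode *bin, struct binnode *p)
{
    p->fd = bin->fd;
    p->bk = bin;
    bin->fd->bk = p;
    bin->fd = p;
}

int main(void)
{
    struct binnode bin, chunk;
    bin_init(&bin);
    printf("empty: %d\n", bin_empty(&bin));   /* 1 */
    bin_push_front(&bin, &chunk);
    printf("empty: %d\n", bin_empty(&bin));   /* 0 */
    return 0;
}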
Example #5
File: pos-malloc.c Project: kunulee/f-stm
static Void_t*
pos_int_malloc(char *name, mstate av, size_t bytes)
{
	INTERNAL_SIZE_T nb;
	unsigned int idx;
	mbinptr bin;

	mchunkptr victim;
	INTERNAL_SIZE_T size;
	int victim_index;

	mchunkptr remainder;
	unsigned long remainder_size;

	unsigned int block;
	unsigned int bit;
	unsigned int map;

	mchunkptr fwd;
	mchunkptr bck;

	//const char *errstr = NULL;

	size_t pagemask  = PAGESIZE - 1;
	
	//align the request to 16-byte units
	checked_request2size(bytes, nb);
	#if MALLOC_DEBUG == 1 
	printf("before fastbin\n") ; 
	#endif
	// 1. fast bin (<=144)
/*	if ((unsigned long)(nb) <= (unsigned long)(get_max_fast())) {
		idx = fastbin_index(nb);
	//	printf("idx = %d\n" , idx) ; 	
		mfastbinptr* fb = &fastbin(av, idx);
		victim = *fb;

		if (victim != 0) {

		if (fastbin_index (chunksize (victim)) != idx) {
			errstr = "malloc(): memory corruption (fast)";
errout:
			malloc_printerr (check_action, errstr, chunk2mem (victim));
		}

#if CONSISTENCY == 1
			POS_WRITE_VAUE(name, (unsigned long *)fb, (unsigned long)victim->fd);
#else
			*fb = victim->fd;
#endif

			void *p = chunk2mem(victim);
			return p;
		}
	}
*/
	// 2. small bin (<=1008)
	if (in_smallbin_range(nb)) {
	#if MALLOC_DEBUG == 1
	printf("inside smallbin if\n") ; 	
	#endif 	

		idx = smallbin_index(nb);
		bin = bin_at(av,idx);

		victim = last(bin);
		if ( victim != bin && victim != NULL ) {

			bck = victim->bk;

/*			if (bck->fd != victim) {
				errstr = "malloc(): smallbin double linked list corrupted";
				goto errout;
			}*/

#if CONSISTENCY == 1
			set_inuse_bit_at_offset_log(name, victim, nb);
			POS_WRITE_VAUE(name, (unsigned long *)&bin->bk, (unsigned long)bck);
			POS_WRITE_VAUE(name, (unsigned long *)&bin->fd, (unsigned long)bin);
#else
			set_inuse_bit_at_offset(victim, nb);
			bin->bk = bck;
			bck->fd = bin;
#endif

			void *p = chunk2mem(victim);
			return p;
		}
	}
	else {
		idx = largebin_index(nb);

		if (have_fastchunks(av)) {
			pos_malloc_consolidate(name, av);
		}
	}

	#if MALLOC_DEBUG==1
	printf("before unsorted bin\n") ; 	
	#endif 
	for(;;) {
		int iters = 0;
		// 3. unsorted bin
		while ((victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
			bck = victim->bk;
			
			/*if (victim->size <= 2 * SIZE_SZ || victim->size > av->system_mem)
				malloc_printerr (check_action, "malloc(): memory corruption", chunk2mem (victim));*/
			size = chunksize(victim);

			if (in_smallbin_range(nb) &&
			   bck == unsorted_chunks(av) &&
			   victim == av->last_remainder &&
			   (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {

				remainder_size = size - nb;
				remainder = chunk_at_offset(victim, nb);
#if CONSISTENCY == 1
				POS_WRITE_VAUE(name, (unsigned long *)&unsorted_chunks(av)->bk, (unsigned long)remainder);
				POS_WRITE_VAUE(name, (unsigned long *)&unsorted_chunks(av)->fd, (unsigned long)remainder);
				POS_WRITE_VAUE(name, (unsigned long *)&av->last_remainder, (unsigned long)remainder);
#else
				unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
				av->last_remainder = remainder;
#endif
				remainder->bk = remainder->fd = unsorted_chunks(av);
				if (!in_smallbin_range(remainder_size)) {
					remainder->fd_nextsize = NULL;
					remainder->bk_nextsize = NULL;
				}

// Remainder doesn't need logging...
				if (chunk_is_last(victim))
					set_head(remainder, remainder_size | LAST_CHUNK | PREV_INUSE);
				else
					set_head(remainder, remainder_size | PREV_INUSE);

				// set PREV_INUSE flag..
#if CONSISTENCY == 1
				if (chunk_is_first(victim)) {
					set_head_log(name, victim, nb | FIRST_CHUNK | PREV_INUSE);
				} else {
					set_head_log(name, victim, nb | PREV_INUSE);
				}
				
				set_foot_log(name, remainder, remainder_size);
#else
				if (chunk_is_first(victim))
					set_head(victim, nb | FIRST_CHUNK | PREV_INUSE);
				else
					set_head(victim, nb | PREV_INUSE);

				set_foot(remainder, remainder_size);
#endif

				void *p = chunk2mem(victim);
				return p;
			}

#if CONSISTENCY == 1
			POS_WRITE_VAUE(name, (unsigned long *)&unsorted_chunks(av)->bk, (unsigned long)bck);
			POS_WRITE_VAUE(name, (unsigned long *)&bck->fd, (unsigned long)unsorted_chunks(av));
#else
			unsorted_chunks(av)->bk = bck;
			bck->fd = unsorted_chunks(av);
#endif

			if (size == nb) {
#if CONSISTENCY == 1
				set_inuse_bit_at_offset_log(name, victim, size);
#else
				set_inuse_bit_at_offset(victim, size);
#endif

				void *p = chunk2mem(victim);
				return p;
			}
			if (in_smallbin_range(size)) {
				victim_index = smallbin_index(size);
				bck = bin_at(av, victim_index);
				fwd = bck->fd;
			}
			else {
				victim_index = largebin_index(size);
				bck = bin_at(av, victim_index);
				fwd = bck->fd;

				if (fwd != bck) {
					size |= PREV_INUSE; //In order not to use chunksize()
					if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
						fwd = bck;
						bck = bck->bk;

// The victim came from the unsorted bin, where fd_nextsize is unused, so we don't log it (nor fd_nextsize below).
						victim->fd_nextsize = fwd->fd;
						victim->bk_nextsize = fwd->fd->bk_nextsize;
#if CONSISTENCY == 1
						POS_WRITE_VAUE(name, (unsigned long *)&fwd->fd->bk_nextsize, (unsigned long)victim);
						POS_WRITE_VAUE(name, (unsigned long *)&victim->bk_nextsize->fd_nextsize, (unsigned long)victim);
#else
						fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
#endif
					}
					else {
						while ((unsigned long) size < fwd->size) {
							fwd = fwd->fd_nextsize;
						}

						if ((unsigned long) size == (unsigned long) fwd->size)
							fwd = fwd->fd;
						else {
							victim->fd_nextsize = fwd;
							victim->bk_nextsize = fwd->bk_nextsize;
#if CONSISTENCY == 1
							POS_WRITE_VAUE(name, (unsigned long *)&fwd->bk_nextsize, (unsigned long)victim);
							POS_WRITE_VAUE(name, (unsigned long *)&victim->bk_nextsize->fd_nextsize, (unsigned long)victim);
#else
							fwd->bk_nextsize = victim;
							victim->bk_nextsize->fd_nextsize = victim;
#endif
						}
						bck = fwd->bk;
					}
				} 
				else
					victim->fd_nextsize = victim->bk_nextsize = victim;
			}

#if CONSISTENCY == 1
			mark_bin_log(name, av, victim_index);
			POS_WRITE_VAUE(name, (unsigned long *)&victim->bk, (unsigned long)bck);
			POS_WRITE_VAUE(name, (unsigned long *)&victim->fd, (unsigned long)fwd);
			POS_WRITE_VAUE(name, (unsigned long *)&fwd->bk, (unsigned long)victim);
			POS_WRITE_VAUE(name, (unsigned long *)&bck->fd, (unsigned long)victim);
#else
			mark_bin(av, victim_index);
			victim->bk = bck;
			victim->fd = fwd;
			fwd->bk = victim;
			bck->fd = victim;
#endif
#define MAX_ITERS	10000
			if (++iters >= MAX_ITERS)
				break;
		}
		#if MALLOC_DEBUG == 1
		printf("before large bin\n") ;
		#endif 
		// 4. large bin (1024<=)
		if (!in_smallbin_range(nb)) {
			bin = bin_at(av, idx);
#if MALLOC_DEBUG
			printf(" bin = [%p]\n " ,bin ) ; 	
#endif
			if ((victim = first(bin)) != bin &&
			   (unsigned long)(victim->size) >= (unsigned long)(nb)) {

				victim = victim->bk_nextsize;
				while (((unsigned long)(size = chunksize(victim)) < (unsigned long)(nb)))
					victim = victim->bk_nextsize;

				//if (victim != last(bin) && victim->size == victim->fd->size)
				if (victim != last(bin) && chunksize(victim) == chunksize(victim->fd))
					victim = victim->fd;

				remainder_size = size - nb;
#if CONSISTENCY == 1
				unlink_log(name, victim, bck, fwd);
#else
				unlink(victim, bck, fwd);
#endif

				if (remainder_size < MINSIZE)  {
#if CONSISTENCY == 1
					set_inuse_bit_at_offset_log(name, victim, size);
#else
					set_inuse_bit_at_offset(victim, size);
#endif
				}
				else {
					remainder = chunk_at_offset(victim, nb);

#if CONSISTENCY == 1
					insert_to_unsorted_log(name, av, remainder, bck, fwd, remainder_size);
#else
					insert_to_unsorted(av, remainder, bck, fwd, remainder_size);
#endif

// Remainder doesn't need logging...
					if (chunk_is_last(victim))
						set_head(remainder, remainder_size | LAST_CHUNK | PREV_INUSE);
					else
						set_head(remainder, remainder_size | PREV_INUSE);

					// set PREV_INUSE flag..
#if CONSISTENCY == 1
					if (chunk_is_first(victim)) {
						set_head_log(name, victim, nb | FIRST_CHUNK | PREV_INUSE);
					} else {
						set_head_log(name, victim, nb | PREV_INUSE);
					}
					
					set_foot_log(name, remainder, remainder_size);
#else
					if (chunk_is_first(victim))
						set_head(victim, nb | FIRST_CHUNK | PREV_INUSE);
					else
						set_head(victim, nb | PREV_INUSE);

					set_foot(remainder, remainder_size);
#endif
				}

				void *p = chunk2mem(victim);
				return p;
			}
			
		}
		++idx;
		bin = bin_at(av,idx);
		block = idx2block(idx);
		map = av->binmap[block];
	 	bit = idx2bit(idx);
#if MALLOC_DEBUG == 1
		printf("DDD\n") ;
#endif
		for (;;) {
			if (bit > map || bit == 0) {
				do {
					if (++block >= BINMAPSIZE){
						goto new_alloc;
					}
				} while ( (map = av->binmap[block]) == 0);

				bin = bin_at(av, (block << BINMAPSHIFT));
				bit = 1;
			}
			while ((bit & map) == 0) {
				bin = next_bin(bin);
				bit <<= 1;
			}
#if MALLOC_DEBUG == 1
			printf("before victim\n") ; 
#endif 
			victim = last(bin);

			if (victim == bin) {
#if MALLOC_DEBUG == 1
				printf("victim == bin\n") ; 
#endif 
#if CONSISTENCY == 1
				POS_WRITE_VAUE(name, (unsigned long *)&av->binmap[block], (unsigned long)(map &~bit));
#else
				av->binmap[block] = map &= ~bit;
#endif
				bin = next_bin(bin);
				bit <<= 1;
			}
			else {
#if MALLOC_DEBUG == 1
				printf("victim != bin\n") ; 
#endif
				size = chunksize(victim);

				remainder_size = size - nb;

#if CONSISTENCY == 1
				unlink_log(name, victim, bck, fwd);
#else
				unlink(victim, bck, fwd);
#endif
#if MALLOC_DEBUG == 1
				printf("unlink\n") ; 
#endif 

				if (remainder_size < MINSIZE) {
#if CONSISTENCY == 1
					set_inuse_bit_at_offset_log(name, victim, size);
#else
#if MALLOC_DEBUG == 1
					printf("D\n") ;
					printf("victim : %p\n", victim) ; 	
					printf("size: %d\n" ,size) ; 
#endif
					set_inuse_bit_at_offset(victim, size);
#endif
				}
				else {
#if MALLOC_DEBUG == 1
					printf("remainder\n") ; 	
#endif
					remainder = chunk_at_offset(victim, nb);

#if CONSISTENCY == 1
					insert_to_unsorted_log(name, av, remainder, bck, fwd, remainder_size);
#else
#if MALLOC_DEBUG == 1
					printf("av=%p\n",av) ; 	
					printf("%p %p %p\n", remainder , bck , fwd);
					printf("%d\n" , remainder_size) ;
#endif 
					insert_to_unsorted(av, remainder, bck, fwd, remainder_size);
#endif

#if MALLOC_DEBUG == 1
					printf("%p %p %p\n", remainder , bck , fwd);

					printf("s\n") ; 
#endif
					if (in_smallbin_range(nb)) {
#if CONSISTENCY == 1
						POS_WRITE_VAUE(name, (unsigned long *)&av->last_remainder, (unsigned long)remainder);
#else
						av->last_remainder = remainder;
#endif
					}
#if MALLOC_DEBUG == 1
				printf("s\n") ; 
#endif 
					if (chunk_is_last(victim))
						set_head(remainder, remainder_size | LAST_CHUNK | PREV_INUSE);
					else
						set_head(remainder, remainder_size | PREV_INUSE);

					// set PREV_INUSE flag..
#if CONSISTENCY == 1
					if (chunk_is_first(victim)) {
						set_head_log(name, victim, nb | FIRST_CHUNK | PREV_INUSE);
					} else {
						set_head_log(name, victim, nb | PREV_INUSE);
					}
					
#if MALLOC_DEBUG == 1
				printf("s\n") ; 
#endif 
					set_foot_log(name, remainder, remainder_size);
#else
					if (chunk_is_first(victim))
						set_head(victim, nb | FIRST_CHUNK | PREV_INUSE);
					else
						set_head(victim, nb | PREV_INUSE);

					set_foot(remainder, remainder_size);
#endif
				}
#if MALLOC_DEBUG == 1 
				printf("s\n") ; 
#endif
				void *p = chunk2mem(victim);
				return p;
			}
		}
new_alloc:
		#if MALLOC_DEBUG == 1
		printf("before newallocation\n");
		#endif 
		// 6. new allocation
		size = (nb + MINSIZE +2*SIZE_SZ + pagemask) & ~pagemask;
		size += DEFAULT_PAD;
		//char* mm = (char*)(SEG_ALLOC(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE));
		char *mm = (char *)pos_seg_alloc(name, size);
		
		memset(mm , 0 , size);
		#if MALLOC_DEBUG == 1
		printf(" mm = %p\n" , mm) ; 	
		#endif 
	
#if CONSISTENCY == 1
		pos_log_insert_malloc_free(name, (unsigned long)mm, size);
#endif
		//if (mm != MAP_FAILED) {
		if (mm != (char *)0) {
#if CONSISTENCY == 1
			POS_WRITE_VAUE(name, (unsigned long *)&av->system_mem, (unsigned long)(av->system_mem+size));
#else
			av->system_mem += size;
#endif

		
//			printf("D!\n") ;
			mchunkptr p;

			p = (mchunkptr)mm;

			remainder_size = size - nb - 2*SIZE_SZ;
			remainder = chunk_at_offset(p, nb);

#if CONSISTENCY == 1
			insert_to_unsorted_log(name, av, remainder, bck, fwd, remainder_size);
#else
			insert_to_unsorted(av, remainder, bck, fwd, remainder_size);
#endif

			/*if (in_smallbin_range(nb))
				av->last_remainder = remainder;*/

			// set PREV_INUSE flag..
//#if CONSISTENCY == 1
//#elseif
			set_head(p, nb | FIRST_CHUNK | PREV_INUSE);
			set_head(remainder, remainder_size | LAST_CHUNK | PREV_INUSE);

			set_foot(remainder, remainder_size);
			clear_inuse_bit_at_offset(remainder, remainder_size);
//#endif

			//return p;
			return chunk2mem(p);
		} 
		else
			return 0;
  
	}
}
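
The block/bit arithmetic near the end of pos_int_malloc walks av->binmap, one bit per bin, so empty bins are skipped a word at a time instead of header by header. The sketch below reproduces that scan over a plain bitmap; BINS, next_marked_bin, and the word size are simplified stand-ins for the real binmap macros:

#include <stdio.h>

#define BINS            128
#define BITS_PER_WORD   32
#define WORDS           (BINS / BITS_PER_WORD)

/* Find the first marked bin at or after start_idx; -1 if none. */
static int next_marked_bin(const unsigned map[WORDS], int start_idx)
{
    int block = start_idx / BITS_PER_WORD;
    unsigned bit = 1u << (start_idx % BITS_PER_WORD);
    unsigned word = map[block] & ~(bit - 1);   /* clear bits below start */

    for (;;) {
        if (word != 0) {
            int i = block * BITS_PER_WORD;
            unsigned b = 1;
            while (!(word & b)) { b <<= 1; i++; }
            return i;
        }
        if (++block >= WORDS)
            return -1;                         /* like the new_alloc exit */
        word = map[block];
    }
}

int main(void)
{
    unsigned map[WORDS] = {0};
    map[2] |= 1u << 5;                        /* mark bin 69 */
    printf("%d\n", next_marked_bin(map, 10)); /* 69 */
    return 0;
}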
Example #6
File: pos-malloc.c Project: kunulee/f-stm
////////////////////////////////////////
// WARNING: pos_realloc has an error! FIX UP!
////////////////////////////////////////
Void_t*
pos_int_realloc(char *name, mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
	     INTERNAL_SIZE_T nb)
{
	mchunkptr newp;				/* chunk to return */
	INTERNAL_SIZE_T newsize;		/* its size */
	Void_t* newmem;				/* corresponding user mem */

	mchunkptr next;				/* next contiguous chunk after oldp */

	mchunkptr remainder;			/* extra space at end of newp */
	unsigned long remainder_size;	/* its size */

	mchunkptr bck;				/* misc temp for linking */
	mchunkptr fwd;				/* misc temp for linking */

	unsigned long copysize;		/* bytes to copy */
	unsigned int ncopies;			/* INTERNAL_SIZE_T words to copy */
	INTERNAL_SIZE_T* s;			/* copy source */
	INTERNAL_SIZE_T* d;			/* copy destination */

	const char *errstr = NULL;


	/* oldmem size */
	/*if (oldp->size <= 2 * SIZE_SZ || oldsize >= av->system_mem) {
		errstr = "realloc(): invalid old size";
errout:
		malloc_printerr (check_action, errstr, chunk2mem(oldp));
		return NULL;
	}*/

	next = chunk_at_offset(oldp, oldsize);
	INTERNAL_SIZE_T nextsize = chunksize(next);
	/*if (next->size <= 2 * SIZE_SZ || nextsize >= av->system_mem) {
		errstr = "realloc(): invalid next size";
			goto errout;
	}*/

	// the request is no larger than the old size
	if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
		/* already big enough; split below */
		newp = oldp;
		newsize = oldsize;
	}

	// the request is larger than the old size
	else {

		/* Try to expand forward into next chunk;  split off remainder below */
		if (!inuse(next) &&
			(unsigned long)(newsize = oldsize + nextsize) >= (unsigned long)(nb)) {
			newp = oldp;
			unlink(next, bck, fwd);
		}

		/* allocate, copy, free */
		else {
			newmem = pos_int_malloc(name, av, nb - MALLOC_ALIGN_MASK);
			if (newmem == 0)
				return 0; /* propagate failure */

			newp = mem2chunk(newmem);
			newsize = chunksize(newp);

			/*
			  Avoid copy if newp is next chunk after oldp.
			*/
			if (newp == next) {
				newsize += oldsize;
				newp = oldp;
			}
			else {
				/*
				  Unroll copy of <= 36 bytes (72 if 8byte sizes)
				  We know that contents have an odd number of
				  INTERNAL_SIZE_T-sized words; minimally 3.
				*/

				copysize = oldsize - SIZE_SZ;
				s = (INTERNAL_SIZE_T*)(chunk2mem(oldp));
				d = (INTERNAL_SIZE_T*)(newmem);
				ncopies = copysize / sizeof(INTERNAL_SIZE_T);

				if (ncopies > 9)
					memcpy(d, s, copysize);
				else {
					*(d+0) = *(s+0);
					*(d+1) = *(s+1);
					*(d+2) = *(s+2);
					if (ncopies > 4) {
						*(d+3) = *(s+3);
						*(d+4) = *(s+4);
						if (ncopies > 6) {
							*(d+5) = *(s+5);
							*(d+6) = *(s+6);
							if (ncopies > 8) {
								*(d+7) = *(s+7);
								*(d+8) = *(s+8);
							}
						}
					}
				}

				pos_int_free(name, av, oldp, 1);

				return chunk2mem(newp);
			}
		}
	}

	/* If possible, free extra space in old or extended chunk */

	remainder_size = newsize - nb;

	if (remainder_size < MINSIZE) { /* not enough extra to split off */
		set_head_size(newp, newsize);
		set_inuse_bit_at_offset(newp, newsize);
	}
	else { /* split remainder */
		remainder = chunk_at_offset(newp, nb);

		if (chunk_is_last(newp))
			set_head(remainder, remainder_size | LAST_CHUNK | PREV_INUSE);
		else
			set_head(remainder, remainder_size | PREV_INUSE);

		// set PREV_INUSE flag..
		if (chunk_is_first(newp))
			set_head(newp, nb | FIRST_CHUNK | PREV_INUSE);
		else
			set_head(newp, nb | PREV_INUSE);
		
		//set_head_size(newp, nb);
		//set_head(remainder, remainder_size | PREV_INUSE |(av != &main_arena ? NON_MAIN_ARENA : 0));

		/* Mark remainder as inuse so free() won't complain */
		set_inuse_bit_at_offset(remainder, remainder_size);
		pos_int_free(name, av, remainder, 1);
	}

	return chunk2mem(newp);
}
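
The tail of pos_int_realloc decides whether the surplus of a grown chunk is worth splitting: remainder_size = newsize - nb only becomes a separate chunk if it can hold a minimal one (MINSIZE). A tiny sketch of that decision with illustrative numbers:

#include <stdio.h>

#define MINSIZE 32UL

/* Decide how a chunk of newsize bytes serves a request of nb bytes. */
static void split_decision(unsigned long newsize, unsigned long nb)
{
    unsigned long remainder = newsize - nb;
    if (remainder < MINSIZE)
        printf("nb=%lu: keep all %lu bytes (remainder %lu too small)\n",
               nb, newsize, remainder);
    else
        printf("nb=%lu: split off a %lu-byte remainder chunk\n", nb, remainder);
}

int main(void)
{
    split_decision(128, 112);  /* remainder 16 < MINSIZE: no split */
    split_decision(128, 64);   /* remainder 64: split */
    return 0;
}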
Example #7
/* ------------------------------ realloc ------------------------------ */
void* ulibc_realloc(void* oldmem, size_t bytes)
{
    mstate av;

    size_t  nb;              /* padded request size */

    mchunkptr        oldp;            /* chunk corresponding to oldmem */
    size_t  oldsize;         /* its size */

    mchunkptr        newp;            /* chunk to return */
    size_t  newsize;         /* its size */
    void*          newmem;          /* corresponding user mem */

    mchunkptr        next;            /* next contiguous chunk after oldp */

    mchunkptr        remainder;       /* extra space at end of newp */
    unsigned long     remainder_size;  /* its size */

    mchunkptr        bck;             /* misc temp for linking */
    mchunkptr        fwd;             /* misc temp for linking */

    unsigned long     copysize;        /* bytes to copy */
    unsigned int     ncopies;         /* size_t words to copy */
    size_t* s;               /* copy source */
    size_t* d;               /* copy destination */

    void *retval;

    /* Check for special cases.  */
    if (! oldmem)
	return ulibc_malloc(bytes);
    if (! bytes) {
	ulibc_free (oldmem);
	return NULL;
    }

    av = get_malloc_state();
    checked_request2size(bytes, nb);

    oldp    = mem2chunk(oldmem);
    oldsize = chunksize(oldp);

    check_inuse_chunk(oldp);

    if (!chunk_is_mmapped(oldp)) {

	if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
	    /* already big enough; split below */
	    newp = oldp;
	    newsize = oldsize;
	}

	else {
	    next = chunk_at_offset(oldp, oldsize);

	    /* Try to expand forward into top */
	    if (next == av->top &&
		    (unsigned long)(newsize = oldsize + chunksize(next)) >=
		    (unsigned long)(nb + MINSIZE)) {
		set_head_size(oldp, nb);
		av->top = chunk_at_offset(oldp, nb);
		set_head(av->top, (newsize - nb) | PREV_INUSE);
		retval = chunk2mem(oldp);
		goto DONE;
	    }

	    /* Try to expand forward into next chunk;  split off remainder below */
	    else if (next != av->top &&
		    !inuse(next) &&
		    (unsigned long)(newsize = oldsize + chunksize(next)) >=
		    (unsigned long)(nb)) {
		newp = oldp;
		unlink(next, bck, fwd);
	    }

	    /* allocate, copy, free */
	    else {
		newmem = malloc(nb - MALLOC_ALIGN_MASK);
		if (newmem == 0) {
		    retval = 0; /* propagate failure */
		    goto DONE;
		}

		newp = mem2chunk(newmem);
		newsize = chunksize(newp);

		/*
		   Avoid copy if newp is next chunk after oldp.
		   */
		if (newp == next) {
		    newsize += oldsize;
		    newp = oldp;
		}
		else {
		    /*
		       Unroll copy of <= 36 bytes (72 if 8byte sizes)
		       We know that contents have an odd number of
		       size_t-sized words; minimally 3.
		       */

		    copysize = oldsize - (sizeof(size_t));
		    s = (size_t*)(oldmem);
		    d = (size_t*)(newmem);
		    ncopies = copysize / sizeof(size_t);
		    assert(ncopies >= 3);

		    if (ncopies > 9)
			memcpy(d, s, copysize);

		    else {
			*(d+0) = *(s+0);
			*(d+1) = *(s+1);
			*(d+2) = *(s+2);
			if (ncopies > 4) {
			    *(d+3) = *(s+3);
			    *(d+4) = *(s+4);
			    if (ncopies > 6) {
				*(d+5) = *(s+5);
				*(d+6) = *(s+6);
				if (ncopies > 8) {
				    *(d+7) = *(s+7);
				    *(d+8) = *(s+8);
				}
			    }
			}
		    }

		    ulibc_free(oldmem);
		    check_inuse_chunk(newp);
		    retval = chunk2mem(newp);
		    goto DONE;
		}
	    }
	}

	/* If possible, free extra space in old or extended chunk */

	assert((unsigned long)(newsize) >= (unsigned long)(nb));

	remainder_size = newsize - nb;

	if (remainder_size < MINSIZE) { /* not enough extra to split off */
	    set_head_size(newp, newsize);
	    set_inuse_bit_at_offset(newp, newsize);
	}
	else { /* split remainder */
	    remainder = chunk_at_offset(newp, nb);
	    set_head_size(newp, nb);
	    set_head(remainder, remainder_size | PREV_INUSE);
	    /* Mark remainder as inuse so free() won't complain */
	    set_inuse_bit_at_offset(remainder, remainder_size);
	    ulibc_free(chunk2mem(remainder));
	}

	check_inuse_chunk(newp);
	retval = chunk2mem(newp);
	goto DONE;
    }

    /*
       Handle mmap cases
       */

    else {
	size_t offset = oldp->prev_size;
	size_t pagemask = av->pagesize - 1;
	char *cp;
	unsigned long  sum;

	/* Note the extra (sizeof(size_t)) overhead */
	newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;

	/* don't need to remap if still within same page */
	if (oldsize == newsize - offset) {
	    retval = oldmem;
	    goto DONE;
	}

	cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);

	if (cp != (char*)MORECORE_FAILURE) {

	    newp = (mchunkptr)(cp + offset);
	    set_head(newp, (newsize - offset)|IS_MMAPPED);

	    assert(aligned_OK(chunk2mem(newp)));
	    assert((newp->prev_size == offset));

	    /* update statistics */
	    sum = av->mmapped_mem += newsize - oldsize;
	    if (sum > (unsigned long)(av->max_mmapped_mem))
		av->max_mmapped_mem = sum;
	    sum += av->sbrked_mem;
	    if (sum > (unsigned long)(av->max_total_mem))
		av->max_total_mem = sum;

	    retval = chunk2mem(newp);
	    goto DONE;
	}

	/* Note the extra (sizeof(size_t)) overhead. */
	if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
	    newmem = oldmem; /* do nothing */
	else {
	    /* Must alloc, copy, free. */
	    newmem = malloc(nb - MALLOC_ALIGN_MASK);
	    if (newmem != 0) {
		memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
		ulibc_free(oldmem);
	    }
	}
	retval = newmem;
    }

 DONE:
    return retval;
}
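
The "allocate, copy, free" path relies on the payload being an odd number of size_t words, at least 3, and falls back to memcpy only above 9 words. A standalone sketch of the same unrolling; small_copy is a hypothetical helper, and the thresholds simply mirror the comment in the source:

#include <stdio.h>
#include <string.h>

/* Copy n size_t words from s to d, unrolled for small odd n (3..9). */
static void small_copy(size_t *d, const size_t *s, unsigned n)
{
    if (n > 9) { memcpy(d, s, n * sizeof(size_t)); return; }
    d[0] = s[0]; d[1] = s[1]; d[2] = s[2];
    if (n > 4) {
        d[3] = s[3]; d[4] = s[4];
        if (n > 6) {
            d[5] = s[5]; d[6] = s[6];
            if (n > 8) { d[7] = s[7]; d[8] = s[8]; }
        }
    }
}

int main(void)
{
    size_t src[7] = {1, 2, 3, 4, 5, 6, 7}, dst[7] = {0};
    small_copy(dst, src, 7);
    printf("%zu %zu\n", dst[0], dst[6]);  /* 1 7 */
    return 0;
}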
Example #8
File: free.c Project: wbx-github/uclibc-ng
/* ------------------------------ free ------------------------------ */
void free(void* mem)
{
    mstate av;

    mchunkptr       p;           /* chunk corresponding to mem */
    size_t size;        /* its size */
    mfastbinptr*    fb;          /* associated fastbin */
    mchunkptr       nextchunk;   /* next contiguous chunk */
    size_t nextsize;    /* its size */
    int             nextinuse;   /* true if nextchunk is used */
    size_t prevsize;    /* size of previous contiguous chunk */
    mchunkptr       bck;         /* misc temp for linking */
    mchunkptr       fwd;         /* misc temp for linking */

    /* free(0) has no effect */
    if (mem == NULL)
	return;

    __MALLOC_LOCK;
    av = get_malloc_state();
    p = mem2chunk(mem);
    size = chunksize(p);

    check_inuse_chunk(p);

    /*
       If eligible, place chunk on a fastbin so it can be found
       and used quickly in malloc.
       */

    if ((unsigned long)(size) <= (unsigned long)(av->max_fast)

#if TRIM_FASTBINS
	    /* If TRIM_FASTBINS set, don't place chunks
	       bordering top into fastbins */
	    && (chunk_at_offset(p, size) != av->top)
#endif
       ) {

	set_fastchunks(av);
	fb = &(av->fastbins[fastbin_index(size)]);
	p->fd = *fb;
	*fb = p;
    }

    /*
       Consolidate other non-mmapped chunks as they arrive.
       */

    else if (!chunk_is_mmapped(p)) {
	set_anychunks(av);

	nextchunk = chunk_at_offset(p, size);
	nextsize = chunksize(nextchunk);

	/* consolidate backward */
	if (!prev_inuse(p)) {
	    prevsize = p->prev_size;
	    size += prevsize;
	    p = chunk_at_offset(p, -((long) prevsize));
	    unlink(p, bck, fwd);
	}

	if (nextchunk != av->top) {
	    /* get and clear inuse bit */
	    nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
	    set_head(nextchunk, nextsize);

	    /* consolidate forward */
	    if (!nextinuse) {
		unlink(nextchunk, bck, fwd);
		size += nextsize;
	    }

	    /*
	       Place the chunk in unsorted chunk list. Chunks are
	       not placed into regular bins until after they have
	       been given one chance to be used in malloc.
	       */

	    bck = unsorted_chunks(av);
	    fwd = bck->fd;
	    p->bk = bck;
	    p->fd = fwd;
	    bck->fd = p;
	    fwd->bk = p;

	    set_head(p, size | PREV_INUSE);
	    set_foot(p, size);

	    check_free_chunk(p);
	}

	/*
	   If the chunk borders the current high end of memory,
	   consolidate into top
	   */

	else {
	    size += nextsize;
	    set_head(p, size | PREV_INUSE);
	    av->top = p;
	    check_chunk(p);
	}

	/*
	   If freeing a large space, consolidate possibly-surrounding
	   chunks. Then, if the total unused topmost memory exceeds trim
	   threshold, ask malloc_trim to reduce top.

	   Unless max_fast is 0, we don't know if there are fastbins
	   bordering top, so we cannot tell for sure whether threshold
	   has been reached unless fastbins are consolidated.  But we
	   don't want to consolidate on each free.  As a compromise,
	   consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
	   is reached.
	   */

	if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
	    if (have_fastchunks(av))
		__malloc_consolidate(av);

	    if ((unsigned long)(chunksize(av->top)) >=
		    (unsigned long)(av->trim_threshold))
		__malloc_trim(av->top_pad, av);
	}

    }
    /*
       If the chunk was allocated via mmap, release via munmap()
       Note that if HAVE_MMAP is false but chunk_is_mmapped is
       true, then user must have overwritten memory. There's nothing
       we can do to catch this error unless DEBUG is set, in which case
       check_inuse_chunk (above) will have triggered error.
       */

    else {
	size_t offset = p->prev_size;
	av->n_mmaps--;
	av->mmapped_mem -= (size + offset);
	munmap((char*)p - offset, size + offset);
    }
    __MALLOC_UNLOCK;
}
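
Backward consolidation in free() works because a free chunk records its size in the prev_size field of the chunk that follows it (the boundary tag written by set_foot), so the allocator can step back in O(1) with no list search. A minimal sketch of that layout using a simplified tag struct, not the real malloc_chunk:

#include <stdio.h>

/* Simplified boundary tag: prev_size is valid only while the previous
   chunk is free, as in the dlmalloc-style layout used above. */
struct tag { unsigned long prev_size, size; };

int main(void)
{
    unsigned long heap[32];                            /* toy arena */
    struct tag *a = (struct tag *)heap;                /* chunk A (free) */
    struct tag *b = (struct tag *)((char *)heap + 96); /* chunk B after A */

    a->size = 96;
    b->prev_size = a->size;                            /* A's foot (set_foot) */

    /* Backward consolidation: step back by prev_size, no list walk. */
    struct tag *prev = (struct tag *)((char *)b - b->prev_size);
    printf("recovered chunk A: %s\n", prev == a ? "yes" : "no");
    return 0;
}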
Example #9
File: free.c Project: wbx-github/uclibc-ng
/* ------------------------- __malloc_consolidate -------------------------

  __malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/
void attribute_hidden __malloc_consolidate(mstate av)
{
    mfastbinptr*    fb;                 /* current fastbin being consolidated */
    mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
    mchunkptr       p;                  /* current chunk being consolidated */
    mchunkptr       nextp;              /* next chunk to consolidate */
    mchunkptr       unsorted_bin;       /* bin header */
    mchunkptr       first_unsorted;     /* chunk to link to */

    /* These have same use as in free() */
    mchunkptr       nextchunk;
    size_t size;
    size_t nextsize;
    size_t prevsize;
    int             nextinuse;
    mchunkptr       bck;
    mchunkptr       fwd;

    /*
       If max_fast is 0, we know that av hasn't
       yet been initialized, in which case do so below
       */

    if (av->max_fast != 0) {
	clear_fastchunks(av);

	unsorted_bin = unsorted_chunks(av);

	/*
	   Remove each chunk from fast bin and consolidate it, placing it
	   then in unsorted bin. Among other reasons for doing this,
	   placing in unsorted bin avoids needing to calculate actual bins
	   until malloc is sure that chunks aren't immediately going to be
	   reused anyway.
	   */

	maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
	fb = &(av->fastbins[0]);
	do {
	    if ( (p = *fb) != 0) {
		*fb = 0;

		do {
		    check_inuse_chunk(p);
		    nextp = p->fd;

		    /* Slightly streamlined version of consolidation code in free() */
		    size = p->size & ~PREV_INUSE;
		    nextchunk = chunk_at_offset(p, size);
		    nextsize = chunksize(nextchunk);

		    if (!prev_inuse(p)) {
			prevsize = p->prev_size;
			size += prevsize;
			p = chunk_at_offset(p, -((long) prevsize));
			unlink(p, bck, fwd);
		    }

		    if (nextchunk != av->top) {
			nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
			set_head(nextchunk, nextsize);

			if (!nextinuse) {
			    size += nextsize;
			    unlink(nextchunk, bck, fwd);
			}

			first_unsorted = unsorted_bin->fd;
			unsorted_bin->fd = p;
			first_unsorted->bk = p;

			set_head(p, size | PREV_INUSE);
			p->bk = unsorted_bin;
			p->fd = first_unsorted;
			set_foot(p, size);
		    }

		    else {
			size += nextsize;
			set_head(p, size | PREV_INUSE);
			av->top = p;
		    }

		} while ( (p = nextp) != 0);

	    }
	} while (fb++ != maxfb);
    }
    else {
	malloc_init_state(av);
	check_malloc_state();
    }
}
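
As the comment block explains, consolidation drains every fastbin and links the chunks into the unsorted bin rather than calling free() on them. The double loop reduces to the sketch below, which moves nodes from several singly linked stacks into one circular doubly linked list; the node type and bin count are hypothetical:

#include <stdio.h>

struct node { struct node *fd, *bk; int id; };

#define NFAST 4

int main(void)
{
    struct node c0 = {0, 0, 0}, c1 = {0, 0, 1}, c2 = {0, 0, 2};
    struct node *fastbins[NFAST] = {0};
    struct node unsorted = {&unsorted, &unsorted, -1};

    /* two chunks in bin 0, one in bin 3 (LIFO via fd) */
    c0.fd = &c1;
    fastbins[0] = &c0;
    fastbins[3] = &c2;

    /* drain every bin, pushing each chunk onto the unsorted list */
    for (int i = 0; i < NFAST; i++) {
        struct node *p = fastbins[i];
        fastbins[i] = NULL;
        while (p) {
            struct node *nextp = p->fd;     /* save before relinking */
            p->fd = unsorted.fd;
            p->bk = &unsorted;
            unsorted.fd->bk = p;
            unsorted.fd = p;
            p = nextp;
        }
    }
    for (struct node *p = unsorted.fd; p != &unsorted; p = p->fd)
        printf("unsorted: %d\n", p->id);
    return 0;
}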
Example #10
/* ------------------------- __malloc_consolidate -------------------------

  __malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/
void attribute_hidden __malloc_consolidate(mstate av)
{
    mfastbinptr*    fb;                 /* current fastbin being consolidated */
    mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
    mchunkptr       p;                  /* current chunk being consolidated */
    mchunkptr       nextp;              /* next chunk to consolidate */
    mchunkptr       unsorted_bin;       /* bin header */
    mchunkptr       first_unsorted;     /* chunk to link to */
    ustate	    unit;		/* ustate that owns the chunk being consolidated */

    /* These have same use as in free() */
    mchunkptr       nextchunk;
    size_t size;
    size_t nextsize;
    size_t prevsize;
    int             nextinuse;
    mchunkptr       bck;
    mchunkptr       fwd;

    /*
       If max_fast is 0, we know that av hasn't
       yet been initialized, in which case do so below
       */

    if (av->max_fast != 0) {
	clear_fastchunks(av);

	unsorted_bin = unsorted_chunks(av);

	/*
	   Remove each chunk from fast bin and consolidate it, placing it
	   then in unsorted bin. Among other reasons for doing this,
	   placing in unsorted bin avoids needing to calculate actual bins
	   until malloc is sure that chunks aren't immediately going to be
	   reused anyway.
	   */

	maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
	fb = &(av->fastbins[0]);
	do {
	    if ( (p = *fb) != 0) {
		*fb = 0;

		do {
		    check_inuse_chunk(p);
		    nextp = p->fd;

		    /* Slightly streamlined version of consolidation code in free() */
		    size = p->size & ~PREV_INUSE;
		    nextchunk = chunk_at_offset(p, size);
		    nextsize = chunksize(nextchunk);

		    if (!prev_inuse(p)) {
			prevsize = p->prev_size;
			size += prevsize;
			p = chunk_at_offset(p, -((long) prevsize));
			unlink(p, bck, fwd);
		    }
		    
		    unit = lookup_ustate_by_mem((void*)p);
		    if (nextchunk != unit->unit_top) {
			nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
			set_head(nextchunk, nextsize);

			if (!nextinuse) {
			    size += nextsize;
			    unlink(nextchunk, bck, fwd);
			}

			first_unsorted = unsorted_bin->fd;
			unsorted_bin->fd = p;
			first_unsorted->bk = p;

			set_head(p, size | PREV_INUSE);
			p->bk = unsorted_bin;
			p->fd = first_unsorted;
			set_foot(p, size);
		    }

		    else {
			size += nextsize;
			set_head(p, size | PREV_INUSE);
			unit->unit_top = p;
		    }

		} while ( (p = nextp) != 0);

	    }
	} while (fb++ != maxfb);
    }
    else {
    	if (get_abstate()->mstate_list.num == 0) {
    		//initialize abheap state
    		init_linked_list(&(get_abstate()->mstate_list));
		init_linked_list(&(get_abstate()->ustate_list));
		init_linked_list(&(get_abstate()->mmapped_ustate_list));
		get_abstate()->ab_top = (mchunkptr)(CHANNEL_ADDR);
		//allocate channel heap space
		mmap((void *) CHANNEL_ADDR, CHANNEL_SIZE, PROT_READ|PROT_WRITE, 
			MAP_ANONYMOUS|MAP_FIXED|MAP_SHARED, -1, 0);	
		touch_mem((void *)CHANNEL_ADDR, CHANNEL_SIZE);
    	}
	malloc_init_state(av);
	check_malloc_state();
    }
}
Example #11
File: arena.c Project: Ace-Tang/glibc
static int
internal_function
heap_trim (heap_info *heap, size_t pad)
{
  mstate ar_ptr = heap->ar_ptr;
  unsigned long pagesz = GLRO (dl_pagesize);
  mchunkptr top_chunk = top (ar_ptr), p, bck, fwd;
  heap_info *prev_heap;
  long new_size, top_size, top_area, extra, prev_size, misalign;

  /* Can this heap go away completely? */
  while (top_chunk == chunk_at_offset (heap, sizeof (*heap)))
    {
      prev_heap = heap->prev;
      prev_size = prev_heap->size - (MINSIZE - 2 * SIZE_SZ);
      p = chunk_at_offset (prev_heap, prev_size);
      /* fencepost must be properly aligned.  */
      misalign = ((long) p) & MALLOC_ALIGN_MASK;
      p = chunk_at_offset (prev_heap, prev_size - misalign);
      assert (p->size == (0 | PREV_INUSE)); /* must be fencepost */
      p = prev_chunk (p);
      new_size = chunksize (p) + (MINSIZE - 2 * SIZE_SZ) + misalign;
      assert (new_size > 0 && new_size < (long) (2 * MINSIZE));
      if (!prev_inuse (p))
        new_size += p->prev_size;
      assert (new_size > 0 && new_size < HEAP_MAX_SIZE);
      if (new_size + (HEAP_MAX_SIZE - prev_heap->size) < pad + MINSIZE + pagesz)
        break;
      ar_ptr->system_mem -= heap->size;
      arena_mem -= heap->size;
      LIBC_PROBE (memory_heap_free, 2, heap, heap->size);
      delete_heap (heap);
      heap = prev_heap;
      if (!prev_inuse (p)) /* consolidate backward */
        {
          p = prev_chunk (p);
          unlink (ar_ptr, p, bck, fwd);
        }
      assert (((unsigned long) ((char *) p + new_size) & (pagesz - 1)) == 0);
      assert (((char *) p + new_size) == ((char *) heap + heap->size));
      top (ar_ptr) = top_chunk = p;
      set_head (top_chunk, new_size | PREV_INUSE);
      /*check_chunk(ar_ptr, top_chunk);*/
    }

  /* Uses similar logic for per-thread arenas as the main arena with systrim
     by preserving the top pad and at least a page.  */
  top_size = chunksize (top_chunk);
  top_area = top_size - MINSIZE - 1;
  if (top_area <= pad)
    return 0;

  extra = ALIGN_DOWN(top_area - pad, pagesz);
  if ((unsigned long) extra < mp_.trim_threshold)
    return 0;

  /* Try to shrink. */
  if (shrink_heap (heap, extra) != 0)
    return 0;

  ar_ptr->system_mem -= extra;
  arena_mem -= extra;

  /* Success. Adjust top accordingly. */
  set_head (top_chunk, (top_size - extra) | PREV_INUSE);
  /*check_chunk(ar_ptr, top_chunk);*/
  return 1;
}
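
Relative to example #1, this glibc version splits the computation into steps: top_area is the span that could be released, ALIGN_DOWN rounds it to whole pages, and mp_.trim_threshold suppresses tiny trims. A compact sketch of that gate; the macro definition and the threshold value are illustrative assumptions:

#include <stdio.h>

#define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))   /* a must be a power of two */

int main(void)
{
    unsigned long pagesz = 4096, pad = 128, trim_threshold = 128 * 1024;
    unsigned long top_size = 40 * 4096, MINSIZE = 32;

    long top_area = (long)(top_size - MINSIZE - 1);
    if (top_area <= (long)pad) { puts("nothing to trim"); return 0; }

    long extra = (long)ALIGN_DOWN((unsigned long)(top_area - pad), pagesz);
    if (extra < (long)trim_threshold)
        printf("extra %ld below threshold, keep the heap\n", extra);
    else
        printf("trim %ld bytes\n", extra);
    return 0;
}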
Example #12
static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
  ...
  else if (!chunk_is_mmapped(p)) {
    ...
    nextchunk = chunk_at_offset(p, size);
    ...
    nextsize = chunksize(nextchunk);
    ...
    /* consolidate backward */
    if (!prev_inuse(p)) {
      prevsize = p->prev_size;
      size += prevsize;
      p = chunk_at_offset(p, -((long) prevsize));
      unlink(p, bck, fwd);
    }

    if (nextchunk != av->top) {
      /* get and clear inuse bit */
      nextinuse = inuse_bit_at_offset(nextchunk, nextsize);

      /* consolidate forward */
      if (!nextinuse) {
	unlink(nextchunk, bck, fwd);
	size += nextsize;
      } else
	clear_inuse_bit_at_offset(nextchunk, 0);

      /*
	Place the chunk in unsorted chunk list. Chunks are
	not placed into regular bins until after they have
	been given one chance to be used in malloc.
      */

      bck = unsorted_chunks(av);
      fwd = bck->fd;
      if (__glibc_unlikely (fwd->bk != bck))
	{
	  errstr = "free(): corrupted unsorted chunks";
	  goto errout;
	}
      p->fd = fwd;
      p->bk = bck;
      if (!in_smallbin_range(size))
	{
	  p->fd_nextsize = NULL;
	  p->bk_nextsize = NULL;
	}
      bck->fd = p;
      fwd->bk = p;

      set_head(p, size | PREV_INUSE);
      set_foot(p, size);

      check_free_chunk(av, p);
    }

    /*
      If the chunk borders the current high end of memory,
      consolidate into top
    */

    else {
      size += nextsize;
      set_head(p, size | PREV_INUSE);
      av->top = p;
      check_chunk(av, p);
    }
    ...
}
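
The __glibc_unlikely (fwd->bk != bck) test is the integrity check this hardened _int_free adds over the plain uClibc version above: before linking p into the unsorted bin it verifies that the head's forward neighbour still points back at the head. A small sketch of the insertion plus check, with a hypothetical node type:

#include <stdio.h>

struct node { struct node *fd, *bk; };

/* Insert p right after the list head, refusing if the head's
   forward neighbour no longer points back at the head. */
static int unsorted_insert(struct node *head, struct node *p)
{
    struct node *bck = head;
    struct node *fwd = bck->fd;
    if (fwd->bk != bck)
        return -1;              /* "free(): corrupted unsorted chunks" */
    p->fd = fwd;
    p->bk = bck;
    bck->fd = p;
    fwd->bk = p;
    return 0;
}

int main(void)
{
    struct node head = {&head, &head}, a, evil;
    printf("insert a: %d\n", unsorted_insert(&head, &a));     /* 0 */
    head.fd->bk = &evil;                    /* simulate corruption */
    printf("insert again: %d\n", unsorted_insert(&head, &a)); /* -1 */
    return 0;
}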