Example #1
static void 
pos_malloc_consolidate(char *name, mstate av)
{
	mfastbinptr*    fb;
	mfastbinptr*    maxfb;
	mchunkptr       p;
	mchunkptr       nextp;

#if CONSISTENCY == 1
	clear_fastchunks_log(name, av);
#else
	clear_fastchunks(av);
#endif

	maxfb = &fastbin (av, NFASTBINS - 1);

	fb = &fastbin (av, 0);
	do {
		p = *fb;
		if (p != 0) {
#if CONSISTENCY == 1
			POS_WRITE_VAUE(name, (unsigned long *)fb, (unsigned long)0);
#else
			*fb = 0;
#endif
			do {
				nextp = p->fd;
#if CONSISTENCY == 1
				clear_inuse_log(name, p);
#else
				clear_inuse(p);
#endif
				pos_int_free(name, av, p, 0);
			} while ( (p = nextp) != 0);
		}
	} while (fb++ != maxfb);

}
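
The loop above drains every fastbin: each bin is a singly linked list threaded through the fd pointer, the bin head is cleared first, and each chunk is handed to pos_int_free with the next pointer saved beforehand. A minimal, self-contained sketch of that traversal pattern (the node/drain_list names are illustrative, not part of the POS allocator):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a fastbin entry: a singly linked free chunk. */
struct node {
	struct node *fd;	/* next chunk in the same bin */
	size_t size;
};

/* Detach the whole list first (as *fb = 0 does above), then release each
   element; the next pointer must be read before the element is recycled. */
static void drain_list(struct node **head)
{
	struct node *p = *head;
	*head = NULL;			/* the bin is emptied up front */
	while (p != NULL) {
		struct node *nextp = p->fd;
		printf("releasing chunk of size %zu\n", p->size);
		free(p);
		p = nextp;
	}
}

int main(void)
{
	struct node *head = NULL;
	for (size_t i = 1; i <= 3; i++) {
		struct node *n = malloc(sizeof *n);
		n->size = i * 16;
		n->fd = head;
		head = n;
	}
	drain_list(&head);
	return 0;
}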
Example #2
//static void 
void
pos_malloc_init_state(char *name, mstate av)
{
	mchunkptr	first_chunk;
	unsigned long	first_size;
	mchunkptr	last_chunk;
	unsigned long	last_size;

	mchunkptr       bck;
	mchunkptr       fwd;

	int	i;
	mbinptr	bin;


	// initialize the arena mutex
	pthread_mutex_init(&av->mutex, NULL);
	// initialize malloc_state
#if CONSISTENCY == 1
	set_init_key_log(name, av);
#else
	set_init_key(av);
#endif

// The code below does not need logging.

	for (i=1; i < NBINS; i++) {
		bin = bin_at(av,i);
		bin->fd = bin->bk = bin;
	}

	//set_max_fast(DEFAULT_MXFAST);
	clear_fastchunks(av);
	for (i=0; i< NFASTBINS ; i++) {
		av->fastbinsY[i] = 0;
	}

	// first chunk
	first_chunk = chunk_at_offset(av, sizeof(struct malloc_state));
	first_size = (PAGESIZE - sizeof(struct malloc_state) - 2*SIZE_SZ)/2;	// 956
//#if CONSISTENCY == 1
	//first_size = (128*1024-1)*4096 + 960; //536867776
	//first_size = request2size(first_size); // 536867792
//#else
	first_size = 960;
//#endif
//insert_to_unsorted(av, first_chunk, bck, fwd, first_size);

	set_head(first_chunk, first_size | FIRST_CHUNK | PREV_INUSE);
	set_foot(first_chunk, first_size);
	clear_inuse_bit_at_offset(first_chunk, first_size);

	// last_chunk
	last_chunk = chunk_at_offset(first_chunk, first_size);
	//last_size = first_size;
//#if CONSISTENCY == 1
	////last_size = (128*1024)*4096 + 944; // 536867760
	//last_size = (256*1024)*4096 - first_size - 2*SIZE_SZ;
	//last_size = request2size(last_size); // 536874032
//#else
//	last_size = 988;
	last_size = 1800;
//#endif
	insert_to_unsorted(av, last_chunk, bck, fwd, last_size);

	set_head(last_chunk, last_size | LAST_CHUNK | PREV_INUSE);
	set_foot(last_chunk, last_size);
	clear_inuse_bit_at_offset(last_chunk, last_size);

	av->last_remainder = 0;
	for (i=0; i<BINMAPSIZE; i++) {
		av->binmap[i] = 0;
	}
	av->system_mem = PAGESIZE;

	av->prime_obj = NULL;
}
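
pos_malloc_init_state carves the initial page into a first and a last chunk whose size words also carry flag bits (PREV_INUSE, FIRST_CHUNK, LAST_CHUNK) set by set_head and mirrored by set_foot. Because chunk sizes are multiples of the alignment, the low bits of the size field are free to hold such flags. A minimal sketch of this boundary-tag encoding, using made-up flag and mask values rather than the allocator's real macros:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative flag values; the real allocator defines its own bits. */
#define PREV_INUSE_BIT	0x1UL
#define SIZE_MASK	(~0x7UL)	/* low 3 bits reserved for flags */

int main(void)
{
	size_t chunk_size = 960;			/* always a multiple of 8 */
	size_t head = chunk_size | PREV_INUSE_BIT;	/* what set_head() stores */

	/* Recovering the pieces, as chunksize()/prev_inuse() would: */
	assert((head & SIZE_MASK) == 960);
	assert((head & PREV_INUSE_BIT) != 0);
	printf("size=%zu prev_inuse=%d\n",
	       (size_t)(head & SIZE_MASK), (head & PREV_INUSE_BIT) != 0);
	return 0;
}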
Example #3
/* ------------------------- __malloc_consolidate -------------------------

  __malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/
void attribute_hidden __malloc_consolidate(mstate av)
{
    mfastbinptr*    fb;                 /* current fastbin being consolidated */
    mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
    mchunkptr       p;                  /* current chunk being consolidated */
    mchunkptr       nextp;              /* next chunk to consolidate */
    mchunkptr       unsorted_bin;       /* bin header */
    mchunkptr       first_unsorted;     /* chunk to link to */

    /* These have same use as in free() */
    mchunkptr       nextchunk;
    size_t size;
    size_t nextsize;
    size_t prevsize;
    int             nextinuse;
    mchunkptr       bck;
    mchunkptr       fwd;

    /*
       If max_fast is 0, we know that av hasn't
       yet been initialized, in which case do so below
       */

    if (av->max_fast != 0) {
	clear_fastchunks(av);

	unsorted_bin = unsorted_chunks(av);

	/*
	   Remove each chunk from fast bin and consolidate it, placing it
	   then in unsorted bin. Among other reasons for doing this,
	   placing in unsorted bin avoids needing to calculate actual bins
	   until malloc is sure that chunks aren't immediately going to be
	   reused anyway.
	   */

	maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
	fb = &(av->fastbins[0]);
	do {
	    if ( (p = *fb) != 0) {
		*fb = 0;

		do {
		    check_inuse_chunk(p);
		    nextp = p->fd;

		    /* Slightly streamlined version of consolidation code in free() */
		    size = p->size & ~PREV_INUSE;
		    nextchunk = chunk_at_offset(p, size);
		    nextsize = chunksize(nextchunk);

		    if (!prev_inuse(p)) {
			prevsize = p->prev_size;
			size += prevsize;
			p = chunk_at_offset(p, -((long) prevsize));
			unlink(p, bck, fwd);
		    }

		    if (nextchunk != av->top) {
			nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
			set_head(nextchunk, nextsize);

			if (!nextinuse) {
			    size += nextsize;
			    unlink(nextchunk, bck, fwd);
			}

			first_unsorted = unsorted_bin->fd;
			unsorted_bin->fd = p;
			first_unsorted->bk = p;

			set_head(p, size | PREV_INUSE);
			p->bk = unsorted_bin;
			p->fd = first_unsorted;
			set_foot(p, size);
		    }

		    else {
			size += nextsize;
			set_head(p, size | PREV_INUSE);
			av->top = p;
		    }

		} while ( (p = nextp) != 0);

	    }
	} while (fb++ != maxfb);
    }
    else {
	malloc_init_state(av);
	check_malloc_state();
    }
}
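
After coalescing with its neighbours, the chunk is pushed onto the front of the unsorted bin, a circular doubly linked list whose header acts as a sentinel. A self-contained sketch of that head insertion (generic node type and push_front name, not the allocator's mchunkptr):

#include <stdio.h>

struct node {
	struct node *fd;	/* forward link */
	struct node *bk;	/* backward link */
	int id;
};

/* Mirrors the unsorted-bin insertion above:
   first_unsorted = bin->fd; bin->fd = p; first_unsorted->bk = p;
   p->bk = bin; p->fd = first_unsorted;                            */
static void push_front(struct node *bin, struct node *p)
{
	struct node *first = bin->fd;
	bin->fd = p;
	first->bk = p;
	p->bk = bin;
	p->fd = first;
}

int main(void)
{
	struct node bin = { &bin, &bin, 0 };	/* empty list: sentinel points at itself */
	struct node a = { 0 }, b = { 0 };
	a.id = 1;
	b.id = 2;
	push_front(&bin, &a);
	push_front(&bin, &b);
	for (struct node *n = bin.fd; n != &bin; n = n->fd)
		printf("node %d\n", n->id);	/* prints 2 then 1 (LIFO order) */
	return 0;
}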
Example #4
File: hooks.c  Project: AdvancedC/glibc
int
__malloc_set_state(void* msptr)
{
  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
  size_t i;
  mbinptr b;

  disallow_malloc_check = 1;
  ptmalloc_init();
  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
  /* Must fail if the major version is too high. */
  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
  (void)mutex_lock(&main_arena.mutex);
  /* There are no fastchunks.  */
  clear_fastchunks(&main_arena);
  if (ms->version >= 4)
    set_max_fast(ms->max_fast);
  else
    set_max_fast(64);	/* 64 used to be the value we always used.  */
  for (i=0; i<NFASTBINS; ++i)
    fastbin (&main_arena, i) = 0;
  for (i=0; i<BINMAPSIZE; ++i)
    main_arena.binmap[i] = 0;
  top(&main_arena) = ms->av[2];
  main_arena.last_remainder = 0;
  for(i=1; i<NBINS; i++) {
    b = bin_at(&main_arena, i);
    if(ms->av[2*i+2] == 0) {
      assert(ms->av[2*i+3] == 0);
      first(b) = last(b) = b;
    } else {
      if(ms->version >= 3 &&
	 (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
			   largebin_index(chunksize(ms->av[2*i+3]))==i))) {
	first(b) = ms->av[2*i+2];
	last(b) = ms->av[2*i+3];
	/* Make sure the links to the bins within the heap are correct.  */
	first(b)->bk = b;
	last(b)->fd = b;
	/* Set bit in binblocks.  */
	mark_bin(&main_arena, i);
      } else {
	/* Oops, index computation from chunksize must have changed.
	   Link the whole list into unsorted_chunks.  */
	first(b) = last(b) = b;
	b = unsorted_chunks(&main_arena);
	ms->av[2*i+2]->bk = b;
	ms->av[2*i+3]->fd = b->fd;
	b->fd->bk = ms->av[2*i+3];
	b->fd = ms->av[2*i+2];
      }
    }
  }
  if (ms->version < 3) {
    /* Clear fd_nextsize and bk_nextsize fields.  */
    b = unsorted_chunks(&main_arena)->fd;
    while (b != unsorted_chunks(&main_arena)) {
      if (!in_smallbin_range(chunksize(b))) {
	b->fd_nextsize = NULL;
	b->bk_nextsize = NULL;
      }
      b = b->fd;
    }
  }
  mp_.sbrk_base = ms->sbrk_base;
  main_arena.system_mem = ms->sbrked_mem_bytes;
  mp_.trim_threshold = ms->trim_threshold;
  mp_.top_pad = ms->top_pad;
  mp_.n_mmaps_max = ms->n_mmaps_max;
  mp_.mmap_threshold = ms->mmap_threshold;
  check_action = ms->check_action;
  main_arena.max_system_mem = ms->max_sbrked_mem;
  mp_.n_mmaps = ms->n_mmaps;
  mp_.max_n_mmaps = ms->max_n_mmaps;
  mp_.mmapped_mem = ms->mmapped_mem;
  mp_.max_mmapped_mem = ms->max_mmapped_mem;
  /* add version-dependent code here */
  if (ms->version >= 1) {
    /* Check whether it is safe to enable malloc checking, or whether
       it is necessary to disable it.  */
    if (ms->using_malloc_checking && !using_malloc_checking &&
	!disallow_malloc_check)
      __malloc_check_init ();
    else if (!ms->using_malloc_checking && using_malloc_checking) {
      __malloc_hook = NULL;
      __free_hook = NULL;
      __realloc_hook = NULL;
      __memalign_hook = NULL;
      using_malloc_checking = 0;
    }
  }
  if (ms->version >= 4) {
#ifdef PER_THREAD
    mp_.arena_test = ms->arena_test;
    mp_.arena_max = ms->arena_max;
    narenas = ms->narenas;
#endif
  }
  check_malloc_state(&main_arena);

  (void)mutex_unlock(&main_arena.mutex);
  return 0;
}
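
__malloc_set_state is the internal half of glibc's old malloc_get_state/malloc_set_state pair, historically used by programs such as Emacs to dump the allocator's bookkeeping and restore it after re-execution. A hedged usage sketch; these interfaces were declared in <malloc.h> in older glibc releases and have since been reduced to compatibility stubs, so treat it as illustrative only:

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Snapshot the allocator's state into a malloc'd blob. */
	void *state = malloc_get_state();
	if (state == NULL) {
		perror("malloc_get_state");
		return 1;
	}

	/* ... the image could be written to disk and reloaded here ... */

	/* Restore the saved state; returns 0 on success and a negative
	   value on a magic/version mismatch (see the checks above). */
	if (malloc_set_state(state) != 0)
		fprintf(stderr, "malloc_set_state failed\n");

	free(state);
	return 0;
}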
Example #5
/* ------------------------- __malloc_consolidate -------------------------

  __malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/
void attribute_hidden __malloc_consolidate(mstate av)
{
    mfastbinptr*    fb;                 /* current fastbin being consolidated */
    mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
    mchunkptr       p;                  /* current chunk being consolidated */
    mchunkptr       nextp;              /* next chunk to consolidate */
    mchunkptr       unsorted_bin;       /* bin header */
    mchunkptr       first_unsorted;     /* chunk to link to */
    ustate	    unit;		/* per-unit state that owns the current chunk */

    /* These have same use as in free() */
    mchunkptr       nextchunk;
    size_t size;
    size_t nextsize;
    size_t prevsize;
    int             nextinuse;
    mchunkptr       bck;
    mchunkptr       fwd;

    /*
       If max_fast is 0, we know that av hasn't
       yet been initialized, in which case do so below
       */

    if (av->max_fast != 0) {
	clear_fastchunks(av);

	unsorted_bin = unsorted_chunks(av);

	/*
	   Remove each chunk from fast bin and consolidate it, placing it
	   then in unsorted bin. Among other reasons for doing this,
	   placing in unsorted bin avoids needing to calculate actual bins
	   until malloc is sure that chunks aren't immediately going to be
	   reused anyway.
	   */

	maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
	fb = &(av->fastbins[0]);
	do {
	    if ( (p = *fb) != 0) {
		*fb = 0;

		do {
		    check_inuse_chunk(p);
		    nextp = p->fd;

		    /* Slightly streamlined version of consolidation code in free() */
		    size = p->size & ~PREV_INUSE;
		    nextchunk = chunk_at_offset(p, size);
		    nextsize = chunksize(nextchunk);

		    if (!prev_inuse(p)) {
			prevsize = p->prev_size;
			size += prevsize;
			p = chunk_at_offset(p, -((long) prevsize));
			unlink(p, bck, fwd);
		    }
		    
		    unit = lookup_ustate_by_mem((void*)p);
		    if (nextchunk != unit->unit_top) {
			nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
			set_head(nextchunk, nextsize);

			if (!nextinuse) {
			    size += nextsize;
			    unlink(nextchunk, bck, fwd);
			}

			first_unsorted = unsorted_bin->fd;
			unsorted_bin->fd = p;
			first_unsorted->bk = p;

			set_head(p, size | PREV_INUSE);
			p->bk = unsorted_bin;
			p->fd = first_unsorted;
			set_foot(p, size);
		    }

		    else {
			size += nextsize;
			set_head(p, size | PREV_INUSE);
			unit->unit_top = p;
		    }

		} while ( (p = nextp) != 0);

	    }
	} while (fb++ != maxfb);
    }
    else {
    	if (get_abstate()->mstate_list.num == 0) {
    		//initialize abheap state
    		init_linked_list(&(get_abstate()->mstate_list));
		init_linked_list(&(get_abstate()->ustate_list));
		init_linked_list(&(get_abstate()->mmapped_ustate_list));
		get_abstate()->ab_top = (mchunkptr)(CHANNEL_ADDR);
		//allocate channel heap space
		mmap((void *) CHANNEL_ADDR, CHANNEL_SIZE, PROT_READ|PROT_WRITE, 
			MAP_ANONYMOUS|MAP_FIXED|MAP_SHARED, -1, 0);	
		touch_mem((void *)CHANNEL_ADDR, CHANNEL_SIZE);
    	}
	malloc_init_state(av);
	check_malloc_state();
    }
}
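
The initialization branch of this variant maps a fixed, shared anonymous region for the channel heap and then touches it so every page is backed immediately. A minimal sketch of that pattern; CHANNEL_ADDR and CHANNEL_SIZE below are arbitrary illustrative values, and using MAP_FIXED over a hard-coded address can silently clobber existing mappings, so it is shown only to mirror the code above:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define CHANNEL_ADDR	((void *)0x600000000000UL)	/* illustrative only */
#define CHANNEL_SIZE	(4UL * 1024 * 1024)		/* 4 MiB */

/* Touch one byte per page so the kernel backs the whole mapping up front. */
static void touch_mem(void *addr, size_t len)
{
	long page = sysconf(_SC_PAGESIZE);
	for (size_t off = 0; off < len; off += (size_t)page)
		((volatile char *)addr)[off] = 0;
}

int main(void)
{
	void *p = mmap(CHANNEL_ADDR, CHANNEL_SIZE, PROT_READ | PROT_WRITE,
		       MAP_ANONYMOUS | MAP_FIXED | MAP_SHARED, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	touch_mem(p, CHANNEL_SIZE);
	printf("mapped %lu bytes at %p\n", CHANNEL_SIZE, p);
	munmap(p, CHANNEL_SIZE);
	return 0;
}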