Example #1
static void
dump_heap (heap_info *heap)
{
  char *ptr;
  mchunkptr p;

  fprintf (stderr, "Heap %p, size %10lx:\n", heap, (long) heap->size);
  ptr = (heap->ar_ptr != (mstate) (heap + 1)) ?
        (char *) (heap + 1) : (char *) (heap + 1) + sizeof (struct malloc_state);
  p = (mchunkptr) (((unsigned long) ptr + MALLOC_ALIGN_MASK) &
                   ~MALLOC_ALIGN_MASK);
  for (;; )
    {
      fprintf (stderr, "chunk %p size %10lx", p, (long) p->size);
      if (p == top (heap->ar_ptr))
        {
          fprintf (stderr, " (top)\n");
          break;
        }
      else if (p->size == (0 | PREV_INUSE))
        {
          fprintf (stderr, " (fence)\n");
          break;
        }
      fprintf (stderr, "\n");
      p = next_chunk (p);
    }
}
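Example #1 walks an entire glibc heap by repeatedly calling next_chunk() until it reaches either the arena's top chunk or the fencepost chunk whose size field is exactly PREV_INUSE. For orientation, the relevant glibc macros of that era look roughly like the paraphrased sketch below (not a verbatim copy of malloc.c/arena.c):

/* Paraphrased sketch of the glibc helpers used in Example #1. */
#define SIZE_BITS      (PREV_INUSE | IS_MMAPPED | NON_MAIN_ARENA)
#define chunksize(p)   ((p)->size & ~(SIZE_BITS))                    /* size with flag bits masked off */
#define next_chunk(p)  ((mchunkptr) (((char *) (p)) + chunksize (p))) /* next physical chunk */
#define top(ar_ptr)    ((ar_ptr)->top)                               /* arena's topmost chunk */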
Example #2
void MemAllocFree(MemAlloc *ma, void *addr)
{
  MemAllocChunk *c;
  assert(ma != NULL);
  if (addr == NULL) {
    return;
  }
  c = (MemAllocChunk *)(((int8_t *) addr) - sizeof (MemAllocChunk));
  c->id = MEMALLOC_CHUNK_ID_FREE;
  if (c->next != 0) {
    MemAllocChunk *nc = next_chunk(c);
    if (nc->id == MEMALLOC_CHUNK_ID_FREE) {
      c->size += sizeof (MemAllocChunk) + nc->size;
      c->next = nc->next;
    }
  }
  if (c->prev != 0) {
    MemAllocChunk *pc = prev_chunk(c);
    if (pc->id == MEMALLOC_CHUNK_ID_FREE) {
      pc->size += sizeof (MemAllocChunk) + c->size;
      pc->next = c->next;
    }
  }
#ifdef ENABLE_PRINT_MAP
  print_map(ma);
#endif
}
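Examples #2, #4, #7 and #8 share a small allocator whose header type is not shown on this page. The sketch below reconstructs the layout those examples appear to assume; the field widths, the ID constant values and the two helpers are guesses, not the project's actual declarations. Reading the split logic in Example #4, prev and next look like byte distances to the neighbouring chunk headers, with next == 0 marking the last chunk.

#include <stdint.h>

/* Placeholder values; the real project defines these elsewhere. */
#define MEMALLOC_CHUNK_ID_FREE 0xF4EEu
#define MEMALLOC_CHUNK_ID_USED 0x05EDu

/* Hypothetical reconstruction of the chunk header used by Examples #2, #4, #7 and #8. */
typedef struct MemAllocChunk {
  uint32_t id;    /* MEMALLOC_CHUNK_ID_FREE or MEMALLOC_CHUNK_ID_USED */
  uint32_t size;  /* payload bytes that follow this header */
  uint32_t prev;  /* byte offset back to the previous header (0 for the first chunk) */
  uint32_t next;  /* byte offset forward to the next header (0 for the last chunk) */
} MemAllocChunk;

/* Walking helpers consistent with the offsets written by MemAllocAllocate. */
static inline MemAllocChunk *next_chunk(MemAllocChunk *c)
{
  return (MemAllocChunk *)((int8_t *)c + c->next);
}

static inline MemAllocChunk *prev_chunk(MemAllocChunk *c)
{
  return (MemAllocChunk *)((int8_t *)c - c->prev);
}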
Example #3
/**
 * Verify if the next chunk really is MTrk chunk, and if so, initialize some track variables and return 0.
 * Return different value otherwise.
 */
static int
parse_mtrk_header(smf_track_t *track)
{
	struct chunk_header_struct *mtrk;

	/* Make sure compiler didn't do anything stupid. */
	assert(sizeof(struct chunk_header_struct) == 8);
	assert(track->smf != NULL);

	mtrk = next_chunk(track->smf);

	if (mtrk == NULL)
		return (-1);

	if (!chunk_signature_matches(mtrk, "MTrk")) {
		g_warning("SMF warning: Expected MTrk signature, got %c%c%c%c instead; ignoring this chunk.",
				mtrk->id[0], mtrk->id[1], mtrk->id[2], mtrk->id[3]);
		
		return (-2);
	}

	track->file_buffer = mtrk;
	track->file_buffer_length = sizeof(struct chunk_header_struct) + ntohl(mtrk->length);
	track->next_event_offset = sizeof(struct chunk_header_struct);

	return (0);
}
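Examples #3 and #10 come from libsmf; both assert that struct chunk_header_struct is exactly 8 bytes. That matches the standard MIDI file chunk header, four ASCII id bytes followed by a big-endian 32-bit length, which is why the length is passed through ntohl() before use. A minimal sketch of such a header (the exact declaration and the packing attribute are assumptions, not necessarily libsmf's own code):

#include <stdint.h>

/* Standard MIDI (SMF) chunk header: "MThd" or "MTrk" plus payload length. */
struct chunk_header_struct {
	char		id[4];		/* chunk signature, e.g. 'M','T','r','k' */
	uint32_t	length;		/* payload length in bytes, big-endian on disk */
} __attribute__((__packed__));	/* keeps sizeof() == 8, as the asserts above expect */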
Example #4
void *MemAllocAllocate(MemAlloc *ma, size_t size)
{
  size_t sz;
  assert(ma != NULL);
  sz = (size + (MEMALLOC_ALIGN_SIZE - 1)) / MEMALLOC_ALIGN_SIZE * MEMALLOC_ALIGN_SIZE;
  MemAllocChunk *c = ma->top;
  while (!0) {
    if (c->id == MEMALLOC_CHUNK_ID_FREE && c->size >= sz) {
      break;
    }
    if (c->next == 0) {
      return NULL;
    }
    c = next_chunk(c);
  }
  if (c->size > sizeof (MemAllocChunk) + sz) {
    MemAllocChunk *c2 = (MemAllocChunk *)(((int8_t *) c) + sizeof (MemAllocChunk) + sz);
    c2->id = MEMALLOC_CHUNK_ID_FREE;
    c2->size = c->size - sizeof (MemAllocChunk) - sz;
    c2->prev = sizeof (MemAllocChunk) + sz;
    c2->next = c->next;
    c->next = c2->prev;
  }
  c->id = MEMALLOC_CHUNK_ID_USED;
  c->size = sz;
#ifdef ENABLE_PRINT_MAP
  print_map(ma);
#endif
  return ((int8_t *) c) + sizeof (MemAllocChunk);
}
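The sz computation at the top of MemAllocAllocate is the usual round-up-to-a-multiple idiom. Isolated for illustration (the alignment value 8 below is a hypothetical stand-in for MEMALLOC_ALIGN_SIZE):

#include <stddef.h>

/* Same rounding as in MemAllocAllocate: smallest multiple of align that is >= size. */
static size_t round_up_to_align(size_t size, size_t align)
{
  return (size + (align - 1)) / align * align;
}
/* round_up_to_align(13, 8) == 16, round_up_to_align(16, 8) == 16, round_up_to_align(0, 8) == 0 */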
Example #5
// MDSM entry point
int main(int argc, char *argv[])
{
    // Create main QCoreApplication instance
    QCoreApplication app(argc, argv);
    SURVEY* survey = file_process_arguments(argc, argv);

    // Initialise Dedispersion code and survey struct
    initialise(survey);

    unsigned char *input_buffer;

    // Process current chunk
    unsigned int counter = 0, data_read = 0;
    while (TRUE) 
    {
        // Get current buffer pointer
        input_buffer = get_buffer_pointer();

        // Read data from file
//      data_read = readBinaryData(input_buffer, survey -> fp, survey -> nbits,
//                                 survey -> nsamp, survey -> nchans, survey -> nbeams);

        // Check if there is more processing to be done
        unsigned int x;
        next_chunk(survey -> nsamp, x, survey -> tsamp * survey -> nsamp * counter, survey -> tsamp);
        if (!start_processing(survey -> nsamp))  break;
       
        counter++;
    }

    // Tear down MDSM
    tearDown();
}
Example #6
static mchunkptr
internal_function
mem2chunk_check(void* mem, unsigned char **magic_p)
{
  mchunkptr p;
  INTERNAL_SIZE_T sz, c;
  unsigned char magic;

  if(!aligned_OK(mem)) return NULL;
  p = mem2chunk(mem);
  if (!chunk_is_mmapped(p)) {
    /* Must be a chunk in conventional heap memory. */
    int contig = contiguous(&main_arena);
    sz = chunksize(p);
    if((contig &&
	((char*)p<mp_.sbrk_base ||
	 ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
			    (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
			    next_chunk(prev_chunk(p))!=p) ))
      return NULL;
    magic = MAGICBYTE(p);
    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  } else {
    unsigned long offset, page_mask = GLRO(dl_pagesize)-1;

    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
       alignment relative to the beginning of a page.  Check this
       first. */
    offset = (unsigned long)mem & page_mask;
    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
	offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
	offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
	offset<0x2000) ||
       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
      return NULL;
    magic = MAGICBYTE(p);
    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
    }
  }
  ((unsigned char*)p)[sz] ^= 0xFF;
  if (magic_p)
    *magic_p = (unsigned char *)p + sz;
  return p;
}
Example #7
static void print_map(MemAlloc *ma)
{
  MemAllocChunk *c;
  assert(ma != NULL);
  c = ma->top;
  printf("Address          ID   Size       Prev       Next\n");
  printf("================ ==== ========== ========== ==========\n");
  while (!0) {
    printf("%16p %s %10u %10u %10u\n", c, c->id == MEMALLOC_CHUNK_ID_FREE ? "FREE" : "USED", c->size, c->prev, c->next);
    if (c->next == 0) {
      break;
    }
    c = next_chunk(c);
  }
  printf("[END]\n");
}
Example #8
size_t MemAllocGetMaxFreeSize(MemAlloc *ma)
{
  size_t max_size;
  MemAllocChunk *c;
  assert(ma != NULL);
  c = ma->top;
  max_size = 0;
  while (!0) {
    if (c->id == MEMALLOC_CHUNK_ID_FREE && c->size > max_size) {
      max_size = c->size;
    }
    if (c->next == 0) {
      break;
    }
    c = next_chunk(c);
  }
  return max_size;
}
Example #9
void pos_check_unsafe_segment(char *name, mstate av, struct seg_info *head, Void_t *first_chunk)
{
    mchunkptr chunk_ptr;

    chunk_ptr = (mchunkptr)first_chunk;
    while(1)
    {
        //inuse Chunk
        if(inuse(chunk_ptr)){
            printf("Node Address : %p" , chunk2mem(chunk_ptr));
            pos_check_unsafe_region(name, av, head, chunk_ptr);
            printf("\n");
        }
        if(chunk_is_last(chunk_ptr))
            break;

        chunk_ptr = next_chunk(chunk_ptr);
    }
}
Example #10
/**
 * Verifies if MThd header looks OK.  Returns 0 iff it does.
 */
static int
parse_mthd_header(smf_t *smf)
{
	int len;
	struct chunk_header_struct *mthd, *tmp_mthd;

	/* Make sure compiler didn't do anything stupid. */
	assert(sizeof(struct chunk_header_struct) == 8);

	/*
	 * We could just do "mthd = smf->file_buffer;" here, but this way we wouldn't
	 * get useful error messages.
	 */
	if (smf->file_buffer_length < 6) {
		g_critical("SMF error: file is too short, it cannot be a MIDI file.");

		return (-1);
	}

	tmp_mthd = (struct chunk_header_struct*)smf->file_buffer;

	if (!chunk_signature_matches(tmp_mthd, "MThd")) {
		g_critical("SMF error: MThd signature not found, is that a MIDI file?");
		
		return (-2);
	}

	/* Ok, now use next_chunk(). */
	mthd = next_chunk(smf);
	if (mthd == NULL)
		return (-3);

	assert(mthd == tmp_mthd);

	len = ntohl(mthd->length);
	if (len != 6) {
		g_critical("SMF error: MThd chunk length %d, must be 6.", len);

		return (-4);
	}

	return (0);
}
Example #11
void* realloc(void* mem, size_t       bytes)
{
  if (mem == 0) 
    return malloc(bytes);
  else
  {
    size_t       nb      = request2size(bytes);
    mchunkptr    p       = mem2chunk(mem);
    size_t       oldsize = p->size;
    int          room;
    mchunkptr    nxt;

    UPDATE_STATS((++n_reallocs, requested_mem += bytes-oldsize));
    
    /* try to expand (even if already big enough), to clean up chunk */

    while (!inuse(nxt = next_chunk(p)))
    {
      UPDATE_STATS ((malloced_mem += nxt->size, ++n_consol));
      unlink(nxt);
      set_size(p, p->size + nxt->size);
    }

    room = p->size - nb;
    if (room >= 0)
    {
      split(p, nb);
      UPDATE_STATS(malloced_mem -= room);
      return chunk2mem(p);
    }
    else /* do the obvious */
    {
      void* newmem;
      set_inuse(p);    /* don't let malloc consolidate us yet! */
      newmem = malloc(nb);
      bcopy(mem, newmem, oldsize - SIZE_SZ);
      free(mem);
      UPDATE_STATS(++n_reallocs_with_copy);
      return newmem;
    }
  }
}
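Examples #11, #13 and #14 come from an early boundary-tag allocator, so next_chunk()/prev_chunk() there are pointer arithmetic over stored chunk sizes rather than linked-list hops. A hypothetical sketch of that style of navigation (not the project's actual macros; the field name and the footer trick are assumptions):

/* Each chunk header records the full chunk size, so the next header starts
 * exactly size bytes further on; a copy of each chunk's size kept in the
 * word just below the following header lets prev_chunk() step backwards. */
#define next_chunk(p)  ((mchunkptr)((char *)(p) + (p)->size))
#define prev_chunk(p)  ((mchunkptr)((char *)(p) - ((size_t *)(p))[-1]))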
Example #12
//static void 
void
pos_int_free(char *name, mstate av, mchunkptr p, int flag)
{
	INTERNAL_SIZE_T size;
	mfastbinptr* fb;
	mchunkptr prevchunk;
	INTERNAL_SIZE_T prevsize;
	mchunkptr nextchunk;
	INTERNAL_SIZE_T nextsize;
	int nextinuse;
	mchunkptr bck;
	mchunkptr fwd;

	//const char *errstr = NULL;


	size = chunksize(p);
	/*if ((uintptr_t) p > (uintptr_t) -size || misaligned_chunk (p)) {
		errstr = "free(): invalid pointer";
errout:
		//malloc_printerr (check_action, errstr, chunk2mem(p));
		return;
	}*/
	/*if (size < MINSIZE) {
		errstr = "free(): invalid size";
		goto errout;
	}*/

	//check_inuse_chunk(av, p);


	// fastbin
	if (flag==1 && (unsigned long)(size) <= (unsigned long)(get_max_fast ())) {
		/*if (chunk_at_offset (p, size)->size <= 2 * SIZE_SZ
		   || chunksize (chunk_at_offset (p, size)) >= av->system_mem) {
			errstr = "free(): invalid next size (fast)";
			goto errout;
		}*/

#if CONSISTENCY == 1
		set_fastchunks_log(name, av);
#else
		set_fastchunks(av);
#endif
		fb = &fastbin(av, fastbin_index(size));

		if (*fb == p) {
			//errstr = "double free or corruption (fasttop)";
			//goto errout;
			return ;
		}

#if CONSISTENCY == 1
		POS_WRITE_VAUE(name, (unsigned long *)&p->fd, (unsigned long)*fb);
		POS_WRITE_VAUE(name, (unsigned long *)fb, (unsigned long)p);
#else
		p->fd = *fb;
		*fb = p;
#endif

		return ;
	}

	// 1. First chunk
	if (chunk_is_first(p)) {

		nextchunk = next_chunk(p);
		nextsize = chunksize(nextchunk);

		// 1-1. (free F), free L
		if (chunk_is_last(nextchunk) && !inuse(nextchunk)) {

			//if (av < p && p < (char *)(av+PAGESIZE)){
			if ((char*)av+sizeof(struct malloc_state) == (char*)p) {
#if CONSISTENCY == 1
				insert_to_unsorted_log(name, av, p, bck, fwd, size);

				set_foot_log(name, p, size);
				clear_inuse_bit_at_offset_log(name, p, size);
#else
				insert_to_unsorted(av, p, bck, fwd, size);

				set_foot(p, size);
				clear_inuse_bit_at_offset(p, size);
#endif

				goto out;
			}
			else {
#if CONSISTENCY == 1
				unlink_log(name, nextchunk, bck, fwd);
				size = size + nextsize + 2*SIZE_SZ;

				pos_log_insert_malloc_free(name, (unsigned long)p, size);
				//pos_seg_free(name, (void *)p, size); // Delayed pos_seg_free
				POS_WRITE_VAUE(name, (unsigned long *)&av->system_mem, (unsigned long)(av->system_mem-size));
#else
				unlink(nextchunk, bck, fwd);
				size = size + nextsize + 2*SIZE_SZ;
				/*if (size%PAGESIZE != 0) {
					errstr = "free(): unmmap size is not page size";
					goto errout;
				}*/
				//FREE((char*)p, size);
				pos_seg_free(name, (void *)p, size);
				av->system_mem -= size;
#endif

				goto out;
			}

		}

		// 1-3. (free F), free M
		else if (!inuse(nextchunk)) {
#if CONSISTENCY == 1
			unlink_log(name, nextchunk, bck, fwd);
			size += nextsize;

			insert_to_unsorted_log(name, av, p, bck, fwd, size);

			set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
			set_foot_log(name, p, size);
#else
			unlink(nextchunk, bck, fwd);
			size += nextsize;

			insert_to_unsorted(av, p, bck, fwd, size);

			set_head(p, size | FIRST_CHUNK | PREV_INUSE);
			set_foot(p, size);
#endif

			goto out;
		}

		// 1-2. (free F), inuse L & 1-4. (free F), inuse M
		else {
#if CONSISTENCY == 1
			insert_to_unsorted_log(name, av, p, bck, fwd, size);

			set_foot_log(name, p, size);
			clear_inuse_bit_at_offset_log(name, p, size);
#else
			insert_to_unsorted(av, p, bck, fwd, size);

			set_foot(p, size);
			clear_inuse_bit_at_offset(p, size);
#endif

			goto out;
		}

	}

	// 2. Last chunk
	else if (chunk_is_last(p)) {

		if (!prev_inuse(p)) {
			prevchunk = prev_chunk(p);
			prevsize = chunksize(prevchunk);

			// 2-1. free F, (free L)
			if (chunk_is_first(prevchunk)) {

				//if (av < prevchunk && prevchunk < av+PAGESIZE){
				if((char*)av+sizeof(struct malloc_state) == (char*)prevchunk) {
#if CONSISTENCY == 1
					insert_to_unsorted_log(name, av, p, bck, fwd, size);
  
					set_foot_log(name, p, size);
					clear_inuse_bit_at_offset_log(name, p, size);
#else
					insert_to_unsorted(av, p, bck, fwd, size);
  
					set_foot(p, size);
					clear_inuse_bit_at_offset(p, size);
#endif

					goto out;
				}
				else {
#if CONSISTENCY == 1
					unlink_log(name, prevchunk, bck, fwd);
					size = prevsize+size+2*SIZE_SZ;
					//pos_seg_free(name, (void *)p, size);
					pos_log_insert_malloc_free(name, (unsigned long)p, size);
					POS_WRITE_VAUE(name, (unsigned long *)&av->system_mem, (unsigned long)(av->system_mem-size));
#else
					unlink(prevchunk, bck, fwd);
					size = prevsize+size+2*SIZE_SZ;
					/*if (size%PAGESIZE != 0) {
						errstr = "free(): unmmap size is not page size";
						goto errout;
					}*/
					//FREE((char*)p, size);
					pos_seg_free(name, (void *)p, size);
					av->system_mem -= size;
#endif

					goto out;
				}

			}

			// 2-3. free M, (free L)
			else {
#if CONSISTENCY == 1
				unlink_log(name, prevchunk, bck, fwd);
				size += prevsize;
				p = chunk_at_offset(p, -((long) prevsize));
  
				insert_to_unsorted_log(name, av, p, bck, fwd, size);

				set_head_log(name, p, size | LAST_CHUNK | PREV_INUSE);
				set_foot_log(name, p, size);
				clear_inuse_bit_at_offset_log(name, p, size);
#else
				unlink(prevchunk, bck, fwd);
				size += prevsize;
				p = chunk_at_offset(p, -((long) prevsize));
  
				insert_to_unsorted(av, p, bck, fwd, size);

				set_head(p, size | LAST_CHUNK | PREV_INUSE);
				set_foot(p, size);
				clear_inuse_bit_at_offset(p, size);
#endif
				goto out;
			}

		}

		// 2-2. inuse F, (free L) & 2-4. inuse M, (free L)
		else {
#if CONSISTENCY == 1
			insert_to_unsorted_log(name, av, p, bck, fwd, size);
  
			set_foot_log(name, p, size);
			clear_inuse_bit_at_offset_log(name, p, size);
#else
			insert_to_unsorted(av, p, bck, fwd, size);
  
			set_foot(p, size);
			clear_inuse_bit_at_offset(p, size);
#endif

			goto out;
		}

	}

	// 3. Middle chunk
	else {

		nextchunk = next_chunk(p);
		nextsize = chunksize(nextchunk);

		if (!prev_inuse(p)) {
			prevchunk = prev_chunk(p);
			prevsize = chunksize(prevchunk);

			// 3-1. free F, (free M), free L
			if (chunk_is_first(prevchunk) && chunk_is_last(nextchunk) && !inuse(nextchunk) ) {
	
				//if (av < prevchunk && prevchunk < av+PAGESIZE){
				if((char*)av+sizeof(struct malloc_state) == (char*)prevchunk) {
#if CONSISTENCY == 1
					unlink_log(name, prevchunk, bck, fwd);
					size += prevsize;
					p = chunk_at_offset(p, -((long) prevsize));

					insert_to_unsorted_log(name, av, p, bck, fwd, size);
  
					set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
					set_foot_log(name, p, size);
					clear_inuse_bit_at_offset_log(name, p, size);
#else
					unlink(prevchunk, bck, fwd);
					size += prevsize;
					p = chunk_at_offset(p, -((long) prevsize));

					insert_to_unsorted(av, p, bck, fwd, size);
  
					set_head(p, size | FIRST_CHUNK | PREV_INUSE);
					set_foot(p, size);
					clear_inuse_bit_at_offset(p, size);
#endif
  
					goto out;
				}

				else {
#if CONSISTENCY == 1
					unlink_log(name, prevchunk, bck, fwd);
					unlink_log(name, nextchunk, bck, fwd);
					p = chunk_at_offset(p, -((long) prevsize));
					size = prevsize+size+nextsize+2*SIZE_SZ;

					pos_log_insert_malloc_free(name, (unsigned long)p, size);
					//pos_seg_free(name, (void *)p, size);
					POS_WRITE_VAUE(name, (unsigned long *)&av->system_mem, (unsigned long)(av->system_mem-size));
#else
					unlink(prevchunk, bck, fwd);
					unlink(nextchunk, bck, fwd);
					p = chunk_at_offset(p, -((long) prevsize));
					size = prevsize+size+nextsize+2*SIZE_SZ;
					/*if (size%PAGESIZE != 0) {
						errstr = "free(): unmmap size is not page size";
						goto errout;
					}*/
					//FREE((char*)p, size);
					pos_seg_free(name, (void *)p, size);
					av->system_mem -= size;
#endif
  
					goto out;
				}
			}

#if CONSISTENCY == 1
			unlink_log(name, prevchunk, bck, fwd);
#else
			unlink(prevchunk, bck, fwd);
#endif
			size += prevsize;
			p = chunk_at_offset(p, -((long) prevsize));

			if (chunk_is_first(prevchunk)) {
#if CONSISTENCY == 1
				set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
#else
				set_head(p, size | FIRST_CHUNK | PREV_INUSE);
				//set_foot(p, size);
				//clear_inuse_bit_at_offset(p, size);
#endif
			}

		}

		nextinuse = inuse_bit_at_offset(nextchunk, nextsize);

		if (!nextinuse) {
#if CONSISTENCY == 1
			unlink_log(name, nextchunk, bck, fwd);
#else
			unlink(nextchunk, bck, fwd);
#endif
			size += nextsize;
		}

#if CONSISTENCY == 1
		insert_to_unsorted_log(name, av, p, bck, fwd, size);

		if (chunk_is_first(p)) {
			set_head_log(name, p, size | FIRST_CHUNK | PREV_INUSE);
		} else if (chunk_is_last(nextchunk)&&!nextinuse) {
			set_head_log(name, p, size | LAST_CHUNK | PREV_INUSE);
		} else {
			set_head_log(name, p, size | PREV_INUSE);
		}
		set_foot_log(name, p, size);
		clear_inuse_bit_at_offset_log(name, p, size);
#else
		//else
		//clear_inuse_bit_at_offset(nextchunk, 0);

		insert_to_unsorted(av, p, bck, fwd, size);

		if (chunk_is_first(p)) {
			set_head(p, size | FIRST_CHUNK | PREV_INUSE);
		} else if (chunk_is_last(nextchunk)&&!nextinuse) {
			set_head(p, size | LAST_CHUNK | PREV_INUSE);
		} else {
			set_head(p, size | PREV_INUSE);
		}
		set_foot(p, size);
		clear_inuse_bit_at_offset(p, size);

		//check_free_chunk(av, p);
#endif
	}

out: 
	if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD && have_fastchunks(av)) {
		pos_malloc_consolidate(name, av);
	}
}
Example #13
void* malloc(size_t       bytes)
{
  size_t       nb  = request2size(bytes);  /* padded request size */
  mbinptr      b   = size2bin(nb);         /* corresponding bin */
  mchunkptr    hd  = &(b->hd);             /* head of its list */
  mchunkptr    p   = hd->fd;               /* chunk traverser */

  UPDATE_STATS((requested_mem+=bytes, ++n_malloc_bins));

  /* Try a (near) exact match in own bin */
  /* clean out unusable but consolidatable chunks in bin while traversing */

  while (p != hd)
  {
    UPDATE_STATS(++n_malloc_chunks);
    if (p->size >= nb)
      goto found;
    else    /* try to consolidate; same code as malloc_find_space */
    {
      mchunkptr nextp = p->fd;       /* save, in case of relinks */
      int consolidated = 0;          /* only unlink/relink if consolidated */
      
      mchunkptr t;

      while (!inuse(t = prev_chunk(p))) /* consolidate backward */
      {
        if (!consolidated) { consolidated = 1; unlink(p); }
        if (t == nextp) nextp = t->fd;
        unlink(t);
        set_size(t, t->size + p->size);
        p = t;
        UPDATE_STATS (++n_consol);
      }
      
      while (!inuse(t = next_chunk(p))) /* consolidate forward */
      {
        if (!consolidated) { consolidated = 1; unlink(p); }
        if (t == nextp) nextp = t->fd;
        unlink(t);
        set_size(p, p->size + t->size);
        UPDATE_STATS (++n_consol);
      }
      
      if (consolidated)
      {
        if (p->size >= nb)
        {
          /* make it safe to unlink again below */
          UPDATE_STATS(++n_avail);
          p->fd = p->bk = p;
          goto found;
        }
        else
          consollink(p);
      }

      p = nextp;

    }
  }

  b->dirty = 0; /* true if got here */

  /*  Scan bigger bins for a victim */

  while (++b <= malloc_maxbin)
  {
    UPDATE_STATS(++n_malloc_bins);
    if ((p = b->hd.bk) != &(b->hd))    /* no need to check size */
      goto found;
  }

  /* Consolidate or sbrk */

  p = malloc_find_space(nb);

  if (p == 0) return 0; /* allocation failure */

 found:   /* Use what we found */

  unlink(p);
  split(p, nb); 
  UPDATE_STATS(do_malloc_stats(p));
  return chunk2mem(p);
}
Example #14
static mchunkptr malloc_find_space(size_t       nb)
{
  mbinptr b;

  /* first, re-adjust max used bin */

  while (malloc_maxbin >= FIRSTBIN && 
         malloc_maxbin->hd.bk == &(malloc_maxbin->hd))
  {
    malloc_maxbin->dirty = 0;
    --malloc_maxbin;
  }

  for (b = malloc_maxbin; b >= FIRSTBIN; --b)
  {
    UPDATE_STATS(++n_malloc_bins);

    if (b->dirty)
    {
      mchunkptr h = &(b->hd);         /* head of list */
      mchunkptr p = h->fd;            /* chunk traverser */

      while (p != h)
      {
        mchunkptr nextp = p->fd;       /* save, in case of relinks */
        int consolidated = 0;          /* only unlink/relink if consolidated */

        mchunkptr t;

        UPDATE_STATS(++n_malloc_chunks);

        while (!inuse(t = prev_chunk(p))) /* consolidate backward */
        {
          if (!consolidated) { consolidated = 1; unlink(p); }
          if (t == nextp) nextp = t->fd;
          unlink(t);
          set_size(t, t->size + p->size);
          p = t;
          UPDATE_STATS (++n_consol);
        }
        
        while (!inuse(t = next_chunk(p))) /* consolidate forward */
        {
          if (!consolidated) { consolidated = 1; unlink(p); }
          if (t == nextp) nextp = t->fd;
          unlink(t);
          set_size(p, p->size + t->size);
          UPDATE_STATS (++n_consol);
        }

       if (consolidated)
       {
          if (p->size >= nb)
          {
            /* make it safe to unlink in malloc */
            UPDATE_STATS(++n_avail);
            p->fd = p->bk = p;
            return p;
          }
          else
            consollink(p);
        }

        p = nextp;

      }

      b->dirty = 0;

    }
  }

  /* nothing available - sbrk some more */

  return malloc_from_sys(nb);
}