static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h) {
    /* Maybe size is too large to fit in a single heap.  So, just try
       to create a minimally-sized arena and let _int_malloc() attempt
       to deal with the large request via mmap_chunk().  */
    h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
    if(!h)
      return 0;
  }
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

#ifdef PER_THREAD
  (void)mutex_lock(&list_lock);
#endif

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

#ifdef PER_THREAD
  (void)mutex_unlock(&list_lock);
#endif

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}
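/* Illustration (not part of the original source): a minimal, self-contained
   sketch of the top-chunk alignment fix-up performed above.  The top chunk
   header is placed right after the arena's malloc_state and then nudged
   forward so that the user pointer returned by chunk2mem (chunk address +
   2 * SIZE_SZ) is MALLOC_ALIGNMENT-aligned.  The constants below are
   assumptions matching a typical 64-bit build (SIZE_SZ == 8,
   MALLOC_ALIGNMENT == 16), not the macros from malloc.c.  */
#include <stdio.h>
#include <stdint.h>

#define SIZE_SZ            8UL
#define MALLOC_ALIGNMENT   16UL
#define MALLOC_ALIGN_MASK  (MALLOC_ALIGNMENT - 1)

static uintptr_t align_top(uintptr_t ptr)
{
  /* How far is the would-be user pointer from an alignment boundary?  */
  uintptr_t misalign = (ptr + 2 * SIZE_SZ) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;   /* pad so chunk2mem(ptr) aligns */
  return ptr;
}

int main(void)
{
  /* An arena whose malloc_state happens to end at an 8-byte-odd offset.  */
  uintptr_t ptr = 0x7f00000008a8UL;
  uintptr_t top = align_top(ptr);
  printf("top chunk at %#lx, user pointer %#lx, aligned: %d\n",
         (unsigned long) top, (unsigned long) (top + 2 * SIZE_SZ),
         (int) (((top + 2 * SIZE_SZ) & MALLOC_ALIGN_MASK) == 0));
  return 0;
}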
/* ------------------------- __malloc_consolidate -------------------------

  __malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/

void attribute_hidden __malloc_consolidate(mstate av)
{
  mfastbinptr*    fb;                 /* current fastbin being consolidated */
  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
  mchunkptr       p;                  /* current chunk being consolidated */
  mchunkptr       nextp;              /* next chunk to consolidate */
  mchunkptr       unsorted_bin;       /* bin header */
  mchunkptr       first_unsorted;     /* chunk to link to */

  /* These have same use as in free() */
  mchunkptr       nextchunk;
  size_t          size;
  size_t          nextsize;
  size_t          prevsize;
  int             nextinuse;
  mchunkptr       bck;
  mchunkptr       fwd;

  /* If max_fast is 0, we know that av hasn't
     yet been initialized, in which case do so below */

  if (av->max_fast != 0) {
    clear_fastchunks(av);

    unsorted_bin = unsorted_chunks(av);

    /* Remove each chunk from fast bin and consolidate it, placing it
       then in unsorted bin. Among other reasons for doing this,
       placing in unsorted bin avoids needing to calculate actual bins
       until malloc is sure that chunks aren't immediately going to be
       reused anyway. */

    maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
    fb = &(av->fastbins[0]);
    do {
      if ( (p = *fb) != 0) {
        *fb = 0;

        do {
          check_inuse_chunk(p);
          nextp = p->fd;

          /* Slightly streamlined version of consolidation code in free() */
          size = p->size & ~PREV_INUSE;
          nextchunk = chunk_at_offset(p, size);
          nextsize = chunksize(nextchunk);

          if (!prev_inuse(p)) {
            prevsize = p->prev_size;
            size += prevsize;
            p = chunk_at_offset(p, -((long) prevsize));
            unlink(p, bck, fwd);
          }

          if (nextchunk != av->top) {
            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
            set_head(nextchunk, nextsize);

            if (!nextinuse) {
              size += nextsize;
              unlink(nextchunk, bck, fwd);
            }

            first_unsorted = unsorted_bin->fd;
            unsorted_bin->fd = p;
            first_unsorted->bk = p;

            set_head(p, size | PREV_INUSE);
            p->bk = unsorted_bin;
            p->fd = first_unsorted;
            set_foot(p, size);
          }
          else {
            size += nextsize;
            set_head(p, size | PREV_INUSE);
            av->top = p;
          }

        } while ( (p = nextp) != 0);
      }
    } while (fb++ != maxfb);
  }
  else {
    malloc_init_state(av);
    check_malloc_state();
  }
}
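/* Illustration (not from the original source): a minimal, self-contained
   model of the boundary-tag arithmetic the consolidation loop above relies
   on.  mini_hdr, PREV_INUSE_BIT and at_offset are hypothetical stand-ins for
   malloc_chunk, PREV_INUSE and chunk_at_offset; the real bin bookkeeping
   (unlink, unsorted-bin splicing) is omitted.  Chunk B, sitting between a
   free chunk A and the top chunk, is merged backwards via its prev_size tag
   and then forwards into top, mirroring the two branches above.  */
#include <stdio.h>
#include <stddef.h>

#define PREV_INUSE_BIT 0x1UL

typedef struct {
  size_t prev_size;  /* size of the previous chunk, valid only if it is free */
  size_t size;       /* this chunk's size; low bit set = previous chunk in use */
} mini_hdr;

static mini_hdr *at_offset(mini_hdr *h, long offset)
{
  return (mini_hdr *)((char *)h + offset);
}

int main(void)
{
  /* Three adjacent chunks carved out of one flat buffer. */
  static char heap[256];
  mini_hdr *A   = (mini_hdr *)heap;      /* free chunk before the one freed */
  mini_hdr *B   = at_offset(A, 64);      /* the chunk being consolidated */
  mini_hdr *TOP = at_offset(B, 64);      /* the top (wilderness) chunk */

  A->size      = 64 | PREV_INUSE_BIT;    /* whatever precedes A is in use */
  B->size      = 64;                     /* low bit clear: A is free */
  B->prev_size = 64;                     /* boundary tag: A's size */
  TOP->size    = 128 | PREV_INUSE_BIT;   /* B is still marked in use */

  size_t size = B->size & ~PREV_INUSE_BIT;
  mini_hdr *p = B;
  mini_hdr *nextchunk = at_offset(p, (long) size);   /* == TOP here */

  if (!(p->size & PREV_INUSE_BIT)) {     /* backward merge, as in free() */
    size_t prevsize = p->prev_size;
    size += prevsize;
    p = at_offset(p, -(long) prevsize);  /* p now points at A */
  }

  if (nextchunk == TOP) {                /* forward merge into the top chunk */
    size += TOP->size & ~PREV_INUSE_BIT;
    p->size = size | PREV_INUSE_BIT;     /* p becomes the new top */
  }

  printf("merged chunk at offset %td, size %zu\n",
         (char *) p - heap, p->size & ~PREV_INUSE_BIT);
  return 0;
}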
static mstate
_int_new_arena (size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap (size + (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT),
                mp_.top_pad);
  if (!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap (sizeof (*h) + sizeof (*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if (!h)
        return 0;
    }
  a = h->ar_ptr = (mstate) (h + 1);
  malloc_init_state (a);
  a->attached_threads = 1;
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *) (a + 1);
  misalign = (unsigned long) chunk2mem (ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top (a) = (mchunkptr) ptr;
  set_head (top (a), (((char *) h + h->size) - ptr) | PREV_INUSE);

  LIBC_PROBE (memory_arena_new, 2, a, size);
  mstate replaced_arena = thread_arena;
  thread_arena = a;
  __libc_lock_init (a->mutex);

  __libc_lock_lock (list_lock);

  /* Add the new arena to the global list.  */
  a->next = main_arena.next;
  /* FIXME: The barrier is an attempt to synchronize with read access
     in reused_arena, which does not acquire list_lock while
     traversing the list.  */
  atomic_write_barrier ();
  main_arena.next = a;

  __libc_lock_unlock (list_lock);

  __libc_lock_lock (free_list_lock);
  detach_arena (replaced_arena);
  __libc_lock_unlock (free_list_lock);

  /* Lock this arena.  NB: Another thread may have been attached to
     this arena because the arena is now accessible from the
     main_arena.next list and could have been picked by reused_arena.
     This can only happen for the last arena created (before the arena
     limit is reached).  At this point, some arena has to be attached
     to two threads.  We could acquire the arena lock before list_lock
     to make it less likely that reused_arena picks this new arena,
     but this could result in a deadlock with
     __malloc_fork_lock_parent.  */
  __libc_lock_lock (a->mutex);

  return a;
}
/* ------------------------- __malloc_consolidate -------------------------

  __malloc_consolidate is a specialized version of free() that tears
  down chunks held in fastbins.  Free itself cannot be used for this
  purpose since, among other things, it might place chunks back onto
  fastbins.  So, instead, we need to use a minor variant of the same
  code.

  Also, because this routine needs to be called the first time through
  malloc anyway, it turns out to be the perfect place to trigger
  initialization code.
*/

void attribute_hidden __malloc_consolidate(mstate av)
{
  mfastbinptr*    fb;                 /* current fastbin being consolidated */
  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
  mchunkptr       p;                  /* current chunk being consolidated */
  mchunkptr       nextp;              /* next chunk to consolidate */
  mchunkptr       unsorted_bin;       /* bin header */
  mchunkptr       first_unsorted;     /* chunk to link to */
  ustate          unit;               /* unit (ustate) owning the current chunk */

  /* These have same use as in free() */
  mchunkptr       nextchunk;
  size_t          size;
  size_t          nextsize;
  size_t          prevsize;
  int             nextinuse;
  mchunkptr       bck;
  mchunkptr       fwd;

  /* If max_fast is 0, we know that av hasn't
     yet been initialized, in which case do so below */

  if (av->max_fast != 0) {
    clear_fastchunks(av);

    unsorted_bin = unsorted_chunks(av);

    /* Remove each chunk from fast bin and consolidate it, placing it
       then in unsorted bin. Among other reasons for doing this,
       placing in unsorted bin avoids needing to calculate actual bins
       until malloc is sure that chunks aren't immediately going to be
       reused anyway. */

    maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
    fb = &(av->fastbins[0]);
    do {
      if ( (p = *fb) != 0) {
        *fb = 0;

        do {
          check_inuse_chunk(p);
          nextp = p->fd;

          /* Slightly streamlined version of consolidation code in free() */
          size = p->size & ~PREV_INUSE;
          nextchunk = chunk_at_offset(p, size);
          nextsize = chunksize(nextchunk);

          if (!prev_inuse(p)) {
            prevsize = p->prev_size;
            size += prevsize;
            p = chunk_at_offset(p, -((long) prevsize));
            unlink(p, bck, fwd);
          }

          unit = lookup_ustate_by_mem((void*)p);
          if (nextchunk != unit->unit_top) {
            nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
            set_head(nextchunk, nextsize);

            if (!nextinuse) {
              size += nextsize;
              unlink(nextchunk, bck, fwd);
            }

            first_unsorted = unsorted_bin->fd;
            unsorted_bin->fd = p;
            first_unsorted->bk = p;

            set_head(p, size | PREV_INUSE);
            p->bk = unsorted_bin;
            p->fd = first_unsorted;
            set_foot(p, size);
          }
          else {
            size += nextsize;
            set_head(p, size | PREV_INUSE);
            unit->unit_top = p;
          }

        } while ( (p = nextp) != 0);
      }
    } while (fb++ != maxfb);
  }
  else {
    if (get_abstate()->mstate_list.num == 0) {
      //initialize abheap state
      init_linked_list(&(get_abstate()->mstate_list));
      init_linked_list(&(get_abstate()->ustate_list));
      init_linked_list(&(get_abstate()->mmapped_ustate_list));
      get_abstate()->ab_top = (mchunkptr)(CHANNEL_ADDR);
      //allocate channel heap space
      mmap((void *) CHANNEL_ADDR, CHANNEL_SIZE, PROT_READ|PROT_WRITE,
           MAP_ANONYMOUS|MAP_FIXED|MAP_SHARED, -1, 0);
      touch_mem((void *)CHANNEL_ADDR, CHANNEL_SIZE);
    }
    malloc_init_state(av);
    check_malloc_state();
  }
}
static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  thread_arena = &main_arena;

  malloc_init_state (&main_arena);

#if HAVE_TUNABLES
  TUNABLE_GET (check, int32_t, TUNABLE_CALLBACK (set_mallopt_check));
  TUNABLE_GET (top_pad, size_t, TUNABLE_CALLBACK (set_top_pad));
  TUNABLE_GET (perturb, int32_t, TUNABLE_CALLBACK (set_perturb_byte));
  TUNABLE_GET (mmap_threshold, size_t, TUNABLE_CALLBACK (set_mmap_threshold));
  TUNABLE_GET (trim_threshold, size_t, TUNABLE_CALLBACK (set_trim_threshold));
  TUNABLE_GET (mmap_max, int32_t, TUNABLE_CALLBACK (set_mmaps_max));
  TUNABLE_GET (arena_max, size_t, TUNABLE_CALLBACK (set_arena_max));
  TUNABLE_GET (arena_test, size_t, TUNABLE_CALLBACK (set_arena_test));
# if USE_TCACHE
  TUNABLE_GET (tcache_max, size_t, TUNABLE_CALLBACK (set_tcache_max));
  TUNABLE_GET (tcache_count, size_t, TUNABLE_CALLBACK (set_tcache_count));
  TUNABLE_GET (tcache_unsorted_limit, size_t,
               TUNABLE_CALLBACK (set_tcache_unsorted_limit));
# endif
#else
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL, 0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (__libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  if (s && s[0] != '\0' && s[0] != '0')
    __malloc_check_init ();
#endif

#if HAVE_MALLOC_INIT_HOOK
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
#endif
  __malloc_initialized = 1;
}
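/* Illustration (not from the original source): a standalone sketch of the
   length-based dispatch ptmalloc_init uses for MALLOC_* environment
   variables when tunables are not compiled in.  parse_malloc_env is a
   hypothetical helper; its argument is assumed to already point past the
   "MALLOC_" prefix, as the value returned by next_env_entry does.  Only two
   cases are reproduced, the __libc_enable_secure check is omitted, and
   printf stands in for __libc_mallopt.  */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
parse_malloc_env (const char *envline)
{
  size_t len = strcspn (envline, "=");
  if (envline[len] != '=')
    return;                        /* no '=': ignored, as in ptmalloc_init */

  switch (len)
    {
    case 8:
      if (memcmp (envline, "TOP_PAD_", 8) == 0)
        printf ("M_TOP_PAD <- %d\n", atoi (&envline[9]));
      break;
    case 15:
      if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
        printf ("M_MMAP_THRESHOLD <- %d\n", atoi (&envline[16]));
      break;
    default:
      break;
    }
}

int main (void)
{
  /* What an environment containing MALLOC_TOP_PAD_=131072 and
     MALLOC_MMAP_THRESHOLD_=262144 would hand to the parser, one entry
     at a time.  */
  parse_malloc_env ("TOP_PAD_=131072");
  parse_malloc_env ("MMAP_THRESHOLD_=262144");
  return 0;
}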