/* atfork prepare handler: acquire the list lock and every arena lock so
   that no malloc state is held by another thread across fork().
   Reentrant: if this same thread is already inside a lock_all section
   (detected via the ATFORK_ARENA_PTR marker stored in arena_key), only
   the recursion counter is bumped.  */
static void
ptmalloc_lock_all (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
  if (mutex_trylock(&list_lock))
    {
      Void_t *my_arena;
      tsd_getspecific(arena_key, my_arena);
      if (my_arena == ATFORK_ARENA_PTR)
        /* This is the same thread which already locks the global list.
           Just bump the counter. */
        goto out;

      /* This thread has to wait its turn. */
      (void)mutex_lock(&list_lock);
    }
  /* Lock every arena in the circular list, starting at main_arena. */
  for(ar_ptr = &main_arena;;)
    {
      (void)mutex_lock(&ar_ptr->mutex);
      ar_ptr = ar_ptr->next;
      if(ar_ptr == &main_arena)
        break;
    }
  /* Redirect the hooks so that only malloc_atfork/free_atfork run while
     the world is stopped. */
  save_malloc_hook = __malloc_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_atfork;
  __free_hook = free_atfork;
  /* Only the current thread may perform malloc/free calls now. */
  tsd_getspecific(arena_key, save_arena);
  tsd_setspecific(arena_key, ATFORK_ARENA_PTR);
out:
  /* Incremented on the first-entry path too; unlock_all2 resets it. */
  ++atfork_recursive_cntr;
}
static void ptmalloc_unlock_all2 (void) { mstate ar_ptr; if (__malloc_initialized < 1) return; tsd_setspecific (arena_key, save_arena); __malloc_hook = save_malloc_hook; __free_hook = save_free_hook; free_list = NULL; for (ar_ptr = &main_arena;; ) { mutex_init (&ar_ptr->mutex); if (ar_ptr != save_arena) { ar_ptr->next_free = free_list; free_list = ar_ptr; } ar_ptr = ar_ptr->next; if (ar_ptr == &main_arena) break; } mutex_init (&list_lock); atfork_recursive_cntr = 0; }
/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in it
   and it is currently locked.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* Round-robin cursor over the circular arena list; persists across
     calls so successive callers are spread over different arenas.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* First pass: take any arena whose lock is free right now.  */
  result = next_to_use;
  do
    {
      if (!mutex_trylock (&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory in
     that arena and it is currently locked.  */
  /* NOTE(review): if AVOID_ARENA is the only arena, ->next wraps back
     to it and we still block on it below — confirm callers unlock it
     before calling here.  */
  if (result == avoid_arena)
    result = result->next;

  /* No arena available.  Wait for the next in line.  */
  /* LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena); */
  (void) mutex_lock (&result->mutex);

out:
  /* LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena); */
  tsd_setspecific (arena_key, (void *) result);
  /* Start the next search after the arena we just handed out.  */
  next_to_use = result->next;
  return result;
}
static mstate reused_arena (void) { mstate result; static mstate next_to_use; if (next_to_use == NULL) next_to_use = &main_arena; result = next_to_use; do { if (!mutex_trylock(&result->mutex)) goto out; result = result->next; } while (result != next_to_use); /* No arena available. Wait for the next in line. */ (void)mutex_lock(&result->mutex); out: tsd_setspecific(arena_key, (void *)result); THREAD_STAT(++(result->stat_lock_loop)); next_to_use = result->next; return result; }
/* atfork child handler: reset all malloc locks in the new (single
   threaded) process and restore the hooks and per-thread arena saved by
   ptmalloc_lock_all.  */
static void
ptmalloc_unlock_all2 (void)
{
  mstate ar_ptr;

  if(__malloc_initialized < 1)
    return;
#if defined _LIBC || defined MALLOC_HOOKS
  tsd_setspecific(arena_key, save_arena);
  __malloc_hook = save_malloc_hook;
  __free_hook = save_free_hook;
#endif
#ifdef PER_THREAD
  /* The child starts with one thread; rebuild the free list below. */
  free_list = NULL;
#endif
  /* Walk the circular arena list exactly once. */
  for(ar_ptr = &main_arena;;)
    {
      /* Reinitialize rather than unlock: the lock owners no longer
         exist in the child. */
      mutex_init(&ar_ptr->mutex);
#ifdef PER_THREAD
      /* Every arena except the one this thread keeps goes back on the
         free list for reuse. */
      if (ar_ptr != save_arena)
        {
          ar_ptr->next_free = free_list;
          free_list = ar_ptr;
        }
#endif
      ar_ptr = ar_ptr->next;
      if(ar_ptr == &main_arena)
        break;
    }
  mutex_init(&list_lock);
  atfork_recursive_cntr = 0;
}
/* Public realloc() entry point.  Dispatches through __realloc_hook if
   installed, locates the arena owning OLDMEM from its chunk header, and
   resizes within that arena's mspace under the arena lock.  Returns the
   new pointer or 0 (with OLDMEM left valid except in the zero-byte-free
   case).  */
void*
public_rEALLOc(void* oldmem, size_t bytes)
{
  struct malloc_arena* ar_ptr;
  mchunkptr oldp; /* chunk corresponding to oldmem */
  void* newp; /* chunk to return */

  void * (*hook) (void *, size_t, const void *) = __realloc_hook;
  if (hook != NULL)
    return (*hook)(oldmem, bytes, RETURN_ADDRESS (0));

#if REALLOC_ZERO_BYTES_FREES
  if (bytes == 0 && oldmem != NULL)
    {
      public_fREe(oldmem);
      return 0;
    }
#endif

  /* realloc of null is supposed to be same as malloc */
  if (oldmem == 0)
    return public_mALLOc(bytes);

  oldp = mem2chunk(oldmem);
  if (is_mmapped(oldp))
    ar_ptr = arena_for_mmap_chunk(oldp); /* FIXME: use mmap_resize */
  else
    ar_ptr = arena_for_chunk(oldp);

#if THREAD_STATS
  /* Account contended vs. uncontended lock acquisitions. */
  if(!mutex_trylock(&ar_ptr->mutex))
    ++(ar_ptr->stat_lock_direct);
  else
    {
      (void)mutex_lock(&ar_ptr->mutex);
      ++(ar_ptr->stat_lock_wait);
    }
#else
  (void)mutex_lock(&ar_ptr->mutex);
#endif

#ifndef NO_THREADS
  /* As in malloc(), remember this arena for the next allocation. */
  tsd_setspecific(arena_key, (void *)ar_ptr);
#endif

  /* Non-main arenas tag chunks with a footer; reserve room for it. */
  if (ar_ptr != &main_arena)
    bytes += FOOTER_OVERHEAD;
  newp = mspace_realloc(arena_to_mspace(ar_ptr), oldmem, bytes);
  if (newp && ar_ptr != &main_arena)
    set_non_main_arena(newp, ar_ptr);
  (void)mutex_unlock(&ar_ptr->mutex);

  assert(!newp || is_mmapped(mem2chunk(newp)) ||
         ar_ptr == arena_for_chunk(mem2chunk(newp)));

  return newp;
}
/* Create a new arena sized to hold an allocation of SIZE bytes.  On
   success the arena is returned locked, installed as this thread's
   arena (arena_key), and linked into the global circular list; returns
   0 if no heap could be mapped.  */
static mstate
_int_new_arena(size_t size)
{
  mstate a;
  heap_info *h;
  char *ptr;
  unsigned long misalign;

  h = new_heap(size + (sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT),
               mp_.top_pad);
  if(!h)
    {
      /* Maybe size is too large to fit in a single heap.  So, just try
         to create a minimally-sized arena and let _int_malloc() attempt
         to deal with the large request via mmap_chunk().  */
      h = new_heap(sizeof(*h) + sizeof(*a) + MALLOC_ALIGNMENT, mp_.top_pad);
      if(!h)
        return 0;
    }
  /* The arena state lives immediately after the heap header. */
  a = h->ar_ptr = (mstate)(h+1);
  malloc_init_state(a);
  /*a->next = NULL;*/
  a->system_mem = a->max_system_mem = h->size;
  arena_mem += h->size;

  /* Set up the top chunk, with proper alignment. */
  ptr = (char *)(a + 1);
  misalign = (unsigned long)chunk2mem(ptr) & MALLOC_ALIGN_MASK;
  if (misalign > 0)
    ptr += MALLOC_ALIGNMENT - misalign;
  top(a) = (mchunkptr)ptr;
  set_head(top(a), (((char*)h + h->size) - ptr) | PREV_INUSE);

  /* Hand the new arena to this thread, locked. */
  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  (void)mutex_lock(&a->mutex);

#ifdef PER_THREAD
  (void)mutex_lock(&list_lock);
#endif

  /* Add the new arena to the global list.  The write barrier orders the
     store to a->next before publishing A via main_arena.next, so
     readers traversing the list never see a half-linked node. */
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;

#ifdef PER_THREAD
  (void)mutex_unlock(&list_lock);
#endif

  THREAD_STAT(++(a->stat_lock_loop));

  return a;
}
/* Thread-exit cleanup: detach the exiting thread's arena and push it
   onto the global free list so another thread can pick it up.  */
arena_thread_freeres (void)
{
  void *vptr = NULL;
  mstate a = tsd_getspecific (arena_key, vptr);
  tsd_setspecific (arena_key, NULL);

  if (a == NULL)
    return;

  (void) mutex_lock (&list_lock);
  a->next_free = free_list;
  free_list = a;
  (void) mutex_unlock (&list_lock);
}
/* Lock and return an arena that can be reused for memory allocation.
   Avoid AVOID_ARENA as we have already failed to allocate memory in it
   and it is currently locked.  Returns NULL only when every arena is
   either corrupt or the one to avoid.  */
static mstate
reused_arena (mstate avoid_arena)
{
  mstate result;
  /* Round-robin cursor over the circular arena list; persists across
     calls so successive callers are spread over different arenas.  */
  static mstate next_to_use;
  if (next_to_use == NULL)
    next_to_use = &main_arena;

  /* First pass: take any healthy arena whose lock is free right now.  */
  result = next_to_use;
  do
    {
      if (!arena_is_corrupt (result) && !mutex_trylock (&result->mutex))
        goto out;

      result = result->next;
    }
  while (result != next_to_use);

  /* Avoid AVOID_ARENA as we have already failed to allocate memory in
     that arena and it is currently locked.  */
  if (result == avoid_arena)
    result = result->next;

  /* Make sure that the arena we get is not corrupted.  BUGFIX: the old
     code tested `result == begin' after the loop, which is also true
     when the loop ran zero iterations (i.e. the first candidate was
     already acceptable), so a usable arena was wrongly rejected with
     NULL.  Return NULL only when the scan has wrapped all the way
     around without finding an acceptable arena.  */
  mstate begin = result;
  while (arena_is_corrupt (result) || result == avoid_arena)
    {
      result = result->next;
      if (result == begin)
        /* We looped around the arena list.  We could not find any
           arena that was either not corrupted or not the one we
           wanted to avoid.  */
        return NULL;
    }

  /* No arena available without contention.  Wait for the next in line.  */
  LIBC_PROBE (memory_arena_reuse_wait, 3, &result->mutex, result, avoid_arena);
  (void) mutex_lock (&result->mutex);

out:
  LIBC_PROBE (memory_arena_reuse, 2, result, avoid_arena);
  tsd_setspecific (arena_key, (void *) result);
  /* Start the next search after the arena we just handed out.  */
  next_to_use = result->next;
  return result;
}
static void ptmalloc_unlock_all (void) { struct malloc_arena *ar_ptr; if(__malloc_initialized < 1) return; tsd_setspecific(arena_key, save_arena); __malloc_hook = save_malloc_hook; __free_hook = save_free_hook; for(ar_ptr = &main_arena;;) { (void)mutex_unlock(&ar_ptr->mutex); ar_ptr = ar_ptr->next; if(ar_ptr == &main_arena) break; } (void)mutex_unlock(&list_lock); }
static void ptmalloc_unlock_all2(void) { struct malloc_arena *ar_ptr; if(__malloc_initialized < 1) return; #if defined _LIBC || 1 /*defined MALLOC_HOOKS*/ tsd_setspecific(arena_key, save_arena); __malloc_hook = save_malloc_hook; __free_hook = save_free_hook; #endif for(ar_ptr = &main_arena;;) { (void)mutex_init(&ar_ptr->mutex); ar_ptr = ar_ptr->next; if(ar_ptr == &main_arena) break; } (void)mutex_init(&list_lock); }
/* Pop an arena off the global free list for the calling thread.
   Returns the arena locked and installed in arena_key, or NULL if the
   list was (or became) empty.  */
static mstate
get_free_list (void)
{
  mstate result = free_list;

  /* Unsynchronized peek: skip the list lock when the list looks empty. */
  if (result == NULL)
    return NULL;

  (void) mutex_lock (&list_lock);
  result = free_list; /* re-read under the lock */
  if (result != NULL)
    free_list = result->next_free;
  (void) mutex_unlock (&list_lock);

  if (result != NULL)
    {
      /* LIBC_PROBE (memory_arena_reuse_free_list, 1, result); */
      (void) mutex_lock (&result->mutex);
      tsd_setspecific (arena_key, (void *) result);
    }
  return result;
}
/* Hand the calling thread an arena from the free list, if any.
   Returns it locked and recorded in arena_key, or NULL when the list
   is empty.  */
static mstate
get_free_list (void)
{
  mstate arena = free_list;

  /* Cheap unsynchronized check before taking the list lock. */
  if (arena == NULL)
    return NULL;

  (void)mutex_lock(&list_lock);
  arena = free_list; /* re-read under the lock */
  if (arena != NULL)
    free_list = arena->next_free;
  (void)mutex_unlock(&list_lock);

  if (arena != NULL)
    {
      (void)mutex_lock(&arena->mutex);
      tsd_setspecific(arena_key, (void *)arena);
      THREAD_STAT(++(arena->stat_lock_loop));
    }
  return arena;
}
static void ptmalloc_lock_all (void) { struct malloc_arena* ar_ptr; if(__malloc_initialized < 1) return; (void)mutex_lock(&list_lock); for(ar_ptr = &main_arena;;) { (void)mutex_lock(&ar_ptr->mutex); ar_ptr = ar_ptr->next; if(ar_ptr == &main_arena) break; } save_malloc_hook = __malloc_hook; save_free_hook = __free_hook; __malloc_hook = malloc_atfork; __free_hook = free_atfork; /* Only the current thread may perform malloc/free calls now. */ tsd_getspecific(arena_key, save_arena); tsd_setspecific(arena_key, ATFORK_ARENA_PTR); }
/* One-time malloc initialization: set up the thread-specific arena key,
   register the atfork handlers, and apply MALLOC_* environment tuning.
   __malloc_initialized: <0 never run, 0 in progress, 1 done.  */
static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  tsd_key_create (&arena_key, NULL);
  tsd_setspecific (arena_key, (void *) &main_arena);
  thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);

  /* Scan the environment once for MALLOC_* tuning variables.  S ends up
     pointing at the value of MALLOC_CHECK_, if present.  */
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL, 0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          /* Dispatch on the length of the name after the "MALLOC_"
             prefix (already stripped by next_env_entry, presumably —
             TODO confirm against its definition).  */
          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              /* Size-tuning variables are ignored in secure (setuid)
                 processes. */
              if (!__builtin_expect (internal_libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (internal_libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (internal_libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (internal_libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }

  /* MALLOC_CHECK_ handling and the initialize hook are disabled in
     this build: */
  /* if (s && s[0]) { __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0')); if (check_action != 0) __malloc_check_init (); } void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook); if (hook != NULL) (*hook)(); */

  __malloc_initialized = 1;
}
/* Select and lock an arena for an allocation of SIZE bytes.  A_TSD is
   this thread's previously used arena (NULL on first use; unused in the
   PER_THREAD configuration).  AVOID_ARENA is an arena we already failed
   to allocate from.  Returns the locked arena (PER_THREAD may return
   NULL).  */
static mstate
internal_function
arena_get2(mstate a_tsd, size_t size, mstate avoid_arena)
{
  mstate a;

#ifdef PER_THREAD
  /* Upper bound on the number of arenas, computed lazily from
     mp_.arena_max / mp_.arena_test / core count. */
  static size_t narenas_limit;

  a = get_free_list ();
  if (a == NULL)
    {
      /* Nothing immediately available, so generate a new arena.  */
      if (narenas_limit == 0)
        {
          if (mp_.arena_max != 0)
            narenas_limit = mp_.arena_max;
          else if (narenas > mp_.arena_test)
            {
              int n = __get_nprocs ();

              if (n >= 1)
                narenas_limit = NARENAS_FROM_NCORES (n);
              else
                /* We have no information about the system.  Assume two
                   cores.  */
                narenas_limit = NARENAS_FROM_NCORES (2);
            }
        }
    repeat:;
      size_t n = narenas;
      /* NB: the following depends on the fact that (size_t)0 - 1 is a
         very large number and that the underflow is OK.  If arena_max
         is set the value of arena_test is irrelevant.  If arena_test
         is set but narenas is not yet larger or equal to arena_test
         narenas_limit is 0.  There is no possibility for narenas to
         be too big for the test to always fail since there is not
         enough address space to create that many arenas.  */
      if (__builtin_expect (n <= narenas_limit - 1, 0))
        {
          /* Reserve an arena slot with CAS; retry if another thread
             raced us, and release the slot if creation fails. */
          if (catomic_compare_and_exchange_bool_acq (&narenas, n + 1, n))
            goto repeat;
          a = _int_new_arena (size);
          if (__builtin_expect (a == NULL, 0))
            catomic_decrement (&narenas);
        }
      else
        a = reused_arena (avoid_arena);
    }
#else
  if(!a_tsd)
    a = a_tsd = &main_arena;
  else
    {
      a = a_tsd->next;
      if(!a)
        {
          /* This can only happen while initializing the new arena. */
          (void)mutex_lock(&main_arena.mutex);
          THREAD_STAT(++(main_arena.stat_lock_wait));
          return &main_arena;
        }
    }

  /* Check the global, circularly linked list for available arenas. */
  bool retried = false;
 repeat:
  do
    {
      if(!mutex_trylock(&a->mutex))
        {
          /* On the retry pass we arrived here holding list_lock;
             release it before returning. */
          if (retried)
            (void)mutex_unlock(&list_lock);
          THREAD_STAT(++(a->stat_lock_loop));
          tsd_setspecific(arena_key, (void *)a);
          return a;
        }
      a = a->next;
    }
  while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_ locks. */
  if(!retried && mutex_trylock(&list_lock))
    {
      /* We will block to not run in a busy loop.  */
      (void)mutex_lock(&list_lock);

      /* Since we blocked there might be an arena available now.  */
      retried = true;
      a = a_tsd;
      goto repeat;
    }

  /* Nothing immediately available, so generate a new arena.  Note that
     list_lock is held here (via trylock on the first pass or the
     blocking lock on the retry pass) and released just below. */
  a = _int_new_arena(size);
  (void)mutex_unlock(&list_lock);
#endif

  return a;
}
/* Initialize ptmalloc: set up the main arena's embedded mspace, locks,
   thread-specific arena key, atfork handlers, and MALLOC_* environment
   tuning.  (The `static'/#endif prefix belongs to a conditional opened
   before this block.)  */
static
#endif
void
ptmalloc_init(void)
{
  const char* s;
  int secure = 0;
  void *mspace;

  /* <0 means "never initialized"; run exactly once. */
  if(__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

  /*if (mp_.pagesize == 0) ptmalloc_init_minimal();*/

#ifndef NO_THREADS
# if USE_STARTER & 1
  /* With some threads implementations, creating thread-specific data or
     initializing a mutex may call malloc() itself.  Provide a simple
     starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
# ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
# endif /* !defined _LIBC */
# endif /* USE_STARTER & 1 */
#endif /* !defined NO_THREADS */

  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;

  /* Carve the main arena's mspace out of the static main_arena object
     itself, just past its header. */
  mspace = create_mspace_with_base((char*)&main_arena + MSPACE_OFFSET,
                                   sizeof(main_arena) - MSPACE_OFFSET, 0);
  assert(mspace == arena_to_mspace(&main_arena));

  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);

#ifndef NO_THREADS
# if USE_STARTER & 1
  /* Restore the real hooks saved above. */
  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;
# endif
# if USE_STARTER & 2
  __malloc_hook = 0;
  __memalign_hook = 0;
  __free_hook = 0;
# endif
#endif

#ifdef _LIBC
  secure = __libc_enable_secure;
  /* NOTE(review): in the _LIBC branch `s' is never assigned before the
     `if (s)' test below — it is read uninitialized; confirm whether
     this configuration is ever built. */
#else
  if (! secure)
    {
      if ((s = getenv("MALLOC_TRIM_THRESHOLD_")))
        public_mALLOPt(M_TRIM_THRESHOLD, atoi(s));
      if ((s = getenv("MALLOC_TOP_PAD_")) ||
          (s = getenv("MALLOC_GRANULARITY_")))
        public_mALLOPt(M_GRANULARITY, atoi(s));
      if ((s = getenv("MALLOC_MMAP_THRESHOLD_")))
        public_mALLOPt(M_MMAP_THRESHOLD, atoi(s));
      /*if ((s = getenv("MALLOC_MMAP_MAX_"))) this is no longer available
        public_mALLOPt(M_MMAP_MAX, atoi(s));*/
    }
  s = getenv("MALLOC_CHECK_");
#endif

  /* MALLOC_CHECK_ handling is disabled in this build. */
  if (s)
    {
      /*if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
        __malloc_check_init();*/
    }

  if (__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();

  __malloc_initialized = 1;
}
/* Initialize ptmalloc: minimal parameter setup, main arena and lock
   initialization, atfork handler registration, and MALLOC_* environment
   tuning.  Runs exactly once (__malloc_initialized: <0 never run, 0 in
   progress, 1 done).  */
static void
ptmalloc_init (void)
{
#if __STD_C
  const char* s;
#else
  char* s;
#endif
  int secure = 0;

  if(__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef _LIBC
# if defined SHARED && defined USE_TLS && !USE___THREAD
  /* ptmalloc_init_minimal may already have been called via
     __libc_malloc_pthread_startup, above.  */
  if (mp_.pagesize == 0)
# endif
#endif
    ptmalloc_init_minimal();

#ifndef NO_THREADS
# if defined _LIBC && defined USE_TLS
  /* We know __pthread_initialize_minimal has already been called, and
     that is enough.  */
# define NO_STARTER
# endif
# ifndef NO_STARTER
  /* With some threads implementations, creating thread-specific data or
     initializing a mutex may call malloc() itself.  Provide a simple
     starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
# ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
# endif /* !defined _LIBC */
# endif /* !defined NO_STARTER */
#endif /* !defined NO_THREADS */

  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;

#if defined _LIBC && defined SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (Void_t *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);

#ifndef NO_THREADS
# ifndef NO_STARTER
  /* Restore the real hooks saved above. */
  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;
# else
# undef NO_STARTER
# endif
#endif

#ifdef _LIBC
  secure = __libc_enable_secure;
  /* Scan the environment once for MALLOC_* tuning variables; S ends up
     pointing at the value of MALLOC_CHECK_, if present. */
  s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL, 0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          /* Dispatch on the length of the variable-name suffix. */
          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              /* Tuning is ignored in secure (setuid) processes. */
              if (! secure)
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    mALLOPt(M_TOP_PAD, atoi(&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    mALLOPt(M_PERTURB, atoi(&envline[9]));
                }
              break;
            case 9:
              if (! secure && memcmp (envline, "MMAP_MAX_", 9) == 0)
                mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
              break;
            case 15:
              if (! secure)
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
#else
  if (! secure)
    {
      if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
        mALLOPt(M_TRIM_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_TOP_PAD_")))
        mALLOPt(M_TOP_PAD, atoi(s));
      if((s = getenv("MALLOC_PERTURB_")))
        mALLOPt(M_PERTURB, atoi(s));
      if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
        mALLOPt(M_MMAP_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_MMAP_MAX_")))
        mALLOPt(M_MMAP_MAX, atoi(s));
    }
  s = getenv("MALLOC_CHECK_");
#endif

  /* Enable malloc checking if MALLOC_CHECK_ was seen. */
  if(s)
    {
      if(s[0])
        mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
      if (check_action != 0)
        __malloc_check_init();
    }

  if(__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();

  __malloc_initialized = 1;
}
/* Find and lock an arena for the calling thread, creating a new one if
   every existing arena is busy.  A_TSD is the arena this thread used
   last (NULL on first use).  Returns the locked arena, or 0 if a new
   arena could not be created or its lock not taken.  */
static struct malloc_arena*
arena_get2(struct malloc_arena* a_tsd, size_t size)
{
  struct malloc_arena* a;
  int err;

  if(!a_tsd)
    a = a_tsd = &main_arena;
  else
    {
      a = a_tsd->next;
      if(!a)
        {
          /* This can only happen while initializing the new arena. */
          (void)mutex_lock(&main_arena.mutex);
          THREAD_STAT(++(main_arena.stat_lock_wait));
          return &main_arena;
        }
    }

  /* Check the global, circularly linked list for available arenas. */
 repeat:
  do
    {
      if(!mutex_trylock(&a->mutex))
        {
          THREAD_STAT(++(a->stat_lock_loop));
          tsd_setspecific(arena_key, (void *)a);
          return a;
        }
      a = a->next;
    }
  while(a != a_tsd);

  /* If not even the list_lock can be obtained, try again.  This can
     happen during `atfork', or for example on systems where thread
     creation makes it temporarily impossible to obtain _any_ locks. */
  if(mutex_trylock(&list_lock))
    {
      a = a_tsd;
      goto repeat;
    }
  /* NOTE(review): the trylock above succeeded, so list_lock is held
     here and released immediately — presumably acquired only as a
     synchronization point before creating a new arena; confirm. */
  (void)mutex_unlock(&list_lock);

  /* Nothing immediately available, so generate a new arena. */
  a = _int_new_arena(size);
  if(!a)
    return 0;

  tsd_setspecific(arena_key, (void *)a);
  mutex_init(&a->mutex);
  err = mutex_lock(&a->mutex); /* remember result */

  /* Add the new arena to the global list; the barrier publishes a->next
     before main_arena.next. */
  (void)mutex_lock(&list_lock);
  a->next = main_arena.next;
  atomic_write_barrier ();
  main_arena.next = a;
  (void)mutex_unlock(&list_lock);

  if(err) /* locking failed; keep arena for further attempts later */
    return 0;

  THREAD_STAT(++(a->stat_lock_loop));
  return a;
}