static void _allocate_memory(void) { void *stack_mem, *temp_malloc_mem; if ((stack_mem = alloca(_size_stack))) _touch_memory(stack_mem, _size_stack); if ((temp_malloc_mem = malloc(_size_malloc_tmp))) _touch_memory(temp_malloc_mem, _size_malloc_tmp); if ((_malloc_mem = malloc(_size_malloc))) _touch_memory(_malloc_mem, _size_malloc); free(temp_malloc_mem); }
static void _allocate_memory(void) { void *stack_mem, *temp_malloc_mem; struct rlimit limit; /* Check if we could preallocate requested stack */ if ((getrlimit (RLIMIT_STACK, &limit) == 0) && ((_size_stack * 2) < limit.rlim_cur) && ((stack_mem = alloca(_size_stack)))) _touch_memory(stack_mem, _size_stack); /* FIXME else warn user setting got ignored */ if ((temp_malloc_mem = malloc(_size_malloc_tmp))) _touch_memory(temp_malloc_mem, _size_malloc_tmp); if ((_malloc_mem = malloc(_size_malloc))) _touch_memory(_malloc_mem, _size_malloc); free(temp_malloc_mem); }
/*
 * One-time warm-up of the BLAS buffer pool: allocate one buffer, make sure
 * the threading layer is initialized (SMP builds), touch the buffer pages
 * past the GEMM A-panel offset so they are faulted in, then return the
 * buffer to the pool.
 */
static void gotoblas_memory_init(void) {

  void *buffer;

  /* NOTE(review): presumably switches blas_memory_alloc() into its "hot"
   * allocation path — confirm against its definition. */
  hot_alloc = 1;

  buffer = (void *)blas_memory_alloc(0);

#ifdef SMP
  /* Lazily determine the CPU count on first use. */
  if (blas_cpu_number == 0) blas_get_cpu_number();
#ifdef SMP_SERVER
  /* Start the thread server once. */
  if (blas_server_avail == 0) blas_thread_init();
#endif
  /* Have every worker thread touch its memory, starting past the
   * A-panel offset within the pooled buffer. */
  _init_thread_memory((void *)((BLASULONG)buffer + GEMM_OFFSET_A));
#else
  /* Single-threaded build: touch the buffer directly.  NOTE(review): this
   * _touch_memory takes six arguments here, unlike the two-argument variant
   * elsewhere in this file — confirm which declaration is in scope. */
  _touch_memory(NULL, NULL, NULL, (void *)((BLASULONG)buffer + GEMM_OFFSET_A), NULL, 0);
#endif

  blas_memory_free(buffer);
}
/*
 * Reserve and pre-fault stack and heap memory so later allocations do not
 * fault while memory is locked.  The heap reserve is built from up to 32
 * malloc'd areas whose pages are touched and then freed again, steering
 * glibc away from handing back directly-mmap'd memory (which would be
 * unmapped on free and defeat the reservation).  Compiled out under
 * VALGRIND_POOL.
 */
static void _allocate_memory(void)
{
#ifndef VALGRIND_POOL
	void *stack_mem;
	struct rlimit limit;
	int i, area = 0, missing = _size_malloc_tmp, max_areas = 32, hblks;
	char *areas[max_areas];

	/* Check if we could preallocate requested stack */
	if ((getrlimit (RLIMIT_STACK, &limit) == 0) &&
	    ((_size_stack * 2) < limit.rlim_cur) &&
	    ((stack_mem = alloca(_size_stack))))
		_touch_memory(stack_mem, _size_stack);
	/* FIXME else warn user setting got ignored */

	/*
	 * When a brk() fails due to fragmented address space (which sometimes
	 * happens when we try to grab 8M or so), glibc will make a new
	 * arena. In this arena, the rules for using "direct" mmap are relaxed,
	 * circumventing the MAX_MMAPs and MMAP_THRESHOLD settings. We can,
	 * however, detect when this happens with mallinfo() and try to co-opt
	 * malloc into using MMAP as a MORECORE substitute instead of returning
	 * MMAP'd memory directly. Since MMAP-as-MORECORE does not munmap the
	 * memory on free(), this is good enough for our purposes.
	 */
	while (missing > 0) {
		struct mallinfo inf = mallinfo();
		/* Snapshot the count of mmap'd blocks before this malloc so we
		 * can tell whether the allocation below came from mmap. */
		hblks = inf.hblks;

		if ((areas[area] = malloc(_size_malloc_tmp)))
			_touch_memory(areas[area], _size_malloc_tmp);

		inf = mallinfo();

		if (hblks < inf.hblks) {
			/* malloc cheated and used mmap, even though we told it
			   not to; we try with twice as many areas, each half
			   the size, to circumvent the faulty logic in glibc */
			free(areas[area]);
			_size_malloc_tmp /= 2;
		} else {
			++ area;
			missing -= _size_malloc_tmp;
		}

		if (area == max_areas && missing > 0) {
			/* Too bad. Warn the user and proceed, as things are
			 * most likely going to work out anyway. */
			log_warn("WARNING: Failed to reserve memory, %d bytes missing.", missing);
			break;
		}
	}

	if ((_malloc_mem = malloc(_size_malloc)))
		_touch_memory(_malloc_mem, _size_malloc);

	/* free up the reserves so subsequent malloc's can use that memory */
	for (i = 0; i < area; ++i)
		free(areas[i]);
#endif
}