// Back this heap with a fresh dlmalloc mspace built on the given page
// allocator. Returns 0 on success, PAL_HEAP_ALLOCATOR_COULD_NOT_CREATE
// when the mspace cannot be created.
int palHeapAllocator::Create(palPageAllocator* page_allocator) {
  if ((internal_ = create_mspace(0, page_allocator)) == NULL) {
    return PAL_HEAP_ALLOCATOR_COULD_NOT_CREATE;
  }
  // Track large (mmap'd) chunks so destroying the mspace releases them too.
  mspace_track_large_chunks(internal_, 1);
  return 0;
}
/* Initialize the dlmalloc memory layer: when the comm layer supplies a
 * shared heap, build the mspace on top of it; otherwise let dlmalloc
 * grow its own (locked) mspace. */
void chpl_mem_layerInit(void) {
  chpl_comm_desired_shared_heap(&saved_heap_start, &saved_heap_size);
  if (saved_heap_start && saved_heap_size) {
    chpl_dlmalloc_heap = create_mspace_with_base(saved_heap_start,
                                                 saved_heap_size, 1);
  } else {
    chpl_dlmalloc_heap = create_mspace(0, 1);
  }
}
/* Lazily create the pool's mspace and its spinlock; later calls are no-ops. */
void mempool_init() {
  if (mp_mspace__)
    return;
  mp_mspace__ = create_mspace(0, 0);
  KeInitializeSpinLock(&mp_spinlock__);
}
/* Create a new dlmalloc mspace wrapped in mm_mspace_t; a creation failure
 * is fatal (mm_fatal does not return). */
static mm_mspace_t mm_mspace_create(void) {
  mm_mspace_t created;

  created.opaque = create_mspace(0, 0);
  if (created.opaque == NULL) {
    /* errno is whatever the failing system allocation left behind */
    mm_fatal(errno, "failed to create mspace");
  }
  return created;
}
/* Initialize the dlmalloc layer, preferring the comm layer's desired
 * shared heap and falling back to a self-managed locked mspace. */
void chpl_mem_layerInit(void) {
  void* start = NULL;
  size_t size = 0;

  chpl_comm_desired_shared_heap(&start, &size);
  chpl_dlmalloc_heap = (start != NULL && size != 0)
                           ? create_mspace_with_base(start, size, 1)
                           : create_mspace(0, 1);
}
static mspace hb_mspace( void ) { PHB_MSPACE pm = ( PHB_MSPACE ) hb_stackAllocator(); if( pm ) return pm->ms; if( ! s_gm ) s_gm = create_mspace( 0, 1 ); return s_gm; }
/* Build an arena backed by a new dlmalloc mspace sized to the requested
 * starting capacity, then wire the dlmalloc entry points into it. */
void new_default_arena_with_capacity(memory_arena *arena, ulen starting_capacity) {
  /* TODO: consider locked=1 here for thread safety. */
  mspace space = create_mspace(starting_capacity, 0);
  if (space == NULL)
    panic("OOM while building new dlmalloc mspace");

  /* Track large chunks so destroying the arena releases all its memory. */
  mspace_track_large_chunks(space, 1);

  new_custom_arena(arena, space);
  arena->alloc        = mspace_malloc;
  arena->alloc_zeroed = dlmalloc_mspace_alloc_zeroed;
  arena->free         = mspace_free;
  arena->realloc      = mspace_realloc;
  arena->deallocate   = dlmalloc_mspace_deallocate;
}
void HeapInit(MemAllocHeap* heap, size_t capacity, uint32_t flags) { #if ENABLED(USE_DLMALLOC) heap->m_MemSpace = create_mspace(capacity, 0); if (!heap->m_MemSpace) Croak("couldn't create memspace for new heap"); #else heap->m_MemSpace = nullptr; #endif heap->m_Flags = flags; if (flags & HeapFlags::kThreadSafe) { MutexInit(&heap->m_Lock); } }
/**
 * Initialize segment in a private heap
 *
 * Allocates a process-private heap (mheap or dlmalloc mspace depending on
 * the build), rounds the requested size up to a whole number of pages, and
 * fills in a freshly allocated shared-memory header for the segment.
 *
 * @param ssvm  segment descriptor; ssvm_size is the requested byte size
 * @return 0 on success, -1 if the mheap allocation fails
 */
int ssvm_master_init_private (ssvm_private_t * ssvm)
{
  ssvm_shared_header_t *sh;
  u32 pagesize = clib_mem_get_page_size ();
  u32 rnd_size = 0;
  u8 *heap;

  /* Round the requested size up to a page multiple; clib_max guards the
   * case where adding (pagesize - 1) would wrap the u32. */
  rnd_size = clib_max (ssvm->ssvm_size + (pagesize - 1), ssvm->ssvm_size);
  rnd_size &= ~(pagesize - 1);

#if USE_DLMALLOC == 0
  {
    mheap_t *heap_header;

    heap = mheap_alloc (0, rnd_size);
    if (heap == 0)
      {
	clib_unix_warning ("mheap alloc");
	return -1;
      }
    /* mark the heap thread-safe so concurrent allocs take the mheap lock */
    heap_header = mheap_header (heap);
    heap_header->flags |= MHEAP_FLAG_THREAD_SAFE;
  }
#else
  /* NOTE(review): create_mspace can return NULL on failure; unlike the
   * mheap path above, that case is not checked here — confirm intent. */
  heap = create_mspace (rnd_size, 1 /* locked */ );
#endif

  ssvm->ssvm_size = rnd_size;
  ssvm->i_am_master = 1;
  ssvm->my_pid = getpid ();
  ssvm->requested_va = ~0;

  /* Allocate a [sic] shared memory header, in process memory... */
  sh = clib_mem_alloc_aligned (sizeof (*sh), CLIB_CACHE_LINE_BYTES);
  ssvm->sh = sh;

  clib_memset (sh, 0, sizeof (*sh));
  sh->heap = heap;
  sh->ssvm_va = pointer_to_uword (heap);
  sh->type = SSVM_SEGMENT_PRIVATE;

  return 0;
}
static PHB_MSPACE hb_mspace_alloc( void ) { if( s_mspool[ 0 ].ms == NULL && s_gm ) { s_mspool[ 0 ].count = 1; s_mspool[ 0 ].ms = s_gm; return &s_mspool[ 0 ]; } else { int i, imin = 0; for( i = 1; i < HB_MSPACE_COUNT; ++i ) { if( s_mspool[ i ].count < s_mspool[ imin ].count ) imin = i; } if( s_mspool[ imin ].ms == NULL ) s_mspool[ imin ].ms = create_mspace( 0, 1 ); s_mspool[ imin ].count++; return &s_mspool[ imin ]; } }
/*
 * Abstract: create a new malloc zone backed by a dlmalloc mspace.
 *
 * start_size - initial capacity hint for the backing mspace.
 * flags      - the magic value 0xfee1dead suppresses mspace creation,
 *              leaving the zone without backing storage.
 *
 * Returns the new zone, or NULL when no zone slot is available.
 */
malloc_zone_t *
malloc_create_zone(vm_size_t start_size, unsigned flags)
{
    OSMemoryZone* osm = _OSGetFirstAvailableMemoryZone();

    /* robustness: don't dereference a missing slot */
    if (osm == NULL)
        return NULL;

    /* mkay, get default impls */
    osm->basic_zone.calloc = (void*)Impl_malloc_zone_calloc;
    osm->basic_zone.free = (void*)Impl_malloc_zone_free;
    osm->basic_zone.malloc = (void*)Impl_malloc_zone_malloc;
    osm->basic_zone.valloc = (void*)Impl_malloc_zone_valloc;
    osm->basic_zone.memalign = (void*)Impl_malloc_zone_memalign;
    osm->basic_zone.realloc = (void*)Impl_malloc_zone_realloc;

    if (flags != 0xfee1dead) {
        /* make a mspace */
        osm->memory_space = create_mspace(start_size, FALSE);
    }

    /* BUG FIX: vm_size_t is not int, so "%d" was undefined behavior in a
     * printf-style logger; cast and use %lu (assumes OSLog is printf-like
     * — matches the %p usage here). */
    OSLog("malloc_create_zone: created zone {size=%lu, space=%p, addr=%p}",
          (unsigned long)start_size, osm->memory_space, osm);

    return (malloc_zone_t*)osm;
}
// Eagerly create every pooled mspace (each one created with locking on).
void inline initTheSpaces()
{
    int idx = 0;
    while (idx < N_MSPACES) {
        mspaces[idx] = create_mspace(0, 1);
        ++idx;
    }
}
// Construct the arena on a fresh dlmalloc mspace (locked when requested);
// a creation failure is reported as a critical failure.
OOBase::ArenaAllocator::ArenaAllocator(bool locked) :
		m_mspace(create_mspace(0, locked ? 1 : 0))
{
	if (!m_mspace)
		OOBase_CallCriticalFailure("Failed to create dl_malloc mspace");
}
void DCU_initialize() { if (DCU_STATE(DCU_INITIALIZED)) { return; } if (DCU_STATE(DCU_MUTEX_INITED)) { DCU_MutexScopedLock lock(DCU_mutex); if (DCU_STATE(DCU_INITIALIZED)) { return; } else { fprintf(DCU_FALLBACK_STREAM, "DynamicCheckUp Concurrency error.\n"); _exit(1); } } if (pthread_mutex_init(&DCU_mutex, 0) < 0) { fprintf(DCU_FALLBACK_STREAM, "DynamicCheckUp unable to initialize mutex\n"); _exit(1); } else { DCU_SET_FLAG(DCU_MUTEX_INITED); } { DCU_MutexScopedLock lock(DCU_mutex); memory_space = create_mspace(0, 0); DCU_SET_FLAG(DCU_INITIALIZED); // // init backtrace so it wont recursively call malloc // DCU_Pointer stack[DCU_STACK_TRACE_SIZE]; backtrace(stack, DCU_STACK_TRACE_SIZE); // // Init Tracing data // DCU_stream = DCU_FALLBACK_STREAM; memset(DCU_memory_stats, 0, sizeof(DCU_MemoryStats) * DCU_DYNAMIC_OPERATION_TYPES); memset(&DCU_memory_stats_new, 0, sizeof(DCU_MemoryStats)); memset(&DCU_memory_stats_new_array, 0, sizeof(DCU_MemoryStats)); memset(&DCU_memory_stats_c, 0, sizeof(DCU_MemoryStats)); memset(DCU_null_stack, 0, sizeof(DCU_null_stack)); // // Operations HashTable // DCU_memory = (DCU_OperationInfo**) DCU_malloc( DCU_HASH_TABLE_SIZE * sizeof(DCU_OperationInfo*) ); memset(DCU_memory, 0, DCU_HASH_TABLE_SIZE * sizeof(DCU_OperationInfo*)); // // Problems Linked-List // DCU_problems = 0; // // Open Log File // DCU_stream = fopen(DCU_OUTPUT_FILE, "w"); if (DCU_stream < 0) { fprintf(DCU_FALLBACK_STREAM, "DynamicCheckUp: Unable to open %s: %m\n", DCU_OUTPUT_FILE); DCU_stream = DCU_FALLBACK_STREAM; } else { int flags = fcntl(fileno(DCU_stream), F_GETFD, 0); if (flags >= 0) { flags |= FD_CLOEXEC; fcntl(fileno(DCU_stream), F_SETFD, flags); } setvbuf(DCU_stream, stream_trace_buffer, _IOFBF, DCU_STREAM_BUFFER_SIZE); } DCU_SET_FLAG(DCU_TRACING); } DCU_write("DynamicCheckUp Started\n"); }