/*
 * One-shot setup of the external-memory dlmalloc mspace.
 * Sizes the region (reserving 32 MiB), caps it against the vmalloc area,
 * maps the physical range, and builds a locked mspace over the mapping.
 */
static void extmem_init(void)
{
	if (extmem_mspace == NULL) {
		/* Cap: one third of the vmalloc area, rounded down to a
		 * 32 MiB (0x02000000) boundary. */
		size_t extmem_vmalloc_limit = (VMALLOC_TOTAL/3)& ~(0x02000000-1);
		if (extmem_mspace_size > 0x02000000) {
			/* Hold back 32 MiB from the configured size. */
			extmem_mspace_size -= 0x02000000;
		} else {
			/* Size too small or unset: pull in the LCA reserved
			 * region, then retry the 32 MiB reservation. */
			extmem_get_lca_reserved_mem();
			if (extmem_mspace_size > 0x02000000) {
				extmem_mspace_size -= 0x02000000;
			}
		}
		if (extmem_mspace_size > extmem_vmalloc_limit) {
			printk(KERN_WARNING "[EXT_MEM] extmem_mspace_size: 0x%zx over limit: 0x%zx\n", extmem_mspace_size, extmem_vmalloc_limit);
			extmem_mspace_size = extmem_vmalloc_limit;
		}
		/* NOTE(review): printed with %zx, so the size appears to be
		 * unsigned — "<= 0" can only ever catch 0 here. */
		if (extmem_mspace_size <= 0) {
			printk(KERN_ERR "[EXT_MEM] no extmem, need check config\n");
			BUG();
		}
#ifdef CONFIG_ARM64
		/* arm64 path maps write-combined instead of cached. */
		extmem_mspace_base = (void*) ioremap_wc(extmem_phys_base, extmem_mspace_size);
#else
		extmem_mspace_base = (void*) ioremap_cached(extmem_phys_base, extmem_mspace_size);
#endif
		/* locked = 1: the mspace is thread-safe. */
		extmem_mspace = create_mspace_with_base(extmem_mspace_base, extmem_mspace_size, 1);
		extmem_printk("[EXT_MEM] extmem_phys_base: %p, extmem_mspace_size: 0x%zx, extmem_mspace: %p\n", (void *)extmem_phys_base, extmem_mspace_size, extmem_mspace);
	}
}
/* Allocate a qxl_mem wrapper and build a dlmalloc mspace over the
 * [base, base + n_bytes) region.
 * Returns the new qxl_mem (owned by the caller), or NULL if the wrapper
 * allocation fails. */
struct qxl_mem *
qxl_mem_create (void *base, unsigned long n_bytes)
{
    struct qxl_mem *mem;

    /* calloc takes (nmemb, size); the original passed them reversed,
     * which happens to work but is non-idiomatic. */
    mem = calloc (1, sizeof (*mem));
    if (!mem)
	goto out;

    ErrorF ("memory space from %p to %p\n", base, (char *)base + n_bytes);

    mem->space = create_mspace_with_base (base, n_bytes, 0, NULL);
    mem->base = base;
    mem->n_bytes = n_bytes;

#ifdef DEBUG_QXL_MEM
    {
	size_t used;

	/* Record the mspace's own bookkeeping overhead so later leak
	 * accounting can subtract it. */
	mspace_malloc_stats_return(mem->space, NULL, NULL, &used);
	mem->used_initial = used;
	mem->unverifiable = 0;
	mem->missing = 0;
    }
#endif

out:
    return mem;
}
// Build a dlmalloc mspace over the caller-provided region [mem, mem + size).
// Returns 0 on success, PAL_HEAP_ALLOCATOR_COULD_NOT_CREATE on failure.
// NOTE(review): create_mspace_with_base is called with two arguments here;
// stock dlmalloc takes (base, capacity, locked) — presumably a project
// wrapper or default argument. Confirm against the local dlmalloc header.
int palHeapAllocator::Create(void* mem, uint64_t size) {
  internal_ = create_mspace_with_base(mem, (size_t)size);
  if (internal_ == NULL) {
    return PAL_HEAP_ALLOCATOR_COULD_NOT_CREATE;
  }
  return 0;
}
/*
 * One-shot setup of the external-memory mspace: size it as everything above
 * the supported DRAM range (minus a 32 MiB hold-back), cap it against the
 * vmalloc area, map it cached, and build a locked dlmalloc mspace on top.
 */
static void extmem_init(void)
{
	if (extmem_mspace == NULL) {
		if (extmem_mspace_size == 0) {
			/* Cap: one third of the vmalloc area, rounded down to
			 * a 32 MiB (0x02000000) boundary. */
			size_t extmem_vmalloc_limit = (VMALLOC_TOTAL/3)& ~(0x02000000-1);
			/* Require at least 32 MiB of DRAM beyond the
			 * configured supported maximum. */
			if (get_max_DRAM_size() < (CONFIG_MAX_DRAM_SIZE_SUPPORT + 0x02000000)) {
				printk(KERN_ERR "[EXT_MEM] no extmem, get_max_DRAM_size:%p, CONFIG_MAX_DRAM_SIZE_SUPPORT:0x%x, get_max_phys_addr:%p\n", (void *)get_max_DRAM_size(), CONFIG_MAX_DRAM_SIZE_SUPPORT, (void *)get_max_phys_addr());
				BUG();
			}
			extmem_mspace_size = get_max_DRAM_size() - CONFIG_MAX_DRAM_SIZE_SUPPORT - 0x02000000;
			if (extmem_mspace_size > extmem_vmalloc_limit) {
				printk(KERN_WARNING "[EXT_MEM] extmem_mspace_size: 0x%zx over limit: 0x%zx\n", extmem_mspace_size, extmem_vmalloc_limit);
				extmem_mspace_size = extmem_vmalloc_limit;
			}
		}
		//extmem_mspace_base = (void*) ioremap(get_max_phys_addr(), extmem_mspace_size);
		extmem_mspace_base = (void*) ioremap_cached(get_max_phys_addr(), extmem_mspace_size);
		/* locked = 1: the mspace is thread-safe. */
		extmem_mspace = create_mspace_with_base(extmem_mspace_base, extmem_mspace_size, 1);
		extmem_printk("[EXT_MEM] get_max_DRAM_size:0x%x, CONFIG_MAX_DRAM_SIZE_SUPPORT:0x%x, get_max_phys_addr:%p, extmem_mspace:%p\n", get_max_DRAM_size(), CONFIG_MAX_DRAM_SIZE_SUPPORT, (void *)get_max_phys_addr(), extmem_mspace);
	}
}
/*
 * Allocate a new malloc arena big enough for `size` bytes of requests.
 * The arena header and its embedded dlmalloc mspace share one mmap'd
 * region: the mspace starts MSPACE_OFFSET bytes past the arena header.
 * Returns the arena, or 0 if mmap or mspace creation fails.
 */
static struct malloc_arena* _int_new_arena(size_t size)
{
  struct malloc_arena* a;
  size_t mmap_sz = sizeof(*a) + pad_request(size);
  void *m;

  if (mmap_sz < ARENA_SIZE_MIN)
    mmap_sz = ARENA_SIZE_MIN;

  /* conservative estimate for page size */
  mmap_sz = (mmap_sz + 8191) & ~(size_t)8191;
  a = CALL_MMAP(mmap_sz);
  if ((char*)a == (char*)-1)
    return 0;

  /* Embed the mspace just past the arena header (unlocked). */
  m = create_mspace_with_base((char*)a + MSPACE_OFFSET,
                              mmap_sz - MSPACE_OFFSET, 0);

  if (!m) {
    /* mspace creation failed: release the whole mapping. */
    CALL_MUNMAP(a, mmap_sz);
    a = 0;
  } else {
    /*a->next = NULL;*/
    /*a->system_mem = a->max_system_mem = h->size;*/
  }

  return a;
}
/* Set up the texture-memory mspace: carve an 0x80-aligned block out of
 * 3DS linear memory and build an unlocked dlmalloc mspace over it, with
 * large-chunk tracking enabled. */
void ctr_rend_texture_init()
{
	void *pool = linearMemAlign(MEM_SPACE_SIZE, 0x80);

	tex_msp_base = pool;
	tex_msp = create_mspace_with_base(pool, MEM_SPACE_SIZE, 0);
	mspace_track_large_chunks(tex_msp, 1);
}
/*! \brief Main function of the example memory manager. * * This example shows how memory can be allocated from different * memory spaces. * The default allocation will get memory from the internal SRAM. * By using the "memory space" functionality of the memory manager * it is possible to use other memory spaces as resources like an * attached SDRAM. */ int main(void) { void *some_space; void *some_more_space; void *some_space_in_sdram; mspace sdram_msp; // Switch to external oscillator 0. pcl_switch_to_osc(PCL_OSC0, FOSC0, OSC0_STARTUP); sdramc_init(FHSB_HZ); // default allocation from internal SRAM some_space = dlmalloc(512); some_more_space = dlmalloc(64); // Create a new memory space the covers the SDRAM sdram_msp = create_mspace_with_base((void*) SDRAM_START_ADDRESS, MEM_SPACE_SIZE, 0); // allocate memory from the created memroy space some_space_in_sdram = mspace_malloc(sdram_msp, 512); while (true) { } }
/*
 * One-shot setup of the external-memory mspace: derive the size from the
 * actual DRAM size if unset, map the region, and build a locked dlmalloc
 * mspace over the mapping.
 */
static void extmem_init(void)
{
	/* Already initialized — nothing to do. */
	if (extmem_mspace != NULL)
		return;

	if (extmem_mspace_size == 0)
		extmem_mspace_size =
			get_actual_DRAM_size() - CONFIG_MAX_DRAM_SIZE_SUPPORT;

	extmem_mspace_base =
		(void *)ioremap(get_max_phys_addr(), extmem_mspace_size);
	/* locked = 1: the mspace is thread-safe. */
	extmem_mspace = create_mspace_with_base(extmem_mspace_base,
						extmem_mspace_size, 1);
}
/* Initialize the dlmalloc-backed memory layer. If the comm layer wants a
 * specific shared-heap region, build the heap on top of it; otherwise let
 * dlmalloc acquire memory on its own. The mspace is created locked. */
void chpl_mem_layerInit(void) {
  chpl_comm_desired_shared_heap(&saved_heap_start, &saved_heap_size);

  if (saved_heap_start && saved_heap_size) {
    chpl_dlmalloc_heap = create_mspace_with_base(saved_heap_start,
                                                 saved_heap_size, 1);
  } else {
    chpl_dlmalloc_heap = create_mspace(0, 1);
  }
}
// Bind this allocator to the caller-provided pool and build a dlmalloc
// mspace over it. Must be called exactly once (asserts m_mspace is unset).
void PoolAllocator::Initialize(void* poolMemory, size_t poolMemorySize)
{
	SI_ASSERT(m_mspace == nullptr);

	m_poolMemory     = poolMemory;
	m_poolMemorySize = poolMemorySize;

	// locked = 1 so the mspace is thread-safe.
	m_mspace = create_mspace_with_base(poolMemory, poolMemorySize, 1);
}
/* Initialize the dlmalloc-backed memory layer. The comm layer is asked
 * for a desired shared-heap placement; when it supplies one, the heap is
 * built over that region, otherwise dlmalloc manages its own memory.
 * Either way the mspace is created locked. */
void chpl_mem_layerInit(void) {
  void* base;
  size_t size;

  chpl_comm_desired_shared_heap(&base, &size);

  if (base != NULL && size != 0)
    chpl_dlmalloc_heap = create_mspace_with_base(base, size, 1);
  else
    chpl_dlmalloc_heap = create_mspace(0, 1);
}
/**
 * Initialize memfd segment master
 *
 * Allocates a shared memfd-backed VM region, places the shared header in
 * its first page, and builds a locked, non-expanding heap over the rest.
 * Returns 0 on success or an SSVM_API_ERROR_* code.
 */
int ssvm_master_init_memfd (ssvm_private_t * memfd)
{
  uword page_size;
  ssvm_shared_header_t *sh;
  void *oldheap;
  clib_mem_vm_alloc_t alloc = { 0 };
  clib_error_t *err;

  /* A segment size must have been configured up front. */
  if (memfd->ssvm_size == 0)
    return SSVM_API_ERROR_NO_SIZE;

  ASSERT (vec_c_string_is_terminated (memfd->name));

  /* Allocate the shared VM region backed by a memfd. */
  alloc.name = (char *) memfd->name;
  alloc.size = memfd->ssvm_size;
  alloc.flags = CLIB_MEM_VM_F_SHARED;
  alloc.requested_va = memfd->requested_va;
  if ((err = clib_mem_vm_ext_alloc (&alloc)))
    {
      clib_error_report (err);
      return SSVM_API_ERROR_CREATE_FAILURE;
    }

  memfd->fd = alloc.fd;
  memfd->sh = (ssvm_shared_header_t *) alloc.addr;
  memfd->my_pid = getpid ();
  memfd->i_am_master = 1;

  page_size = 1ull << alloc.log2_page_size;
  sh = memfd->sh;
  /* The shared header occupies the first page of the segment. */
  sh->master_pid = memfd->my_pid;
  sh->ssvm_size = memfd->ssvm_size;
  sh->ssvm_va = pointer_to_uword (sh);
  sh->type = SSVM_SEGMENT_MEMFD;

#if USE_DLMALLOC == 0
  uword flags = MHEAP_FLAG_DISABLE_VM | MHEAP_FLAG_THREAD_SAFE;

  sh->heap = mheap_alloc_with_flags (((u8 *) sh) + page_size,
                                     memfd->ssvm_size - page_size, flags);
#else
  /* Heap covers everything after the header page; locked = thread-safe. */
  sh->heap = create_mspace_with_base (((u8 *) sh) + page_size,
                                      memfd->ssvm_size - page_size,
                                      1 /* locked */ );
  /* Fixed-size segment: keep dlmalloc from mmap'ing more memory. */
  mspace_disable_expand (sh->heap);
#endif

  /* Format the segment name inside the segment's own heap. */
  oldheap = ssvm_push_heap (sh);
  sh->name = format (0, "%s", memfd->name, 0);
  ssvm_pop_heap (oldheap);

  /* The application has to set set sh->ready... */
  return 0;
}
/*
 * One-shot setup of the external-memory mspace: size it as the DRAM
 * beyond the supported range minus a 32 MiB hold-back, map it cached,
 * and build a locked dlmalloc mspace over the mapping.
 */
static void extmem_init(void)
{
	if (extmem_mspace == NULL) {
		if (extmem_mspace_size == 0) {
			// 0x9E000000 is the spare address
			extmem_mspace_size = get_actual_DRAM_size() - CONFIG_MAX_DRAM_SIZE_SUPPORT - 0x02000000;
		}
		//extmem_mspace_base = (void*) ioremap(get_max_phys_addr(), extmem_mspace_size);
		extmem_mspace_base = (void*) ioremap_cached(get_max_phys_addr(), extmem_mspace_size);
		/* locked = 1: the mspace is thread-safe. */
		extmem_mspace = create_mspace_with_base(extmem_mspace_base, extmem_mspace_size, 1);
		//printk(KERN_ERR "[LCH_DEBUG]get_actual_DRAM_size:0x%x, CONFIG_MAX_DRAM_SIZE_SUPPORT:0x%x, get_max_phys_addr:0x%x, extmem_mspace:0x%x\n",
		//	get_actual_DRAM_size(), CONFIG_MAX_DRAM_SIZE_SUPPORT, get_max_phys_addr(), extmem_mspace);
	}
}
/*
 * JNI: back a dlmalloc mspace with the memory of a direct ByteBuffer and
 * attach the native state (mspace, base, size) to the Java object's "ptr"
 * field. `synchronize` selects a locked (thread-safe) mspace.
 */
METHODPREFIX(CLASS, void, init)(ST_ARGS, jobject byteBuffer, jlong size, jboolean synchronize)
{
	void * base=env->GetDirectBufferAddress(byteBuffer);
	mspace space=create_mspace_with_base(base, size, synchronize?1:0);
	if(space==NULL) {
		/* Buffer too small to hold mspace bookkeeping. */
		JNU_ThrowByName(env, EXCCLASS, "Error creating mspace in buffer (buffer size too small?)");
	}
	/* NOTE(review): execution continues past a failed creation — the
	 * fields below are still written (space == NULL) while the Java
	 * exception is pending. Confirm this is intended. */
	initObj(env, obj, "ptr", sizeof(JNISTRUCT));
	FID=getLongFieldId(env, obj, "ptr");
	MYHEADID(JNISTRUCT, FID);
	str->space=space;
	str->base=base;
	str->size=size;
}
/*
 * Discard every allocation in `mem` at once by re-creating the mspace
 * in place over the original region. In DEBUG_QXL_MEM builds, first
 * report bytes still allocated beyond the mspace's initial bookkeeping.
 */
void
qxl_mem_free_all (struct qxl_mem *mem)
{
#ifdef DEBUG_QXL_MEM
    size_t maxfp, fp, used;

    if (mem->space)
    {
        mspace_malloc_stats_return(mem->space, &maxfp, &fp, &used);
        /* Anything above the initial bookkeeping is an untracked leak. */
        mem->missing = used - mem->used_initial;
        ErrorF ("untracked %zd bytes (%s)", used - mem->used_initial,
                mem->unverifiable ? "marked unverifiable" : "oops");
    }
#endif
    mem->space = create_mspace_with_base (mem->base, mem->n_bytes, 0, NULL);
}
// Create a dlmalloc mspace over `begin` with an initial footprint of
// morecoreStart bytes, capped at startingSize via the footprint limit.
// Returns the mspace, or NULL on failure (errno describes the cause).
static mspace createMspace(void* begin, size_t morecoreStart, size_t startingSize) {
  // Clear errno to allow strerror on error.
  errno = 0;
  // Allow access to initial pages that will hold mspace bookkeeping.
  // The original ignored mprotect failure, letting create_mspace_with_base
  // touch unwritable memory; fail loudly instead.
  if (mprotect(begin, morecoreStart, PROT_READ | PROT_WRITE) != 0) {
    ALOGE("createMspace mprotect failed %s", strerror(errno));
    return NULL;
  }
  // Create mspace using our backing storage starting at begin and with a footprint of
  // morecoreStart. Don't use an internal dlmalloc lock. When morecoreStart bytes of memory
  // are exhausted morecore will be called.
  mspace msp = create_mspace_with_base(begin, morecoreStart, false /*locked*/);
  if (msp != NULL) {
    // Do not allow morecore requests to succeed beyond the starting size of the heap.
    mspace_set_footprint_limit(msp, startingSize);
  } else {
    ALOGE("create_mspace_with_base failed %s", strerror(errno));
  }
  return msp;
}
/* Heap segment size, in bytes. Can't grow for now, so choose something sensible, and within the machine's limits (see sysctl vars kernel.shmmax and kernel.shmall) */
int gm_init(size_t segmentSize)
{
    /* Create a SysV IPC shared memory segment, attach to it, and mark the segment to
     * auto-destroy when the number of attached processes becomes 0.
     *
     * IMPORTANT: There is a small window of vulnerability between shmget and shmctl that
     * can lead to major issues: between these calls, we have a segment of persistent
     * memory that will survive the program if it dies (e.g. someone just happens to send us
     * a SIGKILL)
     */
    assert(GM == NULL);
    assert(gm_shmid == 0);
    gm_shmid = shmget(IPC_PRIVATE, segmentSize, 0644 | IPC_CREAT /*| SHM_HUGETLB*/);
    if (gm_shmid == -1) {
        perror("gm_create failed shmget");
        exit(1);
    }
    /* Attach at the fixed base address so every process sees the same VA. */
    GM = static_cast<gm_segment*>(shmat(gm_shmid, GM_BASE_ADDR, 0));
    if (GM != GM_BASE_ADDR) {
        perror("gm_create failed shmat");
        warn("shmat failed, shmid %d. Trying not to leave garbage behind before dying...", gm_shmid);
        int ret = shmctl(gm_shmid, IPC_RMID, NULL);
        if (ret) {
            perror("shmctl failed, we're leaving garbage behind!");
            panic("Check /proc/sysvipc/shm and manually delete segment with shmid %d", gm_shmid);
        } else {
            panic("shmctl succeeded, we're dying in peace");
        }
    }

    //Mark the segment to auto-destroy when the number of attached processes becomes 0.
    int ret = shmctl(gm_shmid, IPC_RMID, NULL);
    assert(!ret);

    /* First 1024 bytes of the segment hold the gm_segment header;
     * the mspace starts right after it. */
    char* alloc_start = reinterpret_cast<char*>(GM) + 1024;
    /* NOTE(review): the extra "- 1" in the usable size is undocumented —
     * confirm whether it is intentional. */
    size_t alloc_size = segmentSize - 1 - 1024;
    GM->base_regp = NULL;
    /* locked = 1: the shared mspace is thread/process-safe. */
    GM->mspace_ptr = create_mspace_with_base(alloc_start, alloc_size, 1 /*locked*/);
    futex_init(&GM->lock);
    assert(GM->mspace_ptr);
    return gm_shmid;
}
/*
 * Lazily create the CodeFlinger code cache: an ashmem-backed RWX mapping
 * with an unlocked dlmalloc mspace over it, footprint-capped at the full
 * cache capacity. Returns the (singleton) mspace.
 * NOTE(review): no locking is visible here — confirm callers serialize
 * the first call.
 */
static mspace getMspace()
{
    if (gExecutableStore == NULL) {
        int fd = ashmem_create_region("CodeFlinger code cache",
                                      kMaxCodeCacheCapacity);
        LOG_ALWAYS_FATAL_IF(fd < 0,
                            "Creating code cache, ashmem_create_region "
                            "failed with error '%s'", strerror(errno));
        gExecutableStore = mmap(NULL, kMaxCodeCacheCapacity,
                                PROT_READ | PROT_WRITE | PROT_EXEC,
                                MAP_PRIVATE, fd, 0);
        LOG_ALWAYS_FATAL_IF(gExecutableStore == MAP_FAILED,
                            "Creating code cache, mmap failed with error "
                            "'%s'", strerror(errno));
        /* The mapping keeps the region alive; the fd is no longer needed. */
        close(fd);
        gMspace = create_mspace_with_base(gExecutableStore,
                                          kMaxCodeCacheCapacity,
                                          /*locked=*/ false);
        mspace_set_footprint_limit(gMspace, kMaxCodeCacheCapacity);
    }
    return gMspace;
}
/*
 * Register (and for clients, create) a shared-memory region.
 *
 * baseaddr  - requested mapping address, or NULL to let the kernel pick
 * mem_size  - size of the region in bytes
 * uuid      - unique string used to build the shm filename
 * do_create - nonzero: client path (create + ftruncate + mmap);
 *             zero: server path (open + mmap an existing segment)
 *
 * Returns the memory-table index, or -1 on failure (slot released).
 */
int memory_register(void* baseaddr, size_t mem_size, const char* uuid, int do_create)
{
    if (!use_allocator) return 0;

    int memid;
    size_t offset = 0;
    size_t max_quant_params_count = 1;

    /* Find a free memory-table slot, growing the table if it is full. */
    for (memid = 0; memid < memory_table_size; memid++)
        if (!memory_table[memid].in_use) break;
    if (memid == memory_table_size)
        memory_table_grow(EP_MEMORY_TABLE_INC);

    memory_t* memptr = &memory_table[memid];
    memptr->in_use = 1;
    memptr->do_create = do_create;
    memptr->shm_base = NULL;
    memptr->client_shm_base = baseaddr;
    memptr->shm_mspace = NULL;
    memptr->shm_id = -1;
    memptr->mem_size = mem_size;

    /* When the kernel picks the address, reserve page-aligned space at the
     * front of the region for the pointer table initialized below. */
    if (baseaddr == NULL)
        offset = (((sizeof(intptr_t*) * (max_ep + 1)) / PAGE_SIZE) + 1) * PAGE_SIZE;

    if (do_create) { /* Client */
        /* Create a shared memory segment */
        snprintf(memptr->shm_filename, SHM_FILENAME_LEN, SHM_FILENAME_PREFIX"%s%d", uuid, memid);
        if ((memptr->shm_id = shm_open(memptr->shm_filename, O_RDWR|O_CREAT, S_IRWXU|S_IRWXG|S_IROTH)) < 0) {
            PRINT("CLIENT: shm_open failed (%s)\n", strerror(errno));
            memory_release(memid);
            return -1;
        }

        /* Adjust the size of the shared memory segment */
        if (ftruncate(memptr->shm_id, mem_size) < 0) {
            PRINT("CLIENT: ftruncate failed (%s)\n", strerror(errno));
            memory_release(memid);
            return -1;
        }

        /* Create mmap region (MAP_FIXED only when an address was requested) */
        if ((memptr->shm_base = (void*)mmap(baseaddr, mem_size, (PROT_WRITE|PROT_READ),
                                            (baseaddr) ? (MAP_FIXED|MAP_SHARED) : MAP_SHARED,
                                            memptr->shm_id, 0)) == MAP_FAILED) {
            PRINT("CLIENT: mmap failed (%s)\n", strerror(errno));
            memory_release(memid);
            return -1;
        }
    } else { /* Server */
        /* Open shared memory region */
        snprintf(memptr->shm_filename, SHM_FILENAME_LEN, SHM_FILENAME_PREFIX"%s%d", uuid, memid);
        if ((memptr->shm_id = shm_open(memptr->shm_filename, O_RDWR, S_IRWXU|S_IRWXG|S_IROTH)) < 0) {
            DEBUG_PRINT("SERVER: shm_open failed (%s)\n", strerror(errno));
            memory_release(memid);
            return -1;
        }

        /* Create mmap region */
        if ((memptr->shm_base = (void*)mmap(NULL, mem_size, PROT_WRITE|PROT_READ, MAP_SHARED,
                                            memptr->shm_id, 0)) == MAP_FAILED) {
            DEBUG_PRINT("SERVER: mmap failed (%s)\n", strerror(errno));
            memory_release(memid);
            return -1;
        }
    }

    ASSERT(memptr->shm_base != NULL);

    if (do_create) {
        memptr->client_shm_base = memptr->shm_base;
        if (baseaddr == NULL) {
            /* Track client shmem address and server cqueue address */
            for (int i = 0; i < max_ep + 1 + max_quant_params_count; i++) {
                void* cqueue_base = (intptr_t*)memptr->shm_base + i;
                if (i == 0)
                    /* Index 0: client shared memory region */
                    *(intptr_t*)cqueue_base = (intptr_t)memptr->shm_base;
                else
                    /* Index 1 to max_ep: server cqueue address */
                    *(intptr_t*)cqueue_base = (intptr_t)-1;
            }
            /* Create dlmalloc mspace only for allocated regions */
            intptr_t mspace_start = (intptr_t)memptr->shm_base + (intptr_t)offset;
            memptr->shm_mspace = create_mspace_with_base((void*)mspace_start, mem_size-offset, 1);
            if (memptr->shm_mspace == NULL) {
                memory_release(memid);
                return -1;
            }
        }
        DEBUG_PRINT("CLIENT: %p %ld %ld %p %ld %p\n", memptr->shm_base, *(intptr_t*)memptr->shm_base,
                    offset, memptr->shm_base + (intptr_t)offset, mem_size, memptr->shm_mspace);
    } else
        DEBUG_PRINT("SERVER: %p %ld %ld %p %ld\n", memptr->shm_base, *(intptr_t*)memptr->shm_base,
                    offset, memptr->shm_base + (intptr_t)offset, mem_size);

    return memid;
}
/*
 * Initialize the high-bandwidth-memory allocator: pick the HBW NUMA node,
 * allocate a slab of all its free memory via libnuma, and build a dlmalloc
 * mspace over it. On any failure myhbwmalloc_mspace stays NULL, which
 * later library calls use to detect the failed initialization.
 */
void myhbwmalloc_init(void)
{
    /* set to NULL before trying to initialize.  if we return before
     * successful creation of the mspace, then it will still be NULL,
     * and we can use that in subsequent library calls to determine
     * that the library failed to initialize. */
    myhbwmalloc_mspace = NULL;

    /* verbose printout? */
    myhbwmalloc_verbose = 0;
    {
        char * env_char = getenv("HBWMALLOC_VERBOSE");
        if (env_char != NULL) {
            myhbwmalloc_verbose = 1;
            printf("hbwmalloc: HBWMALLOC_VERBOSE set\n");
        }
    }

    /* fail hard or soft? */
    myhbwmalloc_hardfail = 1;
    {
        char * env_char = getenv("HBWMALLOC_SOFTFAIL");
        if (env_char != NULL) {
            myhbwmalloc_hardfail = 0;
            printf("hbwmalloc: HBWMALLOC_SOFTFAIL set\n");
        }
    }

    /* set the atexit handler that will destroy the mspace and free the numa allocation */
    atexit(myhbwmalloc_final);

    /* detect and configure use of NUMA memory nodes */
    {
        int max_possible_node    = numa_max_possible_node();
        int num_possible_nodes   = numa_num_possible_nodes();
        int max_numa_nodes       = numa_max_node();
        int num_configured_nodes = numa_num_configured_nodes();
        int num_configured_cpus  = numa_num_configured_cpus();
        if (myhbwmalloc_verbose) {
            printf("hbwmalloc: numa_max_possible_node() = %d\n", max_possible_node);
            printf("hbwmalloc: numa_num_possible_nodes() = %d\n", num_possible_nodes);
            printf("hbwmalloc: numa_max_node() = %d\n", max_numa_nodes);
            printf("hbwmalloc: numa_num_configured_nodes() = %d\n", num_configured_nodes);
            printf("hbwmalloc: numa_num_configured_cpus() = %d\n", num_configured_cpus);
        }
        /* FIXME this is a hack. assumes HBW is only numa node 1. */
        if (num_configured_nodes <= 2) {
            myhbwmalloc_numa_node = num_configured_nodes-1;
        } else {
            fprintf(stderr,"hbwmalloc: we support only 2 numa nodes, not %d\n", num_configured_nodes);
        }

        if (myhbwmalloc_verbose) {
            /* Dump the CPU mask of every configured NUMA node. */
            for (int i=0; i<num_configured_nodes; i++) {
                unsigned max_numa_cpus = numa_num_configured_cpus();
                struct bitmask * mask = numa_bitmask_alloc( max_numa_cpus );
                int rc = numa_node_to_cpus(i, mask);
                if (rc != 0) {
                    fprintf(stderr, "hbwmalloc: numa_node_to_cpus failed\n");
                } else {
                    printf("hbwmalloc: numa node %d cpu mask:", i);
                    for (unsigned j=0; j<max_numa_cpus; j++) {
                        int bit = numa_bitmask_isbitset(mask,j);
                        printf(" %d", bit);
                    }
                    printf("\n");
                }
                numa_bitmask_free(mask);
            }
            fflush(stdout);
        }
    }

#if 0 /* unused */
    /* see if the user specifies a slab size */
    size_t slab_size_requested = 0;
    {
        char * env_char = getenv("HBWMALLOC_BYTES");
        if (env_char!=NULL) {
            long units = 1L;
            if ( NULL != strstr(env_char,"G") ) units = 1000000000L;
            else if ( NULL != strstr(env_char,"M") ) units = 1000000L;
            else if ( NULL != strstr(env_char,"K") ) units = 1000L;
            else units = 1L;
            int num_count = strspn(env_char, "0123456789");
            memset( &env_char[num_count], ' ', strlen(env_char)-num_count);
            slab_size_requested = units * atol(env_char);
        }
        if (myhbwmalloc_verbose) {
            printf("hbwmalloc: requested slab_size_requested = %zu\n", slab_size_requested);
        }
    }
#endif

    /* see what libnuma says is available */
    size_t myhbwmalloc_slab_size;
    {
        int node = myhbwmalloc_numa_node;
        long long freemem;
        long long maxmem = numa_node_size64(node, &freemem);
        if (myhbwmalloc_verbose) {
            printf("hbwmalloc: numa_node_size64 says maxmem=%lld freemem=%lld for numa node %d\n", maxmem, freemem, node);
        }
        /* Claim all reported free memory on the node for the slab. */
        myhbwmalloc_slab_size = freemem;
    }

    /* assume threads, disable if MPI knows otherwise, then allow user to override. */
    int multithreaded = 1;
#ifdef HAVE_MPI
    int nprocs;
    {
        int is_init, is_final;
        MPI_Initialized(&is_init);
        MPI_Finalized(&is_final);
        if (is_init && !is_final) {
            MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
        }
        /* NOTE(review): if MPI is not initialized, nprocs is read
         * uninitialized by the division below — confirm this path only
         * runs inside an active MPI environment. */

        /* give equal portion to every MPI process */
        myhbwmalloc_slab_size /= nprocs;

        /* if the user initializes MPI with MPI_Init or
         * MPI_Init_thread(MPI_THREAD_SINGLE), they assert there
         * are no threads at all, which means we can skip the
         * malloc mspace lock.
         *
         * if the user lies to MPI, they deserve any bad thing
         * that comes of it. */
        int provided;
        MPI_Query_thread(&provided);
        if (provided==MPI_THREAD_SINGLE) {
            multithreaded = 0;
        } else {
            multithreaded = 1;
        }

        if (myhbwmalloc_verbose) {
            printf("hbwmalloc: MPI processes = %d (threaded = %d)\n", nprocs, multithreaded);
            /* NOTE(review): %d with a size_t argument — should be %zu. */
            printf("hbwmalloc: myhbwmalloc_slab_size = %d\n", myhbwmalloc_slab_size);
        }
    }
#endif

    /* user can assert that hbwmalloc and friends need not be thread-safe */
    {
        char * env_char = getenv("HBWMALLOC_LOCKLESS");
        if (env_char != NULL) {
            multithreaded = 0;
            if (myhbwmalloc_verbose) {
                printf("hbwmalloc: user has disabled locking in mspaces by setting HBWMALLOC_LOCKLESS\n");
            }
        }
    }

    myhbwmalloc_slab = numa_alloc_onnode( myhbwmalloc_slab_size, myhbwmalloc_numa_node);
    if (myhbwmalloc_slab==NULL) {
        fprintf(stderr, "hbwmalloc: numa_alloc_onnode returned NULL for size = %zu\n", myhbwmalloc_slab_size);
        return;
    } else {
        if (myhbwmalloc_verbose) {
            printf("hbwmalloc: numa_alloc_onnode succeeded for size %zu\n", myhbwmalloc_slab_size);
        }

        /* part (less than 128*sizeof(size_t) bytes) of this space is used for bookkeeping,
         * so the capacity must be at least this large */
        if (myhbwmalloc_slab_size < 128*sizeof(size_t)) {
            fprintf(stderr, "hbwmalloc: not enough space for mspace bookkeeping\n");
            return;
        }

        /* see above regarding if the user lies to MPI. */
        int locked = multithreaded;
        myhbwmalloc_mspace = create_mspace_with_base( myhbwmalloc_slab, myhbwmalloc_slab_size, locked);
        if (myhbwmalloc_mspace == NULL) {
            fprintf(stderr, "hbwmalloc: create_mspace_with_base returned NULL\n");
            return;
        } else if (myhbwmalloc_verbose) {
            printf("hbwmalloc: create_mspace_with_base succeeded for size %zu\n", myhbwmalloc_slab_size);
        }
    }
}
static
#endif
/*
 * One-time initialization of the ptmalloc-over-dlmalloc layer: embeds the
 * main arena's mspace inside the static main_arena object, sets up
 * thread-local arena bookkeeping and fork handlers, then applies
 * MALLOC_* environment tunables (when not running setuid-secure).
 */
void ptmalloc_init(void)
{
  const char* s;
  int secure = 0;
  void *mspace;

  /* Run only once; < 0 means "not yet initialized". */
  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

  /*if (mp_.pagesize == 0) ptmalloc_init_minimal();*/

#ifndef NO_THREADS
# if USE_STARTER & 1
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a simple
     starter version (realloc() won't work). */
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
# ifdef _LIBC
  /* Initialize the pthreads interface. */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
# endif /* !defined _LIBC */
# endif /* USE_STARTER & 1 */
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;

  /* Embed the main arena's mspace inside the static main_arena object. */
  mspace = create_mspace_with_base((char*)&main_arena + MSPACE_OFFSET,
                                   sizeof(main_arena) - MSPACE_OFFSET,
                                   0);
  assert(mspace == arena_to_mspace(&main_arena));

  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (void *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#ifndef NO_THREADS
# if USE_STARTER & 1
  /* Restore the real hooks now that TSD setup is done. */
  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;
# endif
# if USE_STARTER & 2
  __malloc_hook = 0;
  __memalign_hook = 0;
  __free_hook = 0;
# endif
#endif
#ifdef _LIBC
  secure = __libc_enable_secure;
#else
  if (! secure) {
    if ((s = getenv("MALLOC_TRIM_THRESHOLD_")))
      public_mALLOPt(M_TRIM_THRESHOLD, atoi(s));
    if ((s = getenv("MALLOC_TOP_PAD_")) ||
        (s = getenv("MALLOC_GRANULARITY_")))
      public_mALLOPt(M_GRANULARITY, atoi(s));
    if ((s = getenv("MALLOC_MMAP_THRESHOLD_")))
      public_mALLOPt(M_MMAP_THRESHOLD, atoi(s));
    /*if ((s = getenv("MALLOC_MMAP_MAX_"))) this is no longer available
      public_mALLOPt(M_MMAP_MAX, atoi(s));*/
  }
  s = getenv("MALLOC_CHECK_");
#endif
  /* NOTE(review): in the _LIBC branch `s` is never assigned before this
   * test — confirm against the full source. */
  if (s) {
    /*if(s[0]) mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
      __malloc_check_init();*/
  }
  if (__malloc_initialize_hook != NULL)
    (*__malloc_initialize_hook)();
  __malloc_initialized = 1;
}
/* Exercise the dlmalloc mspace API: build a locked mspace over a
 * malloc'd store, allocate three blocks (the last sized to fail),
 * print stats at each step, free everything, and tear down. */
int main(int argc, char *argv[])
{
	const size_t sz_small = 255;
	const size_t sz_mid = 256;
	const size_t sz_huge = kMaxCodeCacheCapacity - 512;
	void *blk_small = NULL;
	void *blk_mid = NULL;
	void *blk_huge = NULL;

	gExecutableStore = malloc(kMaxCodeCacheCapacity);
	if (gExecutableStore == NULL) {
		printf("error malloc\n");
		return -1;
	}

	gMspace = create_mspace_with_base(gExecutableStore, kMaxCodeCacheCapacity,
	                                  /*locked=*/ true);
	printf("-> create mspace\n");
	mspace_malloc_stats(gMspace);

	blk_small = mspace_malloc(gMspace, sz_small);
	if (blk_small != NULL) {
		printf("-> malloc addr1 = %p, 0x%x\n", blk_small, (uint32_t)sz_small);
		mspace_malloc_stats(gMspace);
		printf("addr1 size = 0x%x\n", (uint32_t)mspace_usable_size(blk_small));
	}

	blk_mid = mspace_malloc(gMspace, sz_mid);
	if (blk_mid != NULL) {
		printf("-> malloc addr2 = %p, 0x%x\n", blk_mid, (uint32_t)sz_mid);
		mspace_malloc_stats(gMspace);
		printf("addr2 size = 0x%x\n", (uint32_t)mspace_usable_size(blk_mid));
	}

	/* Expected to fail: the mspace cannot satisfy this on top of the
	 * two live blocks. */
	blk_huge = mspace_malloc(gMspace, sz_huge);
	if (blk_huge != NULL) {
		printf("-> malloc addr3 = %p, 0x%x\n", blk_huge, (uint32_t)sz_huge);
		mspace_malloc_stats(gMspace);
		printf("addr3 size = 0x%x\n", (uint32_t)mspace_usable_size(blk_huge));
	} else {
		printf("malloc addr3 error!\n");
	}

	if (blk_small != NULL) {
		mspace_free(gMspace, blk_small);
	}
	if (blk_mid != NULL) {
		mspace_free(gMspace, blk_mid);
	}
	if (blk_huge != NULL) {
		mspace_free(gMspace, blk_huge);
	}
	printf("-> all free\n");
	mspace_malloc_stats(gMspace);

_exit:
	if (gMspace != NULL) {
		destroy_mspace(gMspace);
		gMspace = NULL;
	}
	if (gExecutableStore != NULL) {
		free(gExecutableStore);
		gExecutableStore = NULL;
	}
	return 0;
}
/*
 * Initialize the shared-memory allocator. Bootstraps a temporary mspace
 * on the stack, sizes the shared region from SM_SIZE/SM_RANKS env vars,
 * maps or attaches the region (first process initializes, others spin on
 * brk), atomically claims this process' chunk, and finally rebinds
 * sm_mspace to a permanent mspace over that chunk.
 */
static void __attribute__((noinline)) __sm_init(void)
{
    char* tmp;
    size_t total_size;
    size_t rank_divider;
    size_t pagesize = (size_t)getpagesize();
    int do_init; //Whether to do initialization

    //Set up a temporary area on the stack for malloc() calls during our
    // initialization process.
    uint64_t* temp_space = alloca(TEMP_SIZE);
    sm_mspace = create_mspace_with_base(temp_space, TEMP_SIZE, 0);

    //Keep this for use with valgrind.
    //sm_mspace = create_mspace_with_base(sm_temp, TEMP_SIZE, 0);
    //sm_region->limit = (intptr_t)sm_region + TEMP_SIZE;

    //Query environment variables to figure out how much size is available.
    //The value of SM_SIZE is always expected to be megabytes.
    tmp = getenv("SM_SIZE");
    if(tmp == NULL) {
        //On BGQ, the size var MUST be set.
        //If it is not, there probably is only enough shared memory for the
        // system reservation. Can't assume there's usable SM, so abort.
#ifdef __bg__
        ERROR("SM_SIZE env var not set (make sure BG_SHAREDMEMSIZE is set too");
#else
        total_size = DEFAULT_TOTAL_SIZE;
#endif
    } else {
        total_size = atol(tmp) * 1024L * 1024L;
    }

    //SM_RANKS and DEFAULT_RANK_DIVIDER indicate how many regions to break the
    //SM region into -- one region per rank/process.
    tmp = getenv("SM_RANKS");
    //if (tmp == NULL){
#ifdef __MIC__
    tmp = getenv("MIC_PPN");
#endif
    //}
    if(tmp == NULL) {
        rank_divider = DEFAULT_RANK_DIVIDER;
    } else {
        rank_divider = atol(tmp);
    }

    //offset is the size taken by sm_region at the beginning of the space.
    size_t offset = ((sizeof(struct sm_region) / pagesize) + 1) * pagesize;

#ifdef USE_PROC_MAPS
    void* map_addr = find_map_address(total_size + offset);
#else
    void* map_addr = NULL;
#endif

    //NOTE(review): %lu is used with pointer arguments here and in the
    // debug printfs below — should be %p (or an explicit cast); confirm
    // before trusting this debug output.
    printf("map addr : %lu \n", map_addr);

    //Set up the SM region using one of mmap/sysv/pshm
    do_init = __sm_init_region(map_addr, total_size + offset);

    //Only the process creating the file should initialize.
    if(do_init) {
        //Only the initializing process registers the shutdown handler.
        atexit(__sm_destroy);
        sm_region->limit = (intptr_t)sm_region + total_size + offset;
#ifdef __bg__
        //Ensure everything above is set before brk below:
        // setting brk is the synchronization signal.
        __lwsync();
#endif
        sm_region->brk = (intptr_t)sm_region + offset;
    } else {
        //Wait for another process to finish initialization.
        void* volatile * brk_ptr = (void**)&sm_region->brk;
        while(*brk_ptr == NULL);
        //Ensure none of the following loads occur during/before the spin loop.
#ifdef __bg__
        __lwsync();
#endif
    }

    //Create my own mspace.
    size_t local_size = total_size / rank_divider;

    printf("params: map_addr : %lu sm_region: %lu brk : %lu limit : %lu total_size : %ld offset : %lu ranks : %ld local_size : %ld \n", map_addr, sm_region, sm_region->brk ,sm_region->limit, total_size, offset, rank_divider, local_size);

    //Check that this process' region is mapped to the same address as the
    //process that initialized the region.
    if(sm_region->limit != (intptr_t)sm_region + total_size + offset) {
        printf("ERROR !!! ==> params: map_addr : %lu sm_region: %lu brk : %lu limit : %lu total_size : %ld offset : %lu ranks : %ld local_size : %ld \n", map_addr, sm_region, sm_region->brk ,sm_region->limit, total_size, offset, rank_divider, local_size);
        ERROR("sm_region limit %lx doesn't match computed limit %lx", sm_region->limit, (intptr_t)sm_region + total_size + offset);
    }

    sm_lower = sm_region;
    sm_upper = (void*)sm_region->limit;

    //void* base = sm_morecore(local_size);
    //Atomically claim this process' chunk of the shared region.
    void* base = (void*)__sync_fetch_and_add(&sm_region->brk, local_size);
    if(base < sm_lower || base >= sm_upper) {
        printf("ERROR !!! ==> params:sm_lower : %lu sm_upper : %lu map_addr : %lu sm_region: %lu brk : %lu limit : %lu total_size : %ld offset : %lu ranks : %ld local_size : %ld \n", sm_lower, sm_upper ,map_addr, sm_region, sm_region->brk ,sm_region->limit, total_size, offset, rank_divider, local_size);
        ERROR("Got local base %p outside of range %p -> %p",base, sm_lower, sm_upper);
    }

    //Clearing the memory seems to avoid some bugs and
    // forces out subtle OOM issues here instead of later.
    //memset(base, 0, local_size);

    //WARNING("%d sm_region %p base %p total_size %lx local_size %lx\n",
    //        getpid(), sm_region, base, total_size, local_size);

    //Careful to subtract off space for the local data.
    //Rebind sm_mspace from the temporary stack area to the permanent chunk.
    sm_mspace = create_mspace_with_base(base, local_size, 1);
}
/*
 * Initialize a shm-backed SSVM segment as master: create and size the
 * /dev/shm file, map it (optionally at a randomized requested VA), place
 * the shared header in the first page, and build a locked, non-expanding
 * heap over the remainder. Returns 0 or an SSVM_API_ERROR_* code.
 */
int ssvm_master_init_shm (ssvm_private_t * ssvm)
{
  int ssvm_fd;
#if USE_DLMALLOC == 0
  int mh_flags = MHEAP_FLAG_DISABLE_VM | MHEAP_FLAG_THREAD_SAFE;
#endif
  clib_mem_vm_map_t mapa = { 0 };
  u8 junk = 0, *ssvm_filename;
  ssvm_shared_header_t *sh;
  uword page_size, requested_va = 0;
  void *oldheap;

  if (ssvm->ssvm_size == 0)
    return SSVM_API_ERROR_NO_SIZE;

  if (CLIB_DEBUG > 1)
    clib_warning ("[%d] creating segment '%s'", getpid (), ssvm->name);

  ASSERT (vec_c_string_is_terminated (ssvm->name));

  /* Remove any stale segment file with the same name. */
  ssvm_filename = format (0, "/dev/shm/%s%c", ssvm->name, 0);
  unlink ((char *) ssvm_filename);
  vec_free (ssvm_filename);

  ssvm_fd = shm_open ((char *) ssvm->name, O_RDWR | O_CREAT | O_EXCL, 0777);
  if (ssvm_fd < 0)
    {
      clib_unix_warning ("create segment '%s'", ssvm->name);
      return SSVM_API_ERROR_CREATE_FAILURE;
    }

  if (fchmod (ssvm_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP) < 0)
    clib_unix_warning ("ssvm segment chmod");

  if (svm_get_root_rp ())
    {
      /* TODO: is this really needed? */
      svm_main_region_t *smr = svm_get_root_rp ()->data_base;
      if (fchown (ssvm_fd, smr->uid, smr->gid) < 0)
	clib_unix_warning ("ssvm segment chown");
    }

  /* Size the file: seek past the end and write a single byte. */
  if (lseek (ssvm_fd, ssvm->ssvm_size, SEEK_SET) < 0)
    {
      clib_unix_warning ("lseek");
      close (ssvm_fd);
      return SSVM_API_ERROR_SET_SIZE;
    }

  if (write (ssvm_fd, &junk, 1) != 1)
    {
      clib_unix_warning ("set ssvm size");
      close (ssvm_fd);
      return SSVM_API_ERROR_SET_SIZE;
    }

  page_size = clib_mem_get_fd_page_size (ssvm_fd);
  if (ssvm->requested_va)
    {
      requested_va = ssvm->requested_va;
      clib_mem_vm_randomize_va (&requested_va, min_log2 (page_size));
    }

  mapa.requested_va = requested_va;
  mapa.size = ssvm->ssvm_size;
  mapa.fd = ssvm_fd;
  if (clib_mem_vm_ext_map (&mapa))
    {
      clib_unix_warning ("mmap");
      close (ssvm_fd);
      return SSVM_API_ERROR_MMAP;
    }
  /* The mapping keeps the segment alive; the fd is no longer needed. */
  close (ssvm_fd);

  sh = mapa.addr;
  sh->master_pid = ssvm->my_pid;
  sh->ssvm_size = ssvm->ssvm_size;
  sh->ssvm_va = pointer_to_uword (sh);
  sh->type = SSVM_SEGMENT_SHM;

#if USE_DLMALLOC == 0
  sh->heap = mheap_alloc_with_flags (((u8 *) sh) + page_size,
				     ssvm->ssvm_size - page_size, mh_flags);
#else
  /* Heap covers everything after the header page; locked = thread-safe. */
  sh->heap = create_mspace_with_base (((u8 *) sh) + page_size,
				      ssvm->ssvm_size - page_size,
				      1 /* locked */ );
  /* Fixed-size segment: keep dlmalloc from mmap'ing more memory. */
  mspace_disable_expand (sh->heap);
#endif

  oldheap = ssvm_push_heap (sh);
  sh->name = format (0, "%s", ssvm->name, 0);
  ssvm_pop_heap (oldheap);

  ssvm->sh = sh;
  ssvm->my_pid = getpid ();
  ssvm->i_am_master = 1;

  /* The application has to set set sh->ready... */
  return 0;
}
/** * initialize the memory pool */ void shmemi_mem_init (void *base, size_t capacity) { myspace = create_mspace_with_base (base, capacity, 1); }
/*
 * Build an mspace over caller-supplied memory [base, base + max_capacity)
 * whose footprint starts at starting_capacity. The first bytes of `base`
 * hold a mspace_contig_state used by the custom MORECORE; pages beyond
 * the current break are made PROT_NONE until handed out.
 * Returns the mspace, or 0 if mprotect or mspace creation fails.
 */
mspace create_contiguous_mspace_with_base(size_t starting_capacity,
    size_t max_capacity, int locked, void *base) {
  struct mspace_contig_state *cs;
  unsigned int pagesize;
  mstate m;

  init_mparams();
  pagesize = PAGESIZE;

  /* Sanity: capacities ordered; base and max_capacity page-aligned. */
  assert(starting_capacity <= max_capacity);
  assert(((uintptr_t)base & (pagesize-1)) == 0);
  assert(((uintptr_t)max_capacity & (pagesize-1)) == 0);
  starting_capacity = (size_t)ALIGN_UP(starting_capacity, pagesize);

  /* Make the first page read/write. dlmalloc needs to use that page. */
  if (mprotect(base, starting_capacity, PROT_READ | PROT_WRITE) < 0) {
    goto error;
  }

  /* Create the mspace, pointing to the memory given. */
  m = create_mspace_with_base((char *)base + sizeof(*cs), starting_capacity,
                              locked);
  if (m == (mspace)0) {
    goto error;
  }

  /* Make sure that m is in the same page as base. */
  assert(((uintptr_t)m & (uintptr_t)~(pagesize-1)) == (uintptr_t)base);

  /* Use some space for the information that our MORECORE needs. */
  cs = (struct mspace_contig_state *)base;

  /* Find out exactly how much of the memory the mspace
   * is using. */
  cs->brk = m->seg.base + m->seg.size;
  cs->top = (char *)base + max_capacity;
  assert((char *)base <= cs->brk);
  assert(cs->brk <= cs->top);

  /* Prevent access to the memory we haven't handed out yet. */
  if (cs->brk != cs->top) {
    /* mprotect() requires page-aligned arguments, but it's possible
     * for cs->brk not to be page-aligned at this point. */
    char *prot_brk = (char *)ALIGN_UP(cs->brk, pagesize);
    if ((mprotect(base, prot_brk - (char *)base, PROT_READ | PROT_WRITE) < 0) ||
        (mprotect(prot_brk, cs->top - prot_brk, PROT_NONE) < 0)) {
      goto error;
    }
  }

  cs->m = m;
  cs->magic = CONTIG_STATE_MAGIC;

  return (mspace)m;

error:
  return (mspace)0;
}