__private_extern__ void __CFAllocatorInitialize(void) {
    __kCFAllocatorTypeID = _CFRuntimeRegisterClass(&__CFAllocatorClass);

    _CFRuntimeSetInstanceTypeID(&__kCFAllocatorSystemDefault, __kCFAllocatorTypeID);
    __kCFAllocatorSystemDefault._base._cfisa = __CFISAForTypeID(__kCFAllocatorTypeID);
#if DEPLOYMENT_TARGET_MACOSX
    __kCFAllocatorSystemDefault._context.info = (CF_USING_COLLECTABLE_MEMORY ? __CFCollectableZone : malloc_default_zone());
    memset(malloc_default_zone(), 0, 2 * sizeof(void *));
#endif
    __kCFAllocatorSystemDefault._allocator = kCFAllocatorSystemDefault;
#ifdef DEPLOYMENT_TARGET_WINDOWS
    __kCFAllocatorSystemDefault._context.allocate = __CFAllocatorSystemAllocate;
    __kCFAllocatorSystemDefault._context.reallocate = __CFAllocatorSystemReallocate;
    __kCFAllocatorSystemDefault._context.deallocate = __CFAllocatorSystemDeallocate;
#endif // DEPLOYMENT_TARGET_WINDOWS

    _CFRuntimeSetInstanceTypeID(&__kCFAllocatorMalloc, __kCFAllocatorTypeID);
    __kCFAllocatorMalloc._base._cfisa = __CFISAForTypeID(__kCFAllocatorTypeID);
    __kCFAllocatorMalloc._allocator = kCFAllocatorSystemDefault;

#if DEPLOYMENT_TARGET_MACOSX
    _CFRuntimeSetInstanceTypeID(&__kCFAllocatorMallocZone, __kCFAllocatorTypeID);
    __kCFAllocatorMallocZone._base._cfisa = __CFISAForTypeID(__kCFAllocatorTypeID);
    __kCFAllocatorMallocZone._allocator = kCFAllocatorSystemDefault;
    __kCFAllocatorMallocZone._context.info = malloc_default_zone();
#endif // DEPLOYMENT_TARGET_MACOSX

    _CFRuntimeSetInstanceTypeID(&__kCFAllocatorNull, __kCFAllocatorTypeID);
    __kCFAllocatorNull._base._cfisa = __CFISAForTypeID(__kCFAllocatorTypeID);
    __kCFAllocatorNull._allocator = kCFAllocatorSystemDefault;
}
/*
** Initialize this module.
*/
static int sqlite3MemInit(void *NotUsed){
#if defined(__APPLE__) && !defined(SQLITE_WITHOUT_ZONEMALLOC)
  int cpuCount;
  size_t len;
  if( _sqliteZone_ ){
    return SQLITE_OK;
  }
  len = sizeof(cpuCount);
  /* One usually wants to use hw.activecpu for MT decisions, but not here */
  sysctlbyname("hw.ncpu", &cpuCount, &len, NULL, 0);
  if( cpuCount>1 ){
    /* defer MT decisions to system malloc */
    _sqliteZone_ = malloc_default_zone();
  }else{
    /* only 1 core, use our own zone to avoid contention over the global
    ** locks, i.e. we have our own dedicated locks */
    bool success;
    malloc_zone_t* newzone = malloc_create_zone(4096, 0);
    malloc_set_zone_name(newzone, "Sqlite_Heap");
    do{
      success = OSAtomicCompareAndSwapPtrBarrier(NULL, newzone,
                                 (void * volatile *)&_sqliteZone_);
    }while(!_sqliteZone_);
    if( !success ){
      /* somebody registered a zone first */
      malloc_destroy_zone(newzone);
    }
  }
#endif
  UNUSED_PARAMETER(NotUsed);
  return SQLITE_OK;
}
static malloc_zone_t *
zone_default_get(void)
{
	malloc_zone_t **zones = NULL;
	unsigned int num_zones = 0;

	/*
	 * On OSX 10.12, malloc_default_zone returns a special zone that is not
	 * present in the list of registered zones. That zone uses a "lite zone"
	 * if one is present (apparently enabled when malloc stack logging is
	 * enabled), or the first registered zone otherwise. In practice this
	 * means unless malloc stack logging is enabled, the first registered
	 * zone is the default. So get the list of zones to get the first one,
	 * instead of relying on malloc_default_zone.
	 */
	if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
	    (vm_address_t **)&zones, &num_zones)) {
		/*
		 * Reset the value in case the failure happened after it was
		 * set.
		 */
		num_zones = 0;
	}

	if (num_zones)
		return (zones[0]);

	return (malloc_default_zone());
}
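/*
 * A minimal sketch of a caller for zone_default_get(), just to show intent;
 * report_default_zone and its output format are illustrative assumptions,
 * not part of the original source.
 */
#include <malloc/malloc.h>
#include <stdio.h>

static void report_default_zone(void)
{
	malloc_zone_t *zone = zone_default_get();
	/* zone_name can be NULL for zones that were never named. */
	printf("default zone: %s\n",
	    zone->zone_name ? zone->zone_name : "(unnamed)");
}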
DeathHandler::~DeathHandler() {
  // Disable alternative signal handler stack
  stack_t altstack;
  altstack.ss_sp = NULL;
  altstack.ss_size = 0;
  altstack.ss_flags = SS_DISABLE;
  sigaltstack(&altstack, NULL);

  struct sigaction sa;

  sigaction(SIGSEGV, NULL, &sa);
  sa.sa_handler = SIG_DFL;
  sigaction(SIGSEGV, &sa, NULL);

  sigaction(SIGABRT, NULL, &sa);
  sa.sa_handler = SIG_DFL;
  sigaction(SIGABRT, &sa, NULL);

  sigaction(SIGFPE, NULL, &sa);
  sa.sa_handler = SIG_DFL;
  sigaction(SIGFPE, &sa, NULL);

  delete[] memory_;
#ifdef __APPLE__
  malloc_zone_t* zone = malloc_default_zone();
  SetMallocZone(zone, malloc_, free_);
#endif
}
__private_extern__ void __CFAllocatorInitialize(void) {
    __kCFAllocatorTypeID = _CFRuntimeRegisterClass(&__CFAllocatorClass);

    _CFRuntimeSetInstanceTypeID(&__kCFAllocatorSystemDefault, __kCFAllocatorTypeID);
    __kCFAllocatorSystemDefault._base._cfisa = __CFISAForTypeID(__kCFAllocatorTypeID);
#if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI
    __kCFAllocatorSystemDefault._context.info = (kCFUseCollectableAllocator ? objc_collectableZone() : malloc_default_zone());
#endif
    __kCFAllocatorSystemDefault._allocator = kCFAllocatorSystemDefault;

    _CFRuntimeSetInstanceTypeID(&__kCFAllocatorMalloc, __kCFAllocatorTypeID);
    __kCFAllocatorMalloc._base._cfisa = __CFISAForTypeID(__kCFAllocatorTypeID);
    __kCFAllocatorMalloc._allocator = kCFAllocatorSystemDefault;

#if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI
    _CFRuntimeSetInstanceTypeID(&__kCFAllocatorMallocZone, __kCFAllocatorTypeID);
    __kCFAllocatorMallocZone._base._cfisa = __CFISAForTypeID(__kCFAllocatorTypeID);
    __kCFAllocatorMallocZone._allocator = kCFAllocatorSystemDefault;
    __kCFAllocatorMallocZone._context.info = malloc_default_zone();
#endif

    _CFRuntimeSetInstanceTypeID(&__kCFAllocatorNull, __kCFAllocatorTypeID);
    __kCFAllocatorNull._base._cfisa = __CFISAForTypeID(__kCFAllocatorTypeID);
    __kCFAllocatorNull._allocator = kCFAllocatorSystemDefault;
}
void debugInstallFreeHook(void)
{
#if defined(_WIN32) && defined(_DEBUG)
    lastCrtAllocHook = _CrtSetAllocHook(DebugAllocHook);
#endif

#ifdef __GLIBC__
    // __free_hook is not thread safe, so it is marked as deprecated. Use here
    // is hopefully safe and should catch errors in a single threaded program
    // and only miss some in a multithreaded program.
    lastFreeHook = __free_hook;
    __free_hook = DebugFreeHook;
#endif

#ifdef __APPLE__
    malloc_zone_t* zone = malloc_default_zone();
    assert(zone != NULL);
    // Remove the write protection from the zone struct.
    if (zone->version >= 8) {
        vm_protect(mach_task_self(), (uintptr_t)zone, sizeof(*zone), 0,
                   VM_PROT_READ | VM_PROT_WRITE);
    }
    lastMallocZone = *zone;
    zone->free = DebugFreeHook;
    zone->free_definite_size = DebugFreeDefiniteSizeHook;
    if (zone->version >= 8) {
        vm_protect(mach_task_self(), (uintptr_t)zone, sizeof(*zone), 0,
                   VM_PROT_READ);
    }
#endif
}
void
installAllocHooks (void)
{
  // Pointer to the default malloc zone
  malloc_zone_t * default_zone;

  //
  // Get the default malloc zone and record the pointers to the real malloc
  // functions.
  //
  default_zone = malloc_default_zone();
  real_malloc  = default_zone->malloc;
  real_calloc  = default_zone->calloc;
  real_valloc  = default_zone->valloc;
  real_realloc = default_zone->realloc;
  real_free    = default_zone->free;

  //
  // Install intercept routines.
  //
  default_zone->malloc  = track_malloc;
  default_zone->calloc  = track_calloc;
  default_zone->valloc  = track_valloc;
  default_zone->realloc = track_realloc;
  default_zone->free    = track_free;
}
static void __CFAllocatorSystemDeallocate(void *ptr, void *info) {
    malloc_zone_t * const zone = (info == &__MallocDefaultZoneInfoPlaceholder)
        ? malloc_default_zone()
        : (malloc_zone_t *)info;
#if defined(DEBUG)
    size_t size = malloc_size(ptr);
    if (size) memset(ptr, 0xCC, size);
#endif
    malloc_zone_free(zone, ptr);
}
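/*
 * The pattern above relies on allocation and deallocation resolving to the
 * same malloc zone. A minimal self-contained sketch of that contract, using
 * only the public zone API (the helper name is an illustrative assumption):
 */
#include <malloc/malloc.h>
#include <string.h>

static void zone_roundtrip(void)
{
    malloc_zone_t *zone = malloc_default_zone();
    char *buf = malloc_zone_malloc(zone, 64);  /* zone-aware allocation */
    if (!buf) return;
    memset(buf, 0, 64);
    malloc_zone_free(zone, buf);               /* free through the same zone */
}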
void set_alloc_failure_countdown_to(int count) {
   ALLOC_FAIL_COUNTER = count;
   malloc_zone_t *zone = malloc_default_zone();
   zone->malloc = fail_countdown_malloc;
   zone->calloc = fail_countdown_calloc;
   zone->realloc = fail_countdown_realloc;
}
void setup() {
   malloc_zone_t * zone = malloc_default_zone();
   tail = keep_list;
   system_malloc = zone->malloc;
   zone->malloc = mymalloc;
   system_free = zone->free;
   zone->free = myfree;
}
void reset_alloc (void) {
   ALLOC_ERR_PROB = 0.0;
   malloc_zone_t *zone = malloc_default_zone();
   zone->malloc = SYSTEM_MALLOC;
   zone->calloc = SYSTEM_CALLOC;
   zone->realloc = SYSTEM_REALLOC;
}
void set_alloc_failure_rate_to(double p) {
   ALLOC_ERR_PROB = p;
   malloc_zone_t *zone = malloc_default_zone();
   zone->malloc = fail_prone_malloc;
   zone->calloc = fail_prone_calloc;
   zone->realloc = fail_prone_realloc;
}
void machine_specific_initialization (void) {
#ifdef __APPLE__
   malloc_zone_t *zone = malloc_default_zone();
   SYSTEM_MALLOC = zone->malloc;
   SYSTEM_CALLOC = zone->calloc;
   SYSTEM_REALLOC = zone->realloc;
#endif
}
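/*
 * The fail-prone hooks installed by set_alloc_failure_rate_to() are not shown
 * in these snippets. A plausible sketch of the malloc hook, assuming
 * ALLOC_ERR_PROB and SYSTEM_MALLOC are the globals captured above; the use of
 * rand() is an illustrative assumption:
 */
#include <stdlib.h>

static void *fail_prone_malloc(malloc_zone_t *zone, size_t size)
{
   if ((double)rand() / RAND_MAX < ALLOC_ERR_PROB)
      return NULL;                    /* simulated out-of-memory */
   return SYSTEM_MALLOC(zone, size);  /* forward to the saved allocator */
}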
void teardown() {
   malloc_zone_t * zone = malloc_default_zone();
   while (tail > keep_list) {
      tail--;
      system_free(zone, *tail);
   }
   zone->malloc = system_malloc;
   zone->free = system_free;
}
CF_PRIVATE void __CFAllocatorInitialize(void) {
    static dispatch_once_t initOnce = 0;
    dispatch_once(&initOnce, ^{
        __kCFAllocatorTypeID = _CFRuntimeRegisterClass(&__CFAllocatorClass); // initOnce covered

        _CFAllocatorSetInstanceTypeIDAndIsa(&__kCFAllocatorSystemDefault);
#if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI
        __kCFAllocatorSystemDefault._context.info = malloc_default_zone();
#endif
        __kCFAllocatorSystemDefault._allocator = kCFAllocatorSystemDefault;

        _CFAllocatorSetInstanceTypeIDAndIsa(&__kCFAllocatorMalloc);
        __kCFAllocatorMalloc._allocator = kCFAllocatorSystemDefault;

#if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_EMBEDDED_MINI
        _CFAllocatorSetInstanceTypeIDAndIsa(&__kCFAllocatorMallocZone);
        __kCFAllocatorMallocZone._allocator = kCFAllocatorSystemDefault;
        __kCFAllocatorMallocZone._context.info = malloc_default_zone();
#endif

        _CFAllocatorSetInstanceTypeIDAndIsa(&__kCFAllocatorNull);
        __kCFAllocatorNull._allocator = kCFAllocatorSystemDefault;
    });
}
//
// Redirect the system malloc.
//
static void my_init_hook (void)
{
    if (theZone == NULL) {
        theZone = malloc_default_zone();

        // Store the old hooks.
        originalSize    = theZone->size;
        originalMalloc  = theZone->malloc;
        originalCalloc  = theZone->calloc;
        originalValloc  = theZone->valloc;
        originalFree    = theZone->free;
        originalRealloc = theZone->realloc;
        originalDestroy = theZone->destroy;

        // Point the hooks to the replacement functions.
        theZone->size      = mysize;
        theZone->malloc    = mymalloc;
        theZone->calloc    = mycalloc;
        theZone->valloc    = myvalloc;
        theZone->free      = myfree;
        theZone->realloc   = myrealloc;
        theZone->destroy   = mydestroy;
        theZone->zone_name = mallocName;

        //
        // We aren't replacing everything, so NULL away.
        //

        // Trash the batch callback hooks.
        theZone->batch_malloc = NULL;
        theZone->batch_free   = NULL;

        // And kill the introspection pointer (whatever that means).
        theZone->introspect = NULL;

        // And now in Snow Leopard, more to NULL out.
        theZone->memalign           = NULL;
        theZone->free_definite_size = NULL;
    }
}
void makeLargeMallocFailSilently()
{
    malloc_zone_t* zone = malloc_default_zone();

#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !defined(BUILDING_ON_SNOW_LEOPARD)
    mach_vm_address_t pageStart = reinterpret_cast<vm_address_t>(zone) & static_cast<vm_size_t>(~(getpagesize() - 1));
    vm_prot_t initialProtection = protectionOfRegion(pageStart);

    vm_size_t len = reinterpret_cast<vm_address_t>(zone) - pageStart + sizeof(malloc_zone_t);
    if (mach_vm_protect(mach_task_self(), pageStart, len, 0, initialProtection | VM_PROT_WRITE))
        CRASH();
#endif

    savedMalloc = zone->malloc;
    savedRealloc = zone->realloc;

    zone->malloc = checkedMalloc;
    zone->realloc = checkedRealloc;

#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !defined(BUILDING_ON_SNOW_LEOPARD)
    if (mach_vm_protect(mach_task_self(), pageStart, len, 0, initialProtection))
        CRASH();
#endif
}
void makeLargeMallocFailSilently()
{
    malloc_zone_t* zone = malloc_default_zone();

#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
    mach_vm_address_t pageStart = reinterpret_cast<vm_address_t>(zone) & static_cast<vm_size_t>(~(getpagesize() - 1));
    vm_prot_t initialProtection = protectionOfRegion(pageStart);

    vm_size_t len = reinterpret_cast<vm_address_t>(zone) - pageStart + sizeof(malloc_zone_t);
    if (mach_vm_protect(mach_task_self(), pageStart, len, 0, initialProtection | VM_PROT_WRITE))
        CRASH();
#endif

    savedMalloc = zone->malloc;
    savedRealloc = zone->realloc;

    zone->malloc = checkedMalloc;
    zone->realloc = checkedRealloc;

#if __MAC_OS_X_VERSION_MIN_REQUIRED >= 1070
    if (mach_vm_protect(mach_task_self(), pageStart, len, 0, initialProtection))
        CRASH();
#endif
}
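/*
 * Neither variant shows checkedMalloc itself. A plausible reconstruction,
 * assuming the intent is to turn very large requests into silent failures;
 * the 256 MB threshold is an illustrative assumption:
 */
static void* checkedMalloc(malloc_zone_t* zone, size_t size)
{
    if (size >= 0x10000000)        /* ~256 MB: fail silently instead */
        return 0;
    return savedMalloc(zone, size);
}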
/*
** Initialize this module.
*/
static int sqlite3MemInit(void *NotUsed){
#if defined(__APPLE__) && !defined(SQLITE_WITHOUT_ZONEMALLOC)
  int cpuCount;
  size_t len;
  if( _sqliteZone_ ){
    return SQLITE_OK;
  }
  len = sizeof(cpuCount);
  /* One usually wants to use hw.activecpu for MT decisions, but not here */
  sysctlbyname("hw.ncpu", &cpuCount, &len, NULL, 0);
  if( cpuCount>1 ){
    /* defer MT decisions to system malloc */
    _sqliteZone_ = malloc_default_zone();
  }else{
    /* only 1 core, use our own zone to avoid contention over the global
    ** locks, i.e. we have our own dedicated locks */
    _sqliteZone_ = malloc_create_zone(4096, 0);
    malloc_set_zone_name(_sqliteZone_, "Sqlite_Heap");
  }
#endif /* defined(__APPLE__) && !defined(SQLITE_WITHOUT_ZONEMALLOC) */
  UNUSED_PARAMETER(NotUsed);
  return SQLITE_OK;
}
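/*
** Once _sqliteZone_ is chosen, the allocator entry points route through it.
** A simplified sketch of the malloc side (SQLite's real implementation also
** prepends a size header, which is omitted here; the function name is
** illustrative):
*/
static void *sqlite3MemMallocSketch(int nByte){
  return malloc_zone_malloc(_sqliteZone_, (size_t)nByte);
}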
DeathHandler::DeathHandler(bool altstack) {
  if (memory_ == NULL) {
    memory_ = new char[kNeededMemory + (altstack ? MINSIGSTKSZ : 0)];
  }
  if (altstack) {
    stack_t altstack;
    altstack.ss_sp = memory_ + kNeededMemory;
    altstack.ss_size = MINSIGSTKSZ;
    altstack.ss_flags = 0;
    if (sigaltstack(&altstack, NULL) < 0) {
      perror("DeathHandler - sigaltstack()");
    }
  }

  struct sigaction sa;
  sa.sa_sigaction = (sa_sigaction_handler)HandleSignal;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO | (altstack ? SA_ONSTACK : 0);
  if (sigaction(SIGSEGV, &sa, NULL) < 0) {
    perror("DeathHandler - sigaction(SIGSEGV)");
  }
  if (sigaction(SIGABRT, &sa, NULL) < 0) {
    perror("DeathHandler - sigaction(SIGABRT)");
  }
  if (sigaction(SIGFPE, &sa, NULL) < 0) {
    perror("DeathHandler - sigaction(SIGFPE)");
  }

#ifdef __APPLE__
  malloc_zone_t* zone = malloc_default_zone();
  if (!zone) {
    fprintf(stderr, "Failed to override malloc() and free()\n");
    return;
  }
  // Override malloc() and free()
  SetMallocZone(zone, reinterpret_cast<void*>(__malloc_zone),
                reinterpret_cast<void*>(__free_zone), &malloc_, &free_);
#endif
}
void debugRemoveFreeHook(void)
{
#if defined(_WIN32) && defined(_DEBUG)
    _CrtSetAllocHook(lastCrtAllocHook);
#endif

#ifdef __GLIBC__
    __free_hook = lastFreeHook;
#endif

#ifdef __APPLE__
    malloc_zone_t* zone = malloc_default_zone();
    assert(zone != NULL);
    // Remove the write protection from the zone struct.
    if (zone->version >= 8) {
        vm_protect(mach_task_self(), (uintptr_t)zone, sizeof(*zone), 0,
                   VM_PROT_READ | VM_PROT_WRITE);
    }
    zone->free = lastMallocZone.free;
    zone->free_definite_size = lastMallocZone.free_definite_size;
    if (zone->version >= 8) {
        vm_protect(mach_task_self(), (uintptr_t)zone, sizeof(*zone), 0,
                   VM_PROT_READ);
    }
#endif
}
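/*
 * DebugFreeHook itself is not shown. A plausible shape for it, assuming it
 * validates the pointer and then forwards to the implementation saved in
 * lastMallocZone before the hook was installed:
 */
static void DebugFreeHook(malloc_zone_t* zone, void* ptr)
{
    /* A real hook would first assert that ptr is live and owned by zone. */
    lastMallocZone.free(zone, ptr);
}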
void
register_zone(void)
{
	/*
	 * If something else replaced the system default zone allocator, don't
	 * register jemalloc's.
	 */
	malloc_zone_t *default_zone = malloc_default_zone();
	if (!default_zone->zone_name ||
	    strcmp(default_zone->zone_name, "DefaultMallocZone") != 0) {
		return;
	}

	zone.size = (void *)zone_size;
	zone.malloc = (void *)zone_malloc;
	zone.calloc = (void *)zone_calloc;
	zone.valloc = (void *)zone_valloc;
	zone.free = (void *)zone_free;
	zone.realloc = (void *)zone_realloc;
	zone.destroy = (void *)zone_destroy;
	zone.zone_name = "jemalloc_zone";
	zone.batch_malloc = NULL;
	zone.batch_free = NULL;
	zone.introspect = &zone_introspect;
	zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 5)
	zone.memalign = zone_memalign;
#endif
#if (JEMALLOC_ZONE_VERSION >= 6)
	zone.free_definite_size = zone_free_definite_size;
#endif
#if (JEMALLOC_ZONE_VERSION >= 8)
	zone.pressure_relief = NULL;
#endif

	zone_introspect.enumerator = NULL;
	zone_introspect.good_size = (void *)zone_good_size;
	zone_introspect.check = NULL;
	zone_introspect.print = NULL;
	zone_introspect.log = NULL;
	zone_introspect.force_lock = (void *)zone_force_lock;
	zone_introspect.force_unlock = (void *)zone_force_unlock;
	zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
	zone_introspect.zone_locked = NULL;
#endif
#if (JEMALLOC_ZONE_VERSION >= 7)
	zone_introspect.enable_discharge_checking = NULL;
	zone_introspect.disable_discharge_checking = NULL;
	zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
	zone_introspect.enumerate_discharged_pointers = NULL;
#else
	zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
#endif

	/*
	 * The default purgeable zone is created lazily by OSX's libc.  It uses
	 * the default zone when it is created for "small" allocations
	 * (< 15 KiB), but assumes the default zone is a scalable_zone.  This
	 * obviously fails when the default zone is the jemalloc zone, so
	 * malloc_default_purgeable_zone is called beforehand so that the
	 * default purgeable zone is created when the default zone is still
	 * a scalable_zone.  As purgeable zones only exist on >= 10.6, we need
	 * to check for the existence of malloc_default_purgeable_zone() at
	 * run time.
	 */
	if (malloc_default_purgeable_zone != NULL)
		malloc_default_purgeable_zone();

	/* Register the custom zone.  At this point it won't be the default. */
	malloc_zone_register(&zone);

	/*
	 * Unregister and reregister the default zone.  On OSX >= 10.6,
	 * unregistering takes the last registered zone and places it at the
	 * location of the specified zone.  Unregistering the default zone thus
	 * makes the last registered one the default.  On OSX < 10.6,
	 * unregistering shifts all registered zones.  The first registered
	 * zone then becomes the default.
	 */
	do {
		default_zone = malloc_default_zone();
		malloc_zone_unregister(default_zone);
		malloc_zone_register(default_zone);
	} while (malloc_default_zone() != &zone);
}
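/*
 * After a swap like this it is worth confirming the takeover actually stuck.
 * A hedged sanity-check sketch (the helper name is an illustrative
 * assumption; `zone` is the file-scope jemalloc zone registered above):
 */
#include <assert.h>
#include <string.h>

static void assert_zone_takeover(void)
{
	malloc_zone_t *dz = malloc_default_zone();
	assert(dz == &zone);
	assert(dz->zone_name && strcmp(dz->zone_name, "jemalloc_zone") == 0);
}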
/* Creates the worker threads. */
static void
incdep_init (struct floc *f)
{
  unsigned i;
#if defined (HAVE_PTHREAD) && !defined (CONFIG_WITHOUT_THREADS)
  int rc;
  pthread_attr_t attr;
#elif defined (WINDOWS32)
  unsigned tid;
  uintptr_t hThread;
#elif defined (__OS2__)
  int rc;
  int tid;
#endif
  (void)f;

  /* heap hacks */
#ifdef __APPLE__
  incdep_zone = malloc_create_zone (0, 0);
  if (!incdep_zone)
    incdep_zone = malloc_default_zone ();
#endif

  /* create the mutex and two condition variables / event objects. */
#if defined (HAVE_PTHREAD) && !defined (CONFIG_WITHOUT_THREADS)
  rc = pthread_mutex_init (&incdep_mtx, NULL);
  if (rc)
    fatal (f, _("pthread_mutex_init failed: err=%d"), rc);
  rc = pthread_cond_init (&incdep_cond_todo, NULL);
  if (rc)
    fatal (f, _("pthread_cond_init failed: err=%d"), rc);
  rc = pthread_cond_init (&incdep_cond_done, NULL);
  if (rc)
    fatal (f, _("pthread_cond_init failed: err=%d"), rc);

#elif defined (WINDOWS32)
  InitializeCriticalSection (&incdep_mtx);
  incdep_hev_todo = CreateEvent (NULL, TRUE /*bManualReset*/, FALSE /*bInitialState*/, NULL);
  if (!incdep_hev_todo)
    fatal (f, _("CreateEvent failed: err=%d"), GetLastError());
  incdep_hev_done = CreateEvent (NULL, TRUE /*bManualReset*/, FALSE /*bInitialState*/, NULL);
  if (!incdep_hev_done)
    fatal (f, _("CreateEvent failed: err=%d"), GetLastError());
  incdep_hev_todo_waiters = 0;
  incdep_hev_done_waiters = 0;

#elif defined (__OS2__)
  _fmutex_create (&incdep_mtx, 0);
  rc = DosCreateEventSem (NULL, &incdep_hev_todo, 0, FALSE);
  if (rc)
    fatal (f, _("DosCreateEventSem failed: rc=%d"), rc);
  rc = DosCreateEventSem (NULL, &incdep_hev_done, 0, FALSE);
  if (rc)
    fatal (f, _("DosCreateEventSem failed: rc=%d"), rc);
  incdep_hev_todo_waiters = 0;
  incdep_hev_done_waiters = 0;
#endif

  /* create the worker threads and associated per thread data. */
  incdep_terminate = 0;
  if (incdep_are_threads_enabled())
    {
      incdep_num_threads = sizeof (incdep_threads) / sizeof (incdep_threads[0]);
      if (incdep_num_threads + 1 > job_slots)
        incdep_num_threads = job_slots <= 1 ? 1 : job_slots - 1;
      for (i = 0; i < incdep_num_threads; i++)
        {
          /* init caches */
          unsigned rec_size = sizeof (struct incdep_variable_in_set);
          if (rec_size < sizeof (struct incdep_variable_def))
            rec_size = sizeof (struct incdep_variable_def);
          if (rec_size < sizeof (struct incdep_recorded_file))
            rec_size = sizeof (struct incdep_recorded_file);
          alloccache_init (&incdep_rec_caches[i], rec_size, "incdep rec",
                           incdep_cache_allocator, (void *)(size_t)i);
          alloccache_init (&incdep_dep_caches[i], sizeof(struct dep), "incdep dep",
                           incdep_cache_allocator, (void *)(size_t)i);
          strcache2_init (&incdep_dep_strcaches[i],
                          "incdep dep",   /* name */
                          65536,          /* hash size */
                          0,              /* default segment size*/
#ifdef HAVE_CASE_INSENSITIVE_FS
                          1,              /* case insensitive */
#else
                          0,              /* case insensitive */
#endif
                          0);             /* thread safe */
          strcache2_init (&incdep_var_strcaches[i],
                          "incdep var",   /* name */
                          32768,          /* hash size */
                          0,              /* default segment size*/
                          0,              /* case insensitive */
                          0);             /* thread safe */

          /* create the thread. */
#if defined (HAVE_PTHREAD) && !defined (CONFIG_WITHOUT_THREADS)
          rc = pthread_attr_init (&attr);
          if (rc)
            fatal (f, _("pthread_attr_init failed: err=%d"), rc);
          /*rc = pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_JOINABLE); */
          rc = pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
          if (rc)
            fatal (f, _("pthread_attr_setdetachstate failed: err=%d"), rc);
          rc = pthread_create (&incdep_threads[i], &attr,
                               incdep_worker_pthread, (void *)(size_t)i);
          if (rc)
            fatal (f, _("pthread_create failed: err=%d"), rc);
          pthread_attr_destroy (&attr);

#elif defined (WINDOWS32)
          tid = 0;
          hThread = _beginthreadex (NULL, 128*1024, incdep_worker_windows,
                                    (void *)i, 0, &tid);
          if (hThread == 0 || hThread == ~(uintptr_t)0)
            fatal (f, _("_beginthreadex failed: err=%d"), errno);
          incdep_threads[i] = (HANDLE)hThread;

#elif defined (__OS2__)
          tid = _beginthread (incdep_worker_os2, NULL, 128*1024, (void *)i);
          if (tid <= 0)
            fatal (f, _("_beginthread failed: err=%d"), errno);
          incdep_threads[i] = tid;
#endif
        }
    }
  else
    incdep_num_threads = 0;

  incdep_initialized = 1;
}
malloc_zone_t* malloc_create_zone(vm_size_t start_size, unsigned flags)
{
    return malloc_default_zone();
}

malloc_zone_t* malloc_zone_from_ptr(const void *ptr)
{
    return malloc_default_zone();
}

malloc_zone_t *objc_collectableZone(void)
{
    return malloc_default_zone();
}
__attribute__((constructor))
void
register_zone(void)
{
	zone.size = (void *)zone_size;
	zone.malloc = (void *)zone_malloc;
	zone.calloc = (void *)zone_calloc;
	zone.valloc = (void *)zone_valloc;
	zone.free = (void *)zone_free;
	zone.realloc = (void *)zone_realloc;
	zone.destroy = (void *)zone_destroy;
	zone.zone_name = "replace_malloc_zone";
	zone.batch_malloc = NULL;
	zone.batch_free = NULL;
	zone.introspect = &zone_introspect;
	zone.version = JEMALLOC_ZONE_VERSION;
	zone.memalign = zone_memalign;
	zone.free_definite_size = zone_free_definite_size;
#if (JEMALLOC_ZONE_VERSION >= 8)
	zone.pressure_relief = NULL;
#endif

	zone_introspect.enumerator = NULL;
	zone_introspect.good_size = (void *)zone_good_size;
	zone_introspect.check = NULL;
	zone_introspect.print = NULL;
	zone_introspect.log = NULL;
	zone_introspect.force_lock = (void *)zone_force_lock;
	zone_introspect.force_unlock = (void *)zone_force_unlock;
	zone_introspect.statistics = NULL;
	zone_introspect.zone_locked = NULL;
#if (JEMALLOC_ZONE_VERSION >= 7)
	zone_introspect.enable_discharge_checking = NULL;
	zone_introspect.disable_discharge_checking = NULL;
	zone_introspect.discharge = NULL;
#ifdef __BLOCKS__
	zone_introspect.enumerate_discharged_pointers = NULL;
#else
	zone_introspect.enumerate_unavailable_without_blocks = NULL;
#endif
#endif

	/*
	 * The default purgeable zone is created lazily by OSX's libc.  It uses
	 * the default zone when it is created for "small" allocations
	 * (< 15 KiB), but assumes the default zone is a scalable_zone.  This
	 * obviously fails when the default zone is the jemalloc zone, so
	 * malloc_default_purgeable_zone is called beforehand so that the
	 * default purgeable zone is created when the default zone is still
	 * a scalable_zone.
	 */
	malloc_zone_t *purgeable_zone = malloc_default_purgeable_zone();

	/* Register the custom zone.  At this point it won't be the default. */
	malloc_zone_register(&zone);

	do {
		malloc_zone_t *default_zone = malloc_default_zone();
		/*
		 * Unregister and reregister the default zone.  On OSX >= 10.6,
		 * unregistering takes the last registered zone and places it
		 * at the location of the specified zone.  Unregistering the
		 * default zone thus makes the last registered one the default.
		 * On OSX < 10.6, unregistering shifts all registered zones.
		 * The first registered zone then becomes the default.
		 */
		malloc_zone_unregister(default_zone);
		malloc_zone_register(default_zone);
		/*
		 * On OSX 10.6, having the default purgeable zone appear before
		 * the default zone makes some things crash because it thinks
		 * it owns the default zone allocated pointers.  We thus
		 * unregister/re-register it in order to ensure it's always
		 * after the default zone.  On OSX < 10.6, as unregistering
		 * shifts registered zones, this simply removes the purgeable
		 * zone from the list and adds it back at the end, after the
		 * default zone.  On OSX >= 10.6, unregistering replaces the
		 * purgeable zone with the last registered zone above, i.e.
		 * the default zone.  Registering it again then puts it at the
		 * end, obviously after the default zone.
		 */
		malloc_zone_unregister(purgeable_zone);
		malloc_zone_register(purgeable_zone);
	} while (malloc_default_zone() != &zone);
}
static PRInt64 GetHeapZone0Used(void *)
{
    malloc_statistics_t stats;
    malloc_zone_statistics(malloc_default_zone(), &stats);
    return stats.size_allocated;
}

static PRInt64 GetHeapZone0Committed(void *)
{
    malloc_statistics_t stats;
    malloc_zone_statistics(malloc_default_zone(), &stats);
    return stats.size_in_use;
}
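/*
 * To reproduce these counters outside the reporter framework, a standalone
 * sketch (the output format is an illustrative assumption):
 */
#include <malloc/malloc.h>
#include <stdio.h>

int main(void)
{
    malloc_statistics_t stats;
    malloc_zone_statistics(malloc_default_zone(), &stats);
    printf("allocated: %zu bytes, in use: %zu bytes\n",
           stats.size_allocated, stats.size_in_use);
    return 0;
}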
int main() {
    test_introspection(auto_zone_create("auto zone"), true);
    test_introspection(malloc_default_zone(), false);
    test_introspection(malloc_create_zone(8192, 0), false);
    return 0;
}