// Harness entry point for the allocator white-box tests.
// NOTE: the call order below is deliberate and must not be changed —
// the back-reference leak check has to run before anything memory-intensive.
int TestMain () {
    // Programmatically disable huge pages before any other allocator work.
    scalable_allocation_mode(USE_HUGE_PAGES, 0);
#if !_XBOX && !__TBB_WIN8UI_SUPPORT
    // Set the env var to the opposite value to verify that the programmatic
    // setting above takes precedence over the environment.
    putenv((char*)"TBB_MALLOC_USE_HUGE_PAGES=yes");
#endif
    checkNoHugePages();
    // backreference requires that initialization was done
    if(!isMallocInitialized()) doInitialization();
    // Re-check after initialization: it must not re-enable huge pages.
    checkNoHugePages();
    // to succeed, leak detection must be the 1st memory-intensive test
    TestBackRef();
    TestPools();
    TestBackend();
#if MALLOC_CHECK_RECURSION
    // Exercise the startup (bootstrap) allocator under varying concurrency,
    // from MaxThread down to MinThread, and verify nothing leaks from it.
    for( int p=MaxThread; p>=MinThread; --p ) {
        TestStartupAlloc::initBarrier( p );
        NativeParallelFor( p, TestStartupAlloc() );
        ASSERT(!firstStartupBlock, "Startup heap memory leak detected");
    }
#endif
    TestLargeObjectCache();
    TestObjectRecognition();
    TestBitMask();
    return Harness::Done;
}
// Verifies that setting a tiny soft heap limit disables internal caching:
// once the limit is active, every sufficiently large allocation should go
// to the OS and be returned to it on free, leaving the process footprint
// (getMemSize()) unchanged.
void TestHeapLimit() {
    if(!isMallocInitialized()) doInitialization();
    // tiny limit to stop caching
    int res = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 1);
    ASSERT(res == TBBMALLOC_OK, NULL);
    // provoke bootstrap heap initialization before recording memory size
    scalable_free(scalable_malloc(8));
    size_t n, sizeBefore = getMemSize();

    // Try to provoke call to OS for memory to check that
    // requests are not fulfilled from caches.
    // Single call is not enough here because of backend fragmentation.
    for (n = minLargeObjectSize; n < 10*1024*1024; n += 16*1024) {
        void *p = scalable_malloc(n);
        // Footprint grew => this size finally forced an OS request.
        bool leave = (sizeBefore != getMemSize());
        scalable_free(p);
        if (leave) break;
        // Freeing must return memory to the OS, not to a cache.
        ASSERT(sizeBefore == getMemSize(), "No caching expected");
    }
    ASSERT(n < 10*1024*1024, "scalable_malloc doesn't provoke OS request for memory, "
           "is some internal cache still used?");
    // estimate number of objects in single bootstrap block
    int objInBootstrapHeapBlock = (slabSize-2*estimatedCacheLineSize)/sizeof(TLSData);
    // When we have more threads than objects in a bootstrap heap block,
    // an additional block can be allocated from a region that is different
    // from the original region. Thus even after all caches are cleaned,
    // we are unable to get back to sizeBefore.
    ASSERT_WARNING(MaxThread<=objInBootstrapHeapBlock, "The test might fail for larger thread number, "
                   "as bootstrap heap is not released till size checking.");
    // Repeat the footprint check concurrently, from MaxThread down to MinThread.
    for( int p=MaxThread; p>=MinThread; --p ) {
        RunTestHeapLimit::initBarrier( p );
        NativeParallelFor( p, RunTestHeapLimit(sizeBefore) );
    }
    // it's try to match limit as well as set limit, so call here
    res = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 1);
    ASSERT(res == TBBMALLOC_OK, NULL);
    // After all threads finished, footprint must match the recorded baseline.
    size_t m = getMemSize();
    ASSERT(sizeBefore == m, NULL);
    // restore default
    res = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 0);
    ASSERT(res == TBBMALLOC_OK, NULL);
}
int main(void) { size_t i, j; int curr_mode, res; void *p1, *p2; atexit( MyExit ); for ( curr_mode = 0; curr_mode<=1; curr_mode++) { assert(ExpectedResultHugePages == scalable_allocation_mode(TBBMALLOC_USE_HUGE_PAGES, !curr_mode)); p1 = scalable_malloc(10*1024*1024); assert(p1); assert(ExpectedResultHugePages == scalable_allocation_mode(TBBMALLOC_USE_HUGE_PAGES, curr_mode)); scalable_free(p1); } /* note that huge pages (if supported) are still enabled at this point */ #if __TBB_SOURCE_DIRECTLY_INCLUDED assert(TBBMALLOC_OK == scalable_allocation_mode(TBBMALLOC_INTERNAL_SOURCE_INCLUDED, 0)); #endif for( i=0; i<=1<<16; ++i) { p1 = scalable_malloc(i); if( !p1 ) printf("Warning: there should be memory but scalable_malloc returned NULL\n"); scalable_free(p1); } p1 = p2 = NULL; for( i=1024*1024; ; i/=2 ) { scalable_free(p1); p1 = scalable_realloc(p2, i); p2 = scalable_calloc(i, 32); if (p2) { if (i<sizeof(size_t)) { for (j=0; j<i; j++) assert(0==*((char*)p2+j)); } else { for (j=0; j<i; j+=sizeof(size_t)) assert(0==*((size_t*)p2+j)); } } scalable_free(p2); p2 = scalable_malloc(i); if (i==0) break; } for( i=1; i<1024*1024; i*=2 ) { scalable_free(p1); p1 = scalable_realloc(p2, i); p2 = scalable_malloc(i); } scalable_free(p1); scalable_free(p2); res = scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS, NULL); assert(res == TBBMALLOC_OK); res = scalable_allocation_command(TBBMALLOC_CLEAN_THREAD_BUFFERS, NULL); /* expect all caches cleaned before, so got nothing from CLEAN_THREAD_BUFFERS */ assert(res == TBBMALLOC_NO_EFFECT); /* check that invalid param argument give expected result*/ res = scalable_allocation_command(TBBMALLOC_CLEAN_THREAD_BUFFERS, (void*)(intptr_t)1); assert(res == TBBMALLOC_INVALID_PARAM); __TBB_mallocProcessShutdownNotification(); printf("done\n"); return 0; }
// Harness entry point: routes the replaceable-allocator test hooks to the
// scalable_* implementations (or the system allocator with "-s"), determines
// whether errno checking is usable, then stress-tests allocation round-robin
// across thread counts with and without a soft heap limit.
HARNESS_EXPORT int main(int argc, char* argv[]) {
    argC=argc;
    argV=argv;
    MaxThread = MinThread = 1;
    // Point the generic test hooks at the tbbmalloc entry points.
    Rmalloc=scalable_malloc;
    Rrealloc=scalable_realloc;
    Rcalloc=scalable_calloc;
    Tfree=scalable_free;
    Rposix_memalign=scalable_posix_memalign;
    Raligned_malloc=scalable_aligned_malloc;
    Raligned_realloc=scalable_aligned_realloc;
    Taligned_free=scalable_aligned_free;
    // check if we were called to test standard behavior
    for (int i=1; i< argc; i++) {
        if (strcmp((char*)*(argv+i),"-s")==0) {
#if __INTEL_COMPILER == 1400 && __linux__
            // Workaround for Intel(R) C++ Compiler XE, version 14.0.0.080:
            // unable to call setSystemAllocs() in such configuration.
            REPORT("Known issue: Standard allocator testing is not supported.\n");
            REPORT( "skip\n" );
            return 0;
#else
            setSystemAllocs();
            argC--;   // hide "-s" from the harness command-line parser
            break;
#endif
        }
    }
    ParseCommandLine( argC, argV );
#if __linux__
    /* According to man pthreads "NPTL threads do not share resource limits
       (fixed in kernel 2.6.10)". Use per-threads limits for affected systems. */
    if ( LinuxKernelVersion() < 2*1000000 + 6*1000 + 10)
        perProcessLimits = false;
#endif
    //-------------------------------------
#if __APPLE__
    /* Skip due to lack of memory limit enforcing under macOS. */
#else
    // Run the realloc test under a 200MB memory limit, then lift the limit.
    limitMem(200);
    ReallocParam();
    limitMem(0);
#endif
    //for linux and dynamic runtime errno is used to check allocator functions
    //check if library compiled with /MD(d) and we can use errno
#if _MSC_VER
#if defined(_MT) && defined(_DLL) //check errno if test itself compiled with /MD(d) only
    char* version_info_block = NULL;
    int version_info_block_size;
    LPVOID comments_block = NULL;
    UINT comments_block_size;
#ifdef _DEBUG
#define __TBBMALLOCDLL "tbbmalloc_debug.dll"
#else //_DEBUG
#define __TBBMALLOCDLL "tbbmalloc.dll"
#endif //_DEBUG
    // Inspect the DLL's version-info Comments string to learn which runtime
    // the library was built with; errno checking only works for /MD(d).
    version_info_block_size = GetFileVersionInfoSize( __TBBMALLOCDLL, (LPDWORD)&version_info_block_size );
    if( version_info_block_size
        && ((version_info_block = (char*)malloc(version_info_block_size)) != NULL)
        && GetFileVersionInfo( __TBBMALLOCDLL, NULL, version_info_block_size, version_info_block )
        && VerQueryValue( version_info_block, "\\StringFileInfo\\000004b0\\Comments",
                          &comments_block, &comments_block_size )
        && strstr( (char*)comments_block, "/MD" ) ){
            __tbb_test_errno = true;
    }
    if( version_info_block )
        free( version_info_block );
#endif // defined(_MT) && defined(_DLL)
#else  // _MSC_VER
    __tbb_test_errno = true;
#endif // _MSC_VER
    CheckArgumentsOverflow();
    CheckReallocLeak();
    // Round-robin allocation stress across thread counts, once without and
    // once with a 16KB soft heap limit.
    for( int p=MaxThread; p>=MinThread; --p ) {
        REMARK("testing with %d threads\n", p );
        for (int limit=0; limit<2; limit++) {
            int ret = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 16*1024*limit);
            ASSERT(ret==TBBMALLOC_OK, NULL);
            Harness::SpinBarrier *barrier = new Harness::SpinBarrier(p);
            NativeParallelFor( p, RoundRobin(p, barrier, Verbose) );
            delete barrier;
        }
    }
    // Restore the default (no soft heap limit).
    int ret = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 0);
    ASSERT(ret==TBBMALLOC_OK, NULL);
    if( !error_occurred )
        REPORT("done\n");
    return 0;
}
// Worker body: re-apply the tiny soft heap limit from this thread, then
// verify that the process memory footprint still equals the baseline
// (memSize) recorded by the caller before the threads started.
void operator() () const {
    const int modeStatus = scalable_allocation_mode(TBBMALLOC_SET_SOFT_HEAP_LIMIT, 1);
    ASSERT(modeStatus == TBBMALLOC_OK, NULL);
    const size_t currentFootprint = getMemSize();
    ASSERT(currentFootprint == memSize, NULL);
}