static void test_output(void)
{
  puts( "test the output function of _Heap_Walk()" );
  puts( "use the (already tested) error case of a page size of 0" );

  /* simple case in which one PASS and one FAIL line will be printed */
  test_heap_init_default();
  TestHeap.page_size = 0;

  test_call_heap_walk( false );
  _Heap_Walk( &TestHeap, 0, true );
}
static void test_heap_do_initialize(
  uintptr_t area_size,
  uintptr_t page_size,
  bool success_expected
)
{
  uintptr_t const rv =
    _Heap_Initialize( &TestHeap, TestHeapMemory, area_size, page_size );

  if ( success_expected ) {
    rtems_test_assert( rv > 0 && _Heap_Walk( &TestHeap, 0, false ) );
  } else {
    rtems_test_assert( rv == 0 );
  }
}
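/*
 * A minimal sketch of how test_heap_do_initialize() can be driven, assuming
 * TestHeapMemory is a static byte array so that sizeof() yields its size in
 * bytes.  The wrapper names below (test_heap_init_success and
 * test_heap_init_failure) are hypothetical and not part of the original
 * test suite.
 */
static void test_heap_init_success( uintptr_t page_size )
{
  /* a full-sized area with a sane page size must initialize */
  test_heap_do_initialize( sizeof( TestHeapMemory ), page_size, true );
}

static void test_heap_init_failure( void )
{
  /* an area too small to hold even one block must be rejected */
  test_heap_do_initialize( 0, CPU_ALIGNMENT, false );
}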
bool _Protected_heap_Walk(
  Heap_Control *the_heap,
  int           source,
  bool          do_dump
)
{
  bool status;

  /*
   * If we are called from within a dispatching critical section,
   * then it is forbidden to lock a mutex.  But since we are inside
   * a critical section, it should be safe to walk the heap unlocked.
   *
   * NOTE: Dispatching is also disabled during initialization.
   */
  if ( !_Thread_Dispatch_disable_level ) {
    _RTEMS_Lock_allocator();
    status = _Heap_Walk( the_heap, source, do_dump );
    _RTEMS_Unlock_allocator();
  } else {
    status = _Heap_Walk( the_heap, source, do_dump );
  }

  return status;
}
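/*
 * A minimal usage sketch, assuming the RTEMS C heap handle RTEMS_Malloc_Heap
 * (declared in <rtems/malloc.h>) is available; the wrapper name
 * check_malloc_heap() is hypothetical.  Passing do_dump == false walks the
 * heap silently and merely reports its consistency.
 */
static bool check_malloc_heap( void )
{
  /* source 0 is an arbitrary caller tag that shows up in the dump output */
  return _Protected_heap_Walk( RTEMS_Malloc_Heap, 0, false );
}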
static void test_heap_assert( bool ret, bool expected )
{
  rtems_test_assert( ret == expected );
  rtems_test_assert( _Heap_Walk( &TestHeap, 0, false ) );
}
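/*
 * A hypothetical example of test_heap_assert() in use, assuming a heap
 * operation such as _Heap_Free() that reports success as a bool: the helper
 * verifies both the operation's result and that the heap is still
 * consistent afterwards.  The name test_free_checked() is illustrative.
 */
static void test_free_checked( void *ptr, bool expected )
{
  test_heap_assert( _Heap_Free( &TestHeap, ptr ), expected );
}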
static void test_check_alloc(
  void *alloc_begin_ptr,
  void *expected_alloc_begin_ptr,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  uintptr_t const min_block_size = TestHeap.min_block_size;
  uintptr_t const page_size = TestHeap.page_size;

  rtems_test_assert( alloc_begin_ptr == expected_alloc_begin_ptr );

  if ( expected_alloc_begin_ptr != NULL ) {
    uintptr_t const alloc_begin = (uintptr_t) alloc_begin_ptr;
    uintptr_t const alloc_end = alloc_begin + alloc_size;

    uintptr_t const alloc_area_begin =
      _Heap_Align_down( alloc_begin, page_size );
    uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
#if UNUSED
    uintptr_t const alloc_area_size = alloc_area_offset + alloc_size;
#endif
    Heap_Block *block =
      _Heap_Block_of_alloc_area( alloc_area_begin, page_size );
    uintptr_t const block_begin = (uintptr_t) block;
    uintptr_t const block_size = _Heap_Block_size( block );
    uintptr_t const block_end = block_begin + block_size;

    rtems_test_assert( block_size >= min_block_size );
    rtems_test_assert( block_begin < block_end );
    rtems_test_assert(
      _Heap_Is_aligned( block_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
    );
    rtems_test_assert( _Heap_Is_aligned( block_size, page_size ) );

    rtems_test_assert( alloc_end <= block_end + HEAP_ALLOC_BONUS );
    rtems_test_assert( alloc_area_begin > block_begin );
    rtems_test_assert( alloc_area_offset < page_size );

    rtems_test_assert( _Heap_Is_aligned( alloc_area_begin, page_size ) );
    if ( alignment == 0 ) {
      rtems_test_assert( alloc_begin == alloc_area_begin );
    } else {
      rtems_test_assert( _Heap_Is_aligned( alloc_begin, alignment ) );
    }

    if ( boundary != 0 ) {
      uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );

      rtems_test_assert( alloc_size <= boundary );
      rtems_test_assert(
        boundary_line <= alloc_begin || alloc_end <= boundary_line
      );
    }
  }

  rtems_test_assert(
    page_size < CPU_ALIGNMENT || _Heap_Walk( &TestHeap, 0, false )
  );
}
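/*
 * A sketch of a typical driver for test_check_alloc(), assuming the internal
 * allocator entry point _Heap_Allocate_aligned_with_boundary(): allocate
 * first, then verify the result against the expectation.  The helper name
 * test_alloc() is illustrative.
 */
static void *test_alloc(
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary,
  void *expected_alloc_begin_ptr
)
{
  void *alloc_begin_ptr = _Heap_Allocate_aligned_with_boundary(
    &TestHeap,
    alloc_size,
    alignment,
    boundary
  );

  test_check_alloc(
    alloc_begin_ptr,
    expected_alloc_begin_ptr,
    alloc_size,
    alignment,
    boundary
  );

  return alloc_begin_ptr;
}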
static void test_call_heap_walk( bool expected_retval )
{
  bool retval = _Heap_Walk( &TestHeap, 0, DUMP );

  rtems_test_assert( retval == expected_retval );
}