Example #1
/* i2c_transfer_wait --
 *     Initiate an I2C bus transfer and block until the transfer is
 *     finished. This function waits on a semaphore if the system is in
 *     the SYSTEM_STATE_UP state, and polls the done flag in other states.
 *
 * PARAMETERS:
 *     bus - I2C bus number
 *     msg - pointer to the array of transfer messages
 *     nmsg - number of messages in the transfer
 *
 * RETURNS:
 *     I2C_SUCCESSFUL, if the transfer finished successfully;
 *     I2C_RESOURCE_NOT_AVAILABLE, if a semaphore operation failed;
 *     otherwise, the status field of the first message in the transfer
 *     that finished with an error.
 */
i2c_message_status
i2c_transfer_wait(i2c_bus_number bus, i2c_message *msg, int nmsg)
{
    rtems_status_code sc;
    int i;
    if (_System_state_Is_up(_System_state_Get()))
    {
        sc = i2c_transfer_wait_sema(bus, msg, nmsg);
    }
    else
    {
        sc = i2c_transfer_wait_poll(bus, msg, nmsg);
    }

    if (sc != RTEMS_SUCCESSFUL)
        return I2C_RESOURCE_NOT_AVAILABLE;

    for (i = 0; i < nmsg; i++)
    {
        if (msg[i].status != I2C_SUCCESSFUL)
        {
            return msg[i].status;
        }
    }
    return I2C_SUCCESSFUL;
}
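A hedged caller sketch for the routine above; the wrapper name, bus number 0, and the error reporting are illustrative assumptions, not part of the driver. The message array is assumed to be filled in elsewhere.

/* Hypothetical caller of i2c_transfer_wait(). */
static i2c_message_status
do_transfer(i2c_message *msgs, int nmsg)
{
    i2c_message_status status = i2c_transfer_wait(0, msgs, nmsg);

    if (status != I2C_SUCCESSFUL)
        printk("i2c transfer failed, status %d\n", (int) status);

    return status;
}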
Example #2
void rtems_shutdown_executive(
   uint32_t   result
)
{
  if ( _System_state_Is_up( _System_state_Get() ) ) {
    #if defined(RTEMS_SMP)
      _SMP_Request_other_cores_to_shutdown();
    #endif

    _Per_CPU_Information[0].idle->Wait.return_code = result;

    _System_state_Set( SYSTEM_STATE_SHUTDOWN );
    _Thread_Stop_multitasking();

    /*******************************************************************
     *******************************************************************
     ******     RETURN TO RTEMS_INITIALIZE_START_MULTITASKING()   ******
     ******                 AND THEN TO BOOT_CARD()               ******
     *******************************************************************
     *******************************************************************/
  }
  _Internal_error_Occurred(
    INTERNAL_ERROR_CORE,
    true,
    INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP
  );
}
Example #3
void _Thread_Disable_dispatch( void )
{
  /*
   *  This check is very brutal to system performance but is very helpful
   *  at finding blown stack problems.  If you have a stack problem and
   *  need help finding it, then uncomment this code.  Every system
   *  call will check the stack and since mutexes are used frequently
   *  in most systems, you might get lucky.
   */
  #if defined(RTEMS_HEAVY_STACK_DEBUG)
    if (_System_state_Is_up(_System_state_Get()) && (_ISR_Nest_level == 0)) {
      if ( rtems_stack_checker_is_blown() ) {
        printk( "Stack blown!!\n" );
        rtems_fatal_error_occurred( 99 );
      }
    }
  #endif

  _Thread_Dispatch_increment_disable_level();
  RTEMS_COMPILER_MEMORY_BARRIER();

  /*
   * This check is even more brutal than the other one.  This enables
   * malloc heap integrity checking upon entry to every system call.
   */
  #if defined(RTEMS_HEAVY_MALLOC_DEBUG)
    if ( _Thread_Dispatch_get_disable_level() == 1 ) {
      _Heap_Walk( RTEMS_Malloc_Heap, 99, false );
    }
  #endif
}
Example #4
void *realloc(
  void *ptr,
  size_t size
)
{
  uintptr_t old_size;
  char    *new_area;

  /*
   *  Do not attempt to allocate memory if in a critical section or ISR.
   */

  if (_System_state_Is_up(_System_state_Get())) {
    if (!_Thread_Dispatch_is_enabled())
      return (void *) 0;
  }

  /*
   * Continue with realloc().
   */
  if ( !ptr )
    return malloc( size );

  if ( !size ) {
    free( ptr );
    return (void *) 0;
  }

  if ( !_Protected_heap_Get_block_size(RTEMS_Malloc_Heap, ptr, &old_size) ) {
    errno = EINVAL;
    return (void *) 0;
  }

  /*
   *  Now resize it.
   */
  if ( _Protected_heap_Resize_block( RTEMS_Malloc_Heap, ptr, size ) ) {
    return ptr;
  }

  /*
   *  There used to be a free on this error case but it is wrong to
   *  free the memory per OpenGroup Single UNIX Specification V2
   *  and the C Standard.
   */

  new_area = malloc( size );

  if ( !new_area ) {
    return (void *) 0;
  }

  memcpy( new_area, ptr, (size < old_size) ? size : old_size );
  free( ptr );

  return new_area;

}
Example #5
void _Assert_Owner_of_giant( void )
{
  Giant_Control *giant = &_Giant;

  _Assert(
    giant->owner_cpu == _SMP_Get_current_processor()
      || !_System_state_Is_up( _System_state_Get() )
  );
}
Example #6
void *malloc(
  size_t  size
)
{
  void        *return_this;

  MSBUMP(malloc_calls, 1);

  /*
   *  If some free's have been deferred, then do them now.
   */
  malloc_deferred_frees_process();

  /*
   * Validate the parameters
   */
  if ( !size )
    return (void *) 0;

  /*
   *  Do not attempt to allocate memory if not in correct system state.
   */
  if ( _System_state_Is_up(_System_state_Get()) &&
       !malloc_is_system_state_OK() )
    return NULL;

  /*
   * Try to give a segment in the current heap if there is not
   * enough space then try to grow the heap.
   * If this fails then return a NULL pointer.
   */

  return_this = _Protected_heap_Allocate( RTEMS_Malloc_Heap, size );

  if ( !return_this ) {
    return_this = (*rtems_malloc_extend_handler)( RTEMS_Malloc_Heap, size );
    if ( !return_this ) {
      errno = ENOMEM;
      return (void *) 0;
    }
  }

  /*
   *  If the user wants us to dirty the allocated memory, then do it.
   */
  if ( rtems_malloc_dirty_helper )
    (*rtems_malloc_dirty_helper)( return_this, size );

  /*
   *  If configured, update the statistics
   */
  if ( rtems_malloc_statistics_helpers )
    (*rtems_malloc_statistics_helpers->at_malloc)(return_this);

  return return_this;
}
Example #7
static void
safe_printf (const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if ( _System_state_Is_up( _System_state_Get() ) )
		vfprintf( stderr, fmt, ap );
	else
		vprintk( fmt, ap );
	va_end(ap);
}
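A brief usage sketch: because safe_printf() falls back to vprintk() before the system is up, it can be called from early initialization as well as from running tasks. The wrapper name below is an assumption for illustration.

static void report_probe_failure(int status)
{
	/* Safe both before multitasking is started (vprintk) and
	 * afterwards (stderr). */
	safe_printf("console probe failed (status %d)\n", status);
}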
Example #8
void Fatal_extension( uint32_t source, bool is_internal, uint32_t error )
{
  if ( source != INTERNAL_ERROR_RTEMS_API ) {
    printk( "unexpected fatal source\n" );
  } else if ( is_internal ) {
    printk( "unexpected fatal is internal\n" );
  } else if ( error != 0x81 ) {
    printk( "unexpected fatal error\n" );
  } else {
    printk( "*** END OF TEST STACK CHECKER ***\n" );
  }

  if ( _System_state_Is_up( _System_state_Get() ) )
    _Thread_Stop_multitasking();
}
Example #9
void rtems_cache_coherent_add_area(
  void *area_begin,
  uintptr_t area_size
)
{
  if ( _System_state_Is_up( _System_state_Get()) ) {
    _RTEMS_Lock_allocator();

    add_area( area_begin, area_size );

    _RTEMS_Unlock_allocator();
  } else {
    add_area( area_begin, area_size );
  }
}
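A hedged BSP-side sketch of registering a coherent memory region during startup; the linker symbols and the function name are assumptions and will differ between BSPs.

extern char bsp_nocache_begin[];  /* hypothetical linker symbols */
extern char bsp_nocache_end[];

void bsp_add_nocache_area(void)
{
  rtems_cache_coherent_add_area(
    bsp_nocache_begin,
    (uintptr_t) (bsp_nocache_end - bsp_nocache_begin)
  );
}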
Example #10
int rtems_memalign(
  void   **pointer,
  size_t   alignment,
  size_t   size
)
{
  void *return_this;

  /*
   *  Parameter error checks
   */
  if ( !pointer )
    return EINVAL;

  *pointer = NULL;

  /*
   *  Do not attempt to allocate memory if not in correct system state.
   */
  if ( _System_state_Is_up(_System_state_Get()) &&
       !malloc_is_system_state_OK() )
    return EINVAL;

  /*
   *  If some free's have been deferred, then do them now.
   */
  malloc_deferred_frees_process();

  /*
   *  Perform the aligned allocation requested
   */
  return_this = _Protected_heap_Allocate_aligned(
    RTEMS_Malloc_Heap,
    size,
    alignment
  );
  if ( !return_this )
    return ENOMEM;

  /*
   *  If configured, update the more involved statistics
   */
  if ( rtems_malloc_statistics_helpers )
    (*rtems_malloc_statistics_helpers->at_malloc)(pointer);

  *pointer = return_this;
  return 0;
}
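A short usage sketch: rtems_memalign() reports errors through its POSIX-style return value rather than through errno, so the caller checks the status directly. The alignment and size below are arbitrary.

static void *get_aligned_buffer(void)
{
  void *buffer = NULL;
  int   status = rtems_memalign(&buffer, 64, 1024);

  if (status != 0)
    printk("rtems_memalign failed: %d\n", status);

  return buffer;  /* NULL on failure */
}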
Example #11
void Fatal_extension(
  uint32_t   source,
  bool       is_internal,
  uint32_t   error
)
{
  print_test_begin_message();
  printk( "Fatal error (%s) hit\n", FATAL_ERROR_DESCRIPTION );

  if ( source != FATAL_ERROR_EXPECTED_SOURCE ){
    printk( "ERROR==> Fatal Extension source Expected (");
    Put_Source( FATAL_ERROR_EXPECTED_SOURCE );
    printk( ") received (");
    Put_Source( source );
    printk( ")\n" );
  }

  if ( is_internal !=  FATAL_ERROR_EXPECTED_IS_INTERNAL )
  {
    if ( is_internal == TRUE )
      printk(
        "ERROR==> Fatal Extension is internal set to TRUE expected FALSE\n"
      );
    else
      printk(
        "ERROR==> Fatal Extension is internal set to FALSE expected TRUE\n"
      );
  }

  if ( error !=  FATAL_ERROR_EXPECTED_ERROR ) {
    printk( "ERROR==> Fatal Error Expected (");
    Put_Error( source, FATAL_ERROR_EXPECTED_ERROR );
    printk( ") received (");
    Put_Error( source, error );
    printk( ")\n" );
  }

  if (
    source == FATAL_ERROR_EXPECTED_SOURCE
      && is_internal == FATAL_ERROR_EXPECTED_IS_INTERNAL
      && error == FATAL_ERROR_EXPECTED_ERROR
  ) {
    printk( "*** END OF TEST FATAL " FATAL_ERROR_TEST_NAME " ***\n" );
  }

  if ( _System_state_Is_up( _System_state_Get() ) )
    _Thread_Stop_multitasking();
}
Example #12
Malloc_System_state _Malloc_System_state( void )
{
  System_state_Codes state = _System_state_Get();

  if ( _System_state_Is_up( state ) ) {
    if ( _Thread_Dispatch_is_enabled() ) {
      return MALLOC_SYSTEM_STATE_NORMAL;
    } else {
      return MALLOC_SYSTEM_STATE_NO_ALLOCATION;
    }
  } else if ( _System_state_Is_before_multitasking( state ) ) {
    return MALLOC_SYSTEM_STATE_NORMAL;
  } else {
    return MALLOC_SYSTEM_STATE_NO_PROTECTION;
  }
}
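A hedged sketch of how an allocation front end might consult this query; it is not the actual RTEMS malloc() source, just an illustration of the three states returned above.

static bool allocation_is_allowed(void)
{
  switch ( _Malloc_System_state() ) {
    case MALLOC_SYSTEM_STATE_NORMAL:
    case MALLOC_SYSTEM_STATE_NO_PROTECTION:
      return true;
    case MALLOC_SYSTEM_STATE_NO_ALLOCATION:
    default:
      /* The system is up but thread dispatching is disabled. */
      return false;
  }
}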
Example #13
MC68681_STATIC void mc68681_write_polled(
  int   minor,
  char  cChar
)
{
  uint32_t                pMC68681_port;
  unsigned char           ucLineStatus;
  int                     iTimeout;
  getRegister_f           getReg;
  setRegister_f           setReg;

  pMC68681_port = Console_Port_Tbl[minor]->ulCtrlPort2;
  getReg        = Console_Port_Tbl[minor]->getRegister;
  setReg        = Console_Port_Tbl[minor]->setRegister;

  /*
   * wait for transmitter holding register to be empty
   */
  iTimeout = 1000;
  ucLineStatus = (*getReg)(pMC68681_port, MC68681_STATUS);
  while ((ucLineStatus & (MC68681_TX_READY|MC68681_TX_EMPTY)) == 0) {

    if ((ucLineStatus & 0xF0))
      (*setReg)( pMC68681_port, MC68681_COMMAND, MC68681_MODE_REG_RESET_ERROR );

    /*
     * Yield while we wait
     */

#if 0
     if(_System_state_Is_up(_System_state_Get())) {
       rtems_task_wake_after(RTEMS_YIELD_PROCESSOR);
     }
#endif
     ucLineStatus = (*getReg)(pMC68681_port, MC68681_STATUS);
     if(!--iTimeout) {
       break;
     }
  }

  /*
   * transmit character
   */

  (*setReg)(pMC68681_port, MC68681_TX_BUFFER, cChar);
}
Example #14
static rtems_status_code bsp_interrupt_lock(void)
{
  rtems_status_code sc = RTEMS_SUCCESSFUL;
  if (_System_state_Is_up(_System_state_Get())) {
    if (bsp_interrupt_mutex == RTEMS_ID_NONE) {
      rtems_id mutex = RTEMS_ID_NONE;
      rtems_interrupt_level level;

      /* Create a mutex */
      sc = rtems_semaphore_create (
        rtems_build_name('I', 'N', 'T', 'R'),
        1,
        RTEMS_BINARY_SEMAPHORE | RTEMS_INHERIT_PRIORITY | RTEMS_PRIORITY,
        0,
        &mutex
      );
      if (sc != RTEMS_SUCCESSFUL) {
        return sc;
      }

      /* Assign the mutex */
      rtems_interrupt_disable(level);
      if (bsp_interrupt_mutex == RTEMS_ID_NONE) {
        /* Nobody else assigned the mutex in the meantime */

        bsp_interrupt_mutex = mutex;
        rtems_interrupt_enable(level);
      } else {
        /* Somebody else won */

        rtems_interrupt_enable(level);
        sc = rtems_semaphore_delete(mutex);
        if (sc != RTEMS_SUCCESSFUL) {
          return sc;
        }
      }
    }
    return rtems_semaphore_obtain(
      bsp_interrupt_mutex,
      RTEMS_WAIT,
      RTEMS_NO_TIMEOUT
    );
  } else {
    return RTEMS_SUCCESSFUL;
  }
}
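A hedged sketch of a matching release routine for the lock above, assuming the same bsp_interrupt_mutex variable; note that Example #21 below shows a different variant that relies on the allocator mutex instead.

static rtems_status_code bsp_interrupt_unlock(void)
{
  if (_System_state_Is_up(_System_state_Get())) {
    /* Release the lazily created mutex obtained in bsp_interrupt_lock() */
    return rtems_semaphore_release(bsp_interrupt_mutex);
  } else {
    return RTEMS_SUCCESSFUL;
  }
}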
Example #15
void _SMP_Multicast_action(
  const size_t setsize,
  const cpu_set_t *cpus,
  SMP_Action_handler handler,
  void *arg
)
{
  SMP_Multicast_action node;
  Processor_mask       targets;
  SMP_lock_Context     lock_context;
  uint32_t             i;

  if ( ! _System_state_Is_up( _System_state_Get() ) ) {
    ( *handler )( arg );
    return;
  }

  if ( cpus == NULL ) {
    _Processor_mask_Assign( &targets, _SMP_Get_online_processors() );
  } else {
    _Processor_mask_Zero( &targets );

    for ( i = 0; i < _SMP_Get_processor_count(); ++i ) {
      if ( CPU_ISSET_S( i, setsize, cpus ) ) {
        _Processor_mask_Set( &targets, i );
      }
    }
  }

  _Chain_Initialize_node( &node.Node );
  node.handler = handler;
  node.arg = arg;
  _Processor_mask_Assign( &node.targets, &targets );
  _Atomic_Store_ulong( &node.done, 0, ATOMIC_ORDER_RELAXED );

  _SMP_lock_ISR_disable_and_acquire( &_SMP_Multicast.Lock, &lock_context );
  _Chain_Prepend_unprotected( &_SMP_Multicast.Actions, &node.Node );
  _SMP_lock_Release_and_ISR_enable( &_SMP_Multicast.Lock, &lock_context );

  _SMP_Send_message_multicast( &targets, SMP_MESSAGE_MULTICAST_ACTION );
  _SMP_Multicasts_try_process();

  while ( _Atomic_Load_ulong( &node.done, ATOMIC_ORDER_ACQUIRE ) == 0 ) {
    /* Wait */
  };
}
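A hedged usage sketch that runs a handler on processors 0 and 1; the handler name and the work it does are assumptions. Passing cpus == NULL instead would target all online processors, as the code above shows.

static void flush_local_caches(void *arg)
{
  (void) arg;
  /* per-processor work goes here */
}

static void flush_on_first_two_cpus(void)
{
  cpu_set_t cpus;

  CPU_ZERO(&cpus);
  CPU_SET(0, &cpus);
  CPU_SET(1, &cpus);

  _SMP_Multicast_action(sizeof(cpus), &cpus, flush_local_caches, NULL);
}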
Example #16
void _SMP_Request_other_cores_to_dispatch(void)
{
  int i;
  int cpu;

  cpu = bsp_smp_processor_id();

  if ( !_System_state_Is_up (_System_state_Current) )
    return;
  for (i=1 ; i < _SMP_Processor_count ; i++ ) {
    if ( cpu == i )
      continue;
    if ( _Per_CPU_Information[i].state != RTEMS_BSP_SMP_CPU_UP )
      continue;
    if ( !_Per_CPU_Information[i].dispatch_necessary )
      continue;
    _SMP_Send_message( i, RTEMS_BSP_SMP_CONTEXT_SWITCH_NECESSARY );
  }
}
Example #17
void libc_wrapup(void)
{
  /*
   *  In case RTEMS is already down, don't do this.  It could be
   *  dangerous.
   */

  if (!_System_state_Is_up(_System_state_Get()))
     return;

  /*
   *  This was already done if the user called exit() directly.
  _wrapup_reent(0);
   */

  if (_REENT != _global_impure_ptr) {
      _wrapup_reent(_global_impure_ptr);
#if 0
      /*  Don't reclaim this one, just in case we do printfs
       *  on the way out to ROM.
       */
      _reclaim_reent(&libc_global_reent);
#endif
      _REENT = _global_impure_ptr;
  }

  /*
   * Try to drain output buffers.
   *
   * Should this be changed to do *all* file streams?
   *    _fwalk (_REENT, fclose);
   */

  fclose (stdin);
  fclose (stdout);
  fclose (stderr);
}
Example #18
File: vpd.c Project: epicsdeb/rtems
/* Not Efficient but simple */
int
BSP_vpdRetrieveFields(VpdBuf data)
{
VpdBuf	      b, b1;
VpdKey	      k;
int		      l,fd = -1, put, got;
int           rval = -1;
unsigned char mot[9];
static int	  (*stop)(int fd);

	memset(mot,0,sizeof(mot));

	if ( 0 && _System_state_Is_up(_System_state_Get()) ) {
		read_bytes = read;
		stop       = close;
		fd         = open(BSP_I2C_VPD_EEPROM_DEV_NAME, 0);
		if ( fd < 0 )
			return -1;
	} else {
		fd = (int)dev;
/*
		init(dev);
 *
 *	 Hangs - probably would need a delay here - just leave motload settings
 */

		read_bytes = early_read;
		stop       = early_close;
	}

	if ( read_bytes(fd, mot, 8) < 8 ) {
		goto bail;
	}

	if ( strcmp((char*)mot,"MOTOROLA") )
		goto bail;

	l = 0;
	do {

		/* skip field -- this is not done the first time since l=0 */
		while ( l > sizeof(mot) ) {
			got = read_buf(fd, mot, sizeof(mot));
			if ( got < 1 )
				goto bail;
			l -= got;
		}
		if ( read_buf(fd, mot, l) < 0 )
			goto bail;

		/* now get a new header */
		if ( read_buf(fd, mot, 2) < 2 )
			goto bail;

		k = mot[0];
		l = mot[1];

		for ( b = data; b->key != End; b++ ) {
			if ( b->key == k && (signed char)b->instance >= 0 ) {
				if ( 0 == b->instance--  ) {
					/* found 'instance' of field 'type' */

					/* limit to buffer size */
					put = b->buflen > l ? l : b->buflen;
					if ( read_buf(fd, b->buf, put) < put )
						goto bail;

					/* if this instance is multiply requested, copy the data */
					for ( b1 = b + 1; b1->key != End; b1++ ) {
						if ( b1->key == k && 0 == b1->instance ) {
							b1->instance--;
							/* we don't handle the case where
							 * the first buffer couldn't hold the entire
							 * item but this one could...
							 */
							memcpy(b1->buf, b->buf, put);
							b1->found = mot[1];
						}
					}

					l -= put;
					b->found = mot[1];
				}
			}
		}

	} while ( k != End );

	rval = 0;

bail:

	stop(fd);
	return rval;
}
Example #19
bool _Heap_Walk(
  Heap_Control *heap,
  int source,
  bool dump
)
{
  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;
  Heap_Block *const first_block = heap->first_block;
  Heap_Block *const last_block = heap->last_block;
  Heap_Block *block = first_block;
  Heap_Walk_printer printer = dump ?
    _Heap_Walk_print : _Heap_Walk_print_nothing;

  if ( !_System_state_Is_up( _System_state_Get() ) ) {
    return true;
  }

  if ( !_Heap_Walk_check_control( source, printer, heap ) ) {
    return false;
  }

  do {
    uintptr_t const block_begin = (uintptr_t) block;
    uintptr_t const block_size = _Heap_Block_size( block );
    bool const prev_used = _Heap_Is_prev_used( block );
    Heap_Block *const next_block = _Heap_Block_at( block, block_size );
    uintptr_t const next_block_begin = (uintptr_t) next_block;
    bool const is_not_last_block = block != last_block;

    if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
      (*printer)(
        source,
        true,
        "block 0x%08x: next block 0x%08x not in heap\n",
        block,
        next_block
      );

      return false;
    }

    if ( !_Heap_Is_aligned( block_size, page_size ) && is_not_last_block ) {
      (*printer)(
        source,
        true,
        "block 0x%08x: block size %u not page aligned\n",
        block,
        block_size
      );

      return false;
    }

    if ( block_size < min_block_size && is_not_last_block ) {
      (*printer)(
        source,
        true,
        "block 0x%08x: size %u < min block size %u\n",
        block,
        block_size,
        min_block_size
      );

      return false;
    }

    if ( next_block_begin <= block_begin && is_not_last_block ) {
      (*printer)(
        source,
        true,
        "block 0x%08x: next block 0x%08x is not a successor\n",
        block,
        next_block
      );

      return false;
    }

    if ( !_Heap_Is_prev_used( next_block ) ) {
      if ( !_Heap_Walk_check_free_block( source, printer, heap, block ) ) {
        return false;
      }
    } else if (prev_used) {
      (*printer)(
        source,
        false,
        "block 0x%08x: size %u\n",
        block,
        block_size
      );
    } else {
      (*printer)(
        source,
        false,
        "block 0x%08x: size %u, prev_size %u\n",
        block,
        block_size,
        block->prev_size
      );
    }

    block = next_block;
  } while ( block != first_block );

  return true;
}
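A small usage sketch matching the calls seen in the other examples here: verify the C program heap without dumping every block. Note that the walker above returns true without checking anything while the system is not yet up.

static void check_program_heap(void)
{
  /* Source code 42 is arbitrary; pass true as the last argument to dump
   * every block while walking. */
  if ( !_Heap_Walk( RTEMS_Malloc_Heap, 42, false ) ) {
    printk( "heap integrity check failed\n" );
  }
}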
Example #20
rtems_status_code rtems_task_mode(
    rtems_mode  mode_set,
    rtems_mode  mask,
    rtems_mode *previous_mode_set
)
{
    Thread_Control     *executing;
    RTEMS_API_Control  *api;
    ASR_Information    *asr;
    bool                is_asr_enabled = false;
    bool                needs_asr_dispatching = false;
    rtems_mode          old_mode;

    if ( !previous_mode_set )
        return RTEMS_INVALID_ADDRESS;

    executing     = _Thread_Executing;
    api = executing->API_Extensions[ THREAD_API_RTEMS ];
    asr = &api->Signal;

    old_mode  = (executing->is_preemptible) ? RTEMS_PREEMPT : RTEMS_NO_PREEMPT;

    if ( executing->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_NONE )
        old_mode |= RTEMS_NO_TIMESLICE;
    else
        old_mode |= RTEMS_TIMESLICE;

    old_mode |= (asr->is_enabled) ? RTEMS_ASR : RTEMS_NO_ASR;
    old_mode |= _ISR_Get_level();

    *previous_mode_set = old_mode;

    /*
     *  These are generic thread scheduling characteristics.
     */

    if ( mask & RTEMS_PREEMPT_MASK )
        executing->is_preemptible = _Modes_Is_preempt(mode_set) ? true : false;

    if ( mask & RTEMS_TIMESLICE_MASK ) {
        if ( _Modes_Is_timeslice(mode_set) ) {
            executing->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE;
            executing->cpu_time_budget  = _Thread_Ticks_per_timeslice;
        } else
            executing->budget_algorithm = THREAD_CPU_BUDGET_ALGORITHM_NONE;
    }

    /*
     *  Set the new interrupt level
     */

    if ( mask & RTEMS_INTERRUPT_MASK )
        _Modes_Set_interrupt_level( mode_set );

    /*
     *  This is specific to the RTEMS API
     */

    is_asr_enabled = false;
    needs_asr_dispatching = false;

    if ( mask & RTEMS_ASR_MASK ) {
        is_asr_enabled = _Modes_Is_asr_disabled( mode_set ) ? false : true;
        if ( is_asr_enabled != asr->is_enabled ) {
            asr->is_enabled = is_asr_enabled;
            _ASR_Swap_signals( asr );
            if ( _ASR_Are_signals_pending( asr ) ) {
                needs_asr_dispatching = true;
                executing->do_post_task_switch_extension = true;
            }
        }
    }

    if ( _System_state_Is_up( _System_state_Get() ) )
        if ( _Thread_Evaluate_mode() || needs_asr_dispatching )
            _Thread_Dispatch();

    return RTEMS_SUCCESSFUL;
}
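A usage sketch for the directive above: temporarily disable preemption around a short critical region and then restore the previous setting. The mode constants come straight from the mode handling shown in the example; the function name is illustrative.

static void do_short_critical_work(void)
{
  rtems_mode previous;

  (void) rtems_task_mode( RTEMS_NO_PREEMPT, RTEMS_PREEMPT_MASK, &previous );

  /* ... work that must not be preempted ... */

  (void) rtems_task_mode( previous, RTEMS_PREEMPT_MASK, &previous );
}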
Example #21
void bsp_interrupt_unlock(void)
{
  if (_System_state_Is_up(_System_state_Get())) {
    _RTEMS_Unlock_allocator();
  }
}
void *malloc(
  size_t  size
)
{
  void        *return_this;

  MSBUMP(malloc_calls, 1);

  /*
   *  If some free's have been deferred, then do them now.
   */
  malloc_deferred_frees_process();

  /*
   * Validate the parameters
   */
  if ( !size )
    return (void *) 0;

  /*
   *  Do not attempt to allocate memory if not in correct system state.
   */
  if ( _System_state_Is_up(_System_state_Get()) &&
       !malloc_is_system_state_OK() )
    return NULL;

  /*
   *  Walk the heap and verify its integrity
   */
  #if defined(RTEMS_HEAP_DEBUG)
    _Protected_heap_Walk( RTEMS_Malloc_Heap, 0, false );
  #endif

  #if defined(RTEMS_MALLOC_BOUNDARY_HELPERS)
    /*
     *  If the support for a boundary area at the end of the heap
     *  block allocated is turned on, then adjust the size.
     */
    if (rtems_malloc_boundary_helpers)
      size += (*rtems_malloc_boundary_helpers->overhead)();
  #endif

  /*
   * Try to give a segment in the current heap if there is not
   * enough space then try to grow the heap.
   * If this fails then return a NULL pointer.
   */

  return_this = _Protected_heap_Allocate( RTEMS_Malloc_Heap, size );

  if ( !return_this ) {
    if (rtems_malloc_sbrk_helpers)
      return_this = (*rtems_malloc_sbrk_helpers->extend)( size );
    if ( !return_this ) {
      errno = ENOMEM;
      return (void *) 0;
    }
  }

  /*
   *  If the user wants us to dirty the allocated memory, then do it.
   */
  if ( rtems_malloc_dirty_helper )
    (*rtems_malloc_dirty_helper)( return_this, size );

  /*
   *  If configured, update the statistics
   */
  if ( rtems_malloc_statistics_helpers )
    (*rtems_malloc_statistics_helpers->at_malloc)(return_this);

  #if defined(RTEMS_MALLOC_BOUNDARY_HELPERS)
    /*
     * If configured, set the boundary area
     */
    if (rtems_malloc_boundary_helpers)
      (*rtems_malloc_boundary_helpers->at_malloc)(return_this, size);
  #endif

  return return_this;
}