/*FUNCTION*----------------------------------------------------- * * Function Name : _mem_alloc_internal_align * Returned Value : pointer. NULL is returned upon error. * Comments : * Allocate a aligned block of memory for a task from the free list * * *END*---------------------------------------------------------*/ pointer _mem_alloc_internal_align ( /* [IN] the size of the memory block */ _mem_size requested_size, /* [IN] requested alignement (e.g. 8 for alignement to 8 bytes) */ _mem_size req_align, /* [IN] owner TD */ TD_STRUCT_PTR td_ptr, /* [IN] which pool to allocate from */ MEMPOOL_STRUCT_PTR mem_pool_ptr, /* [OUT] error code for operation */ _mqx_uint_ptr error_ptr ) { /* Body */ STOREBLOCK_STRUCT_PTR block_ptr, free_block_ptr; STOREBLOCK_STRUCT_PTR next_block_ptr = NULL; STOREBLOCK_STRUCT_PTR next_next_block_ptr; _mem_size block_size; _mem_size next_block_size; _mem_size shift; *error_ptr = MQX_OK; /* ** Adjust message size to allow for block management overhead ** and force size to be aligned. */ requested_size += (_mem_size)(FIELD_OFFSET(STOREBLOCK_STRUCT,USER_AREA)); #if MQX_CHECK_ERRORS if (requested_size < MQX_MIN_MEMORY_STORAGE_SIZE) { requested_size = MQX_MIN_MEMORY_STORAGE_SIZE; } /* Endif */ #endif _MEMORY_ALIGN_VAL_LARGER(requested_size); block_ptr = mem_pool_ptr->POOL_FREE_LIST_PTR; while ( TRUE ) { /* ** Save the current block pointer. ** We will be enabling access to higher priority tasks. ** A higher priority task may pre-empt the current task ** and may do a memory allocation. If this is true, ** the higher priority task will reset the POOL_ALLOC_CURRENT_BLOCK ** upon exit, and the current task will start the search over ** again. 
*/ mem_pool_ptr->POOL_ALLOC_CURRENT_BLOCK = block_ptr; /* allow pending interrupts */ _int_enable(); _int_disable(); /* Get block pointer in case reset by a higher priority task */ block_ptr = mem_pool_ptr->POOL_ALLOC_CURRENT_BLOCK; if (block_ptr == NULL) { /* Null pointer */ mem_pool_ptr->POOL_BLOCK_IN_ERROR = block_ptr; *error_ptr = MQX_OUT_OF_MEMORY; return( NULL ); /* request failed */ } /* Endif */ #if MQX_CHECK_VALIDITY if ( !_MEMORY_ALIGNED(block_ptr) || BLOCK_IS_USED(block_ptr) ) { mem_pool_ptr->POOL_BLOCK_IN_ERROR = block_ptr; *error_ptr = MQX_CORRUPT_STORAGE_POOL_FREE_LIST; return((pointer)NULL); } if ( ! VALID_CHECKSUM(block_ptr) ) { mem_pool_ptr->POOL_BLOCK_IN_ERROR = block_ptr; *error_ptr = MQX_INVALID_CHECKSUM; return((pointer)NULL); } #endif block_size = block_ptr->BLOCKSIZE; shift = (((_mem_size)&block_ptr->USER_AREA + req_align) & ~(req_align - 1)) - (_mem_size)&block_ptr->USER_AREA; if (shift < (2 * MQX_MIN_MEMORY_STORAGE_SIZE)) { shift = (((_mem_size)&block_ptr->USER_AREA + (3 * MQX_MIN_MEMORY_STORAGE_SIZE) + req_align) & ~(req_align - 1)) - (_mem_size)&block_ptr->USER_AREA; } if (block_size >= requested_size + shift) { /* request fits into this block */ // create new free block free_block_ptr = (STOREBLOCK_STRUCT_PTR)((char _PTR_)block_ptr); block_ptr = (STOREBLOCK_STRUCT_PTR)(((char _PTR_)block_ptr) + shift); block_ptr->PREVBLOCK = (STOREBLOCK_STRUCT_PTR)free_block_ptr; next_block_size = block_size - requested_size - shift; if (next_block_size >= (2 * MQX_MIN_MEMORY_STORAGE_SIZE) ) { /* ** The current block is big enough to split. ** into 2 blocks.... the part to be allocated is one block, ** and the rest remains as a free block on the free list. 
*/ next_block_ptr = (STOREBLOCK_STRUCT_PTR)((char _PTR_)block_ptr + requested_size); /* Initialize the new physical block values */ next_block_ptr->BLOCKSIZE = next_block_size; next_block_ptr->PREVBLOCK = (STOREBLOCK_STRUCT_PTR)block_ptr; MARK_BLOCK_AS_FREE(next_block_ptr); CALC_CHECKSUM(next_block_ptr); /* Link new block into the free list */ next_block_ptr->NEXTBLOCK = free_block_ptr->NEXTBLOCK; block_ptr->NEXTBLOCK = (pointer)next_block_ptr; next_block_ptr->USER_AREA = (pointer)free_block_ptr; if (next_block_ptr->NEXTBLOCK != NULL ) { ((STOREBLOCK_STRUCT_PTR)next_block_ptr->NEXTBLOCK)->USER_AREA = (pointer)next_block_ptr; } /* ** Modify the current block, to point to this newly created ** block which is now the next physical block. */ block_ptr->BLOCKSIZE = requested_size; /* ** Modify the block on the other side of the next block ** (the next next block) so that it's previous block pointer ** correctly point to the next block. */ next_next_block_ptr = (STOREBLOCK_STRUCT_PTR)NEXT_PHYS(next_block_ptr); PREV_PHYS(next_next_block_ptr) = next_block_ptr; CALC_CHECKSUM(next_next_block_ptr); } else { /* Take the entire block */ requested_size = next_block_size; } /* Endif */ /* modify the new physical block values */ free_block_ptr->BLOCKSIZE = shift; free_block_ptr->NEXTBLOCK = (pointer)next_block_ptr; //free_block_ptr->USER_AREA MARK_BLOCK_AS_FREE(free_block_ptr); CALC_CHECKSUM(free_block_ptr); /* Set the size of the block */ block_ptr->BLOCKSIZE = requested_size; block_ptr->PREVBLOCK = (STOREBLOCK_STRUCT_PTR)free_block_ptr; block_ptr->MEM_TYPE = 0; MARK_BLOCK_AS_USED(block_ptr, td_ptr->TASK_ID); CALC_CHECKSUM(block_ptr); /* Remember some statistics */ next_block_ptr = NEXT_PHYS(block_ptr); if ( (char _PTR_)(next_block_ptr) > (char _PTR_)mem_pool_ptr->POOL_HIGHEST_MEMORY_USED ) { mem_pool_ptr->POOL_HIGHEST_MEMORY_USED = ((char _PTR_)(next_block_ptr) - 1); } /* Endif */ /* Link the block onto the task descriptor. 
*/ block_ptr->NEXTBLOCK = td_ptr->MEMORY_RESOURCE_LIST; td_ptr->MEMORY_RESOURCE_LIST = (pointer)(&block_ptr->USER_AREA); block_ptr->MEM_POOL_PTR = (pointer)mem_pool_ptr; #if MQX_CHECK_VALIDITY /* Check that user area is aligned on a cache line boundary */ if ( !_MEMORY_ALIGNED(&block_ptr->USER_AREA) ) { *error_ptr = MQX_INVALID_CONFIGURATION; return((pointer)NULL); } /* Endif */ #endif return( (pointer)(&block_ptr->USER_AREA ) ); } else { block_ptr = (STOREBLOCK_STRUCT_PTR)NEXT_FREE(block_ptr); } } #ifdef lint return( NULL ); /* to satisfy lint */ #endif }
/*FUNCTION*-----------------------------------------------------
*
* Function Name   : _mem_extend_pool_internal
* Returned Value  : _mqx_uint - MQX_OK, or an MQX error code.
* Comments        :
*   Grow an existing memory pool by adding a new region of memory
*   to its free list.
*
*END*---------------------------------------------------------*/
_mqx_uint _mem_extend_pool_internal
   (
      /* [IN] the address of the start of the memory pool addition */
      pointer            start_of_pool,

      /* [IN] the size of the memory pool addition */
      _mem_size          size,

      /* [IN] the memory pool to extend */
      MEMPOOL_STRUCT_PTR mem_pool_ptr
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR       kernel_data;
   MEMPOOL_EXTENSION_STRUCT_PTR ext_ptr;
   STOREBLOCK_STRUCT_PTR        head_block_ptr;  /* leading in-use sentinel  */
   STOREBLOCK_STRUCT_PTR        free_block_ptr;  /* the usable free block    */
   STOREBLOCK_STRUCT_PTR        tail_block_ptr;  /* trailing in-use sentinel */
   STOREBLOCK_STRUCT_PTR        old_list_head;
   uchar_ptr                    aligned_start;
   uchar_ptr                    aligned_end;
   _mem_size                    sentinel_size;
   _mem_size                    usable_size;
   _mem_size                    middle_size;

   _GET_KERNEL_DATA(kernel_data);

#if MQX_CHECK_ERRORS
   /* The addition must be big enough to hold at least 3 memory blocks */
   if (size < (_mem_size)(3 * MQX_MIN_MEMORY_STORAGE_SIZE)) {
      return (MQX_INVALID_SIZE);
   } /* Endif */
#endif
#if MQX_CHECK_VALIDITY
   if (mem_pool_ptr->VALID != MEMPOOL_VALID) {
      return (MQX_INVALID_COMPONENT_HANDLE);
   } /* Endif */
#endif

   /* Reserve an aligned extension descriptor at the front of the region */
   ext_ptr = (MEMPOOL_EXTENSION_STRUCT_PTR)_ALIGN_ADDR_TO_HIGHER_MEM(start_of_pool);
   aligned_start = (uchar_ptr)ext_ptr + sizeof(MEMPOOL_EXTENSION_STRUCT);
   aligned_start = (uchar_ptr)_ALIGN_ADDR_TO_HIGHER_MEM(aligned_start);
   aligned_end   = (uchar_ptr)start_of_pool + size;
   aligned_end   = (uchar_ptr)_ALIGN_ADDR_TO_LOWER_MEM(aligned_end);
   usable_size   = (_mem_size)(aligned_end - aligned_start);

   ext_ptr->START      = start_of_pool;
   ext_ptr->SIZE       = size;
   ext_ptr->REAL_START = aligned_start;

   /* Lay the region out as: sentinel | free block | sentinel */
   sentinel_size  = MQX_MIN_MEMORY_STORAGE_SIZE;
   head_block_ptr = (STOREBLOCK_STRUCT_PTR)aligned_start;
   free_block_ptr = (STOREBLOCK_STRUCT_PTR)((uchar_ptr)head_block_ptr + sentinel_size);
   middle_size    = usable_size - (_mem_size)(2 * MQX_MIN_MEMORY_STORAGE_SIZE);
   tail_block_ptr = (STOREBLOCK_STRUCT_PTR)((uchar_ptr)free_block_ptr + middle_size);

   /*
   ** The leading block is a minimal, permanently in-use block; it keeps
   ** _mem_free from coalescing past the start of this extension.
   */
   head_block_ptr->BLOCKSIZE = sentinel_size;
   head_block_ptr->MEM_TYPE  = 0;
   head_block_ptr->USER_AREA = 0;
   head_block_ptr->PREVBLOCK = (struct storeblock_struct _PTR_)NULL;
   head_block_ptr->NEXTBLOCK = free_block_ptr;
   MARK_BLOCK_AS_USED(head_block_ptr, SYSTEM_TASK_ID(kernel_data));
   CALC_CHECKSUM(head_block_ptr);

   /* The middle block is the actual free storage being added to the pool */
   free_block_ptr->BLOCKSIZE = middle_size;
   free_block_ptr->MEM_TYPE  = 0;
   free_block_ptr->USER_AREA = 0;
   free_block_ptr->PREVBLOCK = head_block_ptr;
   free_block_ptr->NEXTBLOCK = tail_block_ptr;
   MARK_BLOCK_AS_FREE(free_block_ptr);
   CALC_CHECKSUM(free_block_ptr);

   /*
   ** The trailing block is likewise a minimal in-use block, so that
   ** the _mem_free algorithm never coalesces beyond the extension's end.
   */
   tail_block_ptr->BLOCKSIZE = sentinel_size;
   tail_block_ptr->MEM_TYPE  = 0;
   tail_block_ptr->USER_AREA = 0;
   tail_block_ptr->PREVBLOCK = free_block_ptr;
   tail_block_ptr->NEXTBLOCK = NULL;
   MARK_BLOCK_AS_USED(tail_block_ptr, SYSTEM_TASK_ID(kernel_data));
   CALC_CHECKSUM(tail_block_ptr);

   _int_disable();

   /* Push the new free block onto the head of the pool's free list */
   old_list_head = mem_pool_ptr->POOL_FREE_LIST_PTR;
   mem_pool_ptr->POOL_FREE_LIST_PTR = free_block_ptr;
   if (old_list_head != NULL) {
      PREV_FREE(old_list_head) = free_block_ptr;
   } /* Endif */
   PREV_FREE(free_block_ptr) = NULL;
   NEXT_FREE(free_block_ptr) = old_list_head;

   /* Reset the free list queue walker for some other task */
   mem_pool_ptr->POOL_FREE_CURRENT_BLOCK = mem_pool_ptr->POOL_FREE_LIST_PTR;

   /* Record the extension on the pool's extension list */
   _QUEUE_ENQUEUE(&mem_pool_ptr->EXT_LIST, &ext_ptr->LINK);

   _int_enable();
   return (MQX_OK);
} /* Endbody */
/*FUNCTION*-----------------------------------------------------
*
* Function Name   : _mem_alloc_internal
* Returned Value  : pointer. NULL is returned upon error.
* Comments        :
*   Allocate a block of memory for a task from the free list using a
*   first-fit search. Interrupts are assumed disabled on entry; the
*   search briefly re-enables them each iteration so higher priority
*   tasks can run (and possibly restart this task's search).
*
*END*---------------------------------------------------------*/
pointer _mem_alloc_internal
   (
      /* [IN] the size of the memory block */
      _mem_size requested_size,

      /* [IN] owner TD */
      TD_STRUCT_PTR td_ptr,

      /* [IN] which pool to allocate from */
      MEMPOOL_STRUCT_PTR mem_pool_ptr,

      /* [OUT] error code for operation */
      _mqx_uint_ptr error_ptr
   )
{ /* Body */
   STOREBLOCK_STRUCT_PTR block_ptr;
   STOREBLOCK_STRUCT_PTR next_block_ptr;
   STOREBLOCK_STRUCT_PTR next_next_block_ptr;
   _mem_size block_size;
   _mem_size next_block_size;

   *error_ptr = MQX_OK;

   /*
   ** Adjust message size to allow for block management overhead
   ** and force size to be aligned.
   */
   requested_size += (_mem_size)(FIELD_OFFSET(STOREBLOCK_STRUCT,USER_AREA));
#if MQX_CHECK_ERRORS
   if (requested_size < MQX_MIN_MEMORY_STORAGE_SIZE) {
      requested_size = MQX_MIN_MEMORY_STORAGE_SIZE;
   } /* Endif */
#endif
   _MEMORY_ALIGN_VAL_LARGER(requested_size);

   /* First-fit search starts at the head of the pool's free list */
   block_ptr = mem_pool_ptr->POOL_FREE_LIST_PTR;
   while ( TRUE ) {
      /*
      ** Save the current block pointer.
      ** We will be enabling access to higher priority tasks.
      ** A higher priority task may pre-empt the current task
      ** and may do a memory allocation.  If this is true,
      ** the higher priority task will reset the POOL_ALLOC_CURRENT_BLOCK
      ** upon exit, and the current task will start the search over
      ** again.
      */
      mem_pool_ptr->POOL_ALLOC_CURRENT_BLOCK = block_ptr;

      /* allow pending interrupts */
      _int_enable();
      _int_disable();

      /* Get block pointer in case reset by a higher priority task */
      block_ptr = mem_pool_ptr->POOL_ALLOC_CURRENT_BLOCK;

      if (block_ptr == NULL) {
         /* Free list exhausted - no block large enough was found */
         mem_pool_ptr->POOL_BLOCK_IN_ERROR = block_ptr;
         *error_ptr = MQX_OUT_OF_MEMORY;
         return( NULL ); /* request failed */
      } /* Endif */

#if MQX_CHECK_VALIDITY
      /* A block on the free list must be aligned and marked free */
      if ( !_MEMORY_ALIGNED(block_ptr) || BLOCK_IS_USED(block_ptr) ) {
         mem_pool_ptr->POOL_BLOCK_IN_ERROR = block_ptr;
         *error_ptr = MQX_CORRUPT_STORAGE_POOL_FREE_LIST;
         return((pointer)NULL);
      } /* Endif */
#endif
#if MQX_CHECK_VALIDITY
      if ( ! VALID_CHECKSUM(block_ptr) ) {
         mem_pool_ptr->POOL_BLOCK_IN_ERROR = block_ptr;
         *error_ptr = MQX_INVALID_CHECKSUM;
         return((pointer)NULL);
      } /* Endif */
#endif

      block_size = block_ptr->BLOCKSIZE;
      if (block_size >= requested_size) {
         /* request fits into this block */

         next_block_size = block_size - requested_size;
         if ( next_block_size >= (2 * MQX_MIN_MEMORY_STORAGE_SIZE) ) {
            /*
            ** The current block is big enough to split.
            ** into 2 blocks.... the part to be allocated is one block,
            ** and the rest remains as a free block on the free list.
            */
            next_block_ptr = (STOREBLOCK_STRUCT_PTR)
               ((char _PTR_)block_ptr + requested_size);

            /* Initialize the new physical block values */
            next_block_ptr->BLOCKSIZE = next_block_size;
            next_block_ptr->PREVBLOCK = (STOREBLOCK_STRUCT_PTR)block_ptr;
            MARK_BLOCK_AS_FREE(next_block_ptr);
            CALC_CHECKSUM(next_block_ptr);

            /* Link new block into the free list.
            ** (For free blocks NEXTBLOCK/USER_AREA double as the
            ** NEXT_FREE/PREV_FREE list links.)
            */
            next_block_ptr->NEXTBLOCK = block_ptr->NEXTBLOCK;
            block_ptr->NEXTBLOCK = (pointer)next_block_ptr;
            next_block_ptr->USER_AREA = (pointer)block_ptr;
            if ( next_block_ptr->NEXTBLOCK != NULL ) {
               ((STOREBLOCK_STRUCT_PTR)next_block_ptr->NEXTBLOCK)->
                  USER_AREA = (pointer)next_block_ptr;
            } /* Endif */

            /*
            ** Modify the block on the other side of the next block
            ** (the next next block) so that it's previous block pointer
            ** correctly point to the next block.
            */
            next_next_block_ptr = (STOREBLOCK_STRUCT_PTR)
               NEXT_PHYS(next_block_ptr);
            PREV_PHYS(next_next_block_ptr) = next_block_ptr;
            CALC_CHECKSUM(next_next_block_ptr);
         } else {
            /* Take the entire block (remainder too small to be free block) */
            requested_size = block_size;
         } /* Endif */

         /* Set the size of the block */
         block_ptr->BLOCKSIZE = requested_size;

         /* Indicate the block is in use */
         MARK_BLOCK_AS_USED(block_ptr, td_ptr->TASK_ID);
         block_ptr->MEM_TYPE = 0;
         CALC_CHECKSUM(block_ptr);

         /* Unlink the block from the free list */
         if ( block_ptr == mem_pool_ptr->POOL_FREE_LIST_PTR ) {
            /* At the head of the free list */
            mem_pool_ptr->POOL_FREE_LIST_PTR = (STOREBLOCK_STRUCT_PTR)
               NEXT_FREE(block_ptr);
            if (mem_pool_ptr->POOL_FREE_LIST_PTR != NULL ) {
               PREV_FREE(mem_pool_ptr->POOL_FREE_LIST_PTR) = 0;
            } /* Endif */
         } else {
            /*
            ** NOTE: PREV_FREE guaranteed to be non-zero
            ** Have to make the PREV_FREE of this block
            ** point to the NEXT_FREE of this block
            */
            NEXT_FREE(PREV_FREE(block_ptr)) = NEXT_FREE(block_ptr);
            if ( NEXT_FREE(block_ptr) != NULL ) {
               /*
               ** Now have to make the NEXT_FREE of this block
               ** point to the PREV_FREE of this block
               */
               PREV_FREE(NEXT_FREE(block_ptr)) = PREV_FREE(block_ptr);
            } /* Endif */
         } /* Endif */

#if MQX_MEMORY_FREE_LIST_SORTED == 1
         if ( block_ptr == mem_pool_ptr->POOL_FREE_CURRENT_BLOCK ) {
            /* Reset the freelist insertion sort by _mem_free */
            mem_pool_ptr->POOL_FREE_CURRENT_BLOCK =
               mem_pool_ptr->POOL_FREE_LIST_PTR;
         } /* Endif */
#endif

         /* Reset the __mem_test freelist pointer */
         mem_pool_ptr->POOL_FREE_CHECK_BLOCK = mem_pool_ptr->POOL_FREE_LIST_PTR;

         /*
         ** Set the curent pool block to the start of the free list, so
         ** that if this task pre-empted another that was performing a
         ** _mem_alloc, the other task will restart it's search for a block
         */
         mem_pool_ptr->POOL_ALLOC_CURRENT_BLOCK =
            mem_pool_ptr->POOL_FREE_LIST_PTR;

         /* Remember some statistics (high-water mark of pool usage) */
         next_block_ptr = NEXT_PHYS(block_ptr);
         if ( (char _PTR_)(next_block_ptr) > (char _PTR_)
            mem_pool_ptr->POOL_HIGHEST_MEMORY_USED )
         {
            mem_pool_ptr->POOL_HIGHEST_MEMORY_USED =
               ((char _PTR_)(next_block_ptr) - 1);
         } /* Endif */

         /* Link the block onto the task descriptor.
         ** (NEXTBLOCK is reused as the resource-list link for used blocks.)
         */
         block_ptr->NEXTBLOCK = td_ptr->MEMORY_RESOURCE_LIST;
         td_ptr->MEMORY_RESOURCE_LIST = (pointer)(&block_ptr->USER_AREA);

         block_ptr->MEM_POOL_PTR = (pointer)mem_pool_ptr;

#if MQX_CHECK_VALIDITY
         /* Check that user area is aligned on a cache line boundary */
         if ( !_MEMORY_ALIGNED(&block_ptr->USER_AREA) ) {
            *error_ptr = MQX_INVALID_CONFIGURATION;
            return((pointer)NULL);
         } /* Endif */
#endif
         return( (pointer)(&block_ptr->USER_AREA ) );
      } else {
         /* Does not fit: advance to the next free block */
         block_ptr = (STOREBLOCK_STRUCT_PTR)NEXT_FREE(block_ptr);
      } /* Endif */
   } /* Endwhile */
#ifdef lint
   return( NULL ); /* to satisfy lint */
#endif
} /* Endbody */
/*FUNCTION*-----------------------------------------------------
*
* Function Name   : _mem_create_pool_internal
* Returned Value  : _mqx_uint - MQX_OK, or an MQX error code.
* Comments        :
*   Initialize a memory pool over the region [start, end) and add
*   it to the kernel's list of memory pools.
*
*END*---------------------------------------------------------*/
_mqx_uint _mem_create_pool_internal
   (
      /* [IN] the start of the memory pool */
      pointer            start,

      /* [IN] the end of the memory pool */
      pointer            end,

      /* [IN] where to store the memory pool context info. */
      MEMPOOL_STRUCT_PTR mem_pool_ptr
   )
{ /* Body */
   KERNEL_DATA_STRUCT_PTR kernel_data;
   STOREBLOCK_STRUCT_PTR  first_block_ptr;  /* the initial big free block */
   STOREBLOCK_STRUCT_PTR  guard_block_ptr;  /* in-use guard at pool end   */

   _GET_KERNEL_DATA(kernel_data);

#if MQX_CHECK_VALIDITY
   _INT_DISABLE();
   if (kernel_data->MEM_COMP.VALID != MEMPOOL_VALID) {
      /* The RTOS memory system has been corrupted */
      _int_enable();
      return (MQX_CORRUPT_MEMORY_SYSTEM);
   } /* Endif */
   _INT_ENABLE();
#endif

   /* Align the pool boundaries inward */
   mem_pool_ptr->POOL_PTR   = (STOREBLOCK_STRUCT_PTR)_ALIGN_ADDR_TO_HIGHER_MEM(start);
   mem_pool_ptr->POOL_LIMIT = (STOREBLOCK_STRUCT_PTR)_ALIGN_ADDR_TO_LOWER_MEM(end);

#if MQX_CHECK_ERRORS
   if ((uchar_ptr)mem_pool_ptr->POOL_LIMIT <=
       ((uchar_ptr)mem_pool_ptr->POOL_PTR + MQX_MIN_MEMORY_POOL_SIZE))
   {
      return MQX_MEM_POOL_TOO_SMALL;
   } /* Endif */
#endif

   first_block_ptr = (STOREBLOCK_STRUCT_PTR)mem_pool_ptr->POOL_PTR;
   mem_pool_ptr->POOL_HIGHEST_MEMORY_USED = (pointer)first_block_ptr;
   mem_pool_ptr->POOL_CHECK_POOL_PTR      = (char _PTR_)mem_pool_ptr->POOL_PTR;
   mem_pool_ptr->POOL_BLOCK_IN_ERROR      = NULL;
   mem_pool_ptr->POOL_SIZE = (_mem_size)((uchar_ptr)mem_pool_ptr->POOL_LIMIT -
                                         (uchar_ptr)mem_pool_ptr->POOL_PTR);

   /* The whole pool, minus room for the trailing guard, starts out free */
   first_block_ptr->BLOCKSIZE = mem_pool_ptr->POOL_SIZE - MQX_MIN_MEMORY_STORAGE_SIZE;
   first_block_ptr->USER_AREA = NULL;
   first_block_ptr->PREVBLOCK = NULL;
   first_block_ptr->NEXTBLOCK = NULL;
   MARK_BLOCK_AS_FREE(first_block_ptr);
   CALC_CHECKSUM(first_block_ptr);

   mem_pool_ptr->POOL_FREE_LIST_PTR = first_block_ptr;

   /*
   ** A minimal in-use guard block terminates the pool, so that the
   ** _mem_free coalescing algorithm never runs off the end.
   */
   guard_block_ptr = (STOREBLOCK_STRUCT_PTR)
      ((uchar_ptr)first_block_ptr + first_block_ptr->BLOCKSIZE);
   guard_block_ptr->BLOCKSIZE = (_mem_size)(MQX_MIN_MEMORY_STORAGE_SIZE);
   guard_block_ptr->USER_AREA = 0;
   guard_block_ptr->PREVBLOCK = (struct storeblock_struct _PTR_)first_block_ptr;
   guard_block_ptr->NEXTBLOCK = NULL;
   MARK_BLOCK_AS_USED(guard_block_ptr, SYSTEM_TASK_ID(kernel_data));
   CALC_CHECKSUM(guard_block_ptr);
   mem_pool_ptr->POOL_END_PTR = guard_block_ptr;

   /* No extensions yet */
   _QUEUE_INIT(&mem_pool_ptr->EXT_LIST, 0);

   mem_pool_ptr->VALID = MEMPOOL_VALID;

   /* Protect the list of pools while adding new pool */
   _lwsem_wait((LWSEM_STRUCT_PTR)&kernel_data->MEM_COMP.SEM);
   _QUEUE_ENQUEUE(&kernel_data->MEM_COMP.POOLS, &mem_pool_ptr->LINK);
   _lwsem_post((LWSEM_STRUCT_PTR)&kernel_data->MEM_COMP.SEM);

   return MQX_OK;
} /* Endbody */