/**
 * Check the active task's stack for overflow.
 *
 * Walks the RHINO_CONFIG_STK_CHK_WORDS guard words at the stack base
 * and verifies each still holds the overflow magic value, then checks
 * that the task's saved stack pointer has not dropped below the guard
 * region.  Any violation is reported via k_err_proc(RHINO_TASK_STACK_OVF).
 */
void krhino_stack_ovf_check(void)
{
    cpu_stack_t *stack_start;
    uint32_t     i;  /* wide counter: a uint8_t would wrap and loop forever
                        if RHINO_CONFIG_STK_CHK_WORDS were >= 256 */

    stack_start = g_active_task[cpu_cur_get()]->task_stack_base;

    /* every guard word must still carry the magic pattern */
    for (i = 0; i < RHINO_CONFIG_STK_CHK_WORDS; i++) {
        if (*stack_start++ != RHINO_TASK_STACK_OVF_MAGIC) {
            k_err_proc(RHINO_TASK_STACK_OVF);
        }
    }

    /* the saved stack pointer must stay above the guard region
       (stack_start now points one past the last guard word) */
    if ((cpu_stack_t *)(g_active_task[cpu_cur_get()]->task_stack) < stack_start) {
        k_err_proc(RHINO_TASK_STACK_OVF);
    }
}
/*
 * Return a fixed-size block @ptr to the small-block (mblk) pool owned
 * by @mmhead and update the allocator statistics.  A failure reported
 * by the pool layer is treated as fatal.
 */
static void k_mm_smallblk_free(k_mm_head *mmhead, void *ptr)
{
    kstat_t ret;

    if (mmhead == NULL || ptr == NULL) {
        return;
    }

    ret = krhino_mblk_free((mblk_pool_t *)mmhead->fix_pool, ptr);
    if (ret != RHINO_SUCCESS) {
        k_err_proc(RHINO_SYS_FATAL_ERR);
    }

    stats_removesize(mmhead, RHINO_CONFIG_MM_BLK_SIZE);
}
/**
 * Allocate @size bytes from the kernel memory manager (g_kmm_head).
 *
 * Returns a pointer to the block, or NULL when @size is 0 or the
 * allocation fails.  In debug builds a failed allocation dumps heap
 * diagnostics once and then raises RHINO_NO_MEM via k_err_proc().
 */
void *krhino_mm_alloc(size_t size)
{
    void *tmp;

#if (RHINO_CONFIG_MM_DEBUG > 0u && RHINO_CONFIG_GCC_RETADDR > 0u)
    /* The MSB of @size is used as an in-band flag; it is stripped before
       the real allocation.  Owner tracking below runs only when the bit
       is clear. */
    uint32_t app_malloc = size & AOS_UNSIGNED_INT_MSB;
    size = size & (~AOS_UNSIGNED_INT_MSB);
#endif

    if (size == 0) {
        printf("WARNING, malloc size = 0\r\n");
        return NULL;
    }

    tmp = k_mm_alloc(g_kmm_head, size);
    if (tmp == NULL) {
#if (RHINO_CONFIG_MM_DEBUG > 0)
        /* dump heap diagnostics only on the first failure */
        static int32_t dumped;
        printf("WARNING, malloc failed!!!!\r\n");
        if (dumped) {
            return tmp;
        }
        dumped = 1;
        dumpsys_mm_info_func(0);
#if (RHINO_CONFIG_MM_LEAKCHECK > 0)
        dump_mmleak();
#endif
        k_err_proc(RHINO_NO_MEM);
#endif
    }

#if (RHINO_CONFIG_USER_HOOK > 0)
    krhino_mm_alloc_hook(tmp, size);
#endif

#if (RHINO_CONFIG_MM_DEBUG > 0u && RHINO_CONFIG_GCC_RETADDR > 0u)
    if (app_malloc == 0) {
        /* record the caller's return address as the block owner */
#if defined (__CC_ARM)
        krhino_owner_attach(g_kmm_head, tmp, __return_address());
#elif defined (__GNUC__)
        krhino_owner_attach(g_kmm_head, tmp, (size_t)__builtin_return_address(0));
#endif /* __CC_ARM */
    }
#endif

    return tmp;
}
/**
 * Exit interrupt context: decrement the per-CPU interrupt nesting
 * counter and, when the outermost ISR finishes, select the preferred
 * ready task and context-switch to it if it differs from the task that
 * was interrupted.
 *
 * An exit without a matching krhino_intrpt_enter() raises
 * RHINO_INV_INTRPT_NESTED_LEVEL via k_err_proc().
 */
void krhino_intrpt_exit(void)
{
    CPSR_ALLOC();
    uint8_t cur_cpu_num;

#if (RHINO_CONFIG_INTRPT_STACK_OVF_CHECK > 0)
    krhino_intrpt_stack_ovf_check();
#endif

    RHINO_CPU_INTRPT_DISABLE();

    cur_cpu_num = cpu_cur_get();

    /* unbalanced enter/exit pair is a fatal bookkeeping error */
    if (g_intrpt_nested_level[cur_cpu_num] == 0u) {
        RHINO_CPU_INTRPT_ENABLE();
        k_err_proc(RHINO_INV_INTRPT_NESTED_LEVEL);
    }

    g_intrpt_nested_level[cur_cpu_num]--;

    /* still inside a nested interrupt: no reschedule yet */
    if (g_intrpt_nested_level[cur_cpu_num] > 0u) {
        RHINO_CPU_INTRPT_ENABLE();
        return;
    }

    /* scheduler is locked: the switch is deferred */
    if (g_sched_lock[cur_cpu_num] > 0u) {
        RHINO_CPU_INTRPT_ENABLE();
        return;
    }

    preferred_cpu_ready_task_get(&g_ready_queue, cur_cpu_num);

    /* the highest-priority ready task is already running: nothing to do */
    if (g_preferred_ready_task[cur_cpu_num] == g_active_task[cur_cpu_num]) {
        RHINO_CPU_INTRPT_ENABLE();
        return;
    }

    TRACE_INTRPT_TASK_SWITCH(g_active_task[cur_cpu_num], g_preferred_ready_task[cur_cpu_num]);

#if (RHINO_CONFIG_CPU_NUM > 1)
    g_active_task[cur_cpu_num]->cur_exc = 0;
#endif

    /* perform the interrupt-level context switch */
    cpu_intrpt_switch();

    RHINO_CPU_INTRPT_ENABLE();
}
kstat_t krhino_intrpt_enter(void) { CPSR_ALLOC(); #if (RHINO_CONFIG_INTRPT_STACK_OVF_CHECK > 0) krhino_intrpt_stack_ovf_check(); #endif RHINO_CPU_INTRPT_DISABLE(); if (g_intrpt_nested_level[cpu_cur_get()] >= RHINO_CONFIG_INTRPT_MAX_NESTED_LEVEL) { k_err_proc(RHINO_INTRPT_NESTED_LEVEL_OVERFLOW); RHINO_CPU_INTRPT_ENABLE(); return RHINO_INTRPT_NESTED_LEVEL_OVERFLOW; } g_intrpt_nested_level[cpu_cur_get()]++; RHINO_CPU_INTRPT_ENABLE(); return RHINO_SUCCESS; }
/**
 * Resize the block @oldmem to @newsize bytes via the kernel allocator.
 *
 * Returns the (possibly moved) block, or NULL on failure.  A failure
 * with a non-zero @newsize dumps heap diagnostics once in debug builds
 * and raises RHINO_SYS_FATAL_ERR via k_err_proc().
 */
void *krhino_mm_realloc(void *oldmem, size_t newsize)
{
    void *tmp;

#if (RHINO_CONFIG_MM_DEBUG > 0u && RHINO_CONFIG_GCC_RETADDR > 0u)
    /* The MSB of @newsize is used as an in-band flag; it is stripped
       before the real reallocation.  Owner tracking below runs only
       when the bit is clear. */
    uint32_t app_malloc = newsize & AOS_UNSIGNED_INT_MSB;
    newsize = newsize & (~AOS_UNSIGNED_INT_MSB);
#endif

    tmp = k_mm_realloc(g_kmm_head, oldmem, newsize);

#if (RHINO_CONFIG_MM_DEBUG > 0u && RHINO_CONFIG_GCC_RETADDR > 0u)
    if (app_malloc == 0) {
        /* record the caller's return address as the block owner */
#if defined (__CC_ARM)
        krhino_owner_attach(g_kmm_head, tmp, __return_address());
#elif defined (__GNUC__)
        krhino_owner_attach(g_kmm_head, tmp, (size_t)__builtin_return_address(0));
#endif /* __CC_ARM */
    }
#endif

    if (tmp == NULL && newsize != 0) {
#if (RHINO_CONFIG_MM_DEBUG > 0)
        /* dump heap diagnostics only on the first failure */
        static int32_t reallocdumped;
        printf("WARNING, realloc failed!!!!\r\n");
        if (reallocdumped) {
            return tmp;
        }
        reallocdumped = 1;
        dumpsys_mm_info_func(0);
#if (RHINO_CONFIG_MM_LEAKCHECK > 0)
        dump_mmleak();
#endif
        k_err_proc(RHINO_SYS_FATAL_ERR);
#endif
    }

    return tmp;
}
/**
 * Free a block previously returned by the k_mm allocator.
 *
 * Fixed-size blocks go back to the small-block pool; variable-size
 * blocks are marked free, coalesced with free neighbours (next block
 * first, then previous), and inserted into the free list.  Debug
 * builds use the per-block dye field to detect double free and heap
 * corruption, raising RHINO_SYS_FATAL_ERR on any violation.  The whole
 * operation runs inside the allocator's critical section.
 */
void k_mm_free(k_mm_head *mmhead, void *ptr)
{
    k_mm_list_t *free_b, *next_b, *prev_b;

    if (!ptr || !mmhead) {
        return;
    }

    MM_CRITICAL_ENTER(mmhead);

#if (RHINO_CONFIG_MM_BLK > 0)
    /* fix blk, free to mm_pool */
    if (krhino_mblk_check(mmhead->fix_pool, ptr)) {
        /*it's fixed size memory block*/
        k_mm_smallblk_free(mmhead, ptr);
        MM_CRITICAL_EXIT(mmhead);
        return;
    }
#endif

    free_b = MM_GET_THIS_BLK(ptr);

#if (RHINO_CONFIG_MM_DEBUG > 0u)
    /* FREE dye on entry means the block was already freed (double free);
       any value other than the allocated ("corrupt") dye means the block
       header was overwritten */
    if (free_b->dye == RHINO_MM_FREE_DYE) {
        MM_CRITICAL_EXIT(mmhead);
        printf("WARNING!! memory maybe double free!!\r\n");
        k_err_proc(RHINO_SYS_FATAL_ERR);
    }
    if (free_b->dye != RHINO_MM_CORRUPT_DYE) {
        MM_CRITICAL_EXIT(mmhead);
        printf("WARNING,memory maybe corrupt!!\r\n");
        k_err_proc(RHINO_SYS_FATAL_ERR);
    }
    free_b->dye   = RHINO_MM_FREE_DYE;
    free_b->owner = 0;
#endif

    free_b->buf_size |= RHINO_MM_FREE;

    stats_removesize(mmhead, MM_GET_BLK_SIZE(free_b));

    /* if the blk after this freed one is freed too, merge them */
    next_b = MM_GET_NEXT_BLK(free_b);
    if (next_b->buf_size & RHINO_MM_FREE) {
        k_mm_freelist_delete(mmhead, next_b);
        free_b->buf_size += MM_GET_BLK_SIZE(next_b);
    }

    /* if the blk before this freed one is freed too, merge them */
    if (free_b->buf_size & RHINO_MM_PREVFREE) {
        prev_b = free_b->prev;
#if (RHINO_CONFIG_MM_DEBUG > 0u)
        /* a block flagged PREVFREE must have a FREE-dyed predecessor */
        if (prev_b->dye != RHINO_MM_FREE_DYE) {
            MM_CRITICAL_EXIT(mmhead);
            printf("WARNING,memory overwritten!!\r\n");
            k_err_proc(RHINO_SYS_FATAL_ERR);
        }
#endif
        k_mm_freelist_delete(mmhead, prev_b);
        prev_b->buf_size += MM_GET_BLK_SIZE(free_b);
        free_b = prev_b;
    }

    /* after merge, free to list */
    k_mm_freelist_insert(mmhead, free_b);

    /* fix up the successor: it now follows a free block */
    next_b = MM_GET_NEXT_BLK(free_b);
#if (RHINO_CONFIG_MM_DEBUG > 0u)
    /* successor must be either allocated or free dye; anything else is
       an overwrite of its header */
    if (next_b->dye != RHINO_MM_FREE_DYE && next_b->dye != RHINO_MM_CORRUPT_DYE) {
        MM_CRITICAL_EXIT(mmhead);
        printf("WARNING,memory overwritten!!\r\n");
        k_err_proc(RHINO_SYS_FATAL_ERR);
    }
#endif
    next_b->prev      = free_b;
    next_b->buf_size |= RHINO_MM_PREVFREE;

    MM_CRITICAL_EXIT(mmhead);
}
/**
 * Verify the interrupt stack guard word; raise a fatal error when the
 * magic value at the interrupt stack top has been overwritten.
 */
void krhino_intrpt_stack_ovf_check(void)
{
    if (RHINO_INTRPT_STACK_OVF_MAGIC != *g_intrpt_stack_top) {
        k_err_proc(RHINO_INTRPT_STACK_OVF);
    }
}