void sh_css_acc_unload(const struct sh_css_acc_fw *firmware) { struct sh_css_acc_fw_hdr *header = (struct sh_css_acc_fw_hdr *)&firmware->header; struct sh_css_acc_sp *sp = &header->sp; if (sp->code) mmgr_free(HOST_ADDRESS(sp->code)); if (header->isp_code) mmgr_free(HOST_ADDRESS(header->isp_code)); sp->code = NULL; header->isp_code = NULL; header->loaded = false; }
/* Drop one reference on ptr for client id.  When the count reaches
 * zero the buffer is freed and the entry is recycled.  Returns true on
 * a successful decrement, false when ptr is NULL or is not tracked. */
bool sh_css_refcount_release(int32_t id, hrt_vaddress ptr)
{
	struct sh_css_refcount_entry *ent;

	sh_css_dtrace(SH_DBG_TRACE,
		      "sh_css_refcount_release(%x) 0x%x\n", id, ptr);

	if (ptr == mmgr_NULL)
		return false;

	ent = find_entry(ptr, false);
	if (ent != NULL) {
		/* the entry must belong to the releasing client */
		assert_exit_code(ent->id == id, false);
		if (ent->count > 0) {
			ent->count -= 1;
			if (ent->count == 0) {
				/* last reference gone: free the buffer */
				mmgr_free(ptr);
				ent->data = mmgr_NULL;
				ent->id = 0;
			}
			return true;
		}
	}

	/* SHOULD NOT HAPPEN: ptr not managed by refcount,
	 * or not valid anymore */
	assert_exit_code(false, false);
	return false;
}
void ia_css_refcount_uninit(void) { struct ia_css_refcount_entry *entry; uint32_t i; ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_refcount_uninit() entry\n"); for (i = 0; i < myrefcount.size; i++) { /* driver verifier tool has issues with &arr[i] and prefers arr + i; as these are actually equivalent the line below uses + i */ entry = myrefcount.items + i; if (entry->data != mmgr_NULL) { /* ia_css_debug_dtrace(IA_CSS_DBG_TRACE, "ia_css_refcount_uninit: freeing (%x)\n", entry->data);*/ mmgr_free(entry->data); entry->data = mmgr_NULL; entry->count = 0; entry->id = 0; } } sh_css_free(myrefcount.items); myrefcount.items = NULL; myrefcount.size = 0; ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_refcount_uninit() leave\n"); }
/* Decrement the reference count held by client id on ptr; frees the
 * buffer when the count drops to zero.  Returns true on success, false
 * when ptr is NULL or not managed by the refcount administration. */
bool ia_css_refcount_decrement(int32_t id, hrt_vaddress ptr)
{
	struct ia_css_refcount_entry *ent;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_refcount_decrement(%x) 0x%x\n", id, ptr);

	if (ptr == mmgr_NULL)
		return false;

	ent = refcount_find_entry(ptr, false);
	if (ent != NULL) {
		/* owner mismatch indicates a bookkeeping bug */
		assert(ent->id == id);
		if (ent->count > 0) {
			ent->count -= 1;
			if (ent->count == 0) {
				/* last reference: release the buffer */
				mmgr_free(ptr);
				ent->data = mmgr_NULL;
				ent->id = 0;
			}
			return true;
		}
	}

	/* SHOULD NOT HAPPEN: ptr not managed by refcount,
	 * or not valid anymore */
	assert(false);
	return false;
}
/* Clear every refcount entry owned by the given client id.
 *
 * Each matching buffer is handed to clear_func (which is expected to
 * release it); the mmgr_free fallback below is only reachable if the
 * assert_exit above is compiled out in release builds.  Entries are
 * expected to already have a count of 0 when cleared.
 */
void sh_css_refcount_clear(int32_t id, void (*clear_func)(hrt_vaddress ptr))
{
	struct sh_css_refcount_entry *entry;
	uint32_t i;
	uint32_t count = 0;

	/* a clear callback is mandatory by contract */
	assert_exit(clear_func != NULL);
	sh_css_dtrace(SH_DBG_TRACE, "sh_css_refcount_clear(%x)\n", id);
	for (i = 0; i < myrefcount.size; i++) {
		entry = &myrefcount.items[i];
		if ((entry->data != mmgr_NULL) && (entry->id == id)) {
			sh_css_dtrace(SH_DBG_TRACE, "sh_css_refcount_clear:"
				" %x: 0x%x\n", id, entry->data);
			if (clear_func) {
				/* clear using provided function */
				clear_func(entry->data);
			} else {
				/* release-build fallback only (see assert) */
				sh_css_dtrace(SH_DBG_TRACE,
					"sh_css_refcount_clear: "
					"using mmgr_free: no clear_func\n");
				mmgr_free(entry->data);
			}
			/* the callback must have dropped all references */
			assert_exit(entry->count == 0);
			entry->data = mmgr_NULL;
			entry->count = 0;
			entry->id = 0;
			count++;
		}
	}
	sh_css_dtrace(SH_DBG_TRACE,
		"sh_css_refcount_clear(%x): cleared %d\n", id, count);
}
/**
 * @brief Uninitialize the resource pool (host, vbuf)
 *
 * Frees the hmm buffer behind every live handle, releases the handles
 * from the refcount administration, then frees the handle table.
 *
 * @param pool The pointer to the pool
 */
void ia_css_rmgr_uninit_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
{
	uint32_t idx;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_rmgr_uninit_vbuf()\n");
	if (pool == NULL) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
				    "ia_css_rmgr_uninit_vbuf(): NULL argument\n");
		return;
	}
	if (pool->handles == NULL)
		return;

	/* free the hmm buffers */
	for (idx = 0; idx < pool->size; idx++) {
		if (pool->handles[idx] == NULL)
			continue;
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    " freeing/releasing %x (count=%d)\n",
				    pool->handles[idx]->vptr,
				    pool->handles[idx]->count);
		/* free memory */
		mmgr_free(pool->handles[idx]->vptr);
		/* remove from refcount admin */
		ia_css_rmgr_refcount_release_vbuf(&pool->handles[idx]);
	}

	/* now free the pool handles list */
	sh_css_free(pool->handles);
	pool->handles = NULL;
}
/* Release the ISP code buffer of an extension firmware and mark it as
 * unloaded.  The const is cast away once, via a local, because the
 * loaded/isp_code state is stored inside the firmware info itself. */
void sh_css_acc_unload_extension(const struct sh_css_fw_info *firmware)
{
	struct sh_css_fw_info *fw = (struct sh_css_fw_info *)firmware;

	if (fw->isp_code)
		mmgr_free(HOST_ADDRESS(fw->isp_code));
	fw->isp_code = NULL;
	fw->loaded = false;
}
/* Free a frame: release its data buffer, then the frame struct itself.
 * A NULL frame is silently ignored. */
void ia_css_frame_free(struct ia_css_frame *frame)
{
	IA_CSS_ENTER_PRIVATE("frame = %p", frame);

	if (frame) {
		mmgr_free(frame->data);
		sh_css_free(frame);
	}

	IA_CSS_LEAVE_PRIVATE("void");
}
/* Unload the firmware of SP sp_id: free its DDR code buffer and mark
 * the SP as not loaded.
 *
 * Returns IA_CSS_ERR_INVALID_ARGUMENTS for an out-of-range id or an SP
 * that was never loaded, IA_CSS_SUCCESS otherwise.
 */
enum ia_css_err ia_css_spctrl_unload_fw(sp_ID_t sp_id)
{
	/* original guard repeated '(sp_id < N_SP_ID) &&', which is
	 * implied by the short-circuited first test; simplified */
	if ((sp_id >= N_SP_ID) || (!spctrl_loaded[sp_id]))
		return IA_CSS_ERR_INVALID_ARGUMENTS;

	/* freeup the resource */
	if (spctrl_cofig_info[sp_id].code_addr) {
		mmgr_free(spctrl_cofig_info[sp_id].code_addr);
		/* clear the stale handle so a later unload/reload
		 * cannot double-free the same address */
		spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;
	}
	spctrl_loaded[sp_id] = false;
	return IA_CSS_SUCCESS;
}
/* Free a frame: release its data buffer, then the frame struct itself.
 * A NULL frame is silently ignored. */
void ia_css_frame_free(struct ia_css_frame *frame)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_frame_free() enter: frame=%p\n", frame);

	if (frame) {
		mmgr_free(frame->data);
		sh_css_free(frame);
	}

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_frame_free() leave: return_void\n");
}
enum ia_css_err sh_css_binary_uninit(void) { unsigned int i; struct ia_css_binary_info *b; for (i = 0; i < SH_CSS_BINARY_NUM_MODES; i++) { for (b = binary_infos[i]; b; b = b->next) { if (b->xmem_addr) mmgr_free(b->xmem_addr); b->xmem_addr = mmgr_NULL; } binary_infos[i] = NULL; } sh_css_free(all_binaries); return IA_CSS_SUCCESS; }
void ia_css_isp_param_destroy_isp_parameters( struct ia_css_isp_param_host_segments *mem_params, struct ia_css_isp_param_css_segments *css_params) { unsigned mem, pclass; for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) { for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) { if (mem_params->params[pclass][mem].address) sh_css_free(mem_params->params[pclass][mem].address); if (css_params->params[pclass][mem].address) mmgr_free(css_params->params[pclass][mem].address); mem_params->params[pclass][mem].address = NULL; css_params->params[pclass][mem].address = 0x0; } } }
/* Clear every refcount entry owned by the given client id.
 *
 * Each matching buffer is passed to clear_func_ptr, which is expected
 * to release it; the mmgr_free branch below is only reachable when the
 * assert is compiled out.  A non-zero remaining count is reported as a
 * warning but the entry is reset regardless.
 */
void ia_css_refcount_clear(int32_t id, clear_func clear_func_ptr)
{
	struct ia_css_refcount_entry *entry;
	uint32_t i;
	uint32_t count = 0;

	/* a clear callback is mandatory by contract */
	assert(clear_func_ptr != NULL);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_refcount_clear(%x)\n",
			    id);
	for (i = 0; i < myrefcount.size; i++) {
		/* driver verifier tool has issues with &arr[i] and prefers
		   arr + i; as these are actually equivalent the line below
		   uses + i */
		entry = myrefcount.items + i;
		if ((entry->data != mmgr_NULL) && (entry->id == id)) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_refcount_clear:"
					    " %x: 0x%x\n", id, entry->data);
			if (clear_func_ptr) {
				/* clear using provided function */
				clear_func_ptr(entry->data);
			} else {
				/* release-build fallback only (see assert) */
				ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
						    "ia_css_refcount_clear: "
						    "using mmgr_free: "
						    "no clear_func\n");
				mmgr_free(entry->data);
			}
			assert(entry->count == 0);
			if (entry->count != 0) {
				IA_CSS_WARNING("Ref count for entry %x is not zero!",
					       entry->id);
			}
			entry->data = mmgr_NULL;
			entry->count = 0;
			entry->id = 0;
			count++;
		}
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_refcount_clear(%x): cleared %d\n", id,
			    count);
}
/* Decrement the reference count held by client id on ptr; frees the
 * buffer when the count drops to zero.
 *
 * Unlike the assert-based variant, this version reports errors via
 * IA_CSS_ERROR and returns false instead of asserting, so production
 * builds survive an untracked or mismatched pointer.
 */
bool ia_css_refcount_decrement(int32_t id, hrt_vaddress ptr)
{
	struct ia_css_refcount_entry *entry;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_refcount_decrement(%x) 0x%x\n", id, ptr);

	if (ptr == mmgr_NULL)
		return false;

	entry = refcount_find_entry(ptr, false);
	if (entry) {
		/* the entry must belong to the decrementing client */
		if (entry->id != id) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
					    "ia_css_refcount_decrement(): Ref count IDS do not match!\n");
			return false;
		}
		if (entry->count > 0) {
			entry->count -= 1;
			if (entry->count == 0) {
				/* last reference gone: free the buffer */
				mmgr_free(ptr);
				entry->data = mmgr_NULL;
				entry->id = 0;
			}
			return true;
		}
	}

	/* SHOULD NOT HAPPEN: ptr not managed by refcount, or not valid anymore */
	if (entry)
		IA_CSS_ERROR("id %x, ptr 0x%x entry %p entry->id %x entry->count %d\n",
			     id, ptr, entry, entry->id, entry->count);
	else
		IA_CSS_ERROR("entry NULL for ptr 0x%x\n", ptr);
	/* assert(false); */
	return false;
}
/**
 * @brief Release a handle to the pool (host, vbuf)
 *
 * On the last user reference the buffer is either recycled back into
 * the pool or, for non-recycling pools, freed outright.  The caller's
 * handle pointer is always cleared.
 *
 * @param pool   The pointer to the pool
 * @param handle The pointer to the handle
 */
void ia_css_rmgr_rel_vbuf(struct ia_css_rmgr_vbuf_pool *pool,
			  struct ia_css_rmgr_vbuf_handle **handle)
{
	if (pool == NULL || handle == NULL || *handle == NULL) {
		IA_CSS_LOG("Invalid inputs");
		return;
	}

	/* release the handle */
	if ((*handle)->count == 1) {
		if (pool->recycle) {
			/* recycle to pool */
			rmgr_push_handle(pool, handle);
		} else {
			/* non recycling pool, free mem */
			mmgr_free((*handle)->vptr);
		}
	}
	ia_css_rmgr_refcount_release_vbuf(handle);
	*handle = NULL;
}
void sh_css_refcount_uninit(void) { struct sh_css_refcount_entry *entry; uint32_t i; for (i = 0; i < myrefcount.size; i++) { entry = &myrefcount.items[i]; if (entry->data != mmgr_NULL) { /* sh_css_dtrace(SH_DBG_TRACE_PRIVATE, "sh_css_refcount_uninit: freeing (%x)\n", entry->data);*/ mmgr_free(entry->data); entry->data = mmgr_NULL; entry->size = 0; entry->count = 0; entry->id = 0; } } sh_css_free(myrefcount.items); myrefcount.items = NULL; myrefcount.size = 0; }
/* Clear every refcount entry owned by the given client id.
 *
 * NOTE(review): the entry bookkeeping (data/size/count/id) is reset
 * only in the mmgr_free branch; the clear_func branch relies on the
 * callback to update the administration (see inline comment) -- confirm
 * all callbacks actually do so.
 */
void sh_css_refcount_clear(int32_t id, void (*clear_func)(hrt_vaddress ptr))
{
	struct sh_css_refcount_entry *entry;
	uint32_t i;
	uint32_t count = 0;

	sh_css_dtrace(SH_DBG_TRACE_PRIVATE, "sh_css_refcount_clear(%x)\n",
		id);
	for (i = 0; i < myrefcount.size; i++) {
		entry = &myrefcount.items[i];
		if ((entry->data != mmgr_NULL) && (entry->id == id)) {
			sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
				"sh_css_refcount_clear: %x: 0x%x refcnt %d\n",
				id, entry->data, entry->count);
			if (clear_func) {
				/* clear using provided function */
				/* This function will update the entry */
				/* administration (we should not do that) */
				clear_func(entry->data);
				assert(entry->count == 0);
			} else {
				/* default path: free and reset in place */
				sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
					"sh_css_refcount_clear: "
					"using default mmgr_free\n");
				mmgr_free(entry->data);
				assert(entry->count == 0);
				entry->data = mmgr_NULL;
				entry->size = 0;
				entry->count = 0;
				entry->id = 0;
			}
			count++;
		}
	}
	sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
		"sh_css_refcount_clear(%x): cleared %d\n", id, count);
}
/* Release all host- and DDR-side resources attached to an sppipeline
 * command: binary ISP parameters, delay/TNR frames, per-memory ISP
 * parameter buffers, DVS statistics, and the lens-shading / MACC /
 * DVS 6-axis tables.  Finally the SP stage frame info is reset to its
 * static defaults.
 *
 * NOTE(review): mmgr_EXCEPTION appears to be a sentinel for "not a
 * real allocation" alongside mmgr_NULL -- confirm against the mmgr API.
 */
void ia_css_psys_sppipeline_cmd_free(
	ia_css_process_group_t *process_group,
	ia_css_psys_pgpoc_context_t *context)
{
	unsigned mem;
	struct sh_css_ddr_address_map *mem_map;
	hrt_vaddress dvs_stats_vaddr;
	ia_css_psysapi_cmd_t *host_cmd = NULL;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
		"ia_css_psys_sppipeline_cmd_free(): enter\n ");
	assert(process_group != NULL && context != 0);
	host_cmd = &context->host_cmd;
	/* TODO: binary needs to be part of host side cookie */
	ia_css_binary_destroy_isp_parameters(&context->binary);
	ia_css_frame_free_multiple(NUM_VIDEO_DELAY_FRAMES,
		context->delay_frames);
	ia_css_frame_free_multiple(NUM_VIDEO_TNR_FRAMES,
		context->tnr_frames);
	mem_map = &host_cmd->isp_param_info.mem_map;
	/* free the per-memory ISP parameter buffers (class 0 only) */
	for (mem = 0; mem < N_IA_CSS_MEMORIES; mem++) {
		hrt_vaddress ddr_mem_ptr = mem_map->isp_mem_param[0][mem];
		if(ddr_mem_ptr)
			mmgr_free(ddr_mem_ptr);
		mem_map->isp_mem_param[0][mem] = mmgr_NULL;
	}
	/* TODO: remove DVS logic */
	dvs_stats_vaddr = host_cmd->sp_stage.frames.dvs_buf.buf_src.xmem_addr;
	if(dvs_stats_vaddr != mmgr_NULL && dvs_stats_vaddr != mmgr_EXCEPTION) {
		ia_css_isp_dvs_statistics_free(context->dvs_stats);
		mmgr_free(dvs_stats_vaddr);
	}
	/* TODO: Tables need to be part of host side cookie */
	if(mem_map->sc_tbl != mmgr_NULL && mem_map->sc_tbl != mmgr_EXCEPTION) {
		mmgr_free(mem_map->sc_tbl);
		mem_map->sc_tbl = mmgr_NULL;
	}
	if(mem_map->macc_tbl != mmgr_NULL &&
	   mem_map->macc_tbl != mmgr_EXCEPTION) {
		mmgr_free(mem_map->macc_tbl);
		mem_map->macc_tbl = mmgr_NULL;
	}
	if(mem_map->dvs_6axis_params_y != mmgr_NULL &&
	   mem_map->dvs_6axis_params_y != mmgr_EXCEPTION) {
		mmgr_free(mem_map->dvs_6axis_params_y);
		mem_map->dvs_6axis_params_y = mmgr_NULL;
	}
	/* restore default frame state for the SP stage */
	spstage_set_static_frame_defaults(&host_cmd->sp_stage);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
		"ia_css_psys_sppipeline_cmd_free(): exit\n ");
}
/* Load firmware for SP sp_id: copy code (text + icache) and data into a
 * freshly allocated DDR buffer, record the dmem init configuration, and
 * program/invalidate the SP icache so it fetches from the new buffer.
 *
 * Returns IA_CSS_ERR_INVALID_ARGUMENTS for a bad id/config,
 * IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY on allocation failure,
 * IA_CSS_ERR_INTERNAL_ERROR on type-width or alignment problems,
 * IA_CSS_SUCCESS otherwise.
 */
enum ia_css_err ia_css_spctrl_load_fw(sp_ID_t sp_id,
				      ia_css_spctrl_cfg *spctrl_cfg)
{
	hrt_vaddress code_addr = mmgr_NULL;
	struct ia_css_sp_init_dmem_cfg *init_dmem_cfg;

	if ((sp_id >= N_SP_ID) || (spctrl_cfg == 0))
		return IA_CSS_ERR_INVALID_ARGUMENTS;

	spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;

#if defined(C_RUN) || defined(HRT_UNSCHED)
	/* simulation builds only need a dummy allocation */
	(void)init_dmem_cfg;
	code_addr = mmgr_malloc(1);
	if (code_addr == mmgr_NULL)
		return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
#else
	init_dmem_cfg = &spctrl_cofig_info[sp_id].dmem_config;
	init_dmem_cfg->dmem_data_addr = spctrl_cfg->dmem_data_addr;
	init_dmem_cfg->dmem_bss_addr  = spctrl_cfg->dmem_bss_addr;
	init_dmem_cfg->data_size      = spctrl_cfg->data_size;
	init_dmem_cfg->bss_size       = spctrl_cfg->bss_size;
	init_dmem_cfg->sp_id          = sp_id;

	spctrl_cofig_info[sp_id].spctrl_config_dmem_addr =
		spctrl_cfg->spctrl_config_dmem_addr;
	spctrl_cofig_info[sp_id].spctrl_state_dmem_addr =
		spctrl_cfg->spctrl_state_dmem_addr;

	/* store code (text + icache) and data to DDR
	 *
	 * Data used to be stored separately, because of access alignment
	 * constraints, fix the FW generation instead
	 */
	code_addr = mmgr_malloc(spctrl_cfg->code_size);
	if (code_addr == mmgr_NULL)
		return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
	mmgr_store(code_addr, spctrl_cfg->code, spctrl_cfg->code_size);

	if (sizeof(hrt_vaddress) > sizeof(hrt_data)) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
			"size of hrt_vaddress can not be greater than hrt_data\n");
		/* BUG FIX: original called mmgr_free(spctrl_cfg->code_size)
		 * (a size, not an address) and then wrote mmgr_NULL into the
		 * caller's code_size field; free the allocated buffer. */
		mmgr_free(code_addr);
		code_addr = mmgr_NULL;
		return IA_CSS_ERR_INTERNAL_ERROR;
	}

	init_dmem_cfg->ddr_data_addr = code_addr + spctrl_cfg->ddr_data_offset;
	if ((init_dmem_cfg->ddr_data_addr % HIVE_ISP_DDR_WORD_BYTES) != 0) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
			"DDR address pointer is not properly aligned for DMA transfer\n");
		/* same fix as above: release the code buffer, not the size */
		mmgr_free(code_addr);
		code_addr = mmgr_NULL;
		return IA_CSS_ERR_INTERNAL_ERROR;
	}
#endif
	spctrl_cofig_info[sp_id].sp_entry = spctrl_cfg->sp_entry;
	spctrl_cofig_info[sp_id].code_addr = code_addr;
	spctrl_cofig_info[sp_id].program_name = spctrl_cfg->program_name;

#ifdef HRT_CSIM
	/* Secondary SP is named as SP2 in SDK, however we are using
	   secondary SP as SP1 in the HSS and secondary SP Firmware */
	if (sp_id == SP0_ID) {
		hrt_cell_set_icache_base_address(SP,
			spctrl_cofig_info[sp_id].code_addr);
		hrt_cell_invalidate_icache(SP);
		hrt_cell_load_program(SP,
			spctrl_cofig_info[sp_id].program_name);
	}
#if defined(HAS_SEC_SP)
	else {
		hrt_cell_set_icache_base_address(SP2,
			spctrl_cofig_info[sp_id].code_addr);
		hrt_cell_invalidate_icache(SP2);
		hrt_cell_load_program(SP2,
			spctrl_cofig_info[sp_id].program_name);
	}
#endif /* HAS_SEC_SP */
#else
	/* now we program the base address into the icache and
	 * invalidate the cache.
	 */
	sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG,
		(hrt_data)spctrl_cofig_info[sp_id].code_addr);
	sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT);
#endif
	spctrl_loaded[sp_id] = true;
	return IA_CSS_SUCCESS;
}
/* Release the DDR copy of the SP dmem init data and drop the stale
 * handle so a repeated uninit is harmless. */
void sh_css_sp_uninit(void)
{
	mmgr_free(init_dmem_ddr);
	init_dmem_ddr = mmgr_NULL;
}