/* Load firmware */
/*
 * Load the SP firmware image described by spctrl_cfg for cell sp_id:
 * copy the program code into DDR, record per-SP bookkeeping in
 * spctrl_cofig_info[], and program/invalidate the SP icache so the cell
 * can fetch the new code.
 *
 * Returns IA_CSS_SUCCESS on success,
 *         IA_CSS_ERR_INVALID_ARGUMENTS on a bad sp_id or NULL config,
 *         IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY when the DDR allocation fails.
 */
enum ia_css_err ia_css_spctrl_load_fw(sp_ID_t sp_id,
		ia_css_spctrl_cfg *spctrl_cfg)
{
	hrt_vaddress code_addr = mmgr_NULL;
	struct ia_css_sp_init_dmem_cfg *init_dmem_cfg;

	if ((sp_id >= N_SP_ID) || (spctrl_cfg == 0))
		return IA_CSS_ERR_INVALID_ARGUMENTS;

	spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;

#if defined(C_RUN) || defined(HRT_UNSCHED)
	/* Simulation/scheduling-off builds: no real code image is copied,
	 * but a non-NULL handle is still recorded, so allocate one byte. */
	(void)init_dmem_cfg;
	code_addr = mmgr_malloc(1);
	if (code_addr == mmgr_NULL)
		return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
#else
	/* Fill in the DMEM-initialization descriptor the SP will consume. */
	init_dmem_cfg = &spctrl_cofig_info[sp_id].dmem_config;
	init_dmem_cfg->dmem_data_addr = spctrl_cfg->dmem_data_addr;
	init_dmem_cfg->dmem_bss_addr = spctrl_cfg->dmem_bss_addr;
	init_dmem_cfg->data_size = spctrl_cfg->data_size;
	init_dmem_cfg->bss_size = spctrl_cfg->bss_size;
	init_dmem_cfg->sp_id = sp_id;

	spctrl_cofig_info[sp_id].spctrl_config_dmem_addr =
		spctrl_cfg->spctrl_config_dmem_addr;
	spctrl_cofig_info[sp_id].spctrl_state_dmem_addr =
		spctrl_cfg->spctrl_state_dmem_addr;

	/* store code (text + icache) and data to DDR
	 *
	 * Data used to be stored separately, because of access alignment
	 * constraints, fix the FW generation instead
	 */
	code_addr = mmgr_malloc(spctrl_cfg->code_size);
	if (code_addr == mmgr_NULL)
		return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
	mmgr_store(code_addr, spctrl_cfg->code, spctrl_cfg->code_size);

	/* The DDR address is later written into an hrt_data register
	 * (sp_ctrl_store below), so a vaddress must fit in hrt_data. */
	assert(sizeof(hrt_vaddress) <= sizeof(hrt_data));

	init_dmem_cfg->ddr_data_addr = code_addr + spctrl_cfg->ddr_data_offset;
	/* DMA transfers require the DDR source to be word aligned. */
	assert((init_dmem_cfg->ddr_data_addr % HIVE_ISP_DDR_WORD_BYTES) == 0);
#endif
	spctrl_cofig_info[sp_id].sp_entry = spctrl_cfg->sp_entry;
	spctrl_cofig_info[sp_id].code_addr = code_addr;
	spctrl_cofig_info[sp_id].program_name = spctrl_cfg->program_name;

#ifdef HRT_CSIM
	/* Co-simulation: let the HRT load the program directly. */
	hrt_cell_set_icache_base_address(SP, spctrl_cofig_info[sp_id].code_addr);
	hrt_cell_invalidate_icache(SP);
	hrt_cell_load_program(SP, spctrl_cofig_info[sp_id].program_name);
#else
	/* now we program the base address into the icache and
	 * invalidate the cache.
	 */
	sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG,
		(hrt_data)spctrl_cofig_info[sp_id].code_addr);
	sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT);
#endif
	spctrl_loaded[sp_id] = true;
	return IA_CSS_SUCCESS;
}
/*
 * Convert the (externally provided) full-resolution shading table to the
 * ISP layout and copy it into the ISP buffer at isp_sc_tbl.
 * The temporary ISP-layout table is freed before returning.
 * Always returns true.
 */
bool sh_css_param_shading_table_store(
	hrt_vaddress isp_sc_tbl,
	unsigned int sensor_binning,
	bool raw_binning,
	const struct sh_css_binary *binary)
{
	struct sh_css_shading_table_isp *isp_table;
	size_t isp_table_bytes;

	sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
		"sh_css_param_shading_table_store() enter:\n");

	/* shading table is full resolution, reduce */
	sh_css_param_shading_table_prepare(sc_table, sensor_binning,
		raw_binning, &isp_table, binary);

	isp_table_bytes = SH_CSS_SC_NUM_COLORS * isp_table->height *
		isp_table->stride * sizeof(short);
	mmgr_store(isp_sc_tbl, sh_table_entry(isp_table, 0, 0, 0),
		isp_table_bytes);

	/* The reduced table was only a staging copy; release it. */
	sh_css_free(isp_table->data);
	sh_css_free(isp_table);

	sh_css_dtrace(SH_DBG_TRACE_PRIVATE,
		"sh_css_param_shading_table_store() leave:\n");
	return true;
}
/*
 * Ensure a DVS statistics buffer exists for this process-group context.
 * If the SP stage already points at a valid xmem buffer it is reused;
 * otherwise host-side statistics and a DDR mirror are allocated and the
 * SP stage is pointed at the new buffer.
 */
static void handle_dis(ia_css_psys_pgpoc_context_t *context)
{
	struct ia_css_grid_info grid;
	struct ia_css_dvs_grid_info *dvs_grid = NULL;
	ia_css_psysapi_cmd_t *host_cmd;
	hrt_vaddress stats_addr;

	assert(context != NULL);
	host_cmd = &context->host_cmd;
	assert(host_cmd != NULL);

	stats_addr = host_cmd->sp_stage.frames.dvs_buf.buf_src.xmem_addr;

	/* reuse dvs buffers until process group is destroyed. */
	if (stats_addr != mmgr_NULL && stats_addr != mmgr_EXCEPTION)
		return;

	ia_css_binary_dvs_grid_info(&context->binary, &grid);
	dvs_grid = &grid.dvs_grid;
	context->dvs_stats = ia_css_isp_dvs2_statistics_allocate(dvs_grid);
	assert(context->dvs_stats != NULL);

	stats_addr = mmgr_malloc(sizeof(struct ia_css_isp_dvs_statistics));
	assert(stats_addr != 0);
	host_cmd->sp_stage.frames.dvs_buf.buf_src.xmem_addr = stats_addr;
	mmgr_store(stats_addr, context->dvs_stats,
		sizeof(struct ia_css_isp_dvs_statistics));
}
/*
 * Copy a firmware blob of `size` bytes into freshly allocated DDR memory.
 * Returns the DDR address, or mmgr_NULL when allocation fails.
 */
hrt_vaddress sh_css_load_blob(const unsigned char *blob, unsigned size)
{
	/* this will allocate memory aligned to a DDR word boundary which
	   is required for the CSS DMA to read the instructions. */
	hrt_vaddress blob_addr = mmgr_malloc(size);

	assert(blob != NULL);
	if (blob_addr)
		mmgr_store(blob_addr, blob, size);
	return blob_addr;
}
/*
 * Activate an SP program, loading its text section into DDR first when no
 * DDR copy exists yet (code_addr == mmgr_NULL).
 * Returns the DDR code address, or mmgr_NULL when allocation fails.
 */
hrt_vaddress sh_css_sp_load_program(
	const struct sh_css_fw_info *fw,
	const char *sp_prog,
	hrt_vaddress code_addr)
{
	if (code_addr != mmgr_NULL) {
		/* Caller already holds a DDR copy; just activate it. */
		sh_css_sp_activate_program(fw, code_addr, sp_prog);
		return code_addr;
	}

	/* store code (text section) to DDR */
	code_addr = mmgr_malloc(fw->blob.text_size);
	if (code_addr == mmgr_NULL)
		return mmgr_NULL;
	mmgr_store(code_addr, fw->blob.text, fw->blob.text_size);

	/* Set the correct start address for the SP program */
	sh_css_sp_activate_program(fw, code_addr, sp_prog);
	return code_addr;
}
/*
 * Copy a firmware blob of `size` bytes into freshly allocated DDR memory.
 * Under HRT_CSIM the bytes between `size` and the next DDR word boundary
 * are additionally cleared. Returns the DDR address, or mmgr_NULL when
 * allocation fails.
 */
hrt_vaddress sh_css_load_blob(const unsigned char *blob, unsigned size)
{
	/* this will allocate memory aligned to a DDR word boundary which
	   is required for the CSS DMA to read the instructions. */
	hrt_vaddress dst = mmgr_malloc(size);

	assert(blob != NULL);
	if (dst) {
		mmgr_store(dst, blob, size);
#ifdef HRT_CSIM
		/* Clear the tail padding up to the next DDR word boundary. */
		{
			unsigned total = CEIL_MUL(size, HIVE_ISP_DDR_WORD_BYTES);

			mmgr_clear(dst + size, total - size);
		}
#endif
	}
	return dst;
}
/* Initialize the entire contents of the DMEM at once -- does not need to * do this from the host */ void sh_css_sp_store_init_dmem(const struct sh_css_fw_info *fw) { struct sh_css_sp_init_dmem_cfg init_dmem_cfg; mmgr_store(init_dmem_ddr, fw->blob.data, fw->blob.data_size); /* Configure the data structure to initialize dmem */ init_dmem_cfg.done = false; init_dmem_cfg.ddr_data_addr = init_dmem_ddr; init_dmem_cfg.dmem_data_addr = (hrt_vaddress)fw->blob.data_target; init_dmem_cfg.data_size = fw->blob.data_size; init_dmem_cfg.dmem_bss_addr = (hrt_vaddress)fw->blob.bss_target; init_dmem_cfg.bss_size = fw->blob.bss_size; sp_dmem_store(SP0_ID, (unsigned)fw->info.sp.init_dmem_data, &init_dmem_cfg, sizeof(init_dmem_cfg)); }
enum ia_css_err ia_css_isp_param_copy_isp_mem_if_to_ddr( struct ia_css_isp_param_css_segments *ddr, const struct ia_css_isp_param_host_segments *host, enum ia_css_param_class pclass) { unsigned mem; for (mem = 0; mem < N_IA_CSS_ISP_MEMORIES; mem++) { size_t size = host->params[pclass][mem].size; hrt_vaddress ddr_mem_ptr = ddr->params[pclass][mem].address; char *host_mem_ptr = host->params[pclass][mem].address; if (size != ddr->params[pclass][mem].size) return IA_CSS_ERR_INTERNAL_ERROR; if (!size) continue; mmgr_store(ddr_mem_ptr, host_mem_ptr, size); } return IA_CSS_SUCCESS; }
/* Load firmware */
/*
 * Load the SP firmware image described by spctrl_cfg for cell sp_id:
 * copy the program code into DDR, record per-SP bookkeeping in
 * spctrl_cofig_info[], and program/invalidate the SP icache.
 *
 * Returns IA_CSS_SUCCESS on success,
 *         IA_CSS_ERR_INVALID_ARGUMENTS on a bad sp_id or NULL config,
 *         IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY when the DDR allocation fails,
 *         IA_CSS_ERR_INTERNAL_ERROR on address-width or alignment problems.
 */
enum ia_css_err ia_css_spctrl_load_fw(sp_ID_t sp_id,
		ia_css_spctrl_cfg *spctrl_cfg)
{
	hrt_vaddress code_addr = mmgr_NULL;
	struct ia_css_sp_init_dmem_cfg *init_dmem_cfg;

	if ((sp_id >= N_SP_ID) || (spctrl_cfg == 0))
		return IA_CSS_ERR_INVALID_ARGUMENTS;

	spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;

#if defined(C_RUN) || defined(HRT_UNSCHED)
	/* Simulation/scheduling-off builds: no code image is copied, but a
	 * non-NULL handle is still recorded, so allocate one byte. */
	(void)init_dmem_cfg;
	code_addr = mmgr_malloc(1);
	if (code_addr == mmgr_NULL)
		return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
#else
	/* Fill in the DMEM-initialization descriptor the SP will consume. */
	init_dmem_cfg = &spctrl_cofig_info[sp_id].dmem_config;
	init_dmem_cfg->dmem_data_addr = spctrl_cfg->dmem_data_addr;
	init_dmem_cfg->dmem_bss_addr = spctrl_cfg->dmem_bss_addr;
	init_dmem_cfg->data_size = spctrl_cfg->data_size;
	init_dmem_cfg->bss_size = spctrl_cfg->bss_size;
	init_dmem_cfg->sp_id = sp_id;

	spctrl_cofig_info[sp_id].spctrl_config_dmem_addr =
		spctrl_cfg->spctrl_config_dmem_addr;
	spctrl_cofig_info[sp_id].spctrl_state_dmem_addr =
		spctrl_cfg->spctrl_state_dmem_addr;

	/* store code (text + icache) and data to DDR
	 *
	 * Data used to be stored separately, because of access alignment
	 * constraints, fix the FW generation instead
	 */
	code_addr = mmgr_malloc(spctrl_cfg->code_size);
	if (code_addr == mmgr_NULL)
		return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
	mmgr_store(code_addr, spctrl_cfg->code, spctrl_cfg->code_size);

	/* The DDR address is later written into an hrt_data register
	 * (sp_ctrl_store below), so a vaddress must fit in hrt_data. */
	if (sizeof(hrt_vaddress) > sizeof(hrt_data)) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
			"size of hrt_vaddress can not be greater than hrt_data\n");
		/* BUGFIX: free the DDR buffer just allocated (code_addr),
		 * not the code_size value, and reset the local handle
		 * instead of clobbering the caller's size field. */
		mmgr_free(code_addr);
		code_addr = mmgr_NULL;
		return IA_CSS_ERR_INTERNAL_ERROR;
	}

	init_dmem_cfg->ddr_data_addr = code_addr + spctrl_cfg->ddr_data_offset;
	if ((init_dmem_cfg->ddr_data_addr % HIVE_ISP_DDR_WORD_BYTES) != 0) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
			"DDR address pointer is not properly aligned for DMA transfer\n");
		/* Same fix as above: release the real allocation. */
		mmgr_free(code_addr);
		code_addr = mmgr_NULL;
		return IA_CSS_ERR_INTERNAL_ERROR;
	}
#endif
	spctrl_cofig_info[sp_id].sp_entry = spctrl_cfg->sp_entry;
	spctrl_cofig_info[sp_id].code_addr = code_addr;
	spctrl_cofig_info[sp_id].program_name = spctrl_cfg->program_name;

#ifdef HRT_CSIM
	/* Secondary SP is named as SP2 in SDK, however we are using
	 * secondary SP as SP1 in the HSS and secondary SP Firmware */
	if (sp_id == SP0_ID) {
		hrt_cell_set_icache_base_address(SP,
			spctrl_cofig_info[sp_id].code_addr);
		hrt_cell_invalidate_icache(SP);
		hrt_cell_load_program(SP,
			spctrl_cofig_info[sp_id].program_name);
	}
#if defined(HAS_SEC_SP)
	else {
		hrt_cell_set_icache_base_address(SP2,
			spctrl_cofig_info[sp_id].code_addr);
		hrt_cell_invalidate_icache(SP2);
		hrt_cell_load_program(SP2,
			spctrl_cofig_info[sp_id].program_name);
	}
#endif /* HAS_SEC_SP */
#else
	/* now we program the base address into the icache and
	 * invalidate the cache.
	 */
	sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG,
		(hrt_data)spctrl_cofig_info[sp_id].code_addr);
	sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT);
#endif
	spctrl_loaded[sp_id] = true;
	return IA_CSS_SUCCESS;
}