/* Load firmware */
enum ia_css_err ia_css_spctrl_load_fw(sp_ID_t sp_id,
				ia_css_spctrl_cfg *spctrl_cfg)
{
	hrt_vaddress code_addr = mmgr_NULL;
	struct ia_css_sp_init_dmem_cfg *init_dmem_cfg;

	if ((sp_id >= N_SP_ID) || (spctrl_cfg == 0))
		return IA_CSS_ERR_INVALID_ARGUMENTS;

	spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;

#if defined(C_RUN) || defined(HRT_UNSCHED)
	(void)init_dmem_cfg;
	code_addr = mmgr_malloc(1);
	if (code_addr == mmgr_NULL)
		return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
#else
	init_dmem_cfg = &spctrl_cofig_info[sp_id].dmem_config;
	init_dmem_cfg->dmem_data_addr = spctrl_cfg->dmem_data_addr;
	init_dmem_cfg->dmem_bss_addr  = spctrl_cfg->dmem_bss_addr;
	init_dmem_cfg->data_size      = spctrl_cfg->data_size;
	init_dmem_cfg->bss_size       = spctrl_cfg->bss_size;
	init_dmem_cfg->sp_id          = sp_id;

	spctrl_cofig_info[sp_id].spctrl_config_dmem_addr = spctrl_cfg->spctrl_config_dmem_addr;
	spctrl_cofig_info[sp_id].spctrl_state_dmem_addr = spctrl_cfg->spctrl_state_dmem_addr;

	/* Store code (text + icache) and data to DDR.
	 *
	 * Data used to be stored separately because of access-alignment
	 * constraints; the right place to fix that is the FW generation,
	 * not a split store.
	 */
	code_addr = mmgr_malloc(spctrl_cfg->code_size);
	if (code_addr == mmgr_NULL)
		return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
	mmgr_store(code_addr, spctrl_cfg->code, spctrl_cfg->code_size);

	assert(sizeof(hrt_vaddress) <= sizeof(hrt_data));

	init_dmem_cfg->ddr_data_addr  = code_addr + spctrl_cfg->ddr_data_offset;
	assert((init_dmem_cfg->ddr_data_addr % HIVE_ISP_DDR_WORD_BYTES) == 0);
#endif
	spctrl_cofig_info[sp_id].sp_entry = spctrl_cfg->sp_entry;
	spctrl_cofig_info[sp_id].code_addr = code_addr;
	spctrl_cofig_info[sp_id].program_name = spctrl_cfg->program_name;

#ifdef HRT_CSIM
	hrt_cell_set_icache_base_address(SP, spctrl_cofig_info[sp_id].code_addr);
	hrt_cell_invalidate_icache(SP);
	hrt_cell_load_program(SP, spctrl_cofig_info[sp_id].program_name);
#else
	/* now we program the base address into the icache and
	 * invalidate the cache.
	 */
	sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG, (hrt_data)spctrl_cofig_info[sp_id].code_addr);
	sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT);
#endif
	spctrl_loaded[sp_id] = true;
	return IA_CSS_SUCCESS;
}
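/* Usage sketch for the loader above. This is illustrative only: the wrapper
 * name and its parameters are made up here, while the ia_css_spctrl_cfg
 * fields are exactly the ones consumed by ia_css_spctrl_load_fw(). It shows
 * the expected calling pattern: fill the config, call the loader, and let a
 * mmgr_malloc() failure propagate as IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY.
 */
static enum ia_css_err example_load_sp0_fw(const void *code, uint32_t code_size,
					   unsigned int sp_entry, char *program_name)
{
	ia_css_spctrl_cfg cfg = { 0 };

	cfg.code         = code;
	cfg.code_size    = code_size;
	cfg.sp_entry     = sp_entry;
	cfg.program_name = program_name;
	/* The DMEM layout fields (dmem_data_addr, dmem_bss_addr, data_size,
	 * bss_size, ddr_data_offset, ...) are left at zero in this sketch; a
	 * real caller fills them from the firmware's DMEM init information.
	 */
	return ia_css_spctrl_load_fw(SP0_ID, &cfg);
}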
static void handle_dis(ia_css_psys_pgpoc_context_t *context)
{
	struct ia_css_dvs_grid_info *dvs_grid = NULL;
	struct ia_css_grid_info grid;
	hrt_vaddress dvs_stats_vaddr;
	ia_css_psysapi_cmd_t *host_cmd;

	assert(context != NULL);
	host_cmd = &context->host_cmd;
	assert(host_cmd != NULL);

	dvs_stats_vaddr =
		host_cmd->sp_stage.frames.dvs_buf.buf_src.xmem_addr;
	/* reuse dvs buffers until process group is destroyed. */
	if (dvs_stats_vaddr == mmgr_NULL || dvs_stats_vaddr == mmgr_EXCEPTION) {
		ia_css_binary_dvs_grid_info(&context->binary,
				&grid);
		dvs_grid = &grid.dvs_grid;
		context->dvs_stats = ia_css_isp_dvs2_statistics_allocate(dvs_grid);
		assert(context->dvs_stats != NULL);

		dvs_stats_vaddr = mmgr_malloc(sizeof(struct ia_css_isp_dvs_statistics));
		assert(dvs_stats_vaddr != 0);
		host_cmd->sp_stage.frames.dvs_buf.buf_src.xmem_addr =
			dvs_stats_vaddr;
		mmgr_store(dvs_stats_vaddr,
				context->dvs_stats,
				sizeof(struct ia_css_isp_dvs_statistics));
	}
}
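/* Teardown sketch matching handle_dis() above. The destroy path is not part
 * of the excerpt, so this is an assumption: the function name is invented,
 * and ia_css_isp_dvs2_statistics_free() is assumed to be the counterpart of
 * ia_css_isp_dvs2_statistics_allocate(). The point is that buffers kept
 * "until the process group is destroyed" need exactly one release here.
 */
static void example_release_dis(ia_css_psys_pgpoc_context_t *context)
{
	hrt_vaddress vaddr;

	assert(context != NULL);
	vaddr = context->host_cmd.sp_stage.frames.dvs_buf.buf_src.xmem_addr;

	if (vaddr != mmgr_NULL && vaddr != mmgr_EXCEPTION)
		mmgr_free(vaddr);
	context->host_cmd.sp_stage.frames.dvs_buf.buf_src.xmem_addr = mmgr_NULL;

	if (context->dvs_stats != NULL) {
		ia_css_isp_dvs2_statistics_free(context->dvs_stats);
		context->dvs_stats = NULL;
	}
}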
enum sh_css_err
sh_css_sp_init(void)
{
	init_dmem_ddr = mmgr_malloc(SP_DMEM_SIZE);
	if (init_dmem_ddr == mmgr_NULL)
		return sh_css_err_cannot_allocate_memory;

	return sh_css_success;
}
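/* Counterpart sketch for sh_css_sp_init(). The uninit function is not part of
 * the excerpt, so the name sh_css_sp_uninit() is an assumption; the body just
 * applies the usual mmgr_malloc()/mmgr_free() pairing to init_dmem_ddr.
 */
void
sh_css_sp_uninit(void)
{
	mmgr_free(init_dmem_ddr);
	init_dmem_ddr = mmgr_NULL;
}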
/**
 * @brief Acquire a handle from the pool (host, vbuf)
 *
 * @param pool		The pointer to the pool
 * @param handle	The pointer to the handle
 */
void ia_css_rmgr_acq_vbuf(struct ia_css_rmgr_vbuf_pool *pool,
                          struct ia_css_rmgr_vbuf_handle **handle)
{
#ifdef __KLOCWORK__
    /* KW sees the *handle = h; assignment about 20 lines down
       and thinks that we are assigning a local to a global.
       What it does not see is that in ia_css_i_host_rmgr_pop_handle
       a new value is assigned to handle.
       So this is a false positive KW issue.
       To fix that we make the struct static for KW so it will
       think that h remains alive; we do not want this in our
       production code though as it breaks reentrancy of the code
     */

    static struct ia_css_rmgr_vbuf_handle h;
#else /* __KLOCWORK__ */
    struct ia_css_rmgr_vbuf_handle h;
#endif /* __KLOCWORK__ */

    if ((pool == NULL) || (handle == NULL) || (*handle == NULL)) {
        IA_CSS_LOG("Invalid inputs");
        return;
    }

    if (pool->copy_on_write) {
        /* only one reference, reuse (no new retain) */
        if ((*handle)->count == 1)
            return;
        /* more than one reference, release current buffer */
        if ((*handle)->count > 1) {
            /* store current values */
            h.vptr = 0x0;
            h.size = (*handle)->size;
            /* release ref to current buffer */
            ia_css_rmgr_refcount_release_vbuf(handle);
            *handle = &h;
        }
        /* get new buffer for needed size */
        if ((*handle)->vptr == 0x0) {
            if (pool->recycle) {
                /* try and pop from pool */
                rmgr_pop_handle(pool, handle);
            }
            if ((*handle)->vptr == 0x0) {
                /* we need to allocate */
                (*handle)->vptr = mmgr_malloc((*handle)->size);
            } else {
                /* we popped a buffer */
                return;
            }
        }
    }
    /* Note that handle will change to an internally maintained one */
    ia_css_rmgr_refcount_retain_vbuf(handle);
}
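/* Usage sketch for the vbuf pool above (illustrative). The acquire side is
 * grounded in ia_css_rmgr_acq_vbuf(); the release call ia_css_rmgr_rel_vbuf()
 * and the wrapper name are assumptions. A caller prepares a handle on the
 * stack with only the needed size filled in, acquires a backing buffer from
 * the pool, uses handle->vptr, and releases the handle so the buffer can be
 * recycled.
 */
static void example_acq_rel_vbuf(struct ia_css_rmgr_vbuf_pool *pool, uint32_t size)
{
	struct ia_css_rmgr_vbuf_handle local = { 0 };
	struct ia_css_rmgr_vbuf_handle *handle = &local;

	local.size = size;	/* vptr stays 0x0 until a buffer is attached */

	ia_css_rmgr_acq_vbuf(pool, &handle);
	/* ... DMA to/from handle->vptr ... */
	ia_css_rmgr_rel_vbuf(pool, &handle);
}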
hrt_vaddress
sh_css_load_blob(const unsigned char *blob, unsigned size)
{
	hrt_vaddress target_addr = mmgr_malloc(size);
	/* this will allocate memory aligned to a DDR word boundary which
	   is required for the CSS DMA to read the instructions. */

	assert(blob != NULL);
	if (target_addr)
		mmgr_store(target_addr, blob, size);
	return target_addr;
}
hrt_vaddress sh_css_sp_load_program(
	const struct sh_css_fw_info *fw,
	const char *sp_prog,
	hrt_vaddress code_addr)
{
	if (code_addr == mmgr_NULL) {
		/* store code (text section) to DDR */
		code_addr = mmgr_malloc(fw->blob.text_size);
		if (code_addr == mmgr_NULL)
			return code_addr;
		mmgr_store(code_addr, fw->blob.text, fw->blob.text_size);
	}

	/* Set the correct start address for the SP program */
	sh_css_sp_activate_program(fw, code_addr, sp_prog);

	return code_addr;
}
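/* Usage sketch for sh_css_sp_load_program() (illustrative; the caching
 * variable and the wrapper name are made up). Because the function only
 * allocates and stores the text section when it is handed mmgr_NULL, a caller
 * can cache the returned DDR address and pass it back in, so repeated
 * activations of the same SP program reuse one copy of the code.
 */
static hrt_vaddress example_cached_code = mmgr_NULL;

static bool example_activate_sp(const struct sh_css_fw_info *fw, const char *prog)
{
	example_cached_code = sh_css_sp_load_program(fw, prog, example_cached_code);
	return example_cached_code != mmgr_NULL;
}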
hrt_vaddress
sh_css_load_blob(const unsigned char *blob, unsigned size)
{
	hrt_vaddress target_addr = mmgr_malloc(size);
	/* this will allocate memory aligned to a DDR word boundary which
	   is required for the CSS DMA to read the instructions. */

	assert(blob != NULL);
	if (target_addr) {
		mmgr_store(target_addr, blob, size);
#ifdef HRT_CSIM
		{
			unsigned padded_size = CEIL_MUL(size, HIVE_ISP_DDR_WORD_BYTES);
			mmgr_clear(target_addr + size, padded_size - size);
		}
#endif
	}
	return target_addr;
}
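/* For reference, CEIL_MUL() rounds the blob size up to the next multiple of
 * the DDR word size so the padding can be cleared for the simulator. A typical
 * definition (an assumption; the exact macros live in the support headers) is:
 *
 *   #define CEIL_DIV(a, b)  (((b) != 0) ? ((a) + (b) - 1) / (b) : 0)
 *   #define CEIL_MUL(a, b)  (CEIL_DIV(a, b) * (b))
 *
 * e.g. with a 32-byte HIVE_ISP_DDR_WORD_BYTES, a 100-byte blob yields
 * padded_size == 128 and mmgr_clear() zeroes the trailing 28 bytes.
 */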
enum ia_css_err
ia_css_isp_param_allocate_isp_parameters(
	struct ia_css_isp_param_host_segments *mem_params,
	struct ia_css_isp_param_css_segments *css_params,
	const struct ia_css_isp_param_isp_segments *mem_initializers)
{
	enum ia_css_err err = IA_CSS_SUCCESS;
	unsigned mem, pclass;

	pclass = IA_CSS_PARAM_CLASS_PARAM;
	for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
		for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
			uint32_t size = 0;
			if (mem_initializers)
				size = mem_initializers->params[pclass][mem].size;
			mem_params->params[pclass][mem].size = size;
			mem_params->params[pclass][mem].address = NULL;
			css_params->params[pclass][mem].size = size;
			css_params->params[pclass][mem].address = 0x0;
			if (size) {
				mem_params->params[pclass][mem].address = sh_css_calloc(1, size);
				if (!mem_params->params[pclass][mem].address) {
					err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
					goto cleanup;
				}
				if (pclass != IA_CSS_PARAM_CLASS_PARAM) {
					css_params->params[pclass][mem].address = mmgr_malloc(size);
					if (!css_params->params[pclass][mem].address) {
						err = IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
						goto cleanup;
					}
				}
			}
		}
	}
	return err;
cleanup:
	ia_css_isp_param_destroy_isp_parameters(mem_params, css_params);
	return err;
}
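/* Sketch of the cleanup routine referenced by the goto above. Only its name
 * appears in the excerpt, so this body is an assumption that mirrors the
 * allocation loop (sh_css_free() is assumed to pair with sh_css_calloc()):
 * walk the same [class][memory] grid and free whatever was allocated, so a
 * partial failure leaves no host or DDR memory behind.
 */
void
ia_css_isp_param_destroy_isp_parameters(
	struct ia_css_isp_param_host_segments *mem_params,
	struct ia_css_isp_param_css_segments *css_params)
{
	unsigned mem, pclass;

	for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
		for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
			if (mem_params->params[pclass][mem].address)
				sh_css_free(mem_params->params[pclass][mem].address);
			if (css_params->params[pclass][mem].address)
				mmgr_free(css_params->params[pclass][mem].address);
			mem_params->params[pclass][mem].address = NULL;
			css_params->params[pclass][mem].address = 0x0;
		}
	}
}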
/* Load firmware */
enum ia_css_err ia_css_spctrl_load_fw(sp_ID_t sp_id,
				ia_css_spctrl_cfg *spctrl_cfg)
{
	hrt_vaddress code_addr = mmgr_NULL;
	struct ia_css_sp_init_dmem_cfg *init_dmem_cfg;

	if ((sp_id >= N_SP_ID) || (spctrl_cfg == 0))
		return IA_CSS_ERR_INVALID_ARGUMENTS;

	spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;

#if defined(C_RUN) || defined(HRT_UNSCHED)
	(void)init_dmem_cfg;
	code_addr = mmgr_malloc(1);
	if (code_addr == mmgr_NULL)
		return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
#else
	init_dmem_cfg = &spctrl_cofig_info[sp_id].dmem_config;
	init_dmem_cfg->dmem_data_addr = spctrl_cfg->dmem_data_addr;
	init_dmem_cfg->dmem_bss_addr  = spctrl_cfg->dmem_bss_addr;
	init_dmem_cfg->data_size      = spctrl_cfg->data_size;
	init_dmem_cfg->bss_size       = spctrl_cfg->bss_size;
	init_dmem_cfg->sp_id          = sp_id;

	spctrl_cofig_info[sp_id].spctrl_config_dmem_addr = spctrl_cfg->spctrl_config_dmem_addr;
	spctrl_cofig_info[sp_id].spctrl_state_dmem_addr = spctrl_cfg->spctrl_state_dmem_addr;

	/* Store code (text + icache) and data to DDR.
	 *
	 * Data used to be stored separately because of access-alignment
	 * constraints; the right place to fix that is the FW generation,
	 * not a split store.
	 */
	code_addr = mmgr_malloc(spctrl_cfg->code_size);
	if (code_addr == mmgr_NULL)
		return IA_CSS_ERR_CANNOT_ALLOCATE_MEMORY;
	mmgr_store(code_addr, spctrl_cfg->code, spctrl_cfg->code_size);

	if (sizeof(hrt_vaddress) > sizeof(hrt_data)) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
				    "size of hrt_vaddress can not be greater than hrt_data\n");
		mmgr_free(code_addr);
		code_addr = mmgr_NULL;
		return IA_CSS_ERR_INTERNAL_ERROR;
	}

	init_dmem_cfg->ddr_data_addr  = code_addr + spctrl_cfg->ddr_data_offset;
	if ((init_dmem_cfg->ddr_data_addr % HIVE_ISP_DDR_WORD_BYTES) != 0) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
				    "DDR address pointer is not properly aligned for DMA transfer\n");
		mmgr_free(code_addr);
		code_addr = mmgr_NULL;
		return IA_CSS_ERR_INTERNAL_ERROR;
	}
#endif
	spctrl_cofig_info[sp_id].sp_entry = spctrl_cfg->sp_entry;
	spctrl_cofig_info[sp_id].code_addr = code_addr;
	spctrl_cofig_info[sp_id].program_name = spctrl_cfg->program_name;

#ifdef HRT_CSIM
	/* The secondary SP is named SP2 in the SDK, but the HSS and the
	   secondary SP firmware refer to it as SP1. */
	if (sp_id == SP0_ID) {
		hrt_cell_set_icache_base_address(SP, spctrl_cofig_info[sp_id].code_addr);
		hrt_cell_invalidate_icache(SP);
		hrt_cell_load_program(SP, spctrl_cofig_info[sp_id].program_name);
	}
#if defined(HAS_SEC_SP)
	else {
		hrt_cell_set_icache_base_address(SP2, spctrl_cofig_info[sp_id].code_addr);
		hrt_cell_invalidate_icache(SP2);
		hrt_cell_load_program(SP2, spctrl_cofig_info[sp_id].program_name);
	}
#endif /* HAS_SEC_SP */
#else
	/* now we program the base address into the icache and
	 * invalidate the cache.
	 */
	sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG, (hrt_data)spctrl_cofig_info[sp_id].code_addr);
	sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT);
#endif
	spctrl_loaded[sp_id] = true;
	return IA_CSS_SUCCESS;
}
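/* Unload sketch for the loader above. ia_css_spctrl_unload_fw() is not part
 * of the excerpt, so this body is an assumption: it releases the DDR code
 * buffer that ia_css_spctrl_load_fw() obtained from mmgr_malloc() and clears
 * the loaded flag again.
 */
enum ia_css_err ia_css_spctrl_unload_fw(sp_ID_t sp_id)
{
	if ((sp_id >= N_SP_ID) || !spctrl_loaded[sp_id])
		return IA_CSS_ERR_INVALID_ARGUMENTS;

	if (spctrl_cofig_info[sp_id].code_addr != mmgr_NULL)
		mmgr_free(spctrl_cofig_info[sp_id].code_addr);
	spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;

	spctrl_loaded[sp_id] = false;
	return IA_CSS_SUCCESS;
}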