/* * ======== bridge_dev_create ======== * Creates a driver object. Puts DSP in self loop. */ static int bridge_dev_create(struct bridge_dev_context **dev_cntxt, struct dev_object *hdev_obj, struct cfg_hostres *config_param) { int status = 0; struct bridge_dev_context *dev_context = NULL; s32 entry_ndx; struct cfg_hostres *resources = config_param; struct drv_data *drv_datap = dev_get_drvdata(bridge); /* Allocate and initialize a data structure to contain the bridge driver * state, which becomes the context for later calls into this driver */ dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL); if (!dev_context) { status = -ENOMEM; goto func_end; } dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE; dev_context->dw_self_loop = (u32) NULL; dev_context->dsp_per_clks = 0; dev_context->dw_internal_size = OMAP_DSP_SIZE; /* Clear dev context MMU table entries. * These get set on bridge_io_on_loaded() call after program loaded. */ for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) { dev_context->atlb_entry[entry_ndx].ul_gpp_pa = dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0; } dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *) (config_param-> dw_mem_base [3]), config_param-> dw_mem_length [3]); if (!dev_context->dw_dsp_base_addr) status = -EPERM; if (!status) { dev_context->tc_word_swap_on = drv_datap->tc_wordswapon; dev_context->hdev_obj = hdev_obj; /* Store current board state. */ dev_context->dw_brd_state = BRD_UNKNOWN; dev_context->resources = resources; dsp_clk_enable(DSP_CLK_IVA2); bridge_brd_stop(dev_context); /* Return ptr to our device state to the DSP API for storage */ *dev_cntxt = dev_context; } else { kfree(dev_context); } func_end: return status; }
/*
 *  ======== write_dsp_data ========
 *  purpose:
 *      Copies buffers to the DSP internal/external memory.
 *
 *  dsp_addr is translated into an offset from the DSP start address and
 *  dispatched to one of three memory regions (MEM1/MEM2/MEM3).  When
 *  ul_num_bytes is zero, the computed kernel linear address is written
 *  back through host_buff instead of copying data.
 *
 *  Returns 0 on success, -EPERM when resources are missing, the address
 *  falls outside all regions, or the region cannot be mapped.
 */
int write_dsp_data(struct bridge_dev_context *dev_context,
		   u8 *host_buff, u32 dsp_addr,
		   u32 ul_num_bytes, u32 mem_type)
{
	u32 offset;
	u32 dw_base_addr = dev_context->dsp_base_addr;
	struct cfg_hostres *resources = dev_context->resources;
	int status = 0;
	u32 base1, base2, base3;

	/* Region extents, expressed as offsets from OMAP_DSP_MEM1_BASE. */
	base1 = OMAP_DSP_MEM1_SIZE;
	base2 = OMAP_DSP_MEM2_BASE - OMAP_DSP_MEM1_BASE;
	base3 = OMAP_DSP_MEM3_BASE - OMAP_DSP_MEM1_BASE;

	if (!resources)
		return -EPERM;

	offset = dsp_addr - dev_context->dsp_start_add;
	if (offset < base1) {
		dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[2],
						  resources->mem_length[2]);
	} else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) {
		/* NOTE(review): offset == base1 is rejected by both this
		 * branch and the one above; confirm that exact boundary
		 * address is really invalid. */
		dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[3],
						  resources->mem_length[3]);
		offset = offset - base2;
	} else if (offset >= base2 + OMAP_DSP_MEM2_SIZE &&
		   offset < base3 + OMAP_DSP_MEM3_SIZE) {
		dw_base_addr = MEM_LINEAR_ADDRESS(resources->mem_base[4],
						  resources->mem_length[4]);
		offset = offset - base3;
	} else {
		return -EPERM;
	}

	/* Fix: fail instead of dereferencing a failed mapping; this
	 * mirrors the zero-check bridge_dev_create() performs on its
	 * MEM_LINEAR_ADDRESS() result. */
	if (!dw_base_addr)
		return -EPERM;

	if (ul_num_bytes)
		memcpy((u8 *) (dw_base_addr + offset), host_buff,
		       ul_num_bytes);
	else
		*((u32 *) host_buff) = dw_base_addr + offset;

	return status;
}
/*
 *  ======== write_dsp_data ========
 *  purpose:
 *      Copies buffers to the DSP internal/external memory.
 *
 *  Legacy (WMD-naming) variant: translates dwDSPAddr into an offset from
 *  the DSP start address and dispatches to one of three memory regions.
 *  When ul_num_bytes is zero, the computed linear address is written
 *  back through pbHostBuf instead of copying data.
 *
 *  Returns 0 on success, -EPERM when resources are missing, the address
 *  falls outside all regions, or the region cannot be mapped.
 */
int write_dsp_data(struct wmd_dev_context *hDevContext, IN u8 *pbHostBuf,
		   u32 dwDSPAddr, u32 ul_num_bytes, u32 ulMemType)
{
	u32 offset;
	u32 dw_base_addr = hDevContext->dw_dsp_base_addr;
	struct cfg_hostres *resources = hDevContext->resources;
	int status = 0;
	u32 base1, base2, base3;

	/* Region extents, expressed as offsets from OMAP_DSP_MEM1_BASE. */
	base1 = OMAP_DSP_MEM1_SIZE;
	base2 = OMAP_DSP_MEM2_BASE - OMAP_DSP_MEM1_BASE;
	base3 = OMAP_DSP_MEM3_BASE - OMAP_DSP_MEM1_BASE;

	if (!resources)
		return -EPERM;

	offset = dwDSPAddr - hDevContext->dw_dsp_start_add;
	if (offset < base1) {
		dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[2],
						  resources->dw_mem_length[2]);
	} else if (offset > base1 && offset < base2 + OMAP_DSP_MEM2_SIZE) {
		/* NOTE(review): offset == base1 is rejected by both this
		 * branch and the one above; confirm that exact boundary
		 * address is really invalid. */
		dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[3],
						  resources->dw_mem_length[3]);
		offset = offset - base2;
	} else if (offset >= base2 + OMAP_DSP_MEM2_SIZE &&
		   offset < base3 + OMAP_DSP_MEM3_SIZE) {
		dw_base_addr = MEM_LINEAR_ADDRESS(resources->dw_mem_base[4],
						  resources->dw_mem_length[4]);
		offset = offset - base3;
	} else {
		return -EPERM;
	}

	/* Fix: fail instead of dereferencing a failed mapping (the
	 * mapping result is zero-checked elsewhere in this driver). */
	if (!dw_base_addr)
		return -EPERM;

	if (ul_num_bytes)
		memcpy((u8 *) (dw_base_addr + offset), pbHostBuf,
		       ul_num_bytes);
	else
		*((u32 *) pbHostBuf) = dw_base_addr + offset;

	return status;
}
/*
 *  ======== write_ext_dsp_data ========
 *  purpose:
 *      Copies buffers to the external memory.
 *
 *  Looks up the trace-section and external-memory symbols (cached via the
 *  file-scope symbols_reloaded flag and the ul_trace_sec_*, ul_shm_base_virt,
 *  ul_ext_base/ul_ext_end, shm0_end, ul_dyn_ext_base globals), maps the
 *  destination window with MEM_LINEAR_ADDRESS(), performs the copy, then
 *  unmaps again for dynamic/trace loads so the next load forces a remap.
 *  When ul_num_bytes is zero, the computed linear address is written back
 *  through host_buff instead of copying.
 *
 *  NOTE(review): temp_byte1/temp_byte2 and remain_byte[] are initialized
 *  but never otherwise used here; ul_shm_offset_virt is computed but not
 *  consumed — presumably leftovers, confirm before removing.
 */
int write_ext_dsp_data(struct bridge_dev_context *dev_context,
		       u8 *host_buff, u32 dsp_addr,
		       u32 ul_num_bytes, u32 mem_type,
		       bool dynamic_load)
{
	u32 dw_base_addr = dev_context->dsp_ext_base_addr;
	u32 dw_offset = 0;
	u8 temp_byte1, temp_byte2;
	u8 remain_byte[4];
	s32 i;
	int ret = 0;
	u32 dw_ext_prog_virt_mem;
	u32 ul_tlb_base_virt = 0;
	u32 ul_shm_offset_virt = 0;
	struct cfg_hostres *host_res = dev_context->resources;
	bool trace_load = false;

	temp_byte1 = 0x0;
	temp_byte2 = 0x0;

	if (symbols_reloaded) {
		/* Check if it is a load to Trace section */
		ret = dev_get_symbol(dev_context->dev_obj,
				     DSP_TRACESEC_BEG, &ul_trace_sec_beg);
		if (!ret)
			ret = dev_get_symbol(dev_context->dev_obj,
					     DSP_TRACESEC_END,
					     &ul_trace_sec_end);
	}
	if (!ret) {
		if ((dsp_addr <= ul_trace_sec_end) &&
		    (dsp_addr >= ul_trace_sec_beg))
			trace_load = true;
	}

	/* If dynamic, force remap/unmap */
	if ((dynamic_load || trace_load) && dw_base_addr) {
		dw_base_addr = 0;
		MEM_UNMAP_LINEAR_ADDRESS((void *)
					 dev_context->dsp_ext_base_addr);
		dev_context->dsp_ext_base_addr = 0x0;
	}
	if (!dw_base_addr) {
		/* Get SHM_BEG EXT_BEG and EXT_END. */
		if (symbols_reloaded)
			ret = dev_get_symbol(dev_context->dev_obj,
					     SHMBASENAME, &ul_shm_base_virt);
		if (dynamic_load) {
			if (!ret) {
				if (symbols_reloaded)
					ret = dev_get_symbol
					    (dev_context->dev_obj, DYNEXTBASE,
					     &ul_ext_base);
			}
			if (!ret) {
				/* DR  OMAPS00013235 : DLModules array may be
				 * in EXTMEM. It is expected that DYNEXTMEM and
				 * EXTMEM are contiguous, so checking for the
				 * upper bound at EXTEND should be Ok. */
				if (symbols_reloaded)
					ret = dev_get_symbol
					    (dev_context->dev_obj, EXTEND,
					     &ul_ext_end);
			}
		} else {
			if (symbols_reloaded) {
				if (!ret)
					ret = dev_get_symbol
					    (dev_context->dev_obj, EXTBASE,
					     &ul_ext_base);
				if (!ret)
					ret = dev_get_symbol
					    (dev_context->dev_obj, EXTEND,
					     &ul_ext_end);
			}
		}
		/* Trace buffer sits right after the shm SEG0, so set the
		 * base address to SHMBASE */
		if (trace_load)
			ul_ext_base = ul_shm_base_virt;

		if (ul_ext_end < ul_ext_base)
			ret = -EPERM;

		if (!ret) {
			ul_tlb_base_virt =
			    dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
			if (symbols_reloaded) {
				ret = dev_get_symbol
				    (dev_context->dev_obj, DSP_TRACESEC_END,
				     &shm0_end);
				if (!ret) {
					ret = dev_get_symbol
					    (dev_context->dev_obj, DYNEXTBASE,
					     &ul_dyn_ext_base);
				}
			}
			ul_shm_offset_virt =
			    ul_shm_base_virt - ul_tlb_base_virt;
			if (trace_load) {
				dw_ext_prog_virt_mem =
				    dev_context->atlb_entry[0].gpp_va;
			} else {
				dw_ext_prog_virt_mem = host_res->mem_base[1];
				dw_ext_prog_virt_mem +=
				    (ul_ext_base - ul_dyn_ext_base);
			}

			dev_context->dsp_ext_base_addr =
			    (u32) MEM_LINEAR_ADDRESS((void *)
						     dw_ext_prog_virt_mem,
						     ul_ext_end - ul_ext_base);
			dw_base_addr += dev_context->dsp_ext_base_addr;
			/* This dsp_ext_base_addr will get cleared only when
			 * the board is stopped. */
			if (!dev_context->dsp_ext_base_addr)
				ret = -EPERM;
		}
	}
	if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
		ret = -EPERM;

	if (!ret) {
		for (i = 0; i < 4; i++)
			remain_byte[i] = 0x0;
		dw_offset = dsp_addr - ul_ext_base;
		/* Also make sure the dsp_addr is < ul_ext_end; the second
		 * test rejects u32 underflow of dw_offset. */
		if (dsp_addr > ul_ext_end || dw_offset > dsp_addr)
			ret = -EPERM;
	}
	if (!ret) {
		if (ul_num_bytes)
			memcpy((u8 *) dw_base_addr + dw_offset, host_buff,
			       ul_num_bytes);
		else
			*((u32 *) host_buff) = dw_base_addr + dw_offset;
	}
	/* Unmap here to force remap for other Ext loads */
	if ((dynamic_load || trace_load) && dev_context->dsp_ext_base_addr) {
		MEM_UNMAP_LINEAR_ADDRESS((void *)
					 dev_context->dsp_ext_base_addr);
		dev_context->dsp_ext_base_addr = 0x0;
	}
	symbols_reloaded = false;
	return ret;
}
/*
 *  ======== bridge_io_on_loaded ========
 *  Called after a DSP program image has been loaded: reads the shared
 *  memory / message buffer symbols from the COD manager, programs the
 *  DSP MMU (locked TLB entries first, then PTE mappings, then the L4
 *  peripheral pages), and carves the mapped shared-memory window into
 *  the IO manager's input/output and message areas.
 *
 *  Returns 0 on success or a negative errno on any failure.
 */
int bridge_io_on_loaded(struct io_mgr *hio_mgr)
{
	struct cod_manager *cod_man;
	struct chnl_mgr *hchnl_mgr;
	struct msg_mgr *hmsg_mgr;
	u32 ul_shm_base;
	u32 ul_shm_base_offset;
	u32 ul_shm_limit;
	u32 ul_shm_length = -1;
	u32 ul_mem_length = -1;
	u32 ul_msg_base;
	u32 ul_msg_limit;
	u32 ul_msg_length = -1;
	u32 ul_ext_end;
	u32 ul_gpp_pa = 0;
	u32 ul_gpp_va = 0;
	u32 ul_dsp_va = 0;
	u32 ul_seg_size = 0;
	u32 ul_pad_size = 0;
	u32 i;
	int status = 0;
	u8 num_procs = 0;
	s32 ndx = 0;
	/* Staging area for the locked MMU TLB entries handed to the
	 * bridge via BRDIOCTL_SETMMUCONFIG. */
	struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	u32 map_attrs;
	u32 shm0_end;
	u32 ul_dyn_ext_base;
	u32 ul_seg1_size = 0;
	u32 pa_curr = 0;
	u32 va_curr = 0;
	u32 gpp_va_curr = 0;
	u32 num_bytes = 0;
	u32 all_bits = 0;
	/* Candidate page sizes, largest first, so each mapping uses the
	 * biggest page the current alignment and remaining size allow. */
	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
	};

	status = dev_get_bridge_context(hio_mgr->dev_obj, &pbridge_context);
	if (!pbridge_context) {
		status = -EFAULT;
		goto func_end;
	}

	host_res = pbridge_context->resources;
	if (!host_res) {
		status = -EFAULT;
		goto func_end;
	}
	status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
	if (!cod_man) {
		status = -EFAULT;
		goto func_end;
	}
	hchnl_mgr = hio_mgr->chnl_mgr;
	dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr);
	hmsg_mgr = hio_mgr->msg_mgr;
	if (!hchnl_mgr || !hmsg_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	/* Drop any stale shared-memory pointer from a previous load. */
	if (hio_mgr->shared_mem)
		hio_mgr->shared_mem = NULL;

	/* Read the channel shared-buffer window from the loaded image. */
	status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
				   &ul_shm_base);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}
	status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
				   &ul_shm_limit);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}
	if (ul_shm_limit <= ul_shm_base) {
		status = -EINVAL;
		goto func_end;
	}
	/* Limit is inclusive; lengths are in DSP words, hence word_size. */
	ul_shm_length = (ul_shm_limit - ul_shm_base + 1) * hio_mgr->word_size;
	dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
		__func__, (ul_shm_length - sizeof(struct shm)));
	/* Read the message shared-buffer window. */
	status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
				   &ul_msg_base);
	if (!status) {
		status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
					   &ul_msg_limit);
		if (!status) {
			if (ul_msg_limit <= ul_msg_base) {
				status = -EINVAL;
			} else {
				ul_msg_length =
				    (ul_msg_limit - ul_msg_base +
				     1) * hio_mgr->word_size;
				ul_mem_length = ul_shm_length + ul_msg_length;
			}
		} else {
			status = -EFAULT;
		}
	} else {
		status = -EFAULT;
	}
	if (!status) {
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
		status =
		    cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
#else
		status =
		    cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM, &shm0_end);
#endif
		if (status)
			status = -EFAULT;
	}
	if (!status) {
		status =
		    cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
		if (status)
			status = -EFAULT;
	}
	if (!status) {
		status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
		if (status)
			status = -EFAULT;
	}
	if (!status) {
		(void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
					      &hio_mgr->ext_proc_info,
					      sizeof(struct
						     mgr_processorextinfo),
					      &num_procs);
		ndx = 0;
		ul_gpp_pa = host_res->mem_phys[1];
		ul_gpp_va = host_res->mem_base[1];
		ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt;
		/* Seg0: shm region; Seg1: dynamic-extension region. */
		ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
		ul_seg1_size =
		    (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
		/* Round Seg1 to 4K and Seg0 to 64K page boundaries. */
		ul_seg1_size = (ul_seg1_size + 0xFFF) & (~0xFFFUL);
		ul_seg_size = (ul_seg_size + 0xFFFF) & (~0xFFFFUL);
		/* Pad so Seg0 starts page-aligned after Seg1. */
		ul_pad_size = UL_PAGE_ALIGN_SIZE -
		    ((ul_gpp_pa + ul_seg1_size) % UL_PAGE_ALIGN_SIZE);
		if (ul_pad_size == UL_PAGE_ALIGN_SIZE)
			ul_pad_size = 0x0;

		dev_dbg(bridge, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
			"shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
			"ul_seg_size %x ul_seg1_size %x \n", __func__,
			ul_gpp_pa, ul_gpp_va, ul_dsp_va, shm0_end,
			ul_dyn_ext_base, ul_ext_end, ul_seg_size,
			ul_seg1_size);

		if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
		    host_res->mem_length[1]) {
			pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
			       __func__, host_res->mem_length[1],
			       ul_seg_size + ul_seg1_size + ul_pad_size);
			status = -ENOMEM;
		}
	}
	if (status)
		goto func_end;

	/* Map Seg1 (dynamic extension memory) page by page. */
	pa_curr = ul_gpp_pa;
	va_curr = ul_dyn_ext_base * hio_mgr->word_size;
	gpp_va_curr = ul_gpp_va;
	num_bytes = ul_seg1_size;

	map_attrs = 0x00000000;
	map_attrs = DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPPHYSICALADDR;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPDONOTLOCK;

	while (num_bytes) {
		all_bits = pa_curr | va_curr;
		dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
			"num_bytes %x\n", all_bits, pa_curr, va_curr,
			num_bytes);
		/* Pick the largest page size that both addresses are
		 * aligned to and that still fits the remaining bytes. */
		for (i = 0; i < 4; i++) {
			if ((num_bytes >= page_size[i]) &&
			    ((all_bits & (page_size[i] - 1)) == 0)) {
				status =
				    hio_mgr->intf_fxns->
				    brd_mem_map(hio_mgr->bridge_context,
						pa_curr, va_curr,
						page_size[i], map_attrs,
						NULL);
				if (status)
					goto func_end;
				pa_curr += page_size[i];
				va_curr += page_size[i];
				gpp_va_curr += page_size[i];
				num_bytes -= page_size[i];
				break;
			}
		}
	}
	pa_curr += ul_pad_size;
	va_curr += ul_pad_size;
	gpp_va_curr += ul_pad_size;

	/* Map Seg0 (shm); first MAX_LOCK_TLB_ENTRIES go into locked TLB
	 * entries, the rest are mapped as ordinary PTEs. */
	num_bytes = ul_seg_size;
	va_curr = ul_dsp_va * hio_mgr->word_size;
	while (num_bytes) {
		all_bits = pa_curr | va_curr;
		dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
			"va_curr %x, num_bytes %x\n", all_bits, pa_curr,
			va_curr, num_bytes);
		for (i = 0; i < 4; i++) {
			if (!(num_bytes >= page_size[i]) ||
			    !((all_bits & (page_size[i] - 1)) == 0))
				continue;
			if (ndx < MAX_LOCK_TLB_ENTRIES) {
				/*
				 * This is the physical address written to
				 * DSP MMU.
				 */
				ae_proc[ndx].gpp_pa = pa_curr;
				ae_proc[ndx].gpp_va = gpp_va_curr;
				ae_proc[ndx].dsp_va =
				    va_curr / hio_mgr->word_size;
				ae_proc[ndx].size = page_size[i];
				ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
				ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
				ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
				dev_dbg(bridge, "shm MMU TLB entry PA %x"
					" VA %x DSP_VA %x Size %x\n",
					ae_proc[ndx].gpp_pa,
					ae_proc[ndx].gpp_va,
					ae_proc[ndx].dsp_va *
					hio_mgr->word_size, page_size[i]);
				ndx++;
			} else {
				status =
				    hio_mgr->intf_fxns->
				    brd_mem_map(hio_mgr->bridge_context,
						pa_curr, va_curr,
						page_size[i], map_attrs,
						NULL);
				/* NOTE(review): ae_proc[ndx] was never
				 * filled in on this path — this dev_dbg
				 * reads uninitialized stack data; it
				 * should probably log pa_curr/va_curr. */
				dev_dbg(bridge, "shm MMU PTE entry PA %x"
					" VA %x DSP_VA %x Size %x\n",
					ae_proc[ndx].gpp_pa,
					ae_proc[ndx].gpp_va,
					ae_proc[ndx].dsp_va *
					hio_mgr->word_size, page_size[i]);
				if (status)
					goto func_end;
			}
			pa_curr += page_size[i];
			va_curr += page_size[i];
			gpp_va_curr += page_size[i];
			num_bytes -= page_size[i];
			break;
		}
	}

	/* Copy CDB-defined TLB entries 3..6 unless they overlap the shm
	 * region just mapped (within 1 MB below either boundary). */
	for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
		if (hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys == 0)
			continue;
		if ((hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys >
		     ul_gpp_pa - 0x100000
		     && hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys <=
		     ul_gpp_pa + ul_seg_size)
		    || (hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt >
			ul_dsp_va - 0x100000 / hio_mgr->word_size
			&& hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt <=
			ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
			dev_dbg(bridge,
				"CDB MMU entry %d conflicts with "
				"shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
				"GppPa %x, DspVa %x, Bytes %x.\n", i,
				hio_mgr->ext_proc_info.ty_tlb[i].gpp_phys,
				hio_mgr->ext_proc_info.ty_tlb[i].dsp_virt,
				ul_gpp_pa, ul_dsp_va, ul_seg_size);
			status = -EPERM;
		} else {
			if (ndx < MAX_LOCK_TLB_ENTRIES) {
				ae_proc[ndx].dsp_va =
				    hio_mgr->ext_proc_info.ty_tlb[i].
				    dsp_virt;
				ae_proc[ndx].gpp_pa =
				    hio_mgr->ext_proc_info.ty_tlb[i].
				    gpp_phys;
				ae_proc[ndx].gpp_va = 0;
				/* 1 MB page size for CDB entries. */
				ae_proc[ndx].size = 0x100000;
				dev_dbg(bridge, "shm MMU entry PA %x "
					"DSP_VA 0x%x\n", ae_proc[ndx].gpp_pa,
					ae_proc[ndx].dsp_va);
				ndx++;
			} else {
				status = hio_mgr->intf_fxns->brd_mem_map
				    (hio_mgr->bridge_context,
				     hio_mgr->ext_proc_info.ty_tlb[i].
				     gpp_phys,
				     hio_mgr->ext_proc_info.ty_tlb[i].
				     dsp_virt, 0x100000, map_attrs, NULL);
			}
		}
		if (status)
			goto func_end;
	}

	map_attrs = 0x00000000;
	map_attrs = DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPPHYSICALADDR;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPDONOTLOCK;

	/* Map the L4 peripheral pages (table is phys_addr-terminated). */
	i = 0;
	while (l4_peripheral_table[i].phys_addr) {
		status = hio_mgr->intf_fxns->brd_mem_map
		    (hio_mgr->bridge_context,
		     l4_peripheral_table[i].phys_addr,
		     l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
		     map_attrs, NULL);
		if (status)
			goto func_end;
		i++;
	}

	/* Zero out the unused TLB slots. */
	for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
		ae_proc[i].dsp_va = 0;
		ae_proc[i].gpp_pa = 0;
		ae_proc[i].gpp_va = 0;
		ae_proc[i].size = 0;
	}
	/* NOTE(review): a GPP *virtual* address is stored in a field
	 * named gpp_phys here — looks intentional (it is read back below
	 * and passed to MEM_LINEAR_ADDRESS), but confirm. */
	hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys =
	    (ul_gpp_va + ul_seg1_size + ul_pad_size);
	if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys || num_procs != 1) {
		status = -EFAULT;
		goto func_end;
	} else {
		if (ae_proc[0].dsp_va > ul_shm_base) {
			status = -EPERM;
			goto func_end;
		}
		ul_shm_base_offset =
		    (ul_shm_base - ae_proc[0].dsp_va) * hio_mgr->word_size;
		/* Program the locked TLB entries into the DSP MMU. */
		status =
		    hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context,
						  BRDIOCTL_SETMMUCONFIG,
						  ae_proc);
		if (status)
			goto func_end;
		ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
		ul_shm_base += ul_shm_base_offset;
		ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
						       ul_mem_length);
		if (ul_shm_base == 0) {
			status = -EFAULT;
			goto func_end;
		}
		/* NOTE(review): this status is only returned at func_end;
		 * the pointer carving below runs even if
		 * register_shm_segs() failed — confirm intended. */
		status =
		    register_shm_segs(hio_mgr, cod_man, ae_proc[0].gpp_pa);
	}

	/* Carve the mapped window: shm header, then input/output halves,
	 * then the two message control blocks and their buffers. */
	hio_mgr->shared_mem = (struct shm *)ul_shm_base;
	hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
	hio_mgr->output = hio_mgr->input + (ul_shm_length -
					    sizeof(struct shm)) / 2;
	hio_mgr->sm_buf_size = hio_mgr->output - hio_mgr->input;
	hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *)
						      hio_mgr->shared_mem +
						      ul_shm_length);
	hio_mgr->msg_input =
	    (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
	hio_mgr->msg_output_ctrl =
	    (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
				ul_msg_length / 2);
	hio_mgr->msg_output =
	    (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
	hmsg_mgr->max_msgs =
	    ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input) /
	    sizeof(struct msg_dspmsg);
	dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
		"output %p, msg_input_ctrl %p, msg_input %p, "
		"msg_output_ctrl %p, msg_output %p\n",
		(u8 *) hio_mgr->shared_mem, hio_mgr->input,
		hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl,
		hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl,
		hio_mgr->msg_output);
	dev_dbg(bridge, "(proc) Mas msgs in shared memory: 0x%x\n",
		hmsg_mgr->max_msgs);
	memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));

#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
	/* Resolve and relocate the DSP trace buffer into GPP address
	 * space (begin/end/current pointers, plus a staging buffer). */
	status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
				   &hio_mgr->trace_buffer_begin);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}
	hio_mgr->gpp_read_pointer = hio_mgr->trace_buffer_begin =
	    (ul_gpp_va + ul_seg1_size + ul_pad_size) +
	    (hio_mgr->trace_buffer_begin - ul_dsp_va);
	status = cod_get_sym_value(cod_man, SYS_PUTCEND,
				   &hio_mgr->trace_buffer_end);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}
	hio_mgr->trace_buffer_end =
	    (ul_gpp_va + ul_seg1_size + ul_pad_size) +
	    (hio_mgr->trace_buffer_end - ul_dsp_va);
	status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
				   &hio_mgr->trace_buffer_current);
	if (status) {
		status = -EFAULT;
		goto func_end;
	}
	hio_mgr->trace_buffer_current =
	    (ul_gpp_va + ul_seg1_size + ul_pad_size) +
	    (hio_mgr->trace_buffer_current - ul_dsp_va);
	kfree(hio_mgr->msg);
	hio_mgr->msg = kmalloc(((hio_mgr->trace_buffer_end -
				 hio_mgr->trace_buffer_begin) *
				hio_mgr->word_size) + 2, GFP_KERNEL);
	if (!hio_mgr->msg)
		status = -ENOMEM;

	hio_mgr->dsp_va = ul_dsp_va;
	hio_mgr->gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
#endif
func_end:
	return status;
}
/*
 *  ======== write_ext_dsp_data ========
 *  Copies buffers to the DSP external memory (second copy of this
 *  routine in SOURCE; same contract as the documented one above it in
 *  the original tree).
 *
 *  Resolves trace/external-memory symbols into the file-scope globals
 *  (symbols_reloaded, ul_trace_sec_*, ul_shm_base_virt, ul_ext_base,
 *  ul_ext_end, shm0_end, ul_dyn_ext_base), maps the destination with
 *  MEM_LINEAR_ADDRESS(), copies, then unmaps again for dynamic/trace
 *  loads so the next load forces a remap.  When ul_num_bytes is zero,
 *  the computed linear address is written back through host_buff.
 *
 *  NOTE(review): temp_byte1/temp_byte2, remain_byte[] and
 *  ul_shm_offset_virt are set but never consumed — confirm before
 *  removing.
 */
int write_ext_dsp_data(struct bridge_dev_context *dev_context,
		       u8 *host_buff, u32 dsp_addr,
		       u32 ul_num_bytes, u32 mem_type,
		       bool dynamic_load)
{
	u32 dw_base_addr = dev_context->dsp_ext_base_addr;
	u32 dw_offset = 0;
	u8 temp_byte1, temp_byte2;
	u8 remain_byte[4];
	s32 i;
	int ret = 0;
	u32 dw_ext_prog_virt_mem;
	u32 ul_tlb_base_virt = 0;
	u32 ul_shm_offset_virt = 0;
	struct cfg_hostres *host_res = dev_context->resources;
	bool trace_load = false;

	temp_byte1 = 0x0;
	temp_byte2 = 0x0;

	/* Is this a load into the trace section? */
	if (symbols_reloaded) {
		ret = dev_get_symbol(dev_context->dev_obj,
				     DSP_TRACESEC_BEG, &ul_trace_sec_beg);
		if (!ret)
			ret = dev_get_symbol(dev_context->dev_obj,
					     DSP_TRACESEC_END,
					     &ul_trace_sec_end);
	}
	if (!ret) {
		if ((dsp_addr <= ul_trace_sec_end) &&
		    (dsp_addr >= ul_trace_sec_beg))
			trace_load = true;
	}

	/* Dynamic/trace loads force an unmap so the window is remapped
	 * fresh below. */
	if ((dynamic_load || trace_load) && dw_base_addr) {
		dw_base_addr = 0;
		MEM_UNMAP_LINEAR_ADDRESS((void *)
					 dev_context->dsp_ext_base_addr);
		dev_context->dsp_ext_base_addr = 0x0;
	}
	if (!dw_base_addr) {
		/* Look up SHM base plus EXT begin/end symbols. */
		if (symbols_reloaded)
			ret = dev_get_symbol(dev_context->dev_obj,
					     SHMBASENAME, &ul_shm_base_virt);
		if (dynamic_load) {
			if (!ret) {
				if (symbols_reloaded)
					ret = dev_get_symbol
					    (dev_context->dev_obj, DYNEXTBASE,
					     &ul_ext_base);
			}
			if (!ret) {
				if (symbols_reloaded)
					ret = dev_get_symbol
					    (dev_context->dev_obj, EXTEND,
					     &ul_ext_end);
			}
		} else {
			if (symbols_reloaded) {
				if (!ret)
					ret = dev_get_symbol
					    (dev_context->dev_obj, EXTBASE,
					     &ul_ext_base);
				if (!ret)
					ret = dev_get_symbol
					    (dev_context->dev_obj, EXTEND,
					     &ul_ext_end);
			}
		}
		/* Trace buffer sits right after shm SEG0: base is SHMBASE. */
		if (trace_load)
			ul_ext_base = ul_shm_base_virt;

		if (ul_ext_end < ul_ext_base)
			ret = -EPERM;

		if (!ret) {
			ul_tlb_base_virt =
			    dev_context->atlb_entry[0].dsp_va * DSPWORDSIZE;
			if (symbols_reloaded) {
				ret = dev_get_symbol
				    (dev_context->dev_obj, DSP_TRACESEC_END,
				     &shm0_end);
				if (!ret) {
					ret = dev_get_symbol
					    (dev_context->dev_obj, DYNEXTBASE,
					     &ul_dyn_ext_base);
				}
			}
			ul_shm_offset_virt =
			    ul_shm_base_virt - ul_tlb_base_virt;
			if (trace_load) {
				dw_ext_prog_virt_mem =
				    dev_context->atlb_entry[0].gpp_va;
			} else {
				dw_ext_prog_virt_mem = host_res->mem_base[1];
				dw_ext_prog_virt_mem +=
				    (ul_ext_base - ul_dyn_ext_base);
			}

			dev_context->dsp_ext_base_addr =
			    (u32) MEM_LINEAR_ADDRESS((void *)
						     dw_ext_prog_virt_mem,
						     ul_ext_end - ul_ext_base);
			dw_base_addr += dev_context->dsp_ext_base_addr;
			/* dsp_ext_base_addr is only cleared when the board
			 * is stopped. */
			if (!dev_context->dsp_ext_base_addr)
				ret = -EPERM;
		}
	}
	if (!dw_base_addr || !ul_ext_base || !ul_ext_end)
		ret = -EPERM;

	if (!ret) {
		for (i = 0; i < 4; i++)
			remain_byte[i] = 0x0;
		dw_offset = dsp_addr - ul_ext_base;
		/* Reject addresses past ul_ext_end; the second test
		 * rejects u32 underflow of dw_offset. */
		if (dsp_addr > ul_ext_end || dw_offset > dsp_addr)
			ret = -EPERM;
	}
	if (!ret) {
		if (ul_num_bytes)
			memcpy((u8 *) dw_base_addr + dw_offset, host_buff,
			       ul_num_bytes);
		else
			*((u32 *) host_buff) = dw_base_addr + dw_offset;
	}
	/* Unmap here to force a remap for subsequent external loads. */
	if ((dynamic_load || trace_load) && dev_context->dsp_ext_base_addr) {
		MEM_UNMAP_LINEAR_ADDRESS((void *)
					 dev_context->dsp_ext_base_addr);
		dev_context->dsp_ext_base_addr = 0x0;
	}
	symbols_reloaded = false;
	return ret;
}