/*
 * afe_rt_proxy_port_write() - hand a filled data buffer to the RT proxy
 * TX port.
 *
 * @buf_addr_p:     physical (LSW) address of the buffer already mapped
 *                  to the DSP.
 * @mem_map_handle: handle returned by the earlier memory-map command.
 * @bytes:          number of valid bytes available in the buffer.
 *
 * Builds an AFE_PORT_DATA_CMD_RT_PROXY_PORT_WRITE_V2 packet and sends it
 * over APR. Returns 0 on success, -ENODEV if the AFE service is not
 * registered, or -EINVAL if the send fails.
 */
int afe_rt_proxy_port_write(u32 buf_addr_p, u32 mem_map_handle, int bytes)
{
	struct afe_port_data_cmd_rt_proxy_port_write_v2 afecmd_wr;
	int rc;

	if (this_afe.apr == NULL) {
		pr_err("%s:register to AFE is not done\n", __func__);
		return -ENODEV;
	}
	pr_debug("%s: buf_addr_p = 0x%08x bytes = %d\n", __func__,
		 buf_addr_p, bytes);

	/* Standard APR header for a sequential command packet. */
	afecmd_wr.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	afecmd_wr.hdr.pkt_size = sizeof(afecmd_wr);
	afecmd_wr.hdr.src_port = 0;
	afecmd_wr.hdr.dest_port = 0;
	afecmd_wr.hdr.token = 0;
	afecmd_wr.hdr.opcode = AFE_PORT_DATA_CMD_RT_PROXY_PORT_WRITE_V2;

	/* Payload: fixed TX proxy port, 32-bit address (MSW always 0). */
	afecmd_wr.port_id = RT_PROXY_PORT_001_TX;
	afecmd_wr.buffer_address_lsw = (uint32_t)buf_addr_p;
	afecmd_wr.buffer_address_msw = 0x00;
	afecmd_wr.mem_map_handle = mem_map_handle;
	afecmd_wr.available_bytes = bytes;
	afecmd_wr.reserved = 0;

	rc = apr_send_pkt(this_afe.apr, (uint32_t *)&afecmd_wr);
	if (rc < 0) {
		pr_err("%s: AFE rtproxy write to port 0x%x failed %d\n",
		       __func__, afecmd_wr.port_id, rc);
		return -EINVAL;
	}
	return 0;
}
/*
 * afe_pseudo_port_stop_nowait() - issue AFE_PSEUDOPORT_CMD_STOP without
 * waiting for the DSP acknowledgement.
 *
 * @port_id: pseudo port to stop.
 *
 * Fixes vs. previous revision:
 *  - removed the dead store to stop.hdr.token (it was set to 0 and then
 *    unconditionally overwritten with the port index);
 *  - the index returned by q6audio_get_port_index() is now range-checked
 *    before being used as the command token.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
int afe_pseudo_port_stop_nowait(u16 port_id)
{
	int ret = 0;
	struct afe_pseudoport_stop_command stop;
	int index = 0;

	pr_debug("%s: port_id=%d\n", __func__, port_id);
	if (this_afe.apr == NULL) {
		pr_err("%s: AFE is already closed\n", __func__);
		return -EINVAL;
	}
	index = q6audio_get_port_index(port_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: AFE port index[%d] invalid!\n", __func__, index);
		return -EINVAL;
	}
	if (q6audio_validate_port(port_id) < 0)
		return -EINVAL;

	stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	stop.hdr.pkt_size = sizeof(stop);
	stop.hdr.src_port = 0;
	stop.hdr.dest_port = 0;
	/* token carries the port index so the callback can identify it */
	stop.hdr.token = index;
	stop.hdr.opcode = AFE_PSEUDOPORT_CMD_STOP;
	stop.port_id = port_id;
	stop.reserved = 0;

	/*
	 * NOTE(review): state is set but this "nowait" variant does not
	 * block on it; presumably the APR callback clears it — confirm.
	 */
	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop);
	if (ret < 0) {
		pr_err("%s: AFE close failed %d\n", __func__, ret);
		return -EINVAL;
	}
	return 0;
}
int afe_port_stop_nowait(int port_id) { struct afe_port_stop_command stop; int ret = 0; if (this_afe.apr == NULL) { pr_err("AFE is already closed\n"); ret = -EINVAL; goto fail_cmd; } pr_debug("%s: port_id=%d\n", __func__, port_id); port_id = afe_convert_virtual_to_portid(port_id); stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); stop.hdr.pkt_size = sizeof(stop); stop.hdr.src_port = 0; stop.hdr.dest_port = 0; stop.hdr.token = 0; stop.hdr.opcode = AFE_PORT_CMD_STOP; stop.port_id = port_id; stop.reserved = 0; ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop); if (ret == -ENETRESET) { pr_info("%s: Need to reset, calling APR deregister", __func__); return apr_deregister(this_afe.apr); } else if (IS_ERR_VALUE(ret)) { pr_err("%s: AFE close failed\n", __func__); ret = -EINVAL; } fail_cmd: return ret; }
/*
 * q6usm_add_hdr() - populate an APR header for an ultrasound (USM) command.
 *
 * @usc:      client whose session/handle identify source and routing.
 * @hdr:      header to fill in.
 * @pkt_size: payload size; the final pkt_size includes the APR header.
 * @cmd_flg:  true for commands expecting an ack; sets the token to the
 *            session and marks a command in flight via cmd_state.
 *
 * The client's cmd_lock serializes header construction.
 */
static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr,
			  uint32_t pkt_size, bool cmd_flg)
{
	pr_debug("%s: pkt size=%d; cmd_flg=%d\n", __func__, pkt_size, cmd_flg);
	pr_debug("**************\n");

	mutex_lock(&usc->cmd_lock);

	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
				APR_HDR_LEN(sizeof(struct apr_hdr)),\
				APR_PKT_VER);

	/* Source side: APPS domain, service id taken from the APR handle. */
	hdr->src_svc = ((struct apr_svc *)usc->apr)->id;
	hdr->src_domain = APR_DOMAIN_APPS;
	hdr->src_port = (usc->session << 8) | 0x0001;

	/* Destination side: USM service on the ADSP, same session port. */
	hdr->dest_svc = APR_SVC_USM;
	hdr->dest_domain = APR_DOMAIN_ADSP;
	hdr->dest_port = (usc->session << 8) | 0x0001;

	if (cmd_flg) {
		hdr->token = usc->session;
		atomic_set(&usc->cmd_state, 1);
	}
	hdr->pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, pkt_size);

	mutex_unlock(&usc->cmd_lock);
}
int avcs_core_disable_power_collapse(int disable) { struct adsp_power_collapse pc; int rc = 0; if (core_handle) { pc.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); pc.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, sizeof(uint32_t)); pc.hdr.src_port = 0; pc.hdr.dest_port = 0; pc.hdr.token = 0; pc.hdr.opcode = ADSP_CMD_SET_POWER_COLLAPSE_STATE; /* * When power_collapse set to 1 -- If the aDSP is in the power * collapsed state when this command is received, it is awakened * from this state. The aDSP does not power collapse again until * the client revokes this command * When power_collapse set to 0 -- This indicates to the aDSP * that the remote client does not need it to be out of power * collapse any longer. This may not always put the aDSP into * power collapse; the aDSP must honor an internal client's * power requirements as well. */ pc.power_collapse = disable; rc = apr_send_pkt(core_handle, (uint32_t *)&pc); if (rc < 0) { pr_debug("disable power collapse = %d failed\n", disable); return rc; } pr_debug("disable power collapse = %d\n", disable); } return 0; }
/*
 * afe_loopback_gain() - set the loopback gain on an AFE TX port.
 *
 * @port_id: AFE port; must be a TX port (odd numbered — RX ports are even).
 * @volume:  gain value programmed into the loopback-gain parameter.
 *
 * Sends AFE_PORT_CMD_SET_PARAM with AFE_PARAM_ID_LOOPBACK_GAIN and waits
 * (with timeout) for the DSP acknowledgement.
 *
 * Fix: wait_event_timeout() returns 0 on timeout and a positive remaining
 * time otherwise — it is never negative, so the old "if (ret < 0)" check
 * silently ignored timeouts. The timeout is now detected with "if (!ret)".
 *
 * Returns 0 on success, -EPERM if the AFE service is not opened, or
 * -EINVAL on any other failure.
 */
int afe_loopback_gain(u16 port_id, u16 volume)
{
	struct afe_port_cmd_set_param set_param;
	int ret = 0;

	if (this_afe.apr == NULL) {
		pr_err("%s: AFE is not opened\n", __func__);
		ret = -EPERM;
		goto fail_cmd;
	}

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* RX ports numbers are even .TX ports numbers are odd. */
	if (port_id % 2 == 0) {
		pr_err("%s: Failed : afe loopback gain only for TX ports."
			" port_id %d\n", __func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	pr_debug("%s: %d %hX\n", __func__, port_id, volume);

	set_param.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	set_param.hdr.pkt_size = sizeof(set_param);
	set_param.hdr.src_port = 0;
	set_param.hdr.dest_port = 0;
	set_param.hdr.token = 0;
	set_param.hdr.opcode = AFE_PORT_CMD_SET_PARAM;

	set_param.port_id = port_id;
	set_param.payload_size = sizeof(struct afe_param_payload);
	set_param.payload_address = 0;

	set_param.payload.module_id = AFE_MODULE_ID_PORT_INFO;
	set_param.payload.param_id = AFE_PARAM_ID_LOOPBACK_GAIN;
	set_param.payload.param_size = sizeof(struct afe_param_loopback_gain);
	set_param.payload.reserved = 0;

	set_param.payload.param.loopback_gain.gain = volume;
	set_param.payload.param.loopback_gain.reserved = 0;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &set_param);
	if (ret < 0) {
		pr_err("%s: AFE param set failed for port %d\n",
					__func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
		(atomic_read(&this_afe.state) == 0),
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		/* 0 == timed out waiting for the DSP ack */
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	return 0;
fail_cmd:
	return ret;
}
/*
 * afe_open() - configure and start an AFE port.
 *
 * @port_id:    AFE or virtual port ID; RT-proxy virtual IDs 002_RX/001_TX
 *              are remapped to real port IDs, while 001_RX/002_TX are
 *              rejected by this open path.
 * @afe_config: interface configuration for the port; must be non-NULL.
 * @rate:       sample rate for the AFE_PORT_CMD_START command.
 *
 * Sends the audio-interface config command and then the port start
 * command, waiting (with timeout) for the DSP to acknowledge each.
 * Returns 0 on success or a negative errno on failure.
 */
int afe_open(u16 port_id, union afe_port_config *afe_config, int rate)
{
	struct afe_port_start_command start;
	struct afe_audioif_config_command config;
	int ret = 0;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		ret = -EINVAL;
		return ret;
	}
	pr_info("%s: %d %d\n", __func__, port_id, rate);

	/* These RT-proxy virtual DAIs are not handled by this open path. */
	if ((port_id == RT_PROXY_DAI_001_RX) ||
			(port_id == RT_PROXY_DAI_002_TX))
		return -EINVAL;
	/* Remap the remaining RT-proxy virtual DAIs onto real AFE ports. */
	if ((port_id == RT_PROXY_DAI_002_RX) ||
			(port_id == RT_PROXY_DAI_001_TX))
		port_id = VIRTUAL_ID_TO_PORTID(port_id);

	/* Make sure the APR link to the AFE service is up. */
	ret = afe_q6_interface_prepare();
	if (ret != 0)
		return ret;

#ifdef CONFIG_LGE_COMPRESSED_PATH
	if (port_id == HDMI_RX) {
		/* HDMI uses the multi-channel audio-interface config opcode */
		config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
		config.hdr.src_port = 0;
		config.hdr.dest_port = 0;
		config.hdr.token = 0;
		config.hdr.opcode = AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG;
	} else {
#endif
	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
	config.hdr.src_port = 0;
	config.hdr.dest_port = 0;
	config.hdr.token = 0;
	config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
#ifdef CONFIG_LGE_COMPRESSED_PATH
	}
#endif

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	config.port_id = port_id;
	config.port = *afe_config;

	/* state==1 marks a command in flight; the APR callback clears it. */
	atomic_set(&this_afe.state, 1);
	atomic_set(&this_afe.status, 0);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	/* non-zero status means the DSP rejected the config command */
	if (atomic_read(&this_afe.status) != 0) {
		pr_err("%s: config cmd failed\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* Config accepted — now start the port at the requested rate. */
	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PORT_CMD_START;
	start.port_id = port_id;
	start.gain = 0x2000;	/* presumably unity gain — TODO confirm scale */
	start.sample_rate = rate;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* Record the opening task for the debug trace below. */
	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
			this_afe.task->comm, this_afe.task->pid);
	return 0;
fail_cmd:
	return ret;
}
/* This function should be used by 8660 exclusively */
/*
 * afe_open() - configure and start an AFE port (8660 variant).
 *
 * @port_id:    AFE or virtual port ID. RT-proxy 001_RX/002_TX need no AFE
 *              setup here (returns 0); 002_RX/001_TX are remapped to real
 *              port IDs.
 * @afe_config: interface configuration for the port; must be non-NULL.
 * @rate:       sample rate for the AFE_PORT_CMD_START command.
 *
 * Chooses the config opcode per port class (SLIMBUS / I2S / default),
 * sends the config command and then the start command, waiting (with
 * timeout) for the DSP to acknowledge each. Returns 0 on success or a
 * negative errno on failure.
 */
int afe_open(u16 port_id, union afe_port_config *afe_config, int rate)
{
	struct afe_port_start_command start;
	struct afe_audioif_config_command config;
	int ret = 0;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		ret = -EINVAL;
		return ret;
	}
	pr_debug("%s: %d %d\n", __func__, port_id, rate);

	if ((port_id == RT_PROXY_DAI_001_RX) ||
			(port_id == RT_PROXY_DAI_002_TX))
		return 0;
	if ((port_id == RT_PROXY_DAI_002_RX) ||
			(port_id == RT_PROXY_DAI_001_TX))
		port_id = VIRTUAL_ID_TO_PORTID(port_id);

	/* Make sure the APR link to the AFE service is up. */
	ret = afe_q6_interface_prepare();
	if (ret != 0)
		return ret;

	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
	config.hdr.src_port = 0;
	config.hdr.dest_port = 0;
	config.hdr.token = 0;

	/* Pick the config opcode that matches the port class. */
	switch (port_id) {
	case SLIMBUS_0_RX:
	case SLIMBUS_0_TX:
	case SLIMBUS_1_RX:
	case SLIMBUS_1_TX:
	case SLIMBUS_2_RX:
	case SLIMBUS_2_TX:
	case SLIMBUS_3_RX:
	case SLIMBUS_3_TX:
	case SLIMBUS_4_RX:
	case SLIMBUS_4_TX:
		config.hdr.opcode = AFE_PORT_AUDIO_SLIM_SCH_CONFIG;
		break;
	case MI2S_TX:
	case MI2S_RX:
	case SECONDARY_I2S_RX:
	case SECONDARY_I2S_TX:
	case PRIMARY_I2S_RX:
	case PRIMARY_I2S_TX:
		/* AFE_PORT_CMD_I2S_CONFIG command is not supported
		 * in the LPASS EL 1.0. So we have to distinguish
		 * which AFE command, AFE_PORT_CMD_I2S_CONFIG or
		 * AFE_PORT_AUDIO_IF_CONFIG to use. If the format
		 * is L-PCM, AFE_PORT_AUDIO_IF_CONFIG is used to
		 * remain backward compatible.
		 */
		pr_debug("%s: afe_config->mi2s.format = %d\n", __func__,
				afe_config->mi2s.format);
		if (afe_config->mi2s.format == MSM_AFE_I2S_FORMAT_LPCM)
			config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
		else
			config.hdr.opcode = AFE_PORT_CMD_I2S_CONFIG;
		break;
	default:
		config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
		break;
	}

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	config.port_id = port_id;
	config.port = *afe_config;

	/* state==1 marks a command in flight; the APR callback clears it. */
	atomic_set(&this_afe.state, 1);
	atomic_set(&this_afe.status, 0);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	/* non-zero status means the DSP rejected the config command */
	if (atomic_read(&this_afe.status) != 0) {
		pr_err("%s: config cmd failed\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* Config accepted — now start the port at the requested rate. */
	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PORT_CMD_START;
	start.port_id = port_id;
	start.gain = 0x2000;	/* presumably unity gain — TODO confirm scale */
	start.sample_rate = rate;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* Record the opening task for the debug trace below. */
	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
			this_afe.task->comm, this_afe.task->pid);
	return 0;
fail_cmd:
	return ret;
}
/*
 * afe_send_hw_delay() - program the calibrated hardware delay for a port.
 *
 * @port_id: AFE port the delay applies to.
 * @rate:    sample rate used to look up the delay entry.
 *
 * Looks up the per-rate delay from the TX or RX calibration table and
 * sends it via AFE_PORT_CMD_SET_PARAM / AFE_PARAM_ID_DEVICE_HW_DELAY,
 * waiting (with timeout) for the DSP acknowledgement.
 *
 * Fixes vs. previous revision:
 *  - the exit pr_debug() format string was broken across two source lines
 *    (a raw newline inside the literal); reconstructed as one literal;
 *  - delay_entry.delay_usec is zero-initialized so the exit trace does
 *    not print an uninitialized value when the calibration lookup fails.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static int afe_send_hw_delay(u16 port_id, u32 rate)
{
	struct hw_delay_entry delay_entry;
	struct afe_port_cmd_set_param config;
	int index = 0;
	int ret = -EINVAL;

	pr_debug("%s\n", __func__);

	delay_entry.sample_rate = rate;
	delay_entry.delay_usec = 0;
	if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_TX)
		ret = get_hw_delay(TX_CAL, &delay_entry);
	else if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_RX)
		ret = get_hw_delay(RX_CAL, &delay_entry);

	if (ret != 0) {
		pr_debug("%s: Failed to get hw delay info\n", __func__);
		goto done;
	}

	/*
	 * NOTE(review): index is copied from a u16 port_id so it can never
	 * be negative and this check is dead; presumably it was meant to
	 * validate a q6audio_get_port_index() result — confirm before
	 * changing, since index is also used as the command token.
	 */
	index = port_id;
	if (index < 0) {
		pr_debug("%s: AFE port index invalid!\n", __func__);
		goto done;
	}

	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	config.hdr.pkt_size = sizeof(config);
	config.hdr.src_port = 0;
	config.hdr.dest_port = 0;
	config.hdr.token = index;
	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM;

	config.port_id = port_id;
	config.payload_size = sizeof(struct afe_param_payload_base) +
		sizeof(struct afe_param_id_device_hw_delay_cfg);
	config.payload_address = 0;

	config.payload.base.module_id = AFE_MODULE_ID_PORT_INFO;
	config.payload.base.param_id = AFE_PARAM_ID_DEVICE_HW_DELAY;
	config.payload.base.param_size =
		sizeof(struct afe_param_id_device_hw_delay_cfg);
	config.payload.base.reserved = 0;

	config.payload.param.hw_delay.delay_in_us = delay_entry.delay_usec;
	config.payload.param.hw_delay.device_hw_delay_minor_version =
		AFE_API_VERSION_DEVICE_HW_DELAY;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n",
		       __func__, port_id);
		ret = -EINVAL;
		goto done;
	}

	ret = wait_event_timeout(this_afe.wait,
				 (atomic_read(&this_afe.state) == 0),
				 msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout IF CONFIG\n", __func__);
		ret = -EINVAL;
		goto done;
	}
	if (atomic_read(&this_afe.status) != 0) {
		pr_err("%s: config cmd failed\n", __func__);
		ret = -EINVAL;
		goto done;
	}
done:
	pr_debug("%s port_id %u rate %u delay_usec %d status %d\n",
		 __func__, port_id, rate, delay_entry.delay_usec, ret);
	return ret;
}
/*
 * send_adm_apr() - forward an RTAC ADM get/set-param command from user
 * space to the ADSP and return the number of payload bytes produced.
 *
 * @buf:    user-space buffer: [0]=user buf size, [1]=payload size,
 *          [2]=copp id, then the command payload.
 * @opcode: ADM_CMD_SET_PP_PARAMS_V5 or ADM_CMD_GET_PP_PARAMS_V5.
 *
 * Fixes vs. previous revision:
 *  - the "invalid data size" and "payload too large" error paths jumped
 *    to done while rtac_adm_apr_mutex was still held, leaking the mutex
 *    (the sibling ASM variant correctly uses goto err); both now unlock;
 *  - payload_size is validated against the 4-word fixed header before
 *    computing data_size, preventing unsigned underflow that defeated
 *    the map_size bound check.
 *
 * Returns the number of bytes produced, or 0 on any failure.
 */
u32 send_adm_apr(void *buf, u32 opcode)
{
	s32 result;
	u32 user_buf_size = 0;
	u32 bytes_returned = 0;
	u32 port_index = 0;
	u32 copp_id;
	int port_id;
	u32 payload_size;
	u32 data_size = 0;
	struct apr_hdr adm_params;

	pr_debug("%s\n", __func__);

	/* Lazily allocate and map the shared calibration buffer. */
	if (rtac_cal[ADM_RTAC_CAL].map_data.ion_handle == NULL) {
		result = rtac_allocate_cal_buffer(ADM_RTAC_CAL);
		if (result < 0) {
			pr_err("%s: allocate buffer failed!", __func__);
			goto done;
		}
	}
	if (rtac_cal[ADM_RTAC_CAL].map_data.map_handle == 0) {
		result = rtac_map_cal_buffer(ADM_RTAC_CAL);
		if (result < 0) {
			pr_err("%s: map buffer failed!", __func__);
			goto done;
		}
	}

	if (copy_from_user(&user_buf_size, (void *)buf,
						sizeof(user_buf_size))) {
		pr_err("%s: Copy from user failed! buf = 0x%x\n",
			__func__, (unsigned int)buf);
		goto done;
	}
	/* user_buf_size is unsigned, so this only rejects 0 */
	if (user_buf_size <= 0) {
		pr_err("%s: Invalid buffer size = %d\n",
			__func__, user_buf_size);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}

	if (copy_from_user(&copp_id, buf + 2 * sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy port id from user buffer\n",
			__func__);
		goto done;
	}

	/* Resolve the COPP back to the AFE port it is attached to. */
	for (port_index = 0; port_index < AFE_MAX_PORTS; port_index++) {
		if (adm_get_copp_id(port_index) == copp_id)
			break;
		if (adm_get_lowlatency_copp_id(port_index) == copp_id)
			break;
	}
	if (port_index >= AFE_MAX_PORTS) {
		pr_err("%s: Could not find port index for copp = %d\n",
		       __func__, copp_id);
		goto done;
	}

	port_id = q6audio_get_port_id_from_index(port_index);
	if (port_id < 0) {
		pr_err("%s: Could not find port id mapped for port_idx %d\n",
		       __func__, port_index);
		goto done;
	}

	mutex_lock(&rtac_adm_apr_mutex);
	if (rtac_adm_apr_data.apr_handle == NULL) {
		pr_err("%s: APR not initialized\n", __func__);
		goto err;
	}

	if (opcode == ADM_CMD_SET_PP_PARAMS_V5) {
		/* Reject payloads smaller than the 4-word fixed header so
		 * the subtraction below cannot wrap around. */
		if (payload_size < 4 * sizeof(u32)) {
			pr_err("%s: Invalid payload size = %d\n",
				__func__, payload_size);
			goto err;
		}
		data_size = payload_size - 4 * sizeof(u32);
		if (data_size > rtac_cal[ADM_RTAC_CAL].map_data.map_size) {
			pr_err("%s: Invalid data size = %d\n",
				__func__, data_size);
			goto err;	/* was "goto done": leaked the mutex */
		}
		payload_size = 4 * sizeof(u32);

		/* Copy the parameter data directly into the mapped buffer. */
		if (copy_from_user((void *)
				rtac_cal[ADM_RTAC_CAL].cal_data.kvaddr,
				buf + 7 * sizeof(u32), data_size)) {
			pr_err("%s: Could not copy payload from user buffer\n",
				__func__);
			goto err;
		}
		rtac_adm_buffer[8] = data_size;
	} else {
		if (payload_size > MAX_PAYLOAD_SIZE) {
			pr_err("%s: Invalid payload size = %d\n",
				__func__, payload_size);
			goto err;	/* was "goto done": leaked the mutex */
		}
		if (copy_from_user(rtac_adm_buffer +
				sizeof(adm_params)/sizeof(u32),
				buf + 3 * sizeof(u32), payload_size)) {
			pr_err("%s: Could not copy payload from user buffer\n",
				__func__);
			goto err;
		}
	}

	adm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, payload_size);
	adm_params.src_svc = APR_SVC_ADM;
	adm_params.src_domain = APR_DOMAIN_APPS;
	adm_params.src_port = copp_id;
	adm_params.dest_svc = APR_SVC_ADM;
	adm_params.dest_domain = APR_DOMAIN_ADSP;
	adm_params.dest_port = copp_id;
	adm_params.token = port_id;
	adm_params.opcode = opcode;

	/* Words 5-7 point the DSP at the out-of-band calibration buffer. */
	rtac_adm_buffer[5] = rtac_cal[ADM_RTAC_CAL].cal_data.paddr;
	rtac_adm_buffer[6] = 0;
	rtac_adm_buffer[7] = rtac_cal[ADM_RTAC_CAL].map_data.map_handle;
	memcpy(rtac_adm_buffer, &adm_params, sizeof(adm_params));
	atomic_set(&rtac_adm_apr_data.cmd_state, 1);

	pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%x\n",
		__func__, opcode, rtac_cal[ADM_RTAC_CAL].cal_data.paddr);

	result = apr_send_pkt(rtac_adm_apr_data.apr_handle,
				(uint32_t *)rtac_adm_buffer);
	if (result < 0) {
		pr_err("%s: Set params failed port = %d, copp = %d\n",
			__func__, port_index, copp_id);
		goto err;
	}
	result = wait_event_timeout(rtac_adm_apr_data.cmd_wait,
		(atomic_read(&rtac_adm_apr_data.cmd_state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		pr_err("%s: Set params timed out port = %d, copp = %d\n",
			__func__, port_index, copp_id);
		goto err;
	}
	if (atomic_read(&rtac_common.apr_err_code)) {
		pr_err("%s: DSP returned error code = %d, opcode = 0x%x\n",
			__func__, atomic_read(&rtac_common.apr_err_code),
			opcode);
		goto err;
	}

	if (opcode == ADM_CMD_GET_PP_PARAMS_V5) {
		/* Word [2] of the returned payload is the data size. */
		bytes_returned = ((u32 *)rtac_cal[ADM_RTAC_CAL].cal_data.
			kvaddr)[2] + 3 * sizeof(u32);
		if (bytes_returned > user_buf_size) {
			pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
				__func__, user_buf_size, bytes_returned);
			goto err;
		}
		if (copy_to_user(buf, (void *)
				rtac_cal[ADM_RTAC_CAL].cal_data.kvaddr,
				bytes_returned)) {
			pr_err("%s: Could not copy buffer to user,size = %d\n",
				__func__, bytes_returned);
			goto err;
		}
	} else {
		bytes_returned = data_size;
	}
err:
	mutex_unlock(&rtac_adm_apr_mutex);
done:
	return bytes_returned;
}
/*
 * send_rtac_asm_apr() - forward an RTAC ASM get/set-param command from
 * user space to the ADSP and return the number of payload bytes produced.
 *
 * @buf:    user-space buffer: [0]=user buf size, [1]=payload size,
 *          [2]=session id, then the command payload.
 * @opcode: ASM_STREAM_CMD_SET_PP_PARAMS_V2 or ASM_STREAM_CMD_GET_PP_PARAMS_V2.
 *
 * Fix: payload_size is validated against the 4-word fixed header before
 * computing data_size; previously "payload_size - 4 * sizeof(u32)" could
 * wrap around for small payloads, defeating the map_size bound check.
 *
 * Returns the number of bytes produced, or 0 on any failure.
 */
u32 send_rtac_asm_apr(void *buf, u32 opcode)
{
	s32 result;
	u32 user_buf_size = 0;
	u32 bytes_returned = 0;
	u32 session_id = 0;
	u32 payload_size;
	u32 data_size = 0;
	struct apr_hdr asm_params;

	pr_debug("%s\n", __func__);

	/* Lazily allocate and map the shared calibration buffer. */
	if (rtac_cal[ASM_RTAC_CAL].map_data.ion_handle == NULL) {
		result = rtac_allocate_cal_buffer(ASM_RTAC_CAL);
		if (result < 0) {
			pr_err("%s: allocate buffer failed!", __func__);
			goto done;
		}
	}
	if (rtac_cal[ASM_RTAC_CAL].map_data.map_handle == 0) {
		result = rtac_map_cal_buffer(ASM_RTAC_CAL);
		if (result < 0) {
			pr_err("%s: map buffer failed!", __func__);
			goto done;
		}
	}

	if (copy_from_user(&user_buf_size, (void *)buf,
						sizeof(user_buf_size))) {
		pr_err("%s: Copy from user failed! buf = 0x%x\n",
			__func__, (unsigned int)buf);
		goto done;
	}
	/* user_buf_size is unsigned, so this only rejects 0 */
	if (user_buf_size <= 0) {
		pr_err("%s: Invalid buffer size = %d\n",
			__func__, user_buf_size);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}

	if (copy_from_user(&session_id, buf + 2 * sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy session id from user buffer\n",
			__func__);
		goto done;
	}
	if (session_id >= (SESSION_MAX + 1)) {
		pr_err("%s: Invalid Session = %d\n", __func__, session_id);
		goto done;
	}

	mutex_lock(&rtac_asm_apr_mutex);
	if (rtac_asm_apr_data[session_id].apr_handle == NULL) {
		pr_err("%s: APR not initialized\n", __func__);
		goto err;
	}

	if (opcode == ASM_STREAM_CMD_SET_PP_PARAMS_V2) {
		/* Reject payloads smaller than the 4-word fixed header so
		 * the subtraction below cannot wrap around. */
		if (payload_size < 4 * sizeof(u32)) {
			pr_err("%s: Invalid payload size = %d\n",
				__func__, payload_size);
			goto err;
		}
		data_size = payload_size - 4 * sizeof(u32);
		if (data_size > rtac_cal[ASM_RTAC_CAL].map_data.map_size) {
			pr_err("%s: Invalid data size = %d\n",
				__func__, data_size);
			goto err;
		}
		payload_size = 4 * sizeof(u32);

		/* Copy the parameter data directly into the mapped buffer. */
		if (copy_from_user((void *)
				rtac_cal[ASM_RTAC_CAL].cal_data.kvaddr,
				buf + 7 * sizeof(u32), data_size)) {
			pr_err("%s: Could not copy payload from user buffer\n",
				__func__);
			goto err;
		}
		rtac_asm_buffer[8] = data_size;
	} else {
		if (payload_size > MAX_PAYLOAD_SIZE) {
			pr_err("%s: Invalid payload size = %d\n",
				__func__, payload_size);
			goto err;
		}
		if (copy_from_user(rtac_asm_buffer +
				sizeof(asm_params)/sizeof(u32),
				buf + 3 * sizeof(u32), payload_size)) {
			pr_err("%s: Could not copy payload from user buffer\n",
				__func__);
			goto err;
		}
	}

	asm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	asm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, payload_size);
	asm_params.src_svc = q6asm_get_apr_service_id(session_id);
	if (asm_params.src_svc == -EINVAL) {
		pr_err("%s: Could not get service id form session %d",
			__func__, session_id);
		goto err;
	}
	asm_params.src_domain = APR_DOMAIN_APPS;
	asm_params.src_port = (session_id << 8) | 0x0001;
	asm_params.dest_svc = APR_SVC_ASM;
	asm_params.dest_domain = APR_DOMAIN_ADSP;
	asm_params.dest_port = (session_id << 8) | 0x0001;
	asm_params.token = session_id;
	asm_params.opcode = opcode;

	/* Words 5-7 point the DSP at the out-of-band calibration buffer. */
	rtac_asm_buffer[5] = rtac_cal[ASM_RTAC_CAL].cal_data.paddr;
	rtac_asm_buffer[6] = 0;
	rtac_asm_buffer[7] = rtac_cal[ASM_RTAC_CAL].map_data.map_handle;
	memcpy(rtac_asm_buffer, &asm_params, sizeof(asm_params));
	atomic_set(&rtac_asm_apr_data[session_id].cmd_state, 1);

	pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%x\n",
		__func__, opcode, rtac_cal[ASM_RTAC_CAL].cal_data.paddr);

	result = apr_send_pkt(rtac_asm_apr_data[session_id].apr_handle,
				(uint32_t *)rtac_asm_buffer);
	if (result < 0) {
		pr_err("%s: Set params failed session = %d\n",
			__func__, session_id);
		goto err;
	}
	result = wait_event_timeout(rtac_asm_apr_data[session_id].cmd_wait,
		(atomic_read(&rtac_asm_apr_data[session_id].cmd_state) == 0),
		5 * HZ);
	if (!result) {
		pr_err("%s: Set params timed out session = %d\n",
			__func__, session_id);
		goto err;
	}
	if (atomic_read(&rtac_common.apr_err_code)) {
		pr_err("%s: DSP returned error code = %d, opcode = 0x%x\n",
			__func__, atomic_read(&rtac_common.apr_err_code),
			opcode);
		goto err;
	}

	if (opcode == ASM_STREAM_CMD_GET_PP_PARAMS_V2) {
		/* Word [2] of the returned payload is the data size. */
		bytes_returned = ((u32 *)rtac_cal[ASM_RTAC_CAL].cal_data.
			kvaddr)[2] + 3 * sizeof(u32);
		if (bytes_returned > user_buf_size) {
			pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
				__func__, user_buf_size, bytes_returned);
			goto err;
		}
		if (copy_to_user(buf, (void *)
				rtac_cal[ASM_RTAC_CAL].cal_data.kvaddr,
				bytes_returned)) {
			pr_err("%s: Could not copy buffer to user,size = %d\n",
				__func__, bytes_returned);
			goto err;
		}
	} else {
		bytes_returned = data_size;
	}
err:
	mutex_unlock(&rtac_asm_apr_mutex);
done:
	return bytes_returned;
}
/*
 * send_voice_apr() - forward an RTAC voice (CVP/CVS) command from user
 * space to the modem and return the number of payload bytes produced.
 *
 * @buf:    user-space buffer: [0]=buffer size, [1]=payload size,
 *          [2]=destination port, then the command payload.
 * @mode:   RTAC_CVP or RTAC_CVS — selects the APR handle/session.
 * @opcode: command opcode; VOICE_CMD_GET_PARAM returns the response size.
 *
 * Returns the number of bytes produced, or 0 on any failure.
 */
u32 send_voice_apr(u32 mode, void *buf, u32 opcode)
{
	s32 result;
	u32 count = 0;
	u32 bytes_returned = 0;
	u32 payload_size;
	u32 dest_port;
	struct apr_hdr voice_params;

	pr_debug("%s\n", __func__);

	if (copy_from_user(&count, (void *)buf, sizeof(count))) {
		pr_err("%s: Copy to user failed! buf = 0x%x\n",
			__func__, (unsigned int)buf);
		/* NOTE(review): result is set here but never returned —
		 * the function still returns bytes_returned (0). */
		result = -EFAULT;
		goto done;
	}
	/* NOTE(review): count is u32, so "<= 0" only catches 0. */
	if (count <= 0) {
		pr_err("%s: Invalid buffer size = %d\n", __func__, count);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(payload_size),
						sizeof(payload_size))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}
	if (payload_size > MAX_PAYLOAD_SIZE) {
		pr_err("%s: Invalid payload size = %d\n", __func__,
			payload_size);
		goto done;
	}

	if (copy_from_user(&dest_port, buf + 2 * sizeof(dest_port),
						sizeof(dest_port))) {
		pr_err("%s: Could not copy port id from user buffer\n",
			__func__);
		goto done;
	}

	if ((mode != RTAC_CVP) && (mode != RTAC_CVS)) {
		pr_err("%s: Invalid Mode for APR, mode = %d\n",
			__func__, mode);
		goto done;
	}

	mutex_lock(&rtac_voice_apr_mutex);
	if (rtac_voice_apr_data[mode].apr_handle == NULL) {
		pr_err("%s: APR not initialized\n", __func__);
		goto err;
	}
	rtac_voice_user_buf_size = count;

	/* NOTE(review): the offset here is sizeof(voice_params) *bytes*
	 * applied via pointer arithmetic on rtac_voice_buffer; if that
	 * buffer is a u32 array (the newer RTAC code divides comparable
	 * offsets by sizeof(u32)) this skips 4x the header size — verify
	 * the element type of rtac_voice_buffer. */
	if (copy_from_user(rtac_voice_buffer + sizeof(voice_params),
			buf + 3 * sizeof(u32), payload_size)) {
		pr_err("%s: Could not copy payload from user buffer\n",
			__func__);
		goto err;
	}

	voice_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	voice_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, payload_size);
	voice_params.src_svc = 0;
	voice_params.src_domain = APR_DOMAIN_APPS;
	/* Source port is the voice session handle for this mode/dest. */
	voice_params.src_port = voice_session_id[
		get_voice_index(mode, dest_port)];
	voice_params.dest_svc = 0;
	voice_params.dest_domain = APR_DOMAIN_MODEM;
	voice_params.dest_port = (u16)dest_port;
	voice_params.token = 0;
	voice_params.opcode = opcode;

	memcpy(rtac_voice_buffer, &voice_params, sizeof(voice_params));
	atomic_set(&rtac_voice_apr_data[mode].cmd_state, 1);

	pr_debug("%s: Sending RTAC command size = %d, opcode = %x\n",
		__func__, voice_params.pkt_size, opcode);

	result = apr_send_pkt(rtac_voice_apr_data[mode].apr_handle,
				(uint32_t *)rtac_voice_buffer);
	if (result < 0) {
		pr_err("%s: apr_send_pkt failed opcode = %x\n",
			__func__, opcode);
		goto err;
	}
	/* Wait for the response; the mutex is dropped before the checks. */
	result = wait_event_timeout(rtac_voice_apr_data[mode].cmd_wait,
		(atomic_read(&rtac_voice_apr_data[mode].cmd_state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	mutex_unlock(&rtac_voice_apr_mutex);
	if (!result) {
		pr_err("%s: apr_send_pkt timed out opcode = %x\n",
			__func__, opcode);
		goto done;
	}

	/* rtac_voice_payload_size is presumably filled by the APR
	 * callback with the response size — confirm. */
	if (rtac_voice_payload_size != 0) {
		if (copy_to_user(buf, rtac_voice_buffer,
				rtac_voice_payload_size + sizeof(u32))) {
			pr_err("%s: Could not copy buffer to user,size = %d\n",
				__func__, payload_size);
			goto done;
		}
	}

	if (opcode == VOICE_CMD_GET_PARAM)
		bytes_returned = rtac_voice_payload_size;
	else
		bytes_returned = payload_size;
done:
	return bytes_returned;
err:
	mutex_unlock(&rtac_voice_apr_mutex);
	return bytes_returned;
}
/*
 * send_rtac_asm_apr() - forward an RTAC ASM command from user space to
 * the ADSP and return the number of payload bytes produced.
 *
 * @buf:    user-space buffer: [0]=buffer size, [1]=payload size,
 *          [2]=session id, then the command payload.
 * @opcode: command opcode; ASM_STREAM_CMD_GET_PP_PARAMS returns the
 *          response size.
 *
 * Fix: the session bound check used '>' so session_id == SESSION_MAX + 1
 * was accepted, and because the inner guards only covered some accesses,
 * rtac_asm_apr_data[] could be indexed out of bounds. The check now uses
 * '>=', which also makes the old inner guards unnecessary.
 *
 * Returns the number of bytes produced, or 0 on any failure.
 */
u32 send_rtac_asm_apr(void *buf, u32 opcode)
{
	s32 result;
	u32 count = 0;
	u32 bytes_returned = 0;
	u32 session_id = 0;
	u32 payload_size;
	struct apr_hdr asm_params;

	pr_debug("%s\n", __func__);

	if (copy_from_user(&count, (void *)buf, sizeof(count))) {
		pr_err("%s: Copy to user failed! buf = 0x%x\n",
			__func__, (unsigned int)buf);
		result = -EFAULT;
		goto done;
	}
	/* count is u32, so "<= 0" only catches 0 */
	if (count <= 0) {
		pr_err("%s: Invalid buffer size = %d\n", __func__, count);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}
	if (payload_size > MAX_PAYLOAD_SIZE) {
		pr_err("%s: Invalid payload size = %d\n", __func__,
			payload_size);
		goto done;
	}

	if (copy_from_user(&session_id, buf + 2 * sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy session id from user buffer\n",
			__func__);
		goto done;
	}
	/* '>=' (was '>'): session_id must be a valid array index */
	if (session_id >= (SESSION_MAX + 1)) {
		pr_err("%s: Invalid Session = %d\n", __func__, session_id);
		goto done;
	}

	mutex_lock(&rtac_asm_apr_mutex);
	if (rtac_asm_apr_data[session_id].apr_handle == NULL) {
		pr_err("%s: APR not initialized\n", __func__);
		goto err;
	}
	rtac_asm_user_buf_size = count;

	/* NOTE(review): the offset here is sizeof(asm_params) *bytes*; the
	 * newer RTAC variant divides by sizeof(u32) — verify the element
	 * type of rtac_asm_buffer before changing this. */
	if (copy_from_user(rtac_asm_buffer + sizeof(asm_params),
			buf + 3 * sizeof(u32), payload_size)) {
		pr_err("%s: Could not copy payload from user buffer\n",
			__func__);
		goto err;
	}

	asm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	asm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, payload_size);
	asm_params.src_svc = q6asm_get_apr_service_id(session_id);
	asm_params.src_domain = APR_DOMAIN_APPS;
	asm_params.src_port = (session_id << 8) | 0x0001;
	asm_params.dest_svc = APR_SVC_ASM;
	asm_params.dest_domain = APR_DOMAIN_ADSP;
	asm_params.dest_port = (session_id << 8) | 0x0001;
	asm_params.token = session_id;
	asm_params.opcode = opcode;

	memcpy(rtac_asm_buffer, &asm_params, sizeof(asm_params));
	atomic_set(&rtac_asm_apr_data[session_id].cmd_state, 1);

	pr_debug("%s: Sending RTAC command size = %d, session_id=%d\n",
		__func__, asm_params.pkt_size, session_id);

	result = apr_send_pkt(rtac_asm_apr_data[session_id].apr_handle,
				(uint32_t *)rtac_asm_buffer);
	if (result < 0) {
		pr_err("%s: Set params failed session = %d\n",
			__func__, session_id);
		goto err;
	}
	/* Wait for the response; the mutex is dropped before the checks. */
	result = wait_event_timeout(rtac_asm_apr_data[session_id].cmd_wait,
		(atomic_read(&rtac_asm_apr_data[session_id].cmd_state) == 0),
		5 * HZ);
	mutex_unlock(&rtac_asm_apr_mutex);
	if (!result) {
		pr_err("%s: Set params timed out session = %d\n",
			__func__, session_id);
		goto done;
	}

	if (rtac_asm_payload_size != 0) {
		if (copy_to_user(buf, rtac_asm_buffer,
				rtac_asm_payload_size + sizeof(u32))) {
			pr_err("%s: Could not copy buffer to user,size = %d\n",
				__func__, payload_size);
			goto done;
		}
	}

	if (opcode == ASM_STREAM_CMD_GET_PP_PARAMS)
		bytes_returned = rtac_asm_payload_size;
	else
		bytes_returned = payload_size;
done:
	return bytes_returned;
err:
	mutex_unlock(&rtac_asm_apr_mutex);
	return bytes_returned;
}
/*
 * send_adm_apr() - forward an RTAC ADM command from user space to the
 * ADSP and return the number of payload bytes produced.
 *
 * @buf:    user-space buffer: [0]=buffer size, [1]=payload size,
 *          [2]=copp id, then the command payload.
 * @opcode: command opcode; ADM_CMD_GET_PARAMS returns the response size.
 *
 * Returns the number of bytes produced, or 0 on any failure.
 */
u32 send_adm_apr(void *buf, u32 opcode)
{
	s32 result;
	u32 count = 0;
	u32 bytes_returned = 0;
	u32 port_index = 0;
	u32 copp_id;
	u32 payload_size;
	struct apr_hdr adm_params;

	pr_debug("%s\n", __func__);

	if (copy_from_user(&count, (void *)buf, sizeof(count))) {
		pr_err("%s: Copy to user failed! buf = 0x%x\n",
			__func__, (unsigned int)buf);
		/* NOTE(review): result is set here but never returned —
		 * the function still returns bytes_returned (0). */
		result = -EFAULT;
		goto done;
	}
	/* NOTE(review): count is u32, so "<= 0" only catches 0. */
	if (count <= 0) {
		pr_err("%s: Invalid buffer size = %d\n", __func__, count);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}
	if (payload_size > MAX_PAYLOAD_SIZE) {
		pr_err("%s: Invalid payload size = %d\n", __func__,
			payload_size);
		goto done;
	}

	if (copy_from_user(&copp_id, buf + 2 * sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy port id from user buffer\n",
			__func__);
		goto done;
	}

	/* Resolve the COPP back to the AFE port it is attached to. */
	for (port_index = 0; port_index < AFE_MAX_PORTS; port_index++) {
		if (adm_get_copp_id(port_index) == copp_id)
			break;
	}
	if (port_index >= AFE_MAX_PORTS) {
		pr_err("%s: Could not find port index for copp = %d\n",
		       __func__, copp_id);
		goto done;
	}

	mutex_lock(&rtac_adm_apr_mutex);
	if (rtac_adm_apr_data.apr_handle == NULL) {
		pr_err("%s: APR not initialized\n", __func__);
		goto err;
	}
	rtac_adm_user_buf_size = count;

	/* NOTE(review): the offset here is sizeof(adm_params) *bytes*; the
	 * newer RTAC variant divides by sizeof(u32) — verify the element
	 * type of rtac_adm_buffer before changing this. */
	if (copy_from_user(rtac_adm_buffer + sizeof(adm_params),
			buf + 3 * sizeof(u32), payload_size)) {
		pr_err("%s: Could not copy payload from user buffer\n",
			__func__);
		goto err;
	}

	adm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, payload_size);
	adm_params.src_svc = APR_SVC_ADM;
	adm_params.src_domain = APR_DOMAIN_APPS;
	adm_params.src_port = copp_id;
	adm_params.dest_svc = APR_SVC_ADM;
	adm_params.dest_domain = APR_DOMAIN_ADSP;
	adm_params.dest_port = copp_id;
	adm_params.token = copp_id;
	adm_params.opcode = opcode;

	memcpy(rtac_adm_buffer, &adm_params, sizeof(adm_params));
	atomic_set(&rtac_adm_apr_data.cmd_state, 1);

	pr_debug("%s: Sending RTAC command size = %d\n",
		__func__, adm_params.pkt_size);

	result = apr_send_pkt(rtac_adm_apr_data.apr_handle,
				(uint32_t *)rtac_adm_buffer);
	if (result < 0) {
		pr_err("%s: Set params failed port = %d, copp = %d\n",
			__func__, port_index, copp_id);
		goto err;
	}
	/* Wait for the response; the mutex is dropped before the checks. */
	result = wait_event_timeout(rtac_adm_apr_data.cmd_wait,
		(atomic_read(&rtac_adm_apr_data.cmd_state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	mutex_unlock(&rtac_adm_apr_mutex);
	if (!result) {
		pr_err("%s: Set params timed out port = %d, copp = %d\n",
			__func__, port_index, copp_id);
		goto done;
	}

	/* rtac_adm_payload_size is presumably filled by the APR callback
	 * with the response size — confirm. */
	if (rtac_adm_payload_size != 0) {
		if (copy_to_user(buf, rtac_adm_buffer,
				rtac_adm_payload_size + sizeof(u32))) {
			pr_err("%s: Could not copy buffer to user, size = %d\n",
				__func__, payload_size);
			goto done;
		}
	}

	if (opcode == ADM_CMD_GET_PARAMS)
		bytes_returned = rtac_adm_payload_size;
	else
		bytes_returned = payload_size;
done:
	return bytes_returned;
err:
	mutex_unlock(&rtac_adm_apr_mutex);
	return bytes_returned;
}
/*
 * adm_memory_map_regions() - map shared-memory regions with the ADSP (AVS v2).
 * @port_id:    virtual port id (converted/validated internally).
 * @buf_add:    array of @bufcnt physical buffer addresses (LSW only; MSW = 0).
 * @mempool_id: unused here; the SHMEM8_4K pool is always selected.
 * @bufsz:      array of @bufcnt buffer sizes in bytes.
 * @bufcnt:     number of regions to map (truncated to 8 bits in the command).
 *
 * Registers the ADM APR handle on first use. Blocking: waits up to 5 s for
 * the map acknowledgement. Returns 0 on success, negative errno on failure.
 */
int adm_memory_map_regions(int port_id, uint32_t *buf_add, uint32_t mempool_id,
			uint32_t *bufsz, uint32_t bufcnt)
{
	struct avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
	struct avs_shared_map_region_payload *mregions = NULL;
	void *mmap_region_cmd = NULL;
	void *payload = NULL;
	int ret = 0;
	int i = 0;
	int cmd_size = 0;
	int index = 0;

	pr_debug("%s\n", __func__);
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	port_id = q6audio_convert_virtual_to_portid(port_id);
	if (q6audio_validate_port(port_id) < 0) {
		pr_err("%s port id[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}
	/* NOTE(review): index is used to pick the wait queue below; assumes
	 * q6audio_get_port_index() is in range after validation — confirm. */
	index = q6audio_get_port_index(port_id);

	/* One command header plus one region payload per buffer. */
	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
			+ sizeof(struct avs_shared_map_region_payload) * bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!mmap_region_cmd) {
		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
						APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = 0;
	mmap_regions->hdr.token = 0;
	mmap_regions->hdr.opcode = ADM_CMD_SHARED_MEM_MAP_REGIONS;
	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
	mmap_regions->num_regions = bufcnt & 0x00ff;
	mmap_regions->property_flag = 0x00;

	pr_debug("%s: map_regions->num_regions = %d\n", __func__,
		mmap_regions->num_regions);
	payload = ((u8 *) mmap_region_cmd +
			sizeof(struct avs_cmd_shared_mem_map_regions));
	mregions = (struct avs_shared_map_region_payload *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->shm_addr_lsw = buf_add[i];
		mregions->shm_addr_msw = 0x00;
		mregions->mem_size_bytes = bufsz[i];
		++mregions;
	}

	atomic_set(&this_adm.copp_stat[0], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
			mmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait[index],
			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_map\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	/*
	 * Fix: wait_event_timeout() returns remaining jiffies on success;
	 * without this the function returned a positive non-zero value
	 * which callers misread as failure.
	 */
	ret = 0;
fail_cmd:
	kfree(mmap_region_cmd);
	return ret;
}
int adm_multi_ch_copp_open(int port_id, int path, int rate, int channel_mode, int topology, int perfmode) { struct adm_multi_ch_copp_open_command open; int ret = 0; int index; pr_debug("%s: port %d path:%d rate:%d channel :%d\n", __func__, port_id, path, rate, channel_mode); port_id = afe_convert_virtual_to_portid(port_id); if (afe_validate_port(port_id) < 0) { pr_err("%s port idi[%d] is invalid\n", __func__, port_id); return -ENODEV; } index = afe_get_port_index(port_id); pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index); if (this_adm.apr == NULL) { this_adm.apr = apr_register("ADSP", "ADM", adm_callback, 0xFFFFFFFF, &this_adm); if (this_adm.apr == NULL) { pr_err("%s: Unable to register ADM\n", __func__); ret = -ENODEV; return ret; } rtac_set_adm_handle(this_adm.apr); } /* Create a COPP if port id are not enabled */ if (atomic_read(&this_adm.copp_cnt[index]) == 0) { open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); open.hdr.pkt_size = sizeof(struct adm_multi_ch_copp_open_command); if (perfmode) { pr_debug("%s Performance mode", __func__); open.hdr.opcode = ADM_CMD_MULTI_CHANNEL_COPP_OPEN_V3; open.flags = ADM_MULTI_CH_COPP_OPEN_PERF_MODE_BIT; open.reserved = PCM_BITS_PER_SAMPLE; } else { open.hdr.opcode = ADM_CMD_MULTI_CHANNEL_COPP_OPEN; open.reserved = 0; } memset(open.dev_channel_mapping, 0, 8); if (channel_mode == 1) { open.dev_channel_mapping[0] = PCM_CHANNEL_FC; } else if (channel_mode == 2) { open.dev_channel_mapping[0] = PCM_CHANNEL_FL; open.dev_channel_mapping[1] = PCM_CHANNEL_FR; } else if (channel_mode == 4) { open.dev_channel_mapping[0] = PCM_CHANNEL_FL; open.dev_channel_mapping[1] = PCM_CHANNEL_FR; open.dev_channel_mapping[2] = PCM_CHANNEL_RB; open.dev_channel_mapping[3] = PCM_CHANNEL_LB; } else if (channel_mode == 6) { open.dev_channel_mapping[0] = PCM_CHANNEL_FL; open.dev_channel_mapping[1] = PCM_CHANNEL_FR; open.dev_channel_mapping[2] = PCM_CHANNEL_LFE; open.dev_channel_mapping[3] = 
PCM_CHANNEL_FC; open.dev_channel_mapping[4] = PCM_CHANNEL_LB; open.dev_channel_mapping[5] = PCM_CHANNEL_RB; } else if (channel_mode == 8) { open.dev_channel_mapping[0] = PCM_CHANNEL_FL; open.dev_channel_mapping[1] = PCM_CHANNEL_FR; open.dev_channel_mapping[2] = PCM_CHANNEL_LFE; open.dev_channel_mapping[3] = PCM_CHANNEL_FC; open.dev_channel_mapping[4] = PCM_CHANNEL_LB; open.dev_channel_mapping[5] = PCM_CHANNEL_RB; open.dev_channel_mapping[6] = PCM_CHANNEL_FLC; open.dev_channel_mapping[7] = PCM_CHANNEL_FRC; } else { pr_err("%s invalid num_chan %d\n", __func__, channel_mode); return -EINVAL; } open.hdr.src_svc = APR_SVC_ADM; open.hdr.src_domain = APR_DOMAIN_APPS; open.hdr.src_port = port_id; open.hdr.dest_svc = APR_SVC_ADM; open.hdr.dest_domain = APR_DOMAIN_ADSP; open.hdr.dest_port = port_id; open.hdr.token = port_id; open.mode = path; open.endpoint_id1 = port_id; if (this_adm.ec_ref_rx == 0) { open.endpoint_id2 = 0xFFFF; } else if (this_adm.ec_ref_rx && (path != 1)) { open.endpoint_id2 = this_adm.ec_ref_rx; this_adm.ec_ref_rx = 0; } pr_debug("%s open.endpoint_id1:%d open.endpoint_id2:%d", __func__, open.endpoint_id1, open.endpoint_id2); /* convert path to acdb path */ if (path == ADM_PATH_PLAYBACK) open.topology_id = get_adm_rx_topology(); else { open.topology_id = get_adm_tx_topology(); if ((open.topology_id == VPM_TX_SM_ECNS_COPP_TOPOLOGY) || (open.topology_id == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY)) rate = 16000; } if ((open.topology_id == 0) || (port_id == VOICE_RECORD_RX) || (port_id == VOICE_RECORD_TX)) open.topology_id = topology; open.channel_config = channel_mode & 0x00FF; open.rate = rate; pr_debug("%s: channel_config=%d port_id=%d rate=%d" " topology_id=0x%X\n", __func__, open.channel_config, open.endpoint_id1, open.rate, open.topology_id); atomic_set(&this_adm.copp_stat[index], 0); ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open); if (ret < 0) { pr_err("%s:ADM enable for port %d failed\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } /* Wait for the 
callback with copp id */ ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[index]), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s ADM open failed for port %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } } atomic_inc(&this_adm.copp_cnt[index]); return 0; fail_cmd: return ret; }
/*
 * adm_matrix_map() - route an ASM session to up to ADM_MAX_COPPS COPPs.
 * @session_id: ASM session to route.
 * @path:       0x1 = RX (playback), 0x2/0x3 = TX (capture).
 * @num_copps:  number of entries in @port_id (clamped to ADM_MAX_COPPS).
 * @port_id:    array of virtual port ids; converted to real ids IN PLACE.
 * @copp_id:    port used for the command header token/dest and wait index.
 *
 * After a successful route, pushes ADM calibration to every port and
 * registers each with RTAC. Returns 0 on success, -EINVAL on failure.
 * Note: returns 0 (not an error) on an invalid index.
 */
int adm_matrix_map(int session_id, int path, int num_copps,
			unsigned int *port_id, int copp_id)
{
	struct adm_routings_command route;
	int ret = 0, i = 0;
	/* Assumes port_ids have already been validated during adm_open */
	int index = afe_get_port_index(copp_id);
	int copp_cnt;

	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d token %d\n",
					__func__, index, copp_id);
		return 0;
	}

	pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n",
		 __func__, session_id, path, num_copps, port_id[0]);

	route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	route.hdr.pkt_size = sizeof(route);
	route.hdr.src_svc = 0;
	route.hdr.src_domain = APR_DOMAIN_APPS;
	route.hdr.src_port = copp_id;
	route.hdr.dest_svc = APR_SVC_ADM;
	route.hdr.dest_domain = APR_DOMAIN_ADSP;
	/* Destination is the live COPP handle, not the port id. */
	route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	route.hdr.token = copp_id;
	route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
	route.num_sessions = 1;
	route.session[0].id = session_id;

	if (num_copps < ADM_MAX_COPPS) {
		copp_cnt = num_copps;
	} else {
		copp_cnt = ADM_MAX_COPPS;
		/* print out warning for now as playback/capture to/from
		 * COPPs more than maximum allowed is extremely unlikely
		 */
		pr_warn("%s: max out routable COPPs\n", __func__);
	}

	route.session[0].num_copps = copp_cnt;
	/* Translate each virtual port to a real port and fetch its COPP id;
	 * out-of-range indices are silently skipped (entry left as-is). */
	for (i = 0; i < copp_cnt; i++) {
		int tmp;
		port_id[i] = afe_convert_virtual_to_portid(port_id[i]);
		tmp = afe_get_port_index(port_id[i]);
		pr_debug("%s: port_id[%d]: %d, index: %d\n", __func__, i,
			 port_id[i], tmp);
		if (tmp >= 0 && tmp < AFE_MAX_PORTS)
			route.session[0].copp_id[i] =
					atomic_read(&this_adm.copp_id[tmp]);
	}
	/* Pad to an even count — the DSP expects 32-bit-aligned pairs. */
	if (copp_cnt % 2)
		route.session[0].copp_id[i] = 0;

	switch (path) {
	case 0x1:
		route.path = AUDIO_RX;
		break;
	case 0x2:
	case 0x3:
		route.path = AUDIO_TX;
		break;
	default:
		/* NOTE(review): falls through and still sends the command
		 * with route.path unset — confirm intended. */
		pr_err("%s: Wrong path set[%d]\n", __func__, path);
		break;
	}
	atomic_set(&this_adm.copp_stat[index], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
	if (ret < 0) {
		pr_err("%s: ADM routing for port %d failed\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}
	/* Block until adm_callback acknowledges the routing command. */
	ret = wait_event_timeout(this_adm.wait,
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: ADM cmd Route failed for port %d\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* Push per-port ADM calibration now that routing is established. */
	for (i = 0; i < num_copps; i++)
		send_adm_cal(port_id[i], path);

	/* Register every routed device with RTAC for real-time tuning. */
	for (i = 0; i < num_copps; i++) {
		int tmp;
		tmp = afe_get_port_index(port_id[i]);
		if (tmp >= 0 && tmp < AFE_MAX_PORTS)
			rtac_add_adm_device(port_id[i],
				atomic_read(&this_adm.copp_id[tmp]),
				path, session_id);
		else
			pr_debug("%s: Invalid port index %d",
				__func__, tmp);
	}
	return 0;

fail_cmd:
	return ret;
}
/*
 * afe_port_start() - configure and start an AFE port (HTC variant).
 * @port_id:    AFE port (virtual RT-proxy ids are handled specially).
 * @afe_config: port configuration union; must be non-NULL.
 * @rate:       sample rate in Hz for the START command.
 *
 * RT-proxy PCM ports only bump a refcount and return; proxy ports may
 * first force-close their paired PCM DAI. Otherwise this sends an
 * IF-CONFIG command, waits for the ack, pushes AFE calibration, then
 * sends PORT START and waits again. Returns 0 on success, negative on
 * error. HTC_Q6_BUG() is invoked on DSP communication failures.
 */
int afe_port_start(u16 port_id, union afe_port_config *afe_config,
	u32 rate)
{
	struct afe_port_start_command start;
	struct afe_audioif_config_command config;
	int ret;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		ret = -EINVAL;
		return ret;
	}
	pr_info("%s: %d %d\n", __func__, port_id, rate);

	/* PCM-side RT proxy: just refcount; the DSP port is started when
	 * the proxy side starts. */
	if ((port_id == RT_PROXY_DAI_001_RX) ||
		(port_id == RT_PROXY_DAI_002_TX)) {
		pr_debug("%s: before incrementing pcm_afe_instance %d"\
			" port_id %d\n", __func__,
			pcm_afe_instance[port_id & 0x1], port_id);
		port_id = VIRTUAL_ID_TO_PORTID(port_id);
		pcm_afe_instance[port_id & 0x1]++;
		return 0;
	}
	/* Proxy-side RT proxy: may need to re-open the paired PCM DAI. */
	if ((port_id == RT_PROXY_DAI_002_RX) ||
		(port_id == RT_PROXY_DAI_001_TX)) {
		pr_debug("%s: before incrementing proxy_afe_instance %d"\
			" port_id %d\n", __func__,
			proxy_afe_instance[port_id & 0x1], port_id);
		if (!afe_close_done[port_id & 0x1]) {
			/*close pcm dai corresponding to the proxy dai*/
			afe_close(port_id - 0x10);
			pcm_afe_instance[port_id & 0x1]++;
			pr_debug("%s: reconfigure afe port again\n", __func__);
		}
		proxy_afe_instance[port_id & 0x1]++;
		afe_close_done[port_id & 0x1] = false;
		port_id = VIRTUAL_ID_TO_PORTID(port_id);
	}

	/* Ensure the AFE APR channel is registered before talking to DSP. */
	ret = afe_q6_interface_prepare();
	if (IS_ERR_VALUE(ret))
		return ret;

	if (port_id == HDMI_RX) {
		config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
		config.hdr.src_port = 0;
		config.hdr.dest_port = 0;
		config.hdr.token = 0;
		config.hdr.opcode = AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG;
	} else {
		config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
		config.hdr.src_port = 0;
		config.hdr.dest_port = 0;
		config.hdr.token = 0;
		/* Opcode depends on the port family. */
		switch (port_id) {
		case SLIMBUS_0_RX:
		case SLIMBUS_0_TX:
		case SLIMBUS_1_RX:
		case SLIMBUS_1_TX:
		case SLIMBUS_2_RX:
		case SLIMBUS_2_TX:
		case SLIMBUS_3_RX:
		case SLIMBUS_3_TX:
		case SLIMBUS_4_RX:
		case SLIMBUS_4_TX:
			config.hdr.opcode = AFE_PORT_AUDIO_SLIM_SCH_CONFIG;
			break;
		case MI2S_TX:
		case MI2S_RX:
		case SECONDARY_I2S_RX:
		case SECONDARY_I2S_TX:
		case PRIMARY_I2S_RX:
		case PRIMARY_I2S_TX:
			/* L-PCM keeps the legacy opcode for backward
			 * compatibility with older LPASS firmware. */
			pr_debug("%s: afe_config->mi2s.format = %d\n", __func__,
				 afe_config->mi2s.format);
			if (afe_config->mi2s.format == MSM_AFE_I2S_FORMAT_LPCM)
				config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
			else
				config.hdr.opcode = AFE_PORT_CMD_I2S_CONFIG;
			break;
		default:
			config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
			break;
		}
	}

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	config.port_id = port_id;
	config.port = *afe_config;

	atomic_set(&this_afe.state, 1);
	atomic_set(&this_afe.status, 0);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		HTC_Q6_BUG();
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* Wait for adm/afe callback to clear state (config ack). */
	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout IF CONFIG\n", __func__);
		HTC_Q6_BUG();
		ret = -EINVAL;
		goto fail_cmd;
	}
	/* status is set by the callback when the DSP rejects the config. */
	if (atomic_read(&this_afe.status) != 0) {
		pr_err("%s: config cmd failed\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* Port configured — push calibration before starting. */
	afe_send_cal(port_id);

	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PORT_CMD_START;
	start.port_id = port_id;
	start.gain = 0x2000;
	start.sample_rate = rate;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
	if (IS_ERR_VALUE(ret)) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* START uses a longer timeout (AFE_TIMEOUT_MS) than config. */
	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(AFE_TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout PORT START\n", __func__);
		HTC_Q6_BUG();
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* Remember the task that owns the port (debug aid). */
	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
	this_afe.task->comm, this_afe.task->pid);
	return 0;

fail_cmd:
	return ret;
}
/*
 * send_voice_apr() - relay an RTAC voice (CVS/CVP) command to the modem DSP.
 * @mode:   RTAC_CVP or RTAC_CVS — selects the APR session.
 * @buf:    user buffer: [0]=total size, [1]=payload size, [2]=dest port,
 *          then the payload (SET_PARAM data starts at word 7).
 * @opcode: VOICE_CMD_SET_PARAM / VOICE_CMD_GET_PARAM / other.
 *
 * SET_PARAM payload data goes out-of-band through the mapped RTAC cal
 * buffer; other opcodes are copied in-band after the APR header.
 * Returns bytes written back to @buf, or 0 on failure.
 */
u32 send_voice_apr(u32 mode, void *buf, u32 opcode)
{
	s32 result;
	u32 user_buf_size = 0;
	u32 bytes_returned = 0;
	u32 payload_size;
	u32 dest_port;
	u32 data_size = 0;
	struct apr_hdr voice_params;
	pr_debug("%s\n", __func__);

	/* Lazily allocate and map the shared cal buffer on first use. */
	if (rtac_cal[VOICE_RTAC_CAL].map_data.ion_handle == NULL) {
		result = rtac_allocate_cal_buffer(VOICE_RTAC_CAL);
		if (result < 0) {
			pr_err("%s: allocate buffer failed!", __func__);
			goto done;
		}
	}

	if (rtac_cal[VOICE_RTAC_CAL].map_data.map_handle == 0) {
		result = rtac_map_cal_buffer(VOICE_RTAC_CAL);
		if (result < 0) {
			pr_err("%s: map buffer failed!", __func__);
			goto done;
		}
	}

	if (copy_from_user(&user_buf_size, (void *)buf,
						sizeof(user_buf_size))) {
		pr_err("%s: Copy from user failed! buf = 0x%x\n",
		       __func__, (unsigned int)buf);
		goto done;
	}
	if (user_buf_size <= 0) {
		pr_err("%s: Invalid buffer size = %d\n",
			__func__, user_buf_size);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}

	if (copy_from_user(&dest_port, buf + 2 * sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy port id from user buffer\n",
			__func__);
		goto done;
	}

	if ((mode != RTAC_CVP) && (mode != RTAC_CVS)) {
		pr_err("%s: Invalid Mode for APR, mode = %d\n",
			__func__, mode);
		goto done;
	}

	mutex_lock(&rtac_voice_apr_mutex);
	if (rtac_voice_apr_data[mode].apr_handle == NULL) {
		pr_err("%s: APR not initialized\n", __func__);
		goto err;
	}

	if (opcode == VOICE_CMD_SET_PARAM) {
		/*
		 * Fix: guard the u32 subtraction — a payload_size below the
		 * 4-word header would underflow data_size to a huge value.
		 */
		if (payload_size < 4 * sizeof(u32)) {
			pr_err("%s: Invalid payload size = %d\n",
				__func__, payload_size);
			goto err;
		}
		data_size = payload_size - 4 * sizeof(u32);
		if (data_size > rtac_cal[VOICE_RTAC_CAL].map_data.map_size) {
			pr_err("%s: Invalid data size = %d\n",
				__func__, data_size);
			/* Fix: was "goto done", which leaked the mutex. */
			goto err;
		}
		payload_size = 4 * sizeof(u32);

		/* Out-of-band: payload data goes to the mapped cal buffer. */
		if (copy_from_user((void *)
				rtac_cal[VOICE_RTAC_CAL].cal_data.kvaddr,
				buf + 7 * sizeof(u32), data_size)) {
			pr_err("%s: Could not copy payload from user buffer\n",
				__func__);
			goto err;
		}
		rtac_voice_buffer[8] = data_size;
	} else {
		if (payload_size > MAX_PAYLOAD_SIZE) {
			pr_err("%s: Invalid payload size = %d\n",
				__func__, payload_size);
			/* Fix: was "goto done", which leaked the mutex. */
			goto err;
		}

		/* In-band: copy payload directly after the APR header. */
		if (copy_from_user(rtac_voice_buffer +
				sizeof(voice_params)/sizeof(u32),
				buf + 3 * sizeof(u32), payload_size)) {
			pr_err("%s: Could not copy payload from user buffer\n",
				__func__);
			goto err;
		}
	}

	voice_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	voice_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, payload_size);
	voice_params.src_svc = 0;
	voice_params.src_domain = APR_DOMAIN_APPS;
	voice_params.src_port = get_voice_index(mode, dest_port);
	voice_params.dest_svc = 0;
	voice_params.dest_domain = APR_DOMAIN_MODEM;
	voice_params.dest_port = (u16)dest_port;
	voice_params.token = 0;
	voice_params.opcode = opcode;

	/* Words 5-7 carry the shared-memory handle/address for the DSP. */
	rtac_voice_buffer[5] = rtac_cal[VOICE_RTAC_CAL].map_data.map_handle;
	rtac_voice_buffer[6] = rtac_cal[VOICE_RTAC_CAL].cal_data.paddr;
	rtac_voice_buffer[7] = 0;

	memcpy(rtac_voice_buffer, &voice_params, sizeof(voice_params));
	atomic_set(&rtac_voice_apr_data[mode].cmd_state, 1);

	pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%x\n",
		__func__, opcode,
		rtac_cal[VOICE_RTAC_CAL].cal_data.paddr);

	result = apr_send_pkt(rtac_voice_apr_data[mode].apr_handle,
					(uint32_t *)rtac_voice_buffer);
	if (result < 0) {
		pr_err("%s: apr_send_pkt failed opcode = %x\n",
			__func__, opcode);
		goto err;
	}
	/* Wait for the voice callback to clear cmd_state. */
	result = wait_event_timeout(rtac_voice_apr_data[mode].cmd_wait,
		(atomic_read(&rtac_voice_apr_data[mode].cmd_state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		pr_err("%s: apr_send_pkt timed out opcode = %x\n",
			__func__, opcode);
		goto err;
	}
	if (atomic_read(&rtac_common.apr_err_code)) {
		pr_err("%s: DSP returned error code = %d, opcode = 0x%x\n",
			__func__, atomic_read(&rtac_common.apr_err_code),
			opcode);
		goto err;
	}

	if (opcode == VOICE_CMD_GET_PARAM) {
		/* Word 2 of the response is the returned data length. */
		bytes_returned =
			((u32 *)rtac_cal[VOICE_RTAC_CAL].cal_data.kvaddr)[2]
			+ 3 * sizeof(u32);

		if (bytes_returned > user_buf_size) {
			pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
				__func__, user_buf_size, bytes_returned);
			goto err;
		}

		if (copy_to_user(buf, (void *)
				rtac_cal[VOICE_RTAC_CAL].cal_data.kvaddr,
				bytes_returned)) {
			pr_err("%s: Could not copy buffer to user, size = %d\n",
				__func__, bytes_returned);
			goto err;
		}
	} else {
		bytes_returned = data_size;
	}
err:
	mutex_unlock(&rtac_voice_apr_mutex);
done:
	return bytes_returned;
}
/*
 * adm_memory_map_regions() - map shared-memory regions with the ADSP (legacy).
 * @buf_add:    array of @bufcnt physical buffer addresses.
 * @mempool_id: memory pool id (truncated to 8 bits in the command).
 * @bufsz:      array of @bufcnt buffer sizes in bytes.
 * @bufcnt:     number of regions to map.
 *
 * Registers the ADM APR handle on first use. Blocking: waits up to 5 s
 * for the ack. Returns 0 on success, negative errno on failure.
 */
int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id,
				uint32_t *bufsz, uint32_t bufcnt)
{
	struct adm_cmd_memory_map_regions *mmap_regions = NULL;
	struct adm_memory_map_regions *mregions = NULL;
	void *mmap_region_cmd = NULL;
	void *payload = NULL;
	int ret = 0;
	int i = 0;
	int cmd_size = 0;

	pr_debug("%s\n", __func__);
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	/* One command header plus one region record per buffer. */
	cmd_size = sizeof(struct adm_cmd_memory_map_regions)
			+ sizeof(struct adm_memory_map_regions) * bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!mmap_region_cmd) {
		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	mmap_regions = (struct adm_cmd_memory_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
						APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = 0;
	mmap_regions->hdr.token = 0;
	mmap_regions->hdr.opcode = ADM_CMD_MEMORY_MAP_REGIONS;
	mmap_regions->mempool_id = mempool_id & 0x00ff;
	mmap_regions->nregions = bufcnt & 0x00ff;
	pr_debug("%s: map_regions->nregions = %d\n", __func__,
		mmap_regions->nregions);
	payload = ((u8 *) mmap_region_cmd +
				sizeof(struct adm_cmd_memory_map_regions));
	mregions = (struct adm_memory_map_regions *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->phys = buf_add[i];
		mregions->buf_size = bufsz[i];
		++mregions;
	}

	atomic_set(&this_adm.copp_stat[0], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
					mmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_map\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	/*
	 * Fix: wait_event_timeout() returns remaining jiffies on success;
	 * without this the function returned a positive non-zero value.
	 */
	ret = 0;
fail_cmd:
	kfree(mmap_region_cmd);
	return ret;
}
int afe_close(int port_id) { struct afe_port_stop_command stop; int ret = 0; if (this_afe.apr == NULL) { pr_err("AFE is already closed\n"); ret = -EINVAL; goto fail_cmd; } pr_debug("%s: port_id=%d\n", __func__, port_id); if ((port_id == RT_PROXY_DAI_001_RX) || (port_id == RT_PROXY_DAI_002_TX)) { pr_debug("%s: before decrementing pcm_afe_instance %d\n", __func__, pcm_afe_instance[port_id & 0x1]); port_id = VIRTUAL_ID_TO_PORTID(port_id); pcm_afe_instance[port_id & 0x1]--; if (!(pcm_afe_instance[port_id & 0x1] == 0 && proxy_afe_instance[port_id & 0x1] == 0)) return 0; else afe_close_done[port_id & 0x1] = true; } if ((port_id == RT_PROXY_DAI_002_RX) || (port_id == RT_PROXY_DAI_001_TX)) { pr_debug("%s: before decrementing proxy_afe_instance %d\n", __func__, proxy_afe_instance[port_id & 0x1]); port_id = VIRTUAL_ID_TO_PORTID(port_id); proxy_afe_instance[port_id & 0x1]--; if (!(pcm_afe_instance[port_id & 0x1] == 0 && proxy_afe_instance[port_id & 0x1] == 0)) return 0; else afe_close_done[port_id & 0x1] = true; } port_id = afe_convert_virtual_to_portid(port_id); stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); stop.hdr.pkt_size = sizeof(stop); stop.hdr.src_port = 0; stop.hdr.dest_port = 0; stop.hdr.token = 0; stop.hdr.opcode = AFE_PORT_CMD_STOP; stop.port_id = port_id; stop.reserved = 0; atomic_set(&this_afe.state, 1); ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop); if (ret == -ENETRESET) { pr_info("%s: Need to reset, calling APR deregister", __func__); return apr_deregister(this_afe.apr); } if (ret < 0) { pr_err("%s: AFE close failed\n", __func__); ret = -EINVAL; goto fail_cmd; } ret = wait_event_timeout(this_afe.wait, (atomic_read(&this_afe.state) == 0), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: wait_event timeout\n", __func__); ret = -EINVAL; goto fail_cmd; } fail_cmd: return ret; }
int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz, uint32_t bufcnt) { struct adm_cmd_memory_unmap_regions *unmap_regions = NULL; struct adm_memory_unmap_regions *mregions = NULL; void *unmap_region_cmd = NULL; void *payload = NULL; int ret = 0; int i = 0; int cmd_size = 0; pr_debug("%s\n", __func__); if (this_adm.apr == NULL) { pr_err("%s APR handle NULL\n", __func__); return -EINVAL; } cmd_size = sizeof(struct adm_cmd_memory_unmap_regions) + sizeof(struct adm_memory_unmap_regions) * bufcnt; unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL); if (!unmap_region_cmd) { pr_err("%s: allocate unmap_region_cmd failed\n", __func__); return -ENOMEM; } unmap_regions = (struct adm_cmd_memory_unmap_regions *) unmap_region_cmd; unmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); unmap_regions->hdr.pkt_size = cmd_size; unmap_regions->hdr.src_port = 0; unmap_regions->hdr.dest_port = 0; unmap_regions->hdr.token = 0; unmap_regions->hdr.opcode = ADM_CMD_MEMORY_UNMAP_REGIONS; unmap_regions->nregions = bufcnt & 0x00ff; unmap_regions->reserved = 0; pr_debug("%s: unmap_regions->nregions = %d\n", __func__, unmap_regions->nregions); payload = ((u8 *) unmap_region_cmd + sizeof(struct adm_cmd_memory_unmap_regions)); mregions = (struct adm_memory_unmap_regions *)payload; for (i = 0; i < bufcnt; i++) { mregions->phys = buf_add[i]; ++mregions; } atomic_set(&this_adm.copp_stat[0], 0); ret = apr_send_pkt(this_adm.apr, (uint32_t *) unmap_region_cmd); if (ret < 0) { pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__, unmap_regions->hdr.opcode, ret); ret = -EINVAL; goto fail_cmd; } ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[0]), 5 * HZ); if (!ret) { pr_err("%s: timeout. waited for memory_unmap\n", __func__); ret = -EINVAL; goto fail_cmd; } fail_cmd: kfree(unmap_region_cmd); return ret; }
/* This function sends multi-channel HDMI configuration command and AFE
 * calibration which is only supported by QDSP6 on 8960 and onward.
 */
/*
 * afe_port_start() - configure and start an AFE port (LGE recovery variant).
 * @port_id:    AFE port (virtual RT-proxy ids are handled specially).
 * @afe_config: port configuration union; must be non-NULL.
 * @rate:       sample rate in Hz for the START command.
 *
 * Same flow as the base afe_port_start(): IF-CONFIG -> wait -> cal +
 * hw-delay -> PORT START -> wait. With CONFIG_LGE_AFE_RECOVERY, a failed
 * config command is retried up to twice after closing the port.
 * Returns 0 on success, negative errno on failure.
 */
int afe_port_start(u16 port_id, union afe_port_config *afe_config,
	u32 rate) /* This function is no blocking */
{
	struct afe_port_start_command start;
	struct afe_audioif_config_command config;
	int ret;
// [email protected] : Please refer to QCT case#01306695 regarding AFE recovery
#ifdef CONFIG_LGE_AFE_RECOVERY
	int count = 0;	/* config retry counter, max 2 attempts */
#endif // CONFIG_LGE_AFE_RECOVERY

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		ret = -EINVAL;
		return ret;
	}
	pr_debug("%s: %d %d\n", __func__, port_id, rate);

	/* PCM-side RT proxy: just refcount; actual start happens when the
	 * proxy side starts. */
	if ((port_id == RT_PROXY_DAI_001_RX) ||
		(port_id == RT_PROXY_DAI_002_TX)) {
		pr_debug("%s: before incrementing pcm_afe_instance %d"\
			" port_id %d\n", __func__,
			pcm_afe_instance[port_id & 0x1], port_id);
		port_id = VIRTUAL_ID_TO_PORTID(port_id);
		pcm_afe_instance[port_id & 0x1]++;
		return 0;
	}
	/* Proxy-side RT proxy: may need to re-open the paired PCM DAI. */
	if ((port_id == RT_PROXY_DAI_002_RX) ||
		(port_id == RT_PROXY_DAI_001_TX)) {
		pr_debug("%s: before incrementing proxy_afe_instance %d"\
			" port_id %d\n", __func__,
			proxy_afe_instance[port_id & 0x1], port_id);
		if (!afe_close_done[port_id & 0x1]) {
			/*close pcm dai corresponding to the proxy dai*/
			afe_close(port_id - 0x10);
			pcm_afe_instance[port_id & 0x1]++;
			pr_debug("%s: reconfigure afe port again\n", __func__);
		}
		proxy_afe_instance[port_id & 0x1]++;
		afe_close_done[port_id & 0x1] = false;
		port_id = VIRTUAL_ID_TO_PORTID(port_id);
	}

	/* Make sure the AFE APR channel is registered. */
	ret = afe_q6_interface_prepare();
	if (IS_ERR_VALUE(ret))
		return ret;

	if (port_id == HDMI_RX) {
		config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
		config.hdr.src_port = 0;
		config.hdr.dest_port = 0;
		config.hdr.token = 0;
		config.hdr.opcode = AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG;
	} else {
		config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
		config.hdr.src_port = 0;
		config.hdr.dest_port = 0;
		config.hdr.token = 0;
		/* Opcode depends on the port family. */
		switch (port_id) {
		case SLIMBUS_0_RX:
		case SLIMBUS_0_TX:
		case SLIMBUS_1_RX:
		case SLIMBUS_1_TX:
		case SLIMBUS_2_RX:
		case SLIMBUS_2_TX:
		case SLIMBUS_3_RX:
		case SLIMBUS_3_TX:
		case SLIMBUS_4_RX:
		case SLIMBUS_4_TX:
			config.hdr.opcode = AFE_PORT_AUDIO_SLIM_SCH_CONFIG;
			break;
		case MI2S_TX:
		case MI2S_RX:
		case SECONDARY_I2S_RX:
		case SECONDARY_I2S_TX:
		case PRIMARY_I2S_RX:
		case PRIMARY_I2S_TX:
			/* AFE_PORT_CMD_I2S_CONFIG command is not supported
			 * in the LPASS EL 1.0. So we have to distiguish
			 * which AFE command, AFE_PORT_CMD_I2S_CONFIG or
			 * AFE_PORT_AUDIO_IF_CONFIG to use. If the format
			 * is L-PCM, the AFE_PORT_AUDIO_IF_CONFIG is used
			 * to make the backward compatible.
			 */
			pr_debug("%s: afe_config->mi2s.format = %d\n", __func__,
				 afe_config->mi2s.format);
			if (afe_config->mi2s.format == MSM_AFE_I2S_FORMAT_LPCM)
				config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
			else
				config.hdr.opcode = AFE_PORT_CMD_I2S_CONFIG;
			break;
		default:
			config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
			break;
		}
	}

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	config.port_id = port_id;
	config.port = *afe_config;

// [email protected] : Please refer to QCT case#01306695 regarding AFE recovery
#ifdef CONFIG_LGE_AFE_RECOVERY
send_cfg_cmd:	/* retry target when the DSP rejects the config */
#endif // CONFIG_LGE_AFE_RECOVERY
	atomic_set(&this_afe.state, 1);
	atomic_set(&this_afe.status, 0);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* Wait for the config ack (callback clears state). */
	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout IF CONFIG\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	/* status is set by the callback when the DSP rejects the config. */
	if (atomic_read(&this_afe.status) != 0) {
		pr_err("%s: config cmd failed\n", __func__);
// [email protected] : Please refer to QCT case#01306695 regarding AFE recovery
#ifdef CONFIG_LGE_AFE_RECOVERY
		/* Close the port and retry the config, at most twice. */
		if (count < 2) {
			afe_close(port_id);
			count++;
			goto send_cfg_cmd;
		}
#endif // CONFIG_LGE_AFE_RECOVERY
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* send AFE cal */
	afe_send_cal(port_id);
	afe_send_hw_delay(port_id, rate);

	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PORT_CMD_START;
	start.port_id = port_id;
	start.gain = 0x2000;
	start.sample_rate = rate;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
	if (IS_ERR_VALUE(ret)) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout PORT START\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* Remember the task that owns the port (debug aid). */
	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
	this_afe.task->comm, this_afe.task->pid);
	return 0;

fail_cmd:
	return ret;
}
int adm_close(int port_id) { struct apr_hdr close; int ret = 0; int index = 0; port_id = afe_convert_virtual_to_portid(port_id); index = afe_get_port_index(port_id); if (afe_validate_port(port_id) < 0) return -EINVAL; pr_debug("%s port_id=%d index %d\n", __func__, port_id, index); if (!(atomic_read(&this_adm.copp_cnt[index]))) { pr_err("%s: copp count for port[%d]is 0\n", __func__, port_id); goto fail_cmd; } atomic_dec(&this_adm.copp_cnt[index]); if (!(atomic_read(&this_adm.copp_cnt[index]))) { close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); close.pkt_size = sizeof(close); close.src_svc = APR_SVC_ADM; close.src_domain = APR_DOMAIN_APPS; close.src_port = port_id; close.dest_svc = APR_SVC_ADM; close.dest_domain = APR_DOMAIN_ADSP; close.dest_port = atomic_read(&this_adm.copp_id[index]); close.token = port_id; close.opcode = ADM_CMD_COPP_CLOSE; atomic_set(&this_adm.copp_id[index], RESET_COPP_ID); atomic_set(&this_adm.copp_stat[index], 0); this_adm.prev_index = 0xffff; pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n", __func__, atomic_read(&this_adm.copp_id[index]), port_id, index, atomic_read(&this_adm.copp_cnt[index])); ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close); if (ret < 0) { pr_err("%s ADM close failed\n", __func__); ret = -EINVAL; goto fail_cmd; } ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[index]), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s: ADM cmd Route failed for port %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } rtac_remove_adm_device(port_id); } fail_cmd: return ret; }
/*
 * afe_port_start_nowait() - configure and start an AFE port without waiting
 * for the DSP acknowledgement (non-blocking variant).
 *
 * Sends the audio-interface configuration for @port_id followed by
 * AFE_PORT_CMD_START at @rate.  Returns 0 on success, -EINVAL on a bad
 * configuration pointer, invalid port, or send failure, and -ENODEV when
 * the AFE APR handle is not registered.
 */
int afe_port_start_nowait(u16 port_id, union afe_port_config *afe_config,
	u32 rate)
{
	struct afe_audioif_config_command if_cfg;
	struct afe_port_start_command start_cmd;
	int rc;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		rc = -EINVAL;
		return rc;
	}
	pr_debug("%s: %d %d\n", __func__, port_id, rate);

	/* The raw RT proxy DAI ids are rejected outright; the virtual ids
	 * are remapped onto their real AFE port ids. */
	if ((port_id == RT_PROXY_DAI_001_RX) ||
			(port_id == RT_PROXY_DAI_002_TX))
		return -EINVAL;
	if ((port_id == RT_PROXY_DAI_002_RX) ||
			(port_id == RT_PROXY_DAI_001_TX))
		port_id = VIRTUAL_ID_TO_PORTID(port_id);

	if (this_afe.apr == NULL) {
		pr_err("%s: AFE APR is not registered\n", __func__);
		rc = -ENODEV;
		return rc;
	}

	/* The header fields are identical for both configuration paths;
	 * only the opcode depends on whether the port is HDMI. */
	if_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	if_cfg.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
	if_cfg.hdr.src_port = 0;
	if_cfg.hdr.dest_port = 0;
	if_cfg.hdr.token = 0;
	if (port_id == HDMI_RX)
		if_cfg.hdr.opcode = AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG;
	else
		if_cfg.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
			port_id);
		rc = -EINVAL;
		goto fail_cmd;
	}

	if_cfg.port_id = port_id;
	if_cfg.port = *afe_config;

	rc = apr_send_pkt(this_afe.apr, (uint32_t *) &if_cfg);
	if (rc < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
			port_id);
		rc = -EINVAL;
		goto fail_cmd;
	}

	/* send AFE cal */
	afe_send_cal(port_id);

	start_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start_cmd.hdr.pkt_size = sizeof(start_cmd);
	start_cmd.hdr.src_port = 0;
	start_cmd.hdr.dest_port = 0;
	start_cmd.hdr.token = 0;
	start_cmd.hdr.opcode = AFE_PORT_CMD_START;
	start_cmd.port_id = port_id;
	start_cmd.gain = 0x2000;
	start_cmd.sample_rate = rate;

	rc = apr_send_pkt(this_afe.apr, (uint32_t *) &start_cmd);
	if (IS_ERR_VALUE(rc)) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
			port_id);
		rc = -EINVAL;
		goto fail_cmd;
	}

	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
		this_afe.task->comm, this_afe.task->pid);
	return 0;

fail_cmd:
	return rc;
}
int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params) { struct asm_pp_params_command *open = NULL; int ret = 0, sz = 0; int index; pr_debug("SRS - %s", __func__); index = afe_get_port_index(port_id); if (IS_ERR_VALUE(index)) { pr_err("%s: invald port id\n", __func__); return index; } switch (srs_tech_id) { case SRS_ID_GLOBAL: { struct srs_trumedia_params_GLOBAL *glb_params = NULL; sz = sizeof(struct asm_pp_params_command) + sizeof(struct srs_trumedia_params_GLOBAL); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct srs_trumedia_params_GLOBAL) + sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_TRUMEDIA_PARAMS; open->params.param_size = sizeof(struct srs_trumedia_params_GLOBAL); glb_params = (struct srs_trumedia_params_GLOBAL *)((u8 *)open + sizeof(struct asm_pp_params_command)); memcpy(glb_params, srs_params, sizeof(struct srs_trumedia_params_GLOBAL)); pr_debug("SRS - %s: Global params - 1 = %x, 2 = %x, 3 = %x," " 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x\n", __func__, (int)glb_params->v1, (int)glb_params->v2, (int)glb_params->v3, (int)glb_params->v4, (int)glb_params->v5, (int)glb_params->v6, (int)glb_params->v7, (int)glb_params->v8); break; } case SRS_ID_WOWHD: { struct srs_trumedia_params_WOWHD *whd_params = NULL; sz = sizeof(struct asm_pp_params_command) + sizeof(struct srs_trumedia_params_WOWHD); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct srs_trumedia_params_WOWHD) + sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_TRUMEDIA_PARAMS_WOWHD; open->params.param_size = sizeof(struct srs_trumedia_params_WOWHD); whd_params = (struct srs_trumedia_params_WOWHD *)((u8 *)open + sizeof(struct asm_pp_params_command)); memcpy(whd_params, srs_params, sizeof(struct srs_trumedia_params_WOWHD)); pr_debug("SRS - %s: WOWHD params - 1 = %x, 2 = %x, 3 = %x," " 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x, 9 = %x," " 10 = %x, 11 = %x\n", __func__, (int)whd_params->v1, (int)whd_params->v2, 
(int)whd_params->v3, (int)whd_params->v4, (int)whd_params->v5, (int)whd_params->v6, (int)whd_params->v7, (int)whd_params->v8, (int)whd_params->v9, (int)whd_params->v10, (int)whd_params->v11); break; } case SRS_ID_CSHP: { struct srs_trumedia_params_CSHP *chp_params = NULL; sz = sizeof(struct asm_pp_params_command) + sizeof(struct srs_trumedia_params_CSHP); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct srs_trumedia_params_CSHP) + sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_TRUMEDIA_PARAMS_CSHP; open->params.param_size = sizeof(struct srs_trumedia_params_CSHP); chp_params = (struct srs_trumedia_params_CSHP *)((u8 *)open + sizeof(struct asm_pp_params_command)); memcpy(chp_params, srs_params, sizeof(struct srs_trumedia_params_CSHP)); pr_debug("SRS - %s: CSHP params - 1 = %x, 2 = %x, 3 = %x," " 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x," " 9 = %x\n", __func__, (int)chp_params->v1, (int)chp_params->v2, (int)chp_params->v3, (int)chp_params->v4, (int)chp_params->v5, (int)chp_params->v6, (int)chp_params->v7, (int)chp_params->v8, (int)chp_params->v9); break; } case SRS_ID_HPF: { struct srs_trumedia_params_HPF *hpf_params = NULL; sz = sizeof(struct asm_pp_params_command) + sizeof(struct srs_trumedia_params_HPF); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct srs_trumedia_params_HPF) + sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_TRUMEDIA_PARAMS_HPF; open->params.param_size = sizeof(struct srs_trumedia_params_HPF); hpf_params = (struct srs_trumedia_params_HPF *)((u8 *)open + sizeof(struct asm_pp_params_command)); memcpy(hpf_params, srs_params, sizeof(struct srs_trumedia_params_HPF)); pr_debug("SRS - %s: HPF params - 1 = %x\n", __func__, (int)hpf_params->v1); break; } case SRS_ID_PEQ: { struct srs_trumedia_params_PEQ *peq_params = NULL; sz = sizeof(struct asm_pp_params_command) + sizeof(struct srs_trumedia_params_PEQ); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct 
srs_trumedia_params_PEQ) + sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_TRUMEDIA_PARAMS_PEQ; open->params.param_size = sizeof(struct srs_trumedia_params_PEQ); peq_params = (struct srs_trumedia_params_PEQ *)((u8 *)open + sizeof(struct asm_pp_params_command)); memcpy(peq_params, srs_params, sizeof(struct srs_trumedia_params_PEQ)); pr_debug("SRS - %s: PEQ params - 1 = %x 2 = %x, 3 = %x," " 4 = %x\n", __func__, (int)peq_params->v1, (int)peq_params->v2, (int)peq_params->v3, (int)peq_params->v4); break; } case SRS_ID_HL: { struct srs_trumedia_params_HL *hl_params = NULL; sz = sizeof(struct asm_pp_params_command) + sizeof(struct srs_trumedia_params_HL); open = kzalloc(sz, GFP_KERNEL); open->payload_size = sizeof(struct srs_trumedia_params_HL) + sizeof(struct asm_pp_param_data_hdr); open->params.param_id = SRS_TRUMEDIA_PARAMS_HL; open->params.param_size = sizeof(struct srs_trumedia_params_HL); hl_params = (struct srs_trumedia_params_HL *)((u8 *)open + sizeof(struct asm_pp_params_command)); memcpy(hl_params, srs_params, sizeof(struct srs_trumedia_params_HL)); pr_debug("SRS - %s: HL params - 1 = %x, 2 = %x, 3 = %x, 4 = %x," " 5 = %x, 6 = %x, 7 = %x\n", __func__, (int)hl_params->v1, (int)hl_params->v2, (int)hl_params->v3, (int)hl_params->v4, (int)hl_params->v5, (int)hl_params->v6, (int)hl_params->v7); break; } default: goto fail_cmd; } open->payload = NULL; open->params.module_id = SRS_TRUMEDIA_MODULE_ID; open->params.reserved = 0; open->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); open->hdr.pkt_size = sz; open->hdr.src_svc = APR_SVC_ADM; open->hdr.src_domain = APR_DOMAIN_APPS; open->hdr.src_port = port_id; open->hdr.dest_svc = APR_SVC_ADM; open->hdr.dest_domain = APR_DOMAIN_ADSP; open->hdr.dest_port = atomic_read(&this_adm.copp_id[index]); open->hdr.token = port_id; open->hdr.opcode = ADM_CMD_SET_PARAMS; pr_debug("SRS - %s: Command was sent now check Q6 - port id = %d," " size %d, module id %x, param id 
%x.\n", __func__, open->hdr.dest_port, open->payload_size, open->params.module_id, open->params.param_id); ret = apr_send_pkt(this_adm.apr, (uint32_t *)open); if (ret < 0) { pr_err("SRS - %s: ADM enable for port %d failed\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } /* Wait for the callback with copp id */ ret = wait_event_timeout(this_adm.wait, 1, msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("SRS - %s: ADM open failed for port %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } fail_cmd: kfree(open); return ret; }
/*
 * afe_open() - blocking open of an AFE port: registers with APR if needed,
 * sends the audio-interface configuration, then AFE_PORT_CMD_START at
 * @rate, waiting for the DSP acknowledgement after each command.
 *
 * Returns 0 on success, -EINVAL on a bad configuration pointer, invalid
 * port, send failure, or timeout, and -ENODEV when APR registration fails.
 */
int afe_open(u16 port_id, union afe_port_config *afe_config, int rate)
{
	struct afe_audioif_config_command cfg_cmd;
	struct afe_port_start_command start_cmd;
	int rc = 0;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		rc = -EINVAL;
		return rc;
	}

	pr_info("%s: %d %d\n", __func__, port_id, rate);

	/* Lazily register the AFE service the first time through. */
	if (this_afe.apr == NULL) {
		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
			0xFFFFFFFF, &this_afe);
		pr_info("%s: Register AFE\n", __func__);
		if (this_afe.apr == NULL) {
			pr_err("%s: Unable to register AFE\n", __func__);
			rc = -ENODEV;
			return rc;
		}
	}

	cfg_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	cfg_cmd.hdr.pkt_size = sizeof(cfg_cmd);
	cfg_cmd.hdr.src_port = 0;
	cfg_cmd.hdr.dest_port = 0;
	cfg_cmd.hdr.token = 0;
	cfg_cmd.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
			port_id);
		rc = -EINVAL;
		goto fail_cmd;
	}

	cfg_cmd.port_id = port_id;
	cfg_cmd.port = *afe_config;

	/* state is set to 1 before each send and cleared by the callback. */
	atomic_set(&this_afe.state, 1);
	rc = apr_send_pkt(this_afe.apr, (uint32_t *) &cfg_cmd);
	if (rc < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
			port_id);
		rc = -EINVAL;
		goto fail_cmd;
	}

	rc = wait_event_timeout(this_afe.wait,
		(atomic_read(&this_afe.state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!rc) {
		pr_err("%s: wait_event timeout\n", __func__);
		rc = -EINVAL;
		goto fail_cmd;
	}

	start_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start_cmd.hdr.pkt_size = sizeof(start_cmd);
	start_cmd.hdr.src_port = 0;
	start_cmd.hdr.dest_port = 0;
	start_cmd.hdr.token = 0;
	start_cmd.hdr.opcode = AFE_PORT_CMD_START;
	start_cmd.port_id = port_id;
	start_cmd.gain = 0x2000;
	start_cmd.sample_rate = rate;

	atomic_set(&this_afe.state, 1);
	rc = apr_send_pkt(this_afe.apr, (uint32_t *) &start_cmd);
	if (rc < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
			port_id);
		rc = -EINVAL;
		goto fail_cmd;
	}

	rc = wait_event_timeout(this_afe.wait,
		(atomic_read(&this_afe.state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!rc) {
		pr_err("%s: wait_event timeout\n", __func__);
		rc = -EINVAL;
		goto fail_cmd;
	}

	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
		this_afe.task->comm, this_afe.task->pid);
	return 0;

fail_cmd:
	return rc;
}
int adm_disconnect_afe_port(int mode, int session_id, int port_id) { struct adm_cmd_connect_afe_port cmd; int ret = 0; int index; pr_debug("%s: port %d session id:%d mode:%d\n", __func__, port_id, session_id, mode); port_id = afe_convert_virtual_to_portid(port_id); if (afe_validate_port(port_id) < 0) { pr_err("%s port idi[%d] is invalid\n", __func__, port_id); return -ENODEV; } if (this_adm.apr == NULL) { this_adm.apr = apr_register("ADSP", "ADM", adm_callback, 0xFFFFFFFF, &this_adm); if (this_adm.apr == NULL) { pr_err("%s: Unable to register ADM\n", __func__); ret = -ENODEV; return ret; } rtac_set_adm_handle(this_adm.apr); } index = afe_get_port_index(port_id); pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index); cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); cmd.hdr.pkt_size = sizeof(cmd); cmd.hdr.src_svc = APR_SVC_ADM; cmd.hdr.src_domain = APR_DOMAIN_APPS; cmd.hdr.src_port = port_id; cmd.hdr.dest_svc = APR_SVC_ADM; cmd.hdr.dest_domain = APR_DOMAIN_ADSP; cmd.hdr.dest_port = port_id; cmd.hdr.token = port_id; cmd.hdr.opcode = ADM_CMD_DISCONNECT_AFE_PORT; cmd.mode = mode; cmd.session_id = session_id; cmd.afe_port_id = port_id; atomic_set(&this_adm.copp_stat[index], 0); ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd); if (ret < 0) { pr_err("%s:ADM enable for port %d failed\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } /* Wait for the callback with copp id */ ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[index]), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s ADM connect AFE failed for port %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } atomic_dec(&this_adm.copp_cnt[index]); return 0; fail_cmd: return ret; }
//Qualcomm CR Add int afe_apply_gain(u16 port_id, u16 gain) { struct afe_port_gain_command set_gain; int ret = 0; if (this_afe.apr == NULL) { pr_err("%s: AFE is not opened\n", __func__); ret = -EPERM; goto fail_cmd; } if (afe_validate_port(port_id) < 0) { pr_err("%s: Failed : Invalid Port id = %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } /* RX ports numbers are even .TX ports numbers are odd. */ if (port_id % 2 == 0) { pr_err("%s: Failed : afe apply gain only for TX ports." " port_id %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } pr_debug("%s: %d %hX\n", __func__, port_id, gain); set_gain.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); set_gain.hdr.pkt_size = sizeof(set_gain); set_gain.hdr.src_port = 0; set_gain.hdr.dest_port = 0; set_gain.hdr.token = 0; set_gain.hdr.opcode = AFE_PORT_CMD_APPLY_GAIN; set_gain.port_id = port_id; set_gain.gain = gain; atomic_set(&this_afe.state, 1); ret = apr_send_pkt(this_afe.apr, (uint32_t *) &set_gain); if (ret < 0) { pr_err("%s: AFE Gain set failed for port %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } ret = wait_event_timeout(this_afe.wait, (atomic_read(&this_afe.state) == 0), msecs_to_jiffies(TIMEOUT_MS)); if (ret < 0) { pr_err("%s: wait_event timeout\n", __func__); ret = -EINVAL; goto fail_cmd; } return 0; fail_cmd: return ret; }
int adm_open(int port_id, int path, int rate, int channel_mode, int topology) { struct adm_copp_open_command open; int ret = 0; int index; pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__, port_id, path, rate, channel_mode); port_id = afe_convert_virtual_to_portid(port_id); if (afe_validate_port(port_id) < 0) { pr_err("%s port idi[%d] is invalid\n", __func__, port_id); return -ENODEV; } index = afe_get_port_index(port_id); pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index); if (this_adm.apr == NULL) { this_adm.apr = apr_register("ADSP", "ADM", adm_callback, 0xFFFFFFFF, &this_adm); if (this_adm.apr == NULL) { pr_err("%s: Unable to register ADM\n", __func__); ret = -ENODEV; return ret; } rtac_set_adm_handle(this_adm.apr); } /* Create a COPP if port id are not enabled */ if (atomic_read(&this_adm.copp_cnt[index]) == 0) { open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); open.hdr.pkt_size = sizeof(open); open.hdr.src_svc = APR_SVC_ADM; open.hdr.src_domain = APR_DOMAIN_APPS; open.hdr.src_port = port_id; open.hdr.dest_svc = APR_SVC_ADM; open.hdr.dest_domain = APR_DOMAIN_ADSP; open.hdr.dest_port = port_id; open.hdr.token = port_id; open.hdr.opcode = ADM_CMD_COPP_OPEN; open.mode = path; open.endpoint_id1 = port_id; if (this_adm.ec_ref_rx == 0) { #if defined(CONFIG_MACH_SERRANO) || defined(CONFIG_MACH_GOLDEN) \ || defined(CONFIG_MACH_MELIUS_ATT) || defined(CONFIG_MACH_MELIUS_TMO) \ || defined(CONFIG_MACH_MELIUS_VZW) || defined(CONFIG_MACH_MELIUS_SPR) \ || defined(CONFIG_MACH_MELIUS_USC) || defined(CONFIG_MACH_MELIUS_EUR_OPEN) \ || defined(CONFIG_MACH_MELIUS_EUR_LTE) || defined(CONFIG_MACH_MELIUS_SKT) \ || defined(CONFIG_MACH_MELIUS_KTT) || defined(CONFIG_MACH_MELIUS_LGT) \ || defined(CONFIG_MACH_MELIUS_CHN_CTC) open.endpoint_id2 = this_adm.ec_ref_rx; #else open.endpoint_id2 = 0xFFFF; #endif } else if (this_adm.ec_ref_rx && (path != 1)) { open.endpoint_id2 = this_adm.ec_ref_rx; this_adm.ec_ref_rx = 0; } 
pr_debug("%s open.endpoint_id1:%d open.endpoint_id2:%d", __func__, open.endpoint_id1, open.endpoint_id2); /* convert path to acdb path */ if (path == ADM_PATH_PLAYBACK) open.topology_id = get_adm_rx_topology(); else { open.topology_id = get_adm_tx_topology(); if ((open.topology_id == VPM_TX_SM_ECNS_COPP_TOPOLOGY) || (open.topology_id == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY)) rate = 16000; } if ((open.topology_id == 0) || (port_id == VOICE_RECORD_RX) || (port_id == VOICE_RECORD_TX)) open.topology_id = topology; open.channel_config = channel_mode & 0x00FF; open.rate = rate; pr_debug("%s: channel_config=%d port_id=%d rate=%d" "topology_id=0x%X\n", __func__, open.channel_config,\ open.endpoint_id1, open.rate,\ open.topology_id); atomic_set(&this_adm.copp_stat[index], 0); ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open); if (ret < 0) { pr_err("%s:ADM enable for port %d failed\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } /* Wait for the callback with copp id */ ret = wait_event_timeout(this_adm.wait, atomic_read(&this_adm.copp_stat[index]), msecs_to_jiffies(TIMEOUT_MS)); if (!ret) { pr_err("%s ADM open failed for port %d\n", __func__, port_id); ret = -EINVAL; goto fail_cmd; } } atomic_inc(&this_adm.copp_cnt[index]); return 0; fail_cmd: return ret; }