static void afe_send_cal_block(int32_t path, u16 port_id)
{
	int result = 0;
	struct acdb_cal_block cal_block;
	struct afe_port_cmd_set_param_no_payload afe_cal;

	pr_debug("%s: path %d\n", __func__, path);

	get_afe_cal(path, &cal_block);
	if (cal_block.cal_size <= 0) {
		pr_debug("%s: No AFE cal to send!\n", __func__);
		goto done;
	}

	/* Remap the shared calibration region only when the ACDB block
	 * has moved or grown beyond the currently mapped size. */
	if ((afe_cal_addr[path].cal_paddr != cal_block.cal_paddr) ||
	    (cal_block.cal_size > afe_cal_addr[path].cal_size)) {
		if (afe_cal_addr[path].cal_paddr != 0)
			afe_cmd_memory_unmap(afe_cal_addr[path].cal_paddr);

		afe_cmd_memory_map(cal_block.cal_paddr, cal_block.cal_size);
		afe_cal_addr[path].cal_paddr = cal_block.cal_paddr;
		afe_cal_addr[path].cal_size = cal_block.cal_size;
	}

	afe_cal.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
					      APR_HDR_LEN(APR_HDR_SIZE),
					      APR_PKT_VER);
	afe_cal.hdr.pkt_size = sizeof(afe_cal);
	afe_cal.hdr.src_port = 0;
	afe_cal.hdr.dest_port = 0;
	afe_cal.hdr.token = 0;
	afe_cal.hdr.opcode = AFE_PORT_CMD_SET_PARAM;
	afe_cal.port_id = port_id;
	afe_cal.payload_size = cal_block.cal_size;
	afe_cal.payload_address = cal_block.cal_paddr;

	pr_debug("%s: AFE cal sent for device port = %d, path = %d, cal size = %d, cal addr = 0x%x\n",
		 __func__, port_id, path,
		 cal_block.cal_size, cal_block.cal_paddr);

	atomic_set(&this_afe.state, 1);
	result = apr_send_pkt(this_afe.apr, (uint32_t *) &afe_cal);
	if (result < 0) {
		pr_err("%s: AFE cal for port %d failed\n", __func__, port_id);
		goto done;	/* send failed: no response will arrive, skip the wait */
	}

	result = wait_event_timeout(this_afe.wait,
				    (atomic_read(&this_afe.state) == 0),
				    msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		pr_err("%s: wait_event timeout SET AFE CAL\n", __func__);
		HTC_Q6_BUG();
		goto done;
	}

	pr_debug("%s: AFE cal sent for path %d device!\n", __func__, path);
done:
	return;
}
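/*
 * Illustrative sketch (not the driver's exact handler): afe_send_cal_block()
 * blocks on this_afe.wait until this_afe.state drops back to 0, which the
 * APR response callback is expected to do. The handler name and the single
 * APR_BASIC_RSP_RESULT case below are assumptions for illustration only.
 */
static int32_t afe_cal_resp_sketch(struct apr_client_data *data, void *priv)
{
	if (data->opcode == APR_BASIC_RSP_RESULT) {
		/* DSP acknowledged the SET_PARAM command: release the waiter. */
		atomic_set(&this_afe.state, 0);
		wake_up(&this_afe.wait);
	}
	return 0;
}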
static int config(struct pcm *pcm)
{
	int ret = 0, i;
	struct audio_buffer *buf;

	pr_debug("%s\n", __func__);

	ret = q6asm_audio_client_buf_alloc_contiguous(OUT, pcm->ac,
						      pcm->buffer_size,
						      pcm->buffer_count);
	if (ret < 0) {
		pr_err("Audio Start: Buffer Allocation failed rc = %d\n", ret);
		return -ENOMEM;
	}

	buf = pcm->ac->port[OUT].buf;
	if (buf == NULL || buf[0].data == NULL)
		return -ENOMEM;

	memset(buf[0].data, 0, pcm->buffer_size * pcm->buffer_count);
	pcm->dma_addr = (u32) buf[0].phys;
	pcm->dma_virt = (u32) buf[0].data;

	/* Record the physical and virtual address of each period so the
	 * AFE event callback can hand buffers to the RT proxy port. */
	for (i = 0; i < pcm->buffer_count; i++) {
		pcm->dma_buf[i].addr = (u32) buf[i].phys;
		pcm->dma_buf[i].v_addr = (u32) buf[i].data;
		pcm->dma_buf[i].used = 0;
	}

	ret = afe_register_get_events(RT_PROXY_DAI_001_TX,
				      pcm_afe_callback, pcm);
	if (ret < 0) {
		pr_err("afe-pcm: register for events failed\n");
		return ret;
	}

	ret = afe_cmd_memory_map(pcm->dma_addr,
				 pcm->buffer_size * pcm->buffer_count);
	if (ret < 0) {
		pr_err("fail to map memory to DSP\n");
		/* undo the event registration so a retry starts clean */
		afe_unregister_get_events(RT_PROXY_DAI_001_TX);
		return ret;
	}

	pr_debug("%s: success\n", __func__);
	return ret;
}
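/*
 * Hypothetical caller sketch: config() sizes both the q6asm contiguous
 * allocation and the DSP memory map from buffer_size * buffer_count, so
 * the buffer geometry must be settled before it runs. The field values
 * below are assumptions for illustration, not values taken from this
 * driver.
 */
static int pcm_setup_sketch(struct pcm *pcm)
{
	pcm->buffer_size = 4096;	/* assumed period size in bytes */
	pcm->buffer_count = 2;		/* assumed ping-pong buffering */
	return config(pcm);
}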
static int msm_afe_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
	struct pcm_afe_info *prtd = runtime->private_data;
	int rc;

	pr_debug("%s:\n", __func__);

	mutex_lock(&prtd->lock);

	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
	dma_buf->dev.dev = substream->pcm->card->dev;
	dma_buf->private_data = NULL;
	dma_buf->area = dma_alloc_coherent(dma_buf->dev.dev,
					   runtime->hw.buffer_bytes_max,
					   &dma_buf->addr, GFP_KERNEL);
	pr_debug("%s: dma_buf->area: 0x%p, dma_buf->addr: 0x%x\n", __func__,
		 (unsigned int *) dma_buf->area, dma_buf->addr);
	if (!dma_buf->area) {
		pr_err("%s: MSM AFE memory allocation failed\n", __func__);
		mutex_unlock(&prtd->lock);
		return -ENOMEM;
	}

	dma_buf->bytes = runtime->hw.buffer_bytes_max;
	memset(dma_buf->area, 0, runtime->hw.buffer_bytes_max);
	prtd->dma_addr = (u32) dma_buf->addr;
	mutex_unlock(&prtd->lock);

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	rc = afe_cmd_memory_map(dma_buf->addr, dma_buf->bytes);
	if (rc < 0)
		pr_err("fail to map memory to DSP\n");

	return rc;
}
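/*
 * Hedged sketch of the matching teardown (the function name is assumed):
 * undo msm_afe_hw_params() in reverse order - unmap the region from the
 * DSP first, then free the coherent buffer and detach it from the runtime.
 */
static int msm_afe_hw_free_sketch(struct snd_pcm_substream *substream)
{
	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;

	if (dma_buf->area) {
		afe_cmd_memory_unmap((u32) dma_buf->addr);
		dma_free_coherent(dma_buf->dev.dev, dma_buf->bytes,
				  dma_buf->area, dma_buf->addr);
		dma_buf->area = NULL;
	}
	snd_pcm_set_runtime_buffer(substream, NULL);
	return 0;
}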