Example #1
File: rtac.c Project: Ca1ne/Enoch316
u32 send_rtac_asm_apr(void *buf, u32 opcode)
{
	s32	result;
	u32	count = 0;
	u32	bytes_returned = 0;
	u32	session_id = 0;
	u32	payload_size;
	struct apr_hdr	asm_params;
	pr_debug("%s\n", __func__);

	if (copy_from_user(&count, (void *)buf, sizeof(count))) {
		pr_err("%s: Copy to user failed! buf = 0x%x\n",
		       __func__, (unsigned int)buf);
		result = -EFAULT;
		goto done;
	}

	if (count <= 0) {
		pr_err("%s: Invalid buffer size = %d\n", __func__, count);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}

	if (payload_size > MAX_PAYLOAD_SIZE) {
		pr_err("%s: Invalid payload size = %d\n",
			__func__, payload_size);
		goto done;
	}

	if (copy_from_user(&session_id, buf + 2 * sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy session id from user buffer\n",
			__func__);
		goto done;
	}
	if (session_id >= (SESSION_MAX + 1)) {
		pr_err("%s: Invalid Session = %d\n", __func__, session_id);
		goto done;
	}

	mutex_lock(&rtac_asm_apr_mutex);
	if (rtac_asm_apr_data[session_id].apr_handle == NULL) {
		pr_err("%s: APR not initialized\n", __func__);
		goto err;
	}

	/* Remember the user buffer size for the response copy */
	rtac_asm_user_buf_size = count;

	/* Copy the command payload into the in-band buffer */
	if (copy_from_user(rtac_asm_buffer + sizeof(asm_params),
		buf + 3 * sizeof(u32), payload_size)) {
		pr_err("%s: Could not copy payload from user buffer\n",
			__func__);
		goto err;
	}

	/* Pack header */
	asm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	asm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		payload_size);
	asm_params.src_svc = q6asm_get_apr_service_id(session_id);
	asm_params.src_domain = APR_DOMAIN_APPS;
	asm_params.src_port = (session_id << 8) | 0x0001;
	asm_params.dest_svc = APR_SVC_ASM;
	asm_params.dest_domain = APR_DOMAIN_ADSP;
	asm_params.dest_port = (session_id << 8) | 0x0001;
	asm_params.token = session_id;
	asm_params.opcode = opcode;

	memcpy(rtac_asm_buffer, &asm_params, sizeof(asm_params));
	atomic_set(&rtac_asm_apr_data[session_id].cmd_state, 1);

	pr_debug("%s: Sending RTAC command size = %d, session_id=%d\n",
		__func__, asm_params.pkt_size, session_id);

	result = apr_send_pkt(rtac_asm_apr_data[session_id].apr_handle,
				(uint32_t *)rtac_asm_buffer);
	if (result < 0) {
		pr_err("%s: Set params failed session = %d\n",
			__func__, session_id);
		goto err;
	}

	/* Wait for the callback */
	result = wait_event_timeout(rtac_asm_apr_data[session_id].cmd_wait,
		(atomic_read(&rtac_asm_apr_data[session_id].cmd_state) == 0),
		5 * HZ);
	mutex_unlock(&rtac_asm_apr_mutex);
	if (!result) {
		pr_err("%s: Set params timed out session = %d\n",
			__func__, session_id);
		goto done;
	}

	if (rtac_asm_payload_size != 0) {
		if (copy_to_user(buf, rtac_asm_buffer,
			rtac_asm_payload_size + sizeof(u32))) {
			pr_err("%s: Could not copy buffer to user,size = %d\n",
				 __func__, payload_size);
			goto done;
		}
	}

	/* Return bytes read for a GET command, else bytes sent */
	if (opcode == ASM_STREAM_CMD_GET_PP_PARAMS)
		bytes_returned = rtac_asm_payload_size;
	else
		bytes_returned = payload_size;
done:
	return bytes_returned;
err:
	mutex_unlock(&rtac_asm_apr_mutex);
	return bytes_returned;
}
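
The wait_event_timeout() above returns early only when the APR response handler clears cmd_state and wakes cmd_wait. A minimal sketch of that completion side, assuming the rtac_asm_apr_data[] declarations from this snippet (the handler name and the token-to-session mapping are assumptions, not code from rtac.c):

/* Sketch only: releases the wait in send_rtac_asm_apr(). */
static int32_t rtac_asm_apr_callback(struct apr_client_data *data, void *priv)
{
	u32 session_id = data->token;	/* token was set to session_id */

	if (session_id > SESSION_MAX)
		return 0;

	atomic_set(&rtac_asm_apr_data[session_id].cmd_state, 0);
	wake_up(&rtac_asm_apr_data[session_id].cmd_wait);
	return 0;
}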
Example #2
int q6usm_enc_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
{
	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_cmd_encdec_cfg_blk  enc_cfg_obj;
	struct usm_stream_cmd_encdec_cfg_blk  *enc_cfg = &enc_cfg_obj;
	int rc = 0;
	uint32_t total_cfg_size =
		sizeof(struct usm_stream_cmd_encdec_cfg_blk);
	uint32_t round_params_size = 0;
	uint8_t  is_allocated = 0;


	if ((usc == NULL) || (us_cfg == NULL)) {
		pr_err("%s: wrong input", __func__);
		return -EINVAL;
	}

	int_format = q6usm_ext2int_format(us_cfg->format_id);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong input format[%d]",
		       __func__, us_cfg->format_id);
		return -EINVAL;
	}

	/* Round up the custom parameters size to a 4-byte boundary */
	round_params_size = ((us_cfg->params_size + 3)/4) * 4;
	if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
		/* The config block already reserves USM_MAX_CFG_DATA_SIZE
		 * bytes, so allocate only the extra space beyond it. */
		round_params_size -= USM_MAX_CFG_DATA_SIZE;
		total_cfg_size += round_params_size;
		enc_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
		if (enc_cfg == NULL) {
			pr_err("%s: enc_cfg[%d] allocation failed\n",
			       __func__, total_cfg_size);
			return -ENOMEM;
		}
		is_allocated = 1;
	} else
		round_params_size = 0;

	q6usm_add_hdr(usc, &enc_cfg->hdr, total_cfg_size - APR_HDR_SIZE, true);

	enc_cfg->hdr.opcode = USM_STREAM_CMD_SET_ENC_PARAM;
	enc_cfg->param_id = USM_PARAM_ID_ENCDEC_ENC_CFG_BLK;
	enc_cfg->param_size = sizeof(struct usm_encode_cfg_blk)+
				round_params_size;
	enc_cfg->enc_blk.frames_per_buf = 1;
	enc_cfg->enc_blk.format_id = int_format;
	enc_cfg->enc_blk.cfg_size = sizeof(struct usm_cfg_common)+
				    USM_MAX_CFG_DATA_SIZE +
				    round_params_size;
	memcpy(&(enc_cfg->enc_blk.cfg_common), &(us_cfg->cfg_common),
	       sizeof(struct usm_cfg_common));

	/* Copy the format-specific (transparent) parameter data */
	memcpy(enc_cfg->enc_blk.transp_data, us_cfg->params,
	       us_cfg->params_size);
	pr_debug("%s: cfg_size[%d], params_size[%d]\n",
		__func__,
		enc_cfg->enc_blk.cfg_size,
		us_cfg->params_size);
	pr_debug("%s: params[%d,%d,%d,%d, %d,%d,%d,%d]\n",
		__func__,
		enc_cfg->enc_blk.transp_data[0],
		enc_cfg->enc_blk.transp_data[1],
		enc_cfg->enc_blk.transp_data[2],
		enc_cfg->enc_blk.transp_data[3],
		enc_cfg->enc_blk.transp_data[4],
		enc_cfg->enc_blk.transp_data[5],
		enc_cfg->enc_blk.transp_data[6],
		enc_cfg->enc_blk.transp_data[7]
	       );
	pr_debug("%s: srate:%d, ch=%d, bps= %d;\n",
		__func__, enc_cfg->enc_blk.cfg_common.sample_rate,
		enc_cfg->enc_blk.cfg_common.ch_cfg,
		enc_cfg->enc_blk.cfg_common.bits_per_sample);
	pr_debug("dmap:[0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x]; dev_id=0x%x\n",
		enc_cfg->enc_blk.cfg_common.data_map[0],
		enc_cfg->enc_blk.cfg_common.data_map[1],
		enc_cfg->enc_blk.cfg_common.data_map[2],
		enc_cfg->enc_blk.cfg_common.data_map[3],
		enc_cfg->enc_blk.cfg_common.data_map[4],
		enc_cfg->enc_blk.cfg_common.data_map[5],
		enc_cfg->enc_blk.cfg_common.data_map[6],
		enc_cfg->enc_blk.cfg_common.data_map[7],
		enc_cfg->enc_blk.cfg_common.dev_id);

	rc = apr_send_pkt(usc->apr, (uint32_t *) enc_cfg);
	if (rc < 0) {
		pr_err("%s:Comamnd open failed\n", __func__);
		rc = -EINVAL;
		goto fail_cmd;
	}
	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout opcode[0x%x]\n",
		       __func__, enc_cfg->hdr.opcode);
	} else
		rc = 0;

fail_cmd:
	if (is_allocated == 1)
		kfree(enc_cfg);

	return rc;
}
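
The (atomic_read(&usc->cmd_state) == 0) condition is satisfied by the client's APR callback. A hedged sketch of that path, assuming the us_client pointer is handed back as the callback's private data (the handler name is illustrative):

/* Sketch only: the APR response path that ends the Q6USM command wait. */
static int32_t q6usm_apr_callback_sketch(struct apr_client_data *data,
					 void *priv)
{
	struct us_client *usc = priv;

	if (data->opcode == APR_BASIC_RSP_RESULT) {
		atomic_set(&usc->cmd_state, 0);	/* command acknowledged */
		wake_up(&usc->cmd_wait);
	}
	return 0;
}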
Example #3
u32 send_rtac_asm_apr(void *buf, u32 opcode)
{
	s32	result;
	u32	user_buf_size = 0;
	u32	bytes_returned = 0;
	u32	session_id = 0;
	u32	payload_size;
	u32	data_size = 0;
	struct apr_hdr		asm_params;
	pr_debug("%s\n", __func__);

	if (rtac_cal[ASM_RTAC_CAL].map_data.ion_handle == NULL) {
		result = rtac_allocate_cal_buffer(ASM_RTAC_CAL);
		if (result < 0) {
			pr_err("%s: allocate buffer failed!",
				__func__);
			goto done;
		}
	}

	if (rtac_cal[ASM_RTAC_CAL].map_data.map_handle == 0) {
		result = rtac_map_cal_buffer(ASM_RTAC_CAL);
		if (result < 0) {
			pr_err("%s: map buffer failed!",
				__func__);
			goto done;
		}
	}

	if (copy_from_user(&user_buf_size, (void *)buf,
						sizeof(user_buf_size))) {
		pr_err("%s: Copy from user failed! buf = 0x%x\n",
		       __func__, (unsigned int)buf);
		goto done;
	}
	if (user_buf_size <= 0) {
		pr_err("%s: Invalid buffer size = %d\n",
			__func__, user_buf_size);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}

	if (copy_from_user(&session_id, buf + 2 * sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy session id from user buffer\n",
			__func__);
		goto done;
	}
	if (session_id >= (SESSION_MAX + 1)) {
		pr_err("%s: Invalid Session = %d\n", __func__, session_id);
		goto done;
	}

	mutex_lock(&rtac_asm_apr_mutex);
	if (rtac_asm_apr_data[session_id].apr_handle == NULL) {
		pr_err("%s: APR not initialized\n", __func__);
		goto err;
	}

	if (opcode == ASM_STREAM_CMD_SET_PP_PARAMS_V2) {
		/* set payload size to in-band payload */
		/* set data size to actual out of band payload size */
		data_size = payload_size - 4 * sizeof(u32);
		if (data_size > rtac_cal[ASM_RTAC_CAL].map_data.map_size) {
			pr_err("%s: Invalid data size = %d\n",
				__func__, data_size);
			goto done;
		}
		payload_size = 4 * sizeof(u32);

		/* Copy buffer to out-of-band payload */
		if (copy_from_user((void *)
				rtac_cal[ASM_RTAC_CAL].cal_data.kvaddr,
				buf + 7 * sizeof(u32), data_size)) {
			pr_err("%s: Could not copy payload from user buffer\n",
				__func__);
			goto err;
		}
		/* set payload size in packet */
		rtac_asm_buffer[8] = data_size;

	} else {
		if (payload_size > MAX_PAYLOAD_SIZE) {
			pr_err("%s: Invalid payload size = %d\n",
				__func__, payload_size);
			goto done;
		}

		/* Copy buffer to in-band payload */
		if (copy_from_user(rtac_asm_buffer +
				sizeof(asm_params)/sizeof(u32),
				buf + 3 * sizeof(u32), payload_size)) {
			pr_err("%s: Could not copy payload from user buffer\n",
				__func__);
			goto err;
		}
	}

	/* Pack header */
	asm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	asm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		payload_size);
	asm_params.src_svc = q6asm_get_apr_service_id(session_id);
	asm_params.src_domain = APR_DOMAIN_APPS;
	asm_params.src_port = (session_id << 8) | 0x0001;
	asm_params.dest_svc = APR_SVC_ASM;
	asm_params.dest_domain = APR_DOMAIN_ADSP;
	asm_params.dest_port = (session_id << 8) | 0x0001;
	asm_params.token = session_id;
	asm_params.opcode = opcode;

	/* fill for out-of-band */
	rtac_asm_buffer[5] = rtac_cal[ASM_RTAC_CAL].cal_data.paddr;
	rtac_asm_buffer[6] = 0;
	rtac_asm_buffer[7] = rtac_cal[ASM_RTAC_CAL].map_data.map_handle;

	memcpy(rtac_asm_buffer, &asm_params, sizeof(asm_params));
	atomic_set(&rtac_asm_apr_data[session_id].cmd_state, 1);

	pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%x\n",
		__func__, opcode,
		rtac_cal[ASM_RTAC_CAL].cal_data.paddr);

	result = apr_send_pkt(rtac_asm_apr_data[session_id].apr_handle,
				(uint32_t *)rtac_asm_buffer);
	if (result < 0) {
		pr_err("%s: Set params failed session = %d\n",
			__func__, session_id);
		goto err;
	}

	/* Wait for the callback */
	result = wait_event_timeout(rtac_asm_apr_data[session_id].cmd_wait,
		(atomic_read(&rtac_asm_apr_data[session_id].cmd_state) == 0),
		5 * HZ);
	if (!result) {
		pr_err("%s: Set params timed out session = %d\n",
			__func__, session_id);
		goto err;
	}
	if (atomic_read(&rtac_common.apr_err_code)) {
		pr_err("%s: DSP returned error code = %d, opcode = 0x%x\n",
			__func__, atomic_read(&rtac_common.apr_err_code),
			opcode);
		goto err;
	}

	if (opcode == ASM_STREAM_CMD_GET_PP_PARAMS_V2) {
		bytes_returned = ((u32 *)rtac_cal[ASM_RTAC_CAL].cal_data.
			kvaddr)[2] + 3 * sizeof(u32);

		if (bytes_returned > user_buf_size) {
			pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
				__func__, user_buf_size, bytes_returned);
			goto err;
		}

		if (copy_to_user(buf, (void *)
				rtac_cal[ASM_RTAC_CAL].cal_data.kvaddr,
				bytes_returned)) {
			pr_err("%s: Could not copy buffer to user,size = %d\n",
				 __func__, bytes_returned);
			goto err;
		}
	} else {
		bytes_returned = data_size;
	}
err:
	mutex_unlock(&rtac_asm_apr_mutex);
done:
	return bytes_returned;
}
Example #4
u32 send_adm_apr(void *buf, u32 opcode)
{
	s32	result;
	u32	user_buf_size = 0;
	u32	bytes_returned = 0;
	u32	port_index = 0;
	u32	copp_id;
	int	port_id;
	u32	payload_size;
	u32	data_size = 0;
	struct apr_hdr	adm_params;
	pr_debug("%s\n", __func__);

	if (rtac_cal[ADM_RTAC_CAL].map_data.ion_handle == NULL) {
		result = rtac_allocate_cal_buffer(ADM_RTAC_CAL);
		if (result < 0) {
			pr_err("%s: allocate buffer failed!",
				__func__);
			goto done;
		}
	}

	if (rtac_cal[ADM_RTAC_CAL].map_data.map_handle == 0) {
		result = rtac_map_cal_buffer(ADM_RTAC_CAL);
		if (result < 0) {
			pr_err("%s: map buffer failed!",
				__func__);
			goto done;
		}
	}

	if (copy_from_user(&user_buf_size, (void *)buf,
						sizeof(user_buf_size))) {
		pr_err("%s: Copy from user failed! buf = 0x%x\n",
		       __func__, (unsigned int)buf);
		goto done;
	}
	if (user_buf_size <= 0) {
		pr_err("%s: Invalid buffer size = %d\n",
			__func__, user_buf_size);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}

	if (copy_from_user(&copp_id, buf + 2 * sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy port id from user buffer\n",
			__func__);
		goto done;
	}

	for (port_index = 0; port_index < AFE_MAX_PORTS; port_index++) {
		if (adm_get_copp_id(port_index) == copp_id)
			break;
		if (adm_get_lowlatency_copp_id(port_index) == copp_id)
			break;
	}
	if (port_index >= AFE_MAX_PORTS) {
		pr_err("%s: Could not find port index for copp = %d\n",
		       __func__, copp_id);
		goto done;
	}
	port_id = q6audio_get_port_id_from_index(port_index);

	if (port_id < 0) {
		pr_err("%s: Could not find port id mapped for port_idx %d\n",
		       __func__, port_index);
		goto done;
	}

	mutex_lock(&rtac_adm_apr_mutex);
	if (rtac_adm_apr_data.apr_handle == NULL) {
		pr_err("%s: APR not initialized\n", __func__);
		goto err;
	}

	if (opcode == ADM_CMD_SET_PP_PARAMS_V5) {
		/* set payload size to in-band payload */
		/* set data size to actual out of band payload size */
		data_size = payload_size - 4 * sizeof(u32);
		if (data_size > rtac_cal[ADM_RTAC_CAL].map_data.map_size) {
			pr_err("%s: Invalid data size = %d\n",
				__func__, data_size);
			goto done;
		}
		payload_size = 4 * sizeof(u32);

		/* Copy buffer to out-of-band payload */
		if (copy_from_user((void *)
				rtac_cal[ADM_RTAC_CAL].cal_data.kvaddr,
				buf + 7 * sizeof(u32), data_size)) {
			pr_err("%s: Could not copy payload from user buffer\n",
				__func__);
			goto err;
		}
		/* set payload size in packet */
		rtac_adm_buffer[8] = data_size;
	} else {
		if (payload_size > MAX_PAYLOAD_SIZE) {
			pr_err("%s: Invalid payload size = %d\n",
				__func__, payload_size);
			goto done;
		}

		/* Copy buffer to in-band payload */
		if (copy_from_user(rtac_adm_buffer +
				sizeof(adm_params)/sizeof(u32),
				buf + 3 * sizeof(u32), payload_size)) {
			pr_err("%s: Could not copy payload from user buffer\n",
				__func__);
			goto err;
		}
	}

	/* Pack header */
	adm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		payload_size);
	adm_params.src_svc = APR_SVC_ADM;
	adm_params.src_domain = APR_DOMAIN_APPS;
	adm_params.src_port = copp_id;
	adm_params.dest_svc = APR_SVC_ADM;
	adm_params.dest_domain = APR_DOMAIN_ADSP;
	adm_params.dest_port = copp_id;
	adm_params.token = port_id;
	adm_params.opcode = opcode;

	/* fill for out-of-band */
	rtac_adm_buffer[5] = rtac_cal[ADM_RTAC_CAL].cal_data.paddr;
	rtac_adm_buffer[6] = 0;
	rtac_adm_buffer[7] = rtac_cal[ADM_RTAC_CAL].map_data.map_handle;

	memcpy(rtac_adm_buffer, &adm_params, sizeof(adm_params));
	atomic_set(&rtac_adm_apr_data.cmd_state, 1);

	pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%x\n",
		__func__, opcode,
		rtac_cal[ADM_RTAC_CAL].cal_data.paddr);

	result = apr_send_pkt(rtac_adm_apr_data.apr_handle,
					(uint32_t *)rtac_adm_buffer);
	if (result < 0) {
		pr_err("%s: Set params failed port = %d, copp = %d\n",
			__func__, port_index, copp_id);
		goto err;
	}
	/* Wait for the callback */
	result = wait_event_timeout(rtac_adm_apr_data.cmd_wait,
		(atomic_read(&rtac_adm_apr_data.cmd_state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		pr_err("%s: Set params timed out copp = %d\n", __func__,
			copp_id);
		goto err;
	}
	if (atomic_read(&rtac_common.apr_err_code)) {
		pr_err("%s: DSP returned error code = %d, opcode = 0x%x\n",
			__func__, atomic_read(&rtac_common.apr_err_code),
			opcode);
		goto err;
	}

	if (opcode == ADM_CMD_GET_PP_PARAMS_V5) {
		bytes_returned = ((u32 *)rtac_cal[ADM_RTAC_CAL].cal_data.
			kvaddr)[2] + 3 * sizeof(u32);

		if (bytes_returned > user_buf_size) {
			pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
				__func__, user_buf_size, bytes_returned);
			goto err;
		}

		if (copy_to_user(buf, (void *)
				rtac_cal[ADM_RTAC_CAL].cal_data.kvaddr,
				bytes_returned)) {
			pr_err("%s: Could not copy buffer to user,size = %d\n",
				__func__, bytes_returned);
			goto err;
		}
	} else {
		bytes_returned = data_size;
	}
err:
	mutex_unlock(&rtac_adm_apr_mutex);
done:
	return bytes_returned;
}
Example #5
File: am200epd.c Project: E-LLP/n900
static int am200_wait_event(struct metronomefb_par *par)
{
	return wait_event_timeout(par->waitq, gpio_get_value(RDY_GPIO_PIN), HZ);
}
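
The GPIO condition above is re-evaluated each time par->waitq is woken, which in this driver comes from the RDY line's interrupt. A sketch of such a handler (the handler name and IRQ wiring are assumptions):

#include <linux/interrupt.h>

/* Sketch only: wake the queue so am200_wait_event() re-checks RDY. */
static irqreturn_t am200_rdy_irq(int irq, void *dev_id)
{
	struct metronomefb_par *par = dev_id;

	wake_up(&par->waitq);
	return IRQ_HANDLED;
}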
Example #6
/**
 * Main entrypoint for syncpoint value waits.
 */
int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
			u32 thresh, u32 timeout, u32 *value,
			struct timespec *ts, bool interruptible)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	void *ref;
	void *waiter;
	int err = 0, check_count = 0, low_timeout = 0;
	u32 val, old_val, new_val;

	if (!id || id >= nvhost_syncpt_nb_pts(sp))
		return -EINVAL;

	if (value)
		*value = 0;

	/* first check cache */
	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
		if (value)
			*value = nvhost_syncpt_read_min(sp, id);
		if (ts)
			ktime_get_ts(ts);
		return 0;
	}

	/* keep host alive */
	err = nvhost_module_busy(syncpt_to_dev(sp)->dev);
	if (err)
		return err;

	/* try to read from register */
	val = syncpt_op().update_min(sp, id);
	if (nvhost_syncpt_is_expired(sp, id, thresh)) {
		if (value)
			*value = val;
		if (ts)
			ktime_get_ts(ts);
		goto done;
	}

	if (!timeout) {
		err = -EAGAIN;
		goto done;
	}

	old_val = val;

	/* schedule a wakeup when the syncpoint value is reached */
	waiter = nvhost_intr_alloc_waiter();
	if (!waiter) {
		err = -ENOMEM;
		goto done;
	}

	err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
				interruptible ?
				  NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE :
				  NVHOST_INTR_ACTION_WAKEUP,
				&wq,
				waiter,
				&ref);
	if (err)
		goto done;

	err = -EAGAIN;
	/* Caller-specified timeout may be impractically low */
	if (timeout < SYNCPT_CHECK_PERIOD)
		low_timeout = timeout;

	/* wait for the syncpoint, or timeout, or signal */
	while (timeout) {
		u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
		int remain;
		if (interruptible)
			remain = wait_event_interruptible_timeout(wq,
				syncpt_update_min_is_expired(sp, id, thresh),
				check);
		else
			remain = wait_event_timeout(wq,
				syncpt_update_min_is_expired(sp, id, thresh),
				check);
		if (remain > 0 || nvhost_syncpt_is_expired(sp, id, thresh)) {
			if (value)
				*value = nvhost_syncpt_read_min(sp, id);
			if (ts) {
				err = nvhost_intr_release_time(ref, ts);
				if (err)
					ktime_get_ts(ts);
			}

			err = 0;
			break;
		}
		if (remain < 0) {
			err = remain;
			break;
		}
		if (timeout != NVHOST_NO_TIMEOUT)
			timeout -= check;
		if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
			new_val = syncpt_op().update_min(sp, id);
			if (old_val == new_val) {
				dev_warn(&syncpt_to_dev(sp)->dev->dev,
					"%s: syncpoint id %d (%s) stuck waiting %d, timeout=%d\n",
					 current->comm, id,
					 syncpt_op().name(sp, id),
					 thresh, timeout);
				syncpt_op().debug(sp);
			} else {
				old_val = new_val;
				dev_warn(&syncpt_to_dev(sp)->dev->dev,
					"%s: syncpoint id %d (%s) progressing slowly %d, timeout=%d\n",
					 current->comm, id,
					 syncpt_op().name(sp, id),
					 thresh, timeout);
			}
			if (check_count == MAX_STUCK_CHECK_COUNT) {
				if (low_timeout) {
					dev_warn(&syncpt_to_dev(sp)->dev->dev,
						"is timeout %d too low?\n",
						low_timeout);
				}
				nvhost_debug_dump(syncpt_to_dev(sp));
			}
			check_count++;
		}
	}
	nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), id, ref);

done:
	nvhost_module_idle(syncpt_to_dev(sp)->dev);
	return err;
}
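
A hedged usage sketch for the entrypoint above: wait interruptibly for a fence value, with the timeout expressed in jiffies as the check loop expects (sp, id and fence are placeholders for the caller's values):

/* Sketch only: wait up to 200 ms for syncpoint `id` to reach `fence`. */
static int wait_for_fence(struct nvhost_syncpt *sp, u32 id, u32 fence)
{
	u32 observed = 0;
	int err = nvhost_syncpt_wait_timeout(sp, id, fence,
					     msecs_to_jiffies(200),
					     &observed, NULL, true);

	if (err == -EAGAIN)
		pr_warn("syncpt %u: fence %u not reached before timeout\n",
			id, fence);
	return err;
}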
Example #7
int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id,
				uint32_t *bufsz, uint32_t bufcnt)
{
	struct  adm_cmd_memory_map_regions *mmap_regions = NULL;
	struct  adm_memory_map_regions *mregions = NULL;
	void    *mmap_region_cmd = NULL;
	void    *payload = NULL;
	int     ret = 0;
	int     i = 0;
	int     cmd_size = 0;

	pr_debug("%s\n", __func__);
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	cmd_size = sizeof(struct adm_cmd_memory_map_regions)
			+ sizeof(struct adm_memory_map_regions) * bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!mmap_region_cmd) {
		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	mmap_regions = (struct adm_cmd_memory_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
								APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = 0;
	mmap_regions->hdr.token = 0;
	mmap_regions->hdr.opcode = ADM_CMD_MEMORY_MAP_REGIONS;
	mmap_regions->mempool_id = mempool_id & 0x00ff;
	mmap_regions->nregions = bufcnt & 0x00ff;
	pr_debug("%s: map_regions->nregions = %d\n", __func__,
				mmap_regions->nregions);
	payload = ((u8 *) mmap_region_cmd +
				sizeof(struct adm_cmd_memory_map_regions));
	mregions = (struct adm_memory_map_regions *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->phys = buf_add[i];
		mregions->buf_size = bufsz[i];
		++mregions;
	}

	atomic_set(&this_adm.copp_stat[0], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
					mmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_map\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(mmap_region_cmd);
	return ret;
}
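
The wait on atomic_read(&this_adm.copp_stat[0]) is ended by the ADM APR callback. A condensed sketch of that side (the real handler also decodes the response payload; the name is illustrative):

/* Sketch only: the ADM response path that wakes this_adm.wait. */
static int32_t adm_callback_sketch(struct apr_client_data *data, void *priv)
{
	if (data->opcode == APR_BASIC_RSP_RESULT) {
		atomic_set(&this_adm.copp_stat[0], 1);	/* command done */
		wake_up(&this_adm.wait);
	}
	return 0;
}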
Example #8
int afe_loopback_gain(u16 port_id, u16 volume)
{
	struct afe_port_cmd_set_param set_param;
	int ret = 0;

	if (this_afe.apr == NULL) {
		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
					0xFFFFFFFF, &this_afe);
		pr_debug("%s: Register AFE\n", __func__);
		if (this_afe.apr == NULL) {
			pr_err("%s: Unable to register AFE\n", __func__);
			ret = -ENODEV;
			return ret;
		}
	}

	if (afe_validate_port(port_id) < 0) {

		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* RX port numbers are even; TX port numbers are odd. */
	if (port_id % 2 == 0) {
		pr_err("%s: Failed : afe loopback gain only for TX ports."
			" port_id %d\n", __func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	pr_debug("%s: %d %hX\n", __func__, port_id, volume);

	set_param.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	set_param.hdr.pkt_size = sizeof(set_param);
	set_param.hdr.src_port = 0;
	set_param.hdr.dest_port = 0;
	set_param.hdr.token = 0;
	set_param.hdr.opcode = AFE_PORT_CMD_SET_PARAM;

	set_param.port_id		= port_id;
	set_param.payload_size	= sizeof(struct afe_param_payload_base) +
				  sizeof(struct afe_param_loopback_gain);
	set_param.payload_address	= 0;

	set_param.payload.base.module_id	= AFE_MODULE_ID_PORT_INFO;
	set_param.payload.base.param_id	= AFE_PARAM_ID_LOOPBACK_GAIN;
	set_param.payload.base.param_size =
		sizeof(struct afe_param_loopback_gain);
	set_param.payload.base.reserved	= 0;

	set_param.payload.param.loopback_gain.gain		= volume;
	set_param.payload.param.loopback_gain.reserved	= 0;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &set_param);
	if (ret < 0) {
		pr_err("%s: AFE param set failed for port %d\n",
					__func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
		(atomic_read(&this_afe.state) == 0),
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	return 0;
fail_cmd:
	return ret;
}
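
The same send-then-wait handshake recurs in every AFE example here; distilled into one hedged helper (this is a template over the this_afe fields used above, not a function from q6afe.c):

/* Sketch only: mark the command pending, send it, and sleep until the
 * APR callback clears this_afe.state or the timeout expires. */
static int afe_send_and_wait(void *apr_handle, uint32_t *pkt)
{
	atomic_set(&this_afe.state, 1);
	if (apr_send_pkt(apr_handle, pkt) < 0)
		return -EINVAL;
	if (!wait_event_timeout(this_afe.wait,
				atomic_read(&this_afe.state) == 0,
				msecs_to_jiffies(TIMEOUT_MS)))
		return -ETIMEDOUT;	/* no acknowledgement from the DSP */
	return 0;
}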
Example #9
int adm_multi_ch_copp_open(int port_id, int path, int rate, int channel_mode,
				int topology)
{
	struct adm_multi_ch_copp_open_command open;
	int ret = 0;
	int index;

	pr_debug("%s: port %d path:%d rate:%d channel :%d\n", __func__,
				port_id, path, rate, channel_mode);

	port_id = afe_convert_virtual_to_portid(port_id);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	/* Create a COPP if this port does not already have one */
	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {

		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);

		open.hdr.pkt_size =
			sizeof(struct adm_multi_ch_copp_open_command);
		open.hdr.opcode = ADM_CMD_MULTI_CHANNEL_COPP_OPEN;
		memset(open.dev_channel_mapping, 0, 8);

		if (channel_mode == 1)	{
			open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
		} else if (channel_mode == 2) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
		} else if (channel_mode == 6) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
			open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
			open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
		} else {
			pr_err("%s invalid num_chan %d\n", __func__,
					channel_mode);
			return -EINVAL;
		}


		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = port_id;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = port_id;
		open.hdr.token = port_id;

		open.mode = path;
		open.endpoint_id1 = port_id;
		open.endpoint_id2 = 0xFFFF;

		/* convert path to acdb path */
		if (path == ADM_PATH_PLAYBACK)
			open.topology_id = get_adm_rx_topology();
		else {
			open.topology_id = get_adm_tx_topology();
			if ((open.topology_id ==
				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;
		}

		if (open.topology_id  == 0)
			open.topology_id = topology;

		open.channel_config = channel_mode & 0x00FF;
		open.rate  = rate;

		pr_debug("%s: channel_config=%d port_id=%d rate=%d"
			" topology_id=0x%X\n", __func__, open.channel_config,
			open.endpoint_id1, open.rate,
			open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d failed\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d\n", __func__,
								port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:

	return ret;
}
Example #10
int adm_matrix_map(int session_id, int path, int num_copps,
			unsigned int *port_id, int copp_id)
{
	struct adm_routings_command	route;
	int ret = 0, i = 0;
	/* Assumes port_ids have already been validated during adm_open */
	int index = afe_get_port_index(copp_id);

	pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n",
		 __func__, session_id, path, num_copps, port_id[0]);

	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d token %d\n",
					__func__, index, copp_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	route.hdr.pkt_size = sizeof(route);
	route.hdr.src_svc = 0;
	route.hdr.src_domain = APR_DOMAIN_APPS;
	route.hdr.src_port = copp_id;
	route.hdr.dest_svc = APR_SVC_ADM;
	route.hdr.dest_domain = APR_DOMAIN_ADSP;
	route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	route.hdr.token = copp_id;
	route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
	route.num_sessions = 1;
	route.session[0].id = session_id;
	route.session[0].num_copps = num_copps;

	for (i = 0; i < num_copps; i++) {
		int tmp;
		port_id[i] = afe_convert_virtual_to_portid(port_id[i]);

		tmp = afe_get_port_index(port_id[i]);

		pr_debug("%s: port_id[%d]: %d, index: %d\n", __func__, i,
			 port_id[i], tmp);

		if ((tmp >= 0) && (tmp < AFE_MAX_PORTS))
			route.session[0].copp_id[i] =
					atomic_read(&this_adm.copp_id[tmp]);
	}
	if (num_copps % 2)
		route.session[0].copp_id[i] = 0;

	switch (path) {
	case 0x1:
		route.path = AUDIO_RX;
		break;
	case 0x2:
	case 0x3:
		route.path = AUDIO_TX;
		break;
	default:
		pr_err("%s: Wrong path set[%d]\n", __func__, path);
		break;
	}
	atomic_set(&this_adm.copp_stat[index], 0);

	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
	if (ret < 0) {
		pr_err("%s: ADM routing for port %d failed\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_adm.wait,
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: ADM cmd Route failed for port %d\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}

	for (i = 0; i < num_copps; i++)
		send_adm_cal(port_id[i], path);

	for (i = 0; i < num_copps; i++)
		rtac_add_adm_device(port_id[i],	atomic_read(&this_adm.copp_id
			[afe_get_port_index(port_id[i])]),
			path, session_id);
	return 0;

fail_cmd:

	return ret;
}
Example #11
File: q6afe.c Project: manl/pubile
/* This function sends the multi-channel HDMI configuration command and
 * AFE calibration, which are only supported by QDSP6 on 8960 and onward.
 */
int afe_port_start(u16 port_id, union afe_port_config *afe_config,
		   u32 rate)
{
	struct afe_port_start_command start;
	struct afe_audioif_config_command config;
	int ret;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		ret = -EINVAL;
		return ret;
	}
	pr_debug("%s: %d %d\n", __func__, port_id, rate);

	if ((port_id == RT_PROXY_DAI_001_RX) ||
		(port_id == RT_PROXY_DAI_002_TX)) {
		pr_debug("%s: before incrementing pcm_afe_instance %d"\
				" port_id %d\n", __func__,
				pcm_afe_instance[port_id & 0x1], port_id);
		port_id = VIRTUAL_ID_TO_PORTID(port_id);
		pcm_afe_instance[port_id & 0x1]++;
		return 0;
	}
	if ((port_id == RT_PROXY_DAI_002_RX) ||
		(port_id == RT_PROXY_DAI_001_TX)) {
		pr_debug("%s: before incrementing proxy_afe_instance %d"\
				" port_id %d\n", __func__,
				proxy_afe_instance[port_id & 0x1], port_id);
		if (!afe_close_done[port_id & 0x1]) {
			/*close pcm dai corresponding to the proxy dai*/
			afe_close(port_id - 0x10);
			pcm_afe_instance[port_id & 0x1]++;
			pr_debug("%s: reconfigure afe port again\n", __func__);
		}
		proxy_afe_instance[port_id & 0x1]++;
		afe_close_done[port_id & 0x1] = false;
		port_id = VIRTUAL_ID_TO_PORTID(port_id);
	}

	ret = afe_q6_interface_prepare();
	if (IS_ERR_VALUE(ret))
		return ret;

	if (port_id == HDMI_RX) {
		config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
		config.hdr.src_port = 0;
		config.hdr.dest_port = 0;
		config.hdr.token = 0;
		config.hdr.opcode = AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG;
	} else {

		config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
		config.hdr.src_port = 0;
		config.hdr.dest_port = 0;
		config.hdr.token = 0;
		switch (port_id) {
		case SLIMBUS_0_RX:
		case SLIMBUS_0_TX:
		case SLIMBUS_1_RX:
		case SLIMBUS_1_TX:
		case SLIMBUS_2_RX:
		case SLIMBUS_2_TX:
		case SLIMBUS_3_RX:
		case SLIMBUS_3_TX:
		case SLIMBUS_4_RX:
		case SLIMBUS_4_TX:
			config.hdr.opcode = AFE_PORT_AUDIO_SLIM_SCH_CONFIG;
		break;
		case MI2S_TX:
		case MI2S_RX:
		case SECONDARY_I2S_RX:
		case SECONDARY_I2S_TX:
		case PRIMARY_I2S_RX:
		case PRIMARY_I2S_TX:
			/* The AFE_PORT_CMD_I2S_CONFIG command is not
			 * supported in LPASS EL 1.0, so we have to
			 * distinguish which AFE command to use,
			 * AFE_PORT_CMD_I2S_CONFIG or
			 * AFE_PORT_AUDIO_IF_CONFIG. If the format is
			 * L-PCM, AFE_PORT_AUDIO_IF_CONFIG is used to
			 * keep backward compatibility.
			 */
			pr_debug("%s: afe_config->mi2s.format = %d\n", __func__,
					 afe_config->mi2s.format);
			if (afe_config->mi2s.format == MSM_AFE_I2S_FORMAT_LPCM)
				config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
			else
				config.hdr.opcode = AFE_PORT_CMD_I2S_CONFIG;
		break;
		default:
			config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
		break;
		}
	}

	if (afe_validate_port(port_id) < 0) {

		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	config.port_id = port_id;
	config.port = *afe_config;

	atomic_set(&this_afe.state, 1);
	atomic_set(&this_afe.status, 0);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(TIMEOUT_MS));

	if (!ret) {
		pr_err("%s: wait_event timeout IF CONFIG\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	if (atomic_read(&this_afe.status) != 0) {
		pr_err("%s: config cmd failed\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* send AFE cal */
	afe_send_cal(port_id);

	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PORT_CMD_START;
	start.port_id = port_id;
	start.gain = 0x2000;
	start.sample_rate = rate;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);

	if (IS_ERR_VALUE(ret)) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout PORT START\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
	this_afe.task->comm, this_afe.task->pid);
	return 0;

fail_cmd:
	return ret;
}
Example #12
File: q6afe.c Project: manl/pubile
int afe_close(int port_id)
{
	struct afe_port_stop_command stop;
	int ret = 0;

	if (this_afe.apr == NULL) {
		pr_err("AFE is already closed\n");
		ret = -EINVAL;
		goto fail_cmd;
	}
	pr_debug("%s: port_id=%d\n", __func__, port_id);

	if ((port_id == RT_PROXY_DAI_001_RX) ||
		(port_id == RT_PROXY_DAI_002_TX)) {
		pr_debug("%s: before decrementing pcm_afe_instance %d\n",
				__func__, pcm_afe_instance[port_id & 0x1]);
		port_id = VIRTUAL_ID_TO_PORTID(port_id);
		pcm_afe_instance[port_id & 0x1]--;
		if (!(pcm_afe_instance[port_id & 0x1] == 0 &&
			proxy_afe_instance[port_id & 0x1] == 0))
			return 0;
		else
			afe_close_done[port_id & 0x1] = true;
	}

	if ((port_id == RT_PROXY_DAI_002_RX) ||
		(port_id == RT_PROXY_DAI_001_TX)) {
		pr_debug("%s: before decrementing proxy_afe_instance %d\n",
				__func__, proxy_afe_instance[port_id & 0x1]);
		port_id = VIRTUAL_ID_TO_PORTID(port_id);
		proxy_afe_instance[port_id & 0x1]--;
		if (!(pcm_afe_instance[port_id & 0x1] == 0 &&
			proxy_afe_instance[port_id & 0x1] == 0))
			return 0;
		else
			afe_close_done[port_id & 0x1] = true;
	}

	port_id = afe_convert_virtual_to_portid(port_id);

	stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	stop.hdr.pkt_size = sizeof(stop);
	stop.hdr.src_port = 0;
	stop.hdr.dest_port = 0;
	stop.hdr.token = 0;
	stop.hdr.opcode = AFE_PORT_CMD_STOP;
	stop.port_id = port_id;
	stop.reserved = 0;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop);

	if (ret == -ENETRESET) {
		pr_info("%s: Need to reset, calling APR deregister", __func__);
		return apr_deregister(this_afe.apr);
	}

	if (ret < 0) {
		pr_err("%s: AFE close failed\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
					msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	return ret;
}
Example #13
int q6usm_dec_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg)
{

	uint32_t int_format = INVALID_FORMAT;
	struct usm_stream_media_format_update dec_cfg_obj;
	struct usm_stream_media_format_update *dec_cfg = &dec_cfg_obj;

	int rc = 0;
	uint32_t total_cfg_size = sizeof(struct usm_stream_media_format_update);
	uint32_t round_params_size = 0;
	uint8_t  is_allocated = 0;


	if ((usc == NULL) || (us_cfg == NULL)) {
		pr_err("%s: wrong input", __func__);
		return -EINVAL;
	}

	int_format = q6usm_ext2int_format(us_cfg->format_id);
	if (int_format == INVALID_FORMAT) {
		pr_err("%s: wrong input format[%d]",
		       __func__, us_cfg->format_id);
		return -EINVAL;
	}

	/* Round up the custom parameters size to a 4-byte boundary */
	round_params_size = ((us_cfg->params_size + 3)/4) * 4;
	if (round_params_size > USM_MAX_CFG_DATA_SIZE) {
		/* The media-format block already reserves
		 * USM_MAX_CFG_DATA_SIZE bytes, so allocate only the extra
		 * space beyond it. */
		round_params_size -= USM_MAX_CFG_DATA_SIZE;
		total_cfg_size += round_params_size;
		dec_cfg = kzalloc(total_cfg_size, GFP_KERNEL);
		if (dec_cfg == NULL) {
			pr_err("%s:dec_cfg[%d] allocation failed\n",
			       __func__, total_cfg_size);
			return -ENOMEM;
		}
		is_allocated = 1;
	} else { 
		round_params_size = 0;
	}

	q6usm_add_hdr(usc, &dec_cfg->hdr, total_cfg_size - APR_HDR_SIZE, true);

	dec_cfg->hdr.opcode = USM_DATA_CMD_MEDIA_FORMAT_UPDATE;
	dec_cfg->format_id = int_format;
	dec_cfg->cfg_size = sizeof(struct usm_cfg_common) +
			    USM_MAX_CFG_DATA_SIZE +
			    round_params_size;
	memcpy(&(dec_cfg->cfg_common), &(us_cfg->cfg_common),
	       sizeof(struct usm_cfg_common));
	/* Copy the format-specific (transparent) parameter data */
	memcpy(dec_cfg->transp_data, us_cfg->params, us_cfg->params_size);
	pr_debug("%s: cfg_size[%d], params_size[%d]; parambytes[%d,%d,%d,%d]\n",
		__func__,
		dec_cfg->cfg_size,
		us_cfg->params_size,
		dec_cfg->transp_data[0],
		dec_cfg->transp_data[1],
		dec_cfg->transp_data[2],
		dec_cfg->transp_data[3]
	       );

	rc = apr_send_pkt(usc->apr, (uint32_t *) dec_cfg);
	if (rc < 0) {
		pr_err("%s:Comamnd open failed\n", __func__);
		rc = -EINVAL;
		goto fail_cmd;
	}
	rc = wait_event_timeout(usc->cmd_wait,
				(atomic_read(&usc->cmd_state) == 0),
				Q6USM_TIMEOUT_JIFFIES);
	if (!rc) {
		rc = -ETIME;
		pr_err("%s: timeout opcode[0x%x]\n",
		       __func__, dec_cfg->hdr.opcode);
	} else
		rc = 0;

fail_cmd:
	if (is_allocated == 1)
		kfree(dec_cfg);

	return rc;
}
Example #14
static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

	if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
				     &trans_pcie->status))) {
		IWL_ERR(trans, "Command %s: a command is already active!\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
		return -EIO;
	}

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       trans_pcie_get_cmd_string(trans_pcie, cmd->id));

	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans->wait_command_queue,
			!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status),
			HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
			struct iwl_tx_queue *txq =
				&trans_pcie->txq[trans_pcie->cmd_queue];
			struct iwl_queue *q = &txq->q;

			IWL_ERR(trans,
				"Error sending %s: time out after %dms.\n",
				trans_pcie_get_cmd_string(trans_pcie, cmd->id),
				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

			IWL_ERR(trans,
				"Current CMD queue read_ptr %d write_ptr %d\n",
				q->read_ptr, q->write_ptr);

			clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
			IWL_DEBUG_INFO(trans,
				       "Clearing HCMD_ACTIVE for command %s\n",
				       trans_pcie_get_cmd_string(trans_pcie,
								 cmd->id));
			ret = -ETIMEDOUT;
			goto cancel;
		}
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			trans_pcie_get_cmd_string(trans_pcie, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		trans_pcie->txq[trans_pcie->cmd_queue].
			entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}
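
The !test_bit(STATUS_HCMD_ACTIVE, ...) condition is normally satisfied by the command-completion path when the firmware's response arrives. A condensed sketch (the real completion handler also reclaims the command entry and delivers the response packet):

/* Sketch only: clear the active bit and wake the sync-command waiter. */
static void iwl_hcmd_done_sketch(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	wake_up(&trans->wait_command_queue);
}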
Example #15
int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz,
						uint32_t bufcnt)
{
	struct  adm_cmd_memory_unmap_regions *unmap_regions = NULL;
	struct  adm_memory_unmap_regions *mregions = NULL;
	void    *unmap_region_cmd = NULL;
	void    *payload = NULL;
	int     ret = 0;
	int     i = 0;
	int     cmd_size = 0;

	pr_debug("%s\n", __func__);

	if (this_adm.apr == NULL) {
		pr_err("%s APR handle NULL\n", __func__);
		return -EINVAL;
	}

	cmd_size = sizeof(struct adm_cmd_memory_unmap_regions)
			+ sizeof(struct adm_memory_unmap_regions) * bufcnt;

	unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!unmap_region_cmd) {
		pr_err("%s: allocate unmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	unmap_regions = (struct adm_cmd_memory_unmap_regions *)
						unmap_region_cmd;
	unmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
							APR_PKT_VER);
	unmap_regions->hdr.pkt_size = cmd_size;
	unmap_regions->hdr.src_port = 0;
	unmap_regions->hdr.dest_port = 0;
	unmap_regions->hdr.token = 0;
	unmap_regions->hdr.opcode = ADM_CMD_MEMORY_UNMAP_REGIONS;
	unmap_regions->nregions = bufcnt & 0x00ff;
	unmap_regions->reserved = 0;
	pr_debug("%s: unmap_regions->nregions = %d\n", __func__,
				unmap_regions->nregions);
	payload = ((u8 *) unmap_region_cmd +
			sizeof(struct adm_cmd_memory_unmap_regions));
	mregions = (struct adm_memory_unmap_regions *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->phys = buf_add[i];
		++mregions;
	}
	atomic_set(&this_adm.copp_stat[0], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) unmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
				unmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_unmap\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(unmap_region_cmd);
	return ret;
}
Example #16
/* This function should be used by 8660 exclusively */
int afe_open(u16 port_id, union afe_port_config *afe_config, int rate)
{
	struct afe_port_start_command start;
	struct afe_audioif_config_command config;
	static int  if_first_open = 1;
	int ret = 0;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		ret = -EINVAL;
		return ret;
	}

	pr_info("%s: %d %d\n", __func__, port_id, rate);

	if ((port_id == RT_PROXY_DAI_001_RX) ||
		(port_id == RT_PROXY_DAI_002_TX))
		return 0;
	if ((port_id == RT_PROXY_DAI_002_RX) ||
		(port_id == RT_PROXY_DAI_001_TX))
		port_id = VIRTUAL_ID_TO_PORTID(port_id);

	ret = afe_q6_interface_prepare();
	if (ret != 0)
		return ret;


	if (if_first_open) {
		msleep(100);
		if_first_open = 0;
		pr_info("%s: First afe_open\n", __func__);
	}


	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
	config.hdr.src_port = 0;
	config.hdr.dest_port = 0;
	config.hdr.token = 0;
	switch (port_id) {
	case SLIMBUS_0_RX:
	case SLIMBUS_0_TX:
	case SLIMBUS_1_RX:
	case SLIMBUS_1_TX:
	case SLIMBUS_2_RX:
	case SLIMBUS_2_TX:
	case SLIMBUS_3_RX:
	case SLIMBUS_3_TX:
	case SLIMBUS_4_RX:
	case SLIMBUS_4_TX:
		config.hdr.opcode = AFE_PORT_AUDIO_SLIM_SCH_CONFIG;
	break;
	case MI2S_TX:
	case MI2S_RX:
	case SECONDARY_I2S_RX:
	case SECONDARY_I2S_TX:
	case PRIMARY_I2S_RX:
	case PRIMARY_I2S_TX:
		/* The AFE_PORT_CMD_I2S_CONFIG command is not supported
		 * in LPASS EL 1.0, so we have to distinguish which AFE
		 * command to use, AFE_PORT_CMD_I2S_CONFIG or
		 * AFE_PORT_AUDIO_IF_CONFIG. If the format is L-PCM,
		 * AFE_PORT_AUDIO_IF_CONFIG is used to keep backward
		 * compatibility.
		 */
		pr_debug("%s: afe_config->mi2s.format = %d\n", __func__,
				 afe_config->mi2s.format);
		if (afe_config->mi2s.format == MSM_AFE_I2S_FORMAT_LPCM)
			config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
		else
			config.hdr.opcode = AFE_PORT_CMD_I2S_CONFIG;
	break;
	default:
		config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
	break;
	}

	if (afe_validate_port(port_id) < 0) {

		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	config.port_id = port_id;
	config.port = *afe_config;

	atomic_set(&this_afe.state, 1);
	atomic_set(&this_afe.status, 0);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		HTC_Q6_BUG();
		ret = -EINVAL;
		goto fail_cmd;
	}
	if (atomic_read(&this_afe.status) != 0) {
		pr_err("%s: config cmd failed\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PORT_CMD_START;
	start.port_id = port_id;
	start.gain = 0x2000;
	start.sample_rate = rate;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		HTC_Q6_BUG();
		ret = -EINVAL;
		goto fail_cmd;
	}

	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
			this_afe.task->comm, this_afe.task->pid);
	return 0;
fail_cmd:
	return ret;
}
Example #17
int adm_close(int port_id)
{
	struct apr_hdr close;

	int ret = 0;
	int index = 0;

	port_id = afe_convert_virtual_to_portid(port_id);

	index = afe_get_port_index(port_id);
	if (afe_validate_port(port_id) < 0)
		return -EINVAL;

	pr_debug("%s port_id=%d index %d\n", __func__, port_id, index);

	if (!(atomic_read(&this_adm.copp_cnt[index]))) {
		pr_err("%s: copp count for port[%d]is 0\n", __func__, port_id);

		goto fail_cmd;
	}
	atomic_dec(&this_adm.copp_cnt[index]);
	if (!(atomic_read(&this_adm.copp_cnt[index]))) {

		close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		close.pkt_size = sizeof(close);
		close.src_svc = APR_SVC_ADM;
		close.src_domain = APR_DOMAIN_APPS;
		close.src_port = port_id;
		close.dest_svc = APR_SVC_ADM;
		close.dest_domain = APR_DOMAIN_ADSP;
		close.dest_port = atomic_read(&this_adm.copp_id[index]);
		close.token = port_id;
		close.opcode = ADM_CMD_COPP_CLOSE;

		atomic_set(&this_adm.copp_id[index], RESET_COPP_ID);
		atomic_set(&this_adm.copp_stat[index], 0);


		pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n",
				__func__,
				atomic_read(&this_adm.copp_id[index]),
				port_id, index,
				atomic_read(&this_adm.copp_cnt[index]));

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
		if (ret < 0) {
			pr_err("%s ADM close failed\n", __func__);
			ret = -EINVAL;
			goto fail_cmd;
		}

		ret = wait_event_timeout(this_adm.wait,
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s: ADM cmd Route failed for port %d\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}

		rtac_remove_adm_device(port_id);
	}

fail_cmd:
	return ret;
}
Example #18
int afe_apply_gain(u16 port_id, u16 gain)
{
	struct afe_port_gain_command set_gain;
	int ret = 0;

	if (this_afe.apr == NULL) {
		pr_err("%s: AFE is not opened\n", __func__);
		ret = -EPERM;
		goto fail_cmd;
	}

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* RX ports numbers are even .TX ports numbers are odd. */
	if (port_id % 2 == 0) {
		pr_err("%s: Failed : afe apply gain only for TX ports."
			" port_id %d\n", __func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	pr_debug("%s: %d %hX\n", __func__, port_id, gain);

	set_gain.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	set_gain.hdr.pkt_size = sizeof(set_gain);
	set_gain.hdr.src_port = 0;
	set_gain.hdr.dest_port = 0;
	set_gain.hdr.token = 0;
	set_gain.hdr.opcode = AFE_PORT_CMD_APPLY_GAIN;

	set_gain.port_id		= port_id;
	set_gain.gain	= gain;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &set_gain);
	if (ret < 0) {
		pr_err("%s: AFE Gain set failed for port %d\n",
					__func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
		(atomic_read(&this_afe.state) == 0),
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		HTC_Q6_BUG();
		ret = -EINVAL;
		goto fail_cmd;
	}
	return 0;
fail_cmd:
	return ret;
}
Example #19
static ssize_t lpa_if_write(struct file *file, const char __user *buf,
		size_t count, loff_t *pos)
{
	struct lpa_if *lpa_if = file->private_data;
	struct audio_buffer *ab;
	const char __user *start = buf;
	int xfer, rc;
	struct sched_param s = { .sched_priority = 1 };
	int old_prio = current->rt_priority;
	int old_policy = current->policy;
	int cap_nice = cap_raised(current_cap(), CAP_SYS_NICE);

	 /* just for this write, set us real-time */
	if (!task_has_rt_policy(current)) {
		struct cred *new = prepare_creds();
		cap_raise(new->cap_effective, CAP_SYS_NICE);
		commit_creds(new);
		if ((sched_setscheduler(current, SCHED_RR, &s)) < 0)
			pr_err("sched_setscheduler failed\n");
	}
	mutex_lock(&lpa_if->lock);

	if (dma_buf_index < 2) {

		ab = lpa_if->audio_buf + dma_buf_index;

		if (copy_from_user(ab->data, buf, count)) {
			pr_err("copy from user failed\n");
			rc = 0;
			goto end;

		}
		mb();
		pr_debug("prefill: count %u  audio_buf[%u].size %u\n",
			 count, dma_buf_index, ab->size);

		ab->used = 1;
		dma_buf_index++;
		rc =  count;
		goto end;
	}

	if (lpa_if->config != 1) {
		pr_err("AUDIO_START did not happen\n");
		rc = 0;
		goto end;
	}

	while (count > 0) {

		ab = lpa_if->audio_buf + lpa_if->cpu_buf;

		rc = wait_event_timeout(lpa_if->wait, (ab->used == 0), 10 * HZ);
		if (!rc) {
			pr_err("wait_event_timeout failed\n");
			rc =  buf - start;
			goto end;
		}

		xfer = count;

		if (xfer > lpa_if->dma_period_sz)
			xfer = lpa_if->dma_period_sz;

		if (copy_from_user(ab->data, buf, xfer)) {
			pr_err("copy from user failed\n");
			rc = buf - start;
			goto end;
		}

		mb();
		buf += xfer;
		count -= xfer;
		ab->used = 1;

		pr_debug("xfer %d, size %d, used %d cpu_buf %d\n",
			xfer, ab->size, ab->used, lpa_if->cpu_buf);
		lpa_if->cpu_buf++;
		lpa_if->cpu_buf = lpa_if->cpu_buf % lpa_if->cfg.buffer_count;
	}
	rc = buf - start;
end:
	mutex_unlock(&lpa_if->lock);
	/* restore old scheduling policy */
	if (!rt_policy(old_policy)) {
		struct sched_param v = { .sched_priority = old_prio };
		if ((sched_setscheduler(current, old_policy, &v)) < 0)
			pr_err("sched_setscheduler failed\n");
		if (likely(!cap_nice)) {
			struct cred *new = prepare_creds();
			cap_lower(new->cap_effective, CAP_SYS_NICE);
			commit_creds(new);
		}
	}
	return rc;
}
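
The (ab->used == 0) wait in lpa_if_write() is released when the DMA period interrupt marks a buffer as consumed. A sketch of that path (the buffer-index argument and handler name are illustrative; the real IRQ handler also advances hardware pointers):

/* Sketch only: mark a drained period free and wake the writer. */
static void lpa_if_period_done(struct lpa_if *lpa_if, unsigned int buf_idx)
{
	struct audio_buffer *ab = lpa_if->audio_buf + buf_idx;

	ab->used = 0;		/* hardware finished with this period */
	wake_up(&lpa_if->wait);
}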

static int lpa_if_release(struct inode *inode, struct file *file)
{
	struct lpa_if *lpa_if = file->private_data;

	hdmi_audio_packet_enable(0);

	wait_for_dma_cnt_stop(lpa_if->dma_ch);

	hdmi_audio_enable(0, HDMI_AUDIO_FIFO_WATER_MARK);

	if (lpa_if->config) {
		unregister_dma_irq_handler(lpa_if->dma_ch);
		dai_stop_hdmi(lpa_if->dma_ch);
		lpa_if->config = 0;
	}
	core_req_bus_bandwith(AUDIO_IF_BUS_ID, 0, 0);

	if (hdmi_msm_audio_get_sample_rate() != HDMI_SAMPLE_RATE_48KHZ)
		hdmi_msm_audio_sample_rate_reset(HDMI_SAMPLE_RATE_48KHZ);

	return 0;
}

static const struct file_operations lpa_if_fops = {
	.owner = THIS_MODULE,
	.open = lpa_if_open,
	.write = lpa_if_write,
	.release = lpa_if_release,
	.unlocked_ioctl = lpa_if_ioctl,
};

struct miscdevice lpa_if_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "msm_lpa_if_out",
	.fops = &lpa_if_fops,
};

static int __init lpa_if_init(void)
{
	int rc;

	lpa_if_ptr = kzalloc(sizeof(struct lpa_if), GFP_KERNEL);
	if (!lpa_if_ptr) {
		pr_info("No mem for lpa-if\n");
		return -ENOMEM;
	}

	mutex_init(&lpa_if_ptr->lock);
	init_waitqueue_head(&lpa_if_ptr->wait);

	lpa_if_ptr->buffer = dma_alloc_coherent(NULL, DMA_ALLOC_BUF_SZ,
				    &(lpa_if_ptr->buffer_phys), GFP_KERNEL);
	if (!lpa_if_ptr->buffer) {
		pr_err("dma_alloc_coherent failed\n");
		kfree(lpa_if_ptr);
		return -ENOMEM;
	}

	pr_info("lpa_if_ptr 0x%08x   buf_vir 0x%08x   buf_phy 0x%08x "
		" buf_zise %u\n", (u32)lpa_if_ptr,
		(u32)(lpa_if_ptr->buffer), lpa_if_ptr->buffer_phys,
		DMA_ALLOC_BUF_SZ);

	rc =  misc_register(&lpa_if_misc);
	if (rc < 0) {
		pr_err("misc_register failed\n");

		dma_free_coherent(NULL, DMA_ALLOC_BUF_SZ, lpa_if_ptr->buffer,
				lpa_if_ptr->buffer_phys);
		kfree(lpa_if_ptr);
	}
	return rc;
}

device_initcall(lpa_if_init);
Example #20
/* must be called with audio->lock held */
static int audvoicememo_enable(struct audio_voicememo *audio)
{
	struct audmgr_config cfg;
	struct snd_voc_rec_put_buf_msg bmsg;
	struct snd_voc_rec_start_msg msg;
	uint8_t index;
	uint32_t offset = 0;
	int rc;

	if (audio->enabled)
		return 0;

	/* Codec / method configure to audmgr client */
	cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_8000;
	cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE;
	cfg.def_method = RPC_AUD_DEF_METHOD_RECORD;

	if (audio->voicememo_cfg.capability == RPC_VOC_CAP_IS733)
		cfg.codec = RPC_AUD_DEF_CODEC_VOC_13K;
	else if (audio->voicememo_cfg.capability == RPC_VOC_CAP_IS127)
		cfg.codec = RPC_AUD_DEF_CODEC_VOC_EVRC;
	else
		cfg.codec = RPC_AUD_DEF_CODEC_VOC_AMR; /* RPC_VOC_CAP_AMR */

	cfg.snd_method = RPC_SND_METHOD_VOICE;
	rc = audmgr_enable(&audio->audmgr, &cfg);

	if (rc < 0)
		return rc;

	/* Configure VOC Rec buffer */
	for (index = 0; index < MAX_REC_BUF_COUNT; index++) {
		audio->in[index].data = audio->rec_buf_ptr + offset;
		audio->in[index].addr = audio->phys + offset;
		audio->in[index].size = audio->rec_buf_size;
		audio->in[index].used = 0;
		audio->in[index].numframes = 0;
		offset += audio->rec_buf_size;
		bmsg.args.buf = (uint32_t) audio->in[index].data;
		bmsg.args.num_bytes = cpu_to_be32(audio->in[index].size);
		MM_DBG("rec_buf_ptr=0x%8x, rec_buf_size = 0x%8x\n",
				bmsg.args.buf, bmsg.args.num_bytes);

		msm_rpc_setup_req(&bmsg.hdr, audio->rpc_prog, audio->rpc_ver,
				SND_VOC_REC_PUT_BUF_PROC);
		audio->rpc_xid = bmsg.hdr.xid;
		audio->rpc_status = RPC_STATUS_FAILURE;
		msm_rpc_write(audio->sndept, &bmsg, sizeof(bmsg));
		rc = wait_event_timeout(audio->wait,
			audio->rpc_status != RPC_STATUS_FAILURE, 1 * HZ);
		if (rc == 0)
			goto err;
	}


	/* Start Recording */
	msg.args.param_status = cpu_to_be32(0x00000001);
	msg.args.rec_type = cpu_to_be32(audio->voicememo_cfg.rec_type);
	msg.args.rec_interval_ms =
		cpu_to_be32(audio->voicememo_cfg.rec_interval_ms);
	msg.args.auto_stop_ms = cpu_to_be32(audio->voicememo_cfg.auto_stop_ms);
	msg.args.capability = cpu_to_be32(audio->voicememo_cfg.capability);
	msg.args.max_rate = cpu_to_be32(audio->voicememo_cfg.max_rate);
	msg.args.min_rate = cpu_to_be32(audio->voicememo_cfg.min_rate);
	msg.args.frame_format = cpu_to_be32(audio->voicememo_cfg.frame_format);
	msg.args.dtx_enable = cpu_to_be32(audio->voicememo_cfg.dtx_enable);
	msg.args.data_req_ms = cpu_to_be32(audio->voicememo_cfg.data_req_ms);
	msg.args.rec_client_data = cpu_to_be32(REC_CLIENT_DATA);
	msg.args.cb_func_id = cpu_to_be32(DATA_CB_FUNC_ID);
	msg.args.sync_cb_func_id = cpu_to_be32(AV_SYNC_CB_FUNC_ID);
	msg.args.client_data = cpu_to_be32(CLIENT_DATA);

	msm_rpc_setup_req(&msg.hdr, audio->rpc_prog, audio->rpc_ver,
			SND_VOC_REC_START_PROC);

	audio->rpc_xid = msg.hdr.xid;
	audio->rpc_status = RPC_STATUS_FAILURE;
	msm_rpc_write(audio->sndept, &msg, sizeof(msg));
	rc = wait_event_timeout(audio->wait,
		audio->rpc_status != RPC_STATUS_FAILURE, 1 * HZ);
	if (rc == 0)
		goto err;

	audio->rpc_xid = 0;
	audio->enabled = 1;
	return 0;

err:
	audio->rpc_xid = 0;
	audmgr_disable(&audio->audmgr);
	MM_ERR("Fail\n");
	return -1;
}
Example #21
static int audio_mvs_setup_amr(struct audio_mvs_info_type *audio)
{
	int rc = 0;
	struct audio_mvs_set_amr_mode_msg set_amr_mode_msg;
	struct audio_mvs_set_dtx_mode_msg set_dtx_mode_msg;

	pr_debug("%s:\n", __func__);

	/* Set AMR mode. */
	memset(&set_amr_mode_msg, 0, sizeof(set_amr_mode_msg));
	set_amr_mode_msg.amr_mode = cpu_to_be32(audio->rate_type);

	if (audio->mvs_mode == MVS_MODE_AMR) {
		msm_rpc_setup_req(&set_amr_mode_msg.rpc_hdr,
				  audio->rpc_prog,
				  audio->rpc_ver,
				  MVS_AMR_SET_AMR_MODE_PROC);
	} else {
		msm_rpc_setup_req(&set_amr_mode_msg.rpc_hdr,
				  audio->rpc_prog,
				  audio->rpc_ver,
				  MVS_AMR_SET_AWB_MODE_PROC);
	}

	audio->rpc_status = RPC_STATUS_FAILURE;
	rc = msm_rpc_write(audio->rpc_endpt,
			   &set_amr_mode_msg,
			   sizeof(set_amr_mode_msg));

	if (rc >= 0) {
		pr_debug("%s: RPC write for set amr mode done\n", __func__);

		rc = wait_event_timeout(audio->wait,
				(audio->rpc_status != RPC_STATUS_FAILURE),
				1 * HZ);

		if (rc > 0) {
			pr_debug("%s: Wait event for set amr mode succeeded\n",
				 __func__);

			/* Save the MVS configuration information. */
			audio->frame_mode = MVS_FRAME_MODE_AMR_DL;

			/* Disable DTX. */
			memset(&set_dtx_mode_msg, 0, sizeof(set_dtx_mode_msg));
			set_dtx_mode_msg.dtx_mode = cpu_to_be32(0);

			msm_rpc_setup_req(&set_dtx_mode_msg.rpc_hdr,
					  audio->rpc_prog,
					  audio->rpc_ver,
					  MVS_SET_DTX_MODE_PROC);

			audio->rpc_status = RPC_STATUS_FAILURE;
			rc = msm_rpc_write(audio->rpc_endpt,
					   &set_dtx_mode_msg,
					   sizeof(set_dtx_mode_msg));

			if (rc >= 0) {
				pr_debug("%s: RPC write for set dtx done\n",
					 __func__);

				rc = wait_event_timeout(audio->wait,
				      (audio->rpc_status != RPC_STATUS_FAILURE),
				      1 * HZ);

				if (rc > 0) {
					pr_debug("%s: Wait event for set dtx"
						 "succeeded\n", __func__);

					rc = 0;
				}
			}
		} else {
			pr_aud_err("%s: Wait event for set amr mode failed %d\n",
			       __func__, rc);
		}
	} else {
		pr_aud_err("%s: RPC write for set amr mode failed %d\n",
		       __func__, rc);
	}

	return rc;
}
Example #22
static int usf_get_tx_update(struct usf_type *usf, unsigned long arg)
{
	struct us_tx_update_info_type upd_tx_info;
	unsigned long prev_jiffies = 0;
	uint32_t timeout = 0;
	struct usf_xx_type *usf_xx =  &usf->usf_tx;
	int rc = copy_from_user(&upd_tx_info, (void *) arg,
			    sizeof(upd_tx_info));

	if (rc) {
		pr_err("%s: copy upd_tx_info from user; rc=%d\n",
			__func__, rc);
		return -EFAULT;
	}

	if (!usf_xx->user_upd_info_na) {
		usf_set_event_filters(usf, upd_tx_info.event_filters);
		handle_input_event(usf,
				   upd_tx_info.event_counter,
				   upd_tx_info.event);

		/* Release available regions */
		rc = q6usm_read(usf_xx->usc,
				upd_tx_info.free_region);
		if (rc)
			return rc;
	} else
		usf_xx->user_upd_info_na = 0;

	/* Get data ready regions */
	if (upd_tx_info.timeout == USF_INFINITIVE_TIMEOUT) {
		rc = wait_event_interruptible(usf_xx->wait,
			   (usf_xx->prev_region !=
			    usf_xx->new_region) ||
			   (usf_xx->usf_state !=
			    USF_WORK_STATE));
	} else {
		if (upd_tx_info.timeout == USF_NO_WAIT_TIMEOUT)
			rc = (usf_xx->prev_region != usf_xx->new_region);
		else {
			prev_jiffies = jiffies;
			if (upd_tx_info.timeout == USF_DEFAULT_TIMEOUT) {
				timeout = USF_TIMEOUT_JIFFIES;
				rc = wait_event_timeout(
						usf_xx->wait,
						(usf_xx->prev_region !=
						 usf_xx->new_region) ||
						(usf_xx->usf_state !=
						 USF_WORK_STATE),
						timeout);
			} else {
				timeout = upd_tx_info.timeout * HZ;
				rc = wait_event_interruptible_timeout(
						usf_xx->wait,
						(usf_xx->prev_region !=
						 usf_xx->new_region) ||
						(usf_xx->usf_state !=
						 USF_WORK_STATE),
						timeout);
			}
		}
		if (!rc) {
			pr_debug("%s: timeout. prev_j=%lu; j=%lu\n",
				__func__, prev_jiffies, jiffies);
			pr_debug("%s: timeout. prev=%d; new=%d\n",
				__func__, usf_xx->prev_region,
				usf_xx->new_region);
			pr_debug("%s: timeout. free_region=%d;\n",
				__func__, upd_tx_info.free_region);
			if (usf_xx->prev_region ==
			    usf_xx->new_region) {
				pr_err("%s:read data: timeout\n",
				       __func__);
				return -ETIME;
			}
		}
	}

	if ((usf_xx->usf_state != USF_WORK_STATE) ||
	    (rc == -ERESTARTSYS)) {
		pr_err("%s: Get ready region failure; state[%d]; rc[%d]\n",
		       __func__, usf_xx->usf_state, rc);
		return -EINTR;
	}

	upd_tx_info.ready_region = usf_xx->new_region;
	usf_xx->prev_region = upd_tx_info.ready_region;

	if (upd_tx_info.ready_region == USM_WRONG_TOKEN) {
		pr_err("%s: TX path corrupted; prev=%d\n",
		       __func__, usf_xx->prev_region);
		return -EIO;
	}

	rc = copy_to_user((void __user *)arg, &upd_tx_info,
			  sizeof(upd_tx_info));
	if (rc) {
		pr_err("%s: copy upd_tx_info to user; rc=%d\n",
			__func__, rc);
		rc = -EFAULT;
	}

	return rc;
} /* usf_get_tx_update */
Example #23
/**
@ingroup tx_functions
Transmit thread
*/
int tx_pkt_handler(PMINI_ADAPTER Adapter  /**< pointer to adapter object*/
				)
{

#ifndef BCM_SHM_INTERFACE
	int status = 0;
#endif

	UINT calltransmit = 1;
	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Entering to wait for signal from the interrupt service thread! Adapter = 0x%lx",(ULONG) Adapter);
	
	
	while(1)
	{
		if(Adapter->LinkUpStatus){
			wait_event_timeout(Adapter->tx_packet_wait_queue, 
				((atomic_read(&Adapter->TxPktAvail) && 
				(MINIMUM_PENDING_DESCRIPTORS < 
				atomic_read(&Adapter->CurrNumFreeTxDesc)) && 
				(Adapter->device_removed == FALSE))) || 
				(1 == Adapter->uiFirstInterrupt) || kthread_should_stop() 
#ifndef BCM_SHM_INTERFACE
				|| (TRUE == Adapter->bEndPointHalted) 
#endif				
				, msecs_to_jiffies(10));
		}
		else{
			wait_event(Adapter->tx_packet_wait_queue, 
				((atomic_read(&Adapter->TxPktAvail) && 
				(MINIMUM_PENDING_DESCRIPTORS < 
				atomic_read(&Adapter->CurrNumFreeTxDesc)) && 
				(Adapter->device_removed == FALSE))) || 
				(1 == Adapter->uiFirstInterrupt) || kthread_should_stop()
#ifndef BCM_SHM_INTERFACE
				|| (TRUE == Adapter->bEndPointHalted)
#endif
				);
		}
		
		if(kthread_should_stop() || Adapter->device_removed)
		{
			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Exiting the tx thread..\n");
			Adapter->transmit_packet_thread = NULL;
			return 0;
		}


		if(Adapter->uiFirstInterrupt == 1)
		{
			SetUpTargetDsxBuffers(Adapter);
			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Setting DSX...\n");
			Adapter->uiFirstInterrupt += 1;
#ifndef BCM_SHM_INTERFACE
			status = download_ddr_settings(Adapter);
			if(status)
				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "DDR DOWNLOAD FAILED!\n");
#endif
			continue;
		}
#ifndef BCM_SHM_INTERFACE
		//Check end point for halt/stall. 
		if(Adapter->bEndPointHalted == TRUE)
		{
			Bcm_clear_halt_of_endpoints(Adapter);
			Adapter->bEndPointHalted = FALSE;
			StartInterruptUrb((PS_INTERFACE_ADAPTER)(Adapter->pvInterfaceAdapter));
		}		

		if(Adapter->LinkUpStatus && !Adapter->IdleMode)
		{
			if(atomic_read(&Adapter->TotalPacketCount)) 
			{
				update_per_sf_desc_cnts(Adapter);
			}
		}
#endif		
		
		if( atomic_read(&Adapter->CurrNumFreeTxDesc) && 
			Adapter->LinkStatus == SYNC_UP_REQUEST && 
			!Adapter->bSyncUpRequestSent)
		{
			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Calling LinkMessage");
			LinkMessage(Adapter);
		}

		if((Adapter->IdleMode || Adapter->bShutStatus) && atomic_read(&Adapter->TotalPacketCount))
		{
			BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Device in Low Power mode...waking up");
			Adapter->usIdleModePattern = ABORT_IDLE_MODE;
			Adapter->bWakeUpDevice = TRUE;
			wake_up(&Adapter->process_rx_cntrlpkt);
		}

#ifdef BCM_SHM_INTERFACE			
		spin_lock_bh(&Adapter->txtransmitlock);
		if(Adapter->txtransmit_running == 0)
		{
			Adapter->txtransmit_running = 1;
			calltransmit = 1;
		}
		else
			calltransmit = 0;
		spin_unlock_bh(&Adapter->txtransmitlock);
#endif

		if(calltransmit)
			transmit_packets(Adapter);

		atomic_set(&Adapter->TxPktAvail, 0);
	}
	return 0;
}
Example #24
u32 send_voice_apr(u32 mode, void *buf, u32 opcode)
{
	s32	result;
	u32	user_buf_size = 0;
	u32	bytes_returned = 0;
	u32	payload_size;
	u32	dest_port;
	u32	data_size = 0;
	struct apr_hdr		voice_params;
	pr_debug("%s\n", __func__);

	if (rtac_cal[VOICE_RTAC_CAL].map_data.ion_handle == NULL) {
		result = rtac_allocate_cal_buffer(VOICE_RTAC_CAL);
		if (result < 0) {
			pr_err("%s: allocate buffer failed!",
				__func__);
			goto done;
		}
	}

	if (rtac_cal[VOICE_RTAC_CAL].map_data.map_handle == 0) {
		result = rtac_map_cal_buffer(VOICE_RTAC_CAL);
		if (result < 0) {
			pr_err("%s: map buffer failed!",
				__func__);
			goto done;
		}
	}

	if (copy_from_user(&user_buf_size, (void *)buf,
						sizeof(user_buf_size))) {
		pr_err("%s: Copy from user failed! buf = 0x%x\n",
		       __func__, (unsigned int)buf);
		goto done;
	}
	if (user_buf_size <= 0) {
		pr_err("%s: Invalid buffer size = %d\n",
			__func__, user_buf_size);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}

	if (copy_from_user(&dest_port, buf + 2 * sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy port id from user buffer\n",
			__func__);
		goto done;
	}

	if ((mode != RTAC_CVP) && (mode != RTAC_CVS)) {
		pr_err("%s: Invalid Mode for APR, mode = %d\n",
			__func__, mode);
		goto done;
	}

	mutex_lock(&rtac_voice_apr_mutex);
	if (rtac_voice_apr_data[mode].apr_handle == NULL) {
		pr_err("%s: APR not initialized\n", __func__);
		goto err;
	}

	if (opcode == VOICE_CMD_SET_PARAM) {
		/* set payload size to in-band payload */
		/* set data size to actual out of band payload size */
		data_size = payload_size - 4 * sizeof(u32);
		if (data_size > rtac_cal[VOICE_RTAC_CAL].map_data.map_size) {
			pr_err("%s: Invalid data size = %d\n",
				__func__, data_size);
			goto done;
		}
		payload_size = 4 * sizeof(u32);

		/* Copy buffer to out-of-band payload */
		if (copy_from_user((void *)
				rtac_cal[VOICE_RTAC_CAL].cal_data.kvaddr,
				buf + 7 * sizeof(u32), data_size)) {
			pr_err("%s: Could not copy payload from user buffer\n",
				__func__);
			goto err;
		}
		/* set payload size in packet */
		rtac_voice_buffer[8] = data_size;
	} else {
		if (payload_size > MAX_PAYLOAD_SIZE) {
			pr_err("%s: Invalid payload size = %d\n",
					__func__, payload_size);
			goto done;
		}

		/* Copy buffer to in-band payload */
		if (copy_from_user(rtac_voice_buffer +
				sizeof(voice_params)/sizeof(u32),
				buf + 3 * sizeof(u32), payload_size)) {
			pr_err("%s: Could not copy payload from user buffer\n",
				__func__);
			goto err;
		}
	}

	/* Pack header */
	voice_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	voice_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		payload_size);
	voice_params.src_svc = 0;
	voice_params.src_domain = APR_DOMAIN_APPS;
	voice_params.src_port = get_voice_index(mode, dest_port);
	voice_params.dest_svc = 0;
	voice_params.dest_domain = APR_DOMAIN_MODEM;
	voice_params.dest_port = (u16)dest_port;
	voice_params.token = 0;
	voice_params.opcode = opcode;

	/* fill for out-of-band */
	rtac_voice_buffer[5] = rtac_cal[VOICE_RTAC_CAL].map_data.map_handle;
	rtac_voice_buffer[6] = rtac_cal[VOICE_RTAC_CAL].cal_data.paddr;
	rtac_voice_buffer[7] = 0;

	memcpy(rtac_voice_buffer, &voice_params, sizeof(voice_params));
	atomic_set(&rtac_voice_apr_data[mode].cmd_state, 1);

	pr_debug("%s: Sending RTAC command ioctl 0x%x, paddr 0x%x\n",
		__func__, opcode,
		rtac_cal[VOICE_RTAC_CAL].cal_data.paddr);

	result = apr_send_pkt(rtac_voice_apr_data[mode].apr_handle,
					(uint32_t *)rtac_voice_buffer);
	if (result < 0) {
		pr_err("%s: apr_send_pkt failed opcode = %x\n",
			__func__, opcode);
		goto err;
	}
	/* Wait for the callback */
	result = wait_event_timeout(rtac_voice_apr_data[mode].cmd_wait,
		(atomic_read(&rtac_voice_apr_data[mode].cmd_state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		pr_err("%s: apr_send_pkt timed out opcode = %x\n",
			__func__, opcode);
		goto err;
	}
	if (atomic_read(&rtac_common.apr_err_code)) {
		pr_err("%s: DSP returned error code = %d, opcode = 0x%x\n",
			__func__, atomic_read(&rtac_common.apr_err_code),
			opcode);
		goto err;
	}

	if (opcode == VOICE_CMD_GET_PARAM) {
		bytes_returned = ((u32 *)rtac_cal[VOICE_RTAC_CAL].cal_data.
			kvaddr)[2] + 3 * sizeof(u32);

		if (bytes_returned > user_buf_size) {
			pr_err("%s: User buf not big enough, size = 0x%x, returned size = 0x%x\n",
				__func__, user_buf_size, bytes_returned);
			goto err;
		}

		if (copy_to_user(buf, (void *)
				rtac_cal[VOICE_RTAC_CAL].cal_data.kvaddr,
				bytes_returned)) {
			pr_err("%s: Could not copy buffer to user, size = %d\n",
				 __func__, bytes_returned);
			goto err;
		}
	} else {
		bytes_returned = data_size;
	}
err:
	mutex_unlock(&rtac_voice_apr_mutex);
done:
	return bytes_returned;
}
Example #25
static int ipp_blit_sync_real(const struct rk29_ipp_req *req)
{
	int status;
	int wait_ret;
	
	DBG("Omegamoon >>> IPP Blit Sync Start\n");

	// If IPP is busy now, wait until it becomes idle
	mutex_lock(&drvdata->mutex);
	{
		status = wait_event_interruptible(blit_wait_queue, idle_condition);
		
		if(status < 0)
		{
			printk("ipp_blit_sync_real wait_event_interruptible=%d\n",status);
			mutex_unlock(&drvdata->mutex);
			return status;
		}
		
		idle_condition = 0;
		
	}
	mutex_unlock(&drvdata->mutex);

	
	drvdata->issync = true;
	drvdata->ipp_result = ipp_blit(req);
	if(drvdata->ipp_result == 0)
	{
		//wait_ret = wait_event_interruptible_timeout(hw_wait_queue, wq_condition, msecs_to_jiffies(req->timeout));
		wait_ret = wait_event_timeout(hw_wait_queue, wq_condition, msecs_to_jiffies(req->timeout));
#ifdef IPP_TEST
		irq_end = ktime_get(); 
		irq_end = ktime_sub(irq_end,irq_start);
		hw_end = ktime_sub(hw_end,hw_start);
		if((((int)ktime_to_us(hw_end)/1000)>10)||(((int)ktime_to_us(irq_end)/1000)>10))
		{
			//printk("hw time: %d ms, irq time: %d ms\n",(int)ktime_to_us(hw_end)/1000,(int)ktime_to_us(irq_end)/1000);
		}
#endif				
		if (wait_ret <= 0)
		{
			printk("%s wait_ret=%d,wq_condition =%d,wait_event_timeout:%dms! \n",__FUNCTION__,wait_ret,wq_condition,req->timeout);

			if(wq_condition==0)
			{
				//print all register's value
				printk("IPP_CONFIG: %x\n",ipp_read(IPP_CONFIG));
				printk("IPP_SRC_IMG_INFO: %x\n",ipp_read(IPP_SRC_IMG_INFO));
				printk("IPP_DST_IMG_INFO: %x\n",ipp_read(IPP_DST_IMG_INFO));
				printk("IPP_IMG_VIR: %x\n",ipp_read(IPP_IMG_VIR));
				printk("IPP_INT: %x\n",ipp_read(IPP_INT));
				printk("IPP_SRC0_Y_MST: %x\n",ipp_read(IPP_SRC0_Y_MST));
				printk("IPP_SRC0_CBR_MST: %x\n",ipp_read(IPP_SRC0_CBR_MST));
				printk("IPP_SRC1_Y_MST: %x\n",ipp_read(IPP_SRC1_Y_MST));
				printk("IPP_SRC1_CBR_MST: %x\n",ipp_read(IPP_SRC1_CBR_MST));
				printk("IPP_DST0_Y_MST: %x\n",ipp_read(IPP_DST0_Y_MST));
				printk("IPP_DST0_CBR_MST: %x\n",ipp_read(IPP_DST0_CBR_MST));
				printk("IPP_DST1_Y_MST: %x\n",ipp_read(IPP_DST1_Y_MST));
				printk("IPP_DST1_CBR_MST: %x\n",ipp_read(IPP_DST1_CBR_MST));
				printk("IPP_PRE_SCL_PARA: %x\n",ipp_read(IPP_PRE_SCL_PARA));
				printk("IPP_POST_SCL_PARA: %x\n",ipp_read(IPP_POST_SCL_PARA));
				printk("IPP_SWAP_CTRL: %x\n",ipp_read(IPP_SWAP_CTRL));
				printk("IPP_PRE_IMG_INFO: %x\n",ipp_read(IPP_PRE_IMG_INFO));
				printk("IPP_AXI_ID: %x\n",ipp_read(IPP_AXI_ID));
				printk("IPP_SRESET: %x\n",ipp_read(IPP_SRESET));
				printk("IPP_PROCESS_ST: %x\n",ipp_read(IPP_PROCESS_ST));
		
				ipp_soft_reset();
				drvdata->ipp_result = -EAGAIN;
			}
		}

		ipp_power_off(NULL);
	}
	drvdata->issync = false;

	DBG("Omegamoon >>> IPP Blit Sync Finished\n");

	//IPP is idle, wake up the wait queue
	status = drvdata->ipp_result;
	idle_condition = 1;
	wake_up_interruptible_sync(&blit_wait_queue);
	
	return status;
}
Example #26
File: rtac.c Project: Ca1ne/Enoch316
u32 send_adm_apr(void *buf, u32 opcode)
{
	s32	result;
	u32	count = 0;
	u32	bytes_returned = 0;
	u32	port_index = 0;
	u32	copp_id;
	u32	payload_size;
	struct apr_hdr	adm_params;
	pr_debug("%s\n", __func__);

	if (copy_from_user(&count, (void *)buf, sizeof(count))) {
		pr_err("%s: Copy to user failed! buf = 0x%x\n",
		       __func__, (unsigned int)buf);
		result = -EFAULT;
		goto done;
	}

	if (count <= 0) {
		pr_err("%s: Invalid buffer size = %d\n", __func__, count);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}


	if (payload_size > MAX_PAYLOAD_SIZE) {
		pr_err("%s: Invalid payload size = %d\n",
			__func__, payload_size);
		goto done;
	}

	if (copy_from_user(&copp_id, buf + 2 * sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy port id from user buffer\n",
			__func__);
		goto done;
	}

	for (port_index = 0; port_index < AFE_MAX_PORTS; port_index++) {
		if (adm_get_copp_id(port_index) == copp_id)
			break;
	}
	if (port_index >= AFE_MAX_PORTS) {
		pr_err("%s: Could not find port index for copp = %d\n",
		       __func__, copp_id);
		goto done;
	}

	mutex_lock(&rtac_adm_apr_mutex);
	if (rtac_adm_apr_data.apr_handle == NULL) {
		pr_err("%s: APR not initialized\n", __func__);
		goto err;
	}

	
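	/* Save the user buffer size in a global for the response path */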
	rtac_adm_user_buf_size = count;
	
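	/* Copy the command payload from user space, leaving room for the APR header */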
	if (copy_from_user(rtac_adm_buffer + sizeof(adm_params),
			buf + 3 * sizeof(u32), payload_size)) {
		pr_err("%s: Could not copy payload from user buffer\n",
			__func__);
		goto err;
	}

	
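	/* Pack the APR header */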
	adm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		payload_size);
	adm_params.src_svc = APR_SVC_ADM;
	adm_params.src_domain = APR_DOMAIN_APPS;
	adm_params.src_port = copp_id;
	adm_params.dest_svc = APR_SVC_ADM;
	adm_params.dest_domain = APR_DOMAIN_ADSP;
	adm_params.dest_port = copp_id;
	adm_params.token = copp_id;
	adm_params.opcode = opcode;

	memcpy(rtac_adm_buffer, &adm_params, sizeof(adm_params));
	atomic_set(&rtac_adm_apr_data.cmd_state, 1);

	pr_debug("%s: Sending RTAC command size = %d\n",
		__func__, adm_params.pkt_size);

	result = apr_send_pkt(rtac_adm_apr_data.apr_handle,
				(uint32_t *)rtac_adm_buffer);
	if (result < 0) {
		pr_err("%s: Set params failed port = %d, copp = %d\n",
			__func__, port_index, copp_id);
		goto err;
	}
	
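	/* Wait for the DSP to signal completion via the APR callback */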
	result = wait_event_timeout(rtac_adm_apr_data.cmd_wait,
		(atomic_read(&rtac_adm_apr_data.cmd_state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	mutex_unlock(&rtac_adm_apr_mutex);
	if (!result) {
		pr_err("%s: Set params timed out port = %d, copp = %d\n",
			__func__, port_index, copp_id);
		goto done;
	}

	if (rtac_adm_payload_size != 0) {
		if (copy_to_user(buf, rtac_adm_buffer,
			rtac_adm_payload_size + sizeof(u32))) {
			pr_err("%s: Could not copy buffer to user, size = %d\n",
				 __func__, payload_size);
			goto done;
		}
	}

	
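	/* Return the payload size: data read back for GET, data sent otherwise */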
	if (opcode == ADM_CMD_GET_PARAMS)
		bytes_returned = rtac_adm_payload_size;
	else
		bytes_returned = payload_size;
done:
	return bytes_returned;
err:
	mutex_unlock(&rtac_adm_apr_mutex);
	return bytes_returned;
}
Example #27
int pwm_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
	      unsigned long arg)
{
	int mode;
	unsigned int minor = iminor(inode);
	struct pwm_davinci_device *pwm_dev;

	pwm_dev = pwm_dev_get_by_minor(minor);

	switch (cmd) {
	case PWMIOC_SET_MODE:
		if (pwm_dev->regs->cfg & 0x20000)
			return -EBUSY;

		get_user(mode, (int *)arg);
		if (mode == PWM_ONESHOT_MODE) {
			pwm_dev->regs->cfg &= 0xFFFFFFFC;
			pwm_dev->regs->cfg |= 0x1;
		}
		else if (mode == PWM_CONTINUOUS_MODE) {
			pwm_dev->regs->cfg &= 0xFFFFFFFC;
			pwm_dev->regs->cfg |= 0x2;
		}
		else
			return -EINVAL;
		break;
	case PWMIOC_SET_PERIOD:
		get_user(mode, (int *)arg);

		if (mode < 0 || mode > 0xffffffff)
			return -EINVAL;

		if (pwm_dev->regs->cfg & 0x2 && pwm_dev->regs->cfg & 0x20000) {
			if (mode < 7)
				return -EINVAL;

			/* Enable PWM interrupts */
			pwm_dev->regs->cfg |= 0x40;

			/* wait for the transaction to complete */
			wait_event_timeout(pwm_dev->intr_wait,
					   pwm_dev->intr_complete,
					   DAVINCI_PWM_TIMEOUT);

			if (pwm_dev->intr_complete)
				pwm_dev->regs->per = mode;
			else
				return -1;
		} else
			pwm_dev->regs->per = mode;
		break;
	case PWMIOC_SET_DURATION:
		get_user(mode, (int *)arg);

		if (mode < 0 || mode > 0xffffffff)
			return -EINVAL;

		if (pwm_dev->regs->cfg & 0x2 && pwm_dev->regs->cfg & 0x20000) {
			/* Enable PWM interrupts */
			pwm_dev->regs->cfg |= 0x40;

			/* wait for the transaction to complete */
			wait_event_timeout(pwm_dev->intr_wait,
					   pwm_dev->intr_complete,
					   DAVINCI_PWM_TIMEOUT);

			if (pwm_dev->intr_complete)
				pwm_dev->regs->ph1d = mode;
			else
				return -1;
		} else
			pwm_dev->regs->ph1d = mode;
		break;
	case PWMIOC_SET_RPT_VAL:
		get_user(mode, (int *)arg);

		if (mode < 0 || mode > 0xff)
			return -EINVAL;

		pwm_dev->regs->rpt = mode;
		break;
	case PWMIOC_SET_FIRST_PHASE_STATE:
		get_user(mode, (int *)arg);

		if (pwm_dev->regs->cfg & 0x20000)
			return -EBUSY;
		if (mode == 1)
			pwm_dev->regs->cfg |= 0x10;
		else if (mode == 0)
			pwm_dev->regs->cfg &= ~0x10;
		else
			return -EINVAL;
		break;
	case PWMIOC_SET_INACT_OUT_STATE:
		get_user(mode, (int *)arg);

		if (pwm_dev->regs->cfg & 0x20000)
			return -EBUSY;
		if (mode == 1)
			pwm_dev->regs->cfg |= 0x20;
		else if (mode == 0)
			pwm_dev->regs->cfg &= ~0x20;
		else
			return -EINVAL;
		break;
	case PWMIOC_START:
		pwm_dev->regs->start = 0x1;
		break;
	case PWMIOC_STOP:
		if (pwm_dev->regs->cfg & 0x1 && pwm_dev->regs->cfg & 0x20000)
			pwm_dev->regs->cfg &= 0xFFFFFFFC;
		if (pwm_dev->regs->cfg & 0x2 && pwm_dev->regs->cfg & 0x20000) {
			unsigned long temp;
			temp = pwm_dev->regs->cfg;
			temp &= 0xFFFFFFFC;
			temp |= 0x1;

			/* Enable PWM interrupts */
			pwm_dev->regs->cfg |= 0x40;

			/* wait for the transaction to complete */
			wait_event_timeout(pwm_dev->intr_wait,
					   pwm_dev->intr_complete,
					   DAVINCI_PWM_TIMEOUT);

			if (pwm_dev->intr_complete)
				pwm_dev->regs->cfg = temp;
			else
				return -1;
		}
		break;
	}

	return 0;
}
Example #28
File: rtac.c Project: Ca1ne/Enoch316
u32 send_voice_apr(u32 mode, void *buf, u32 opcode)
{
	s32	result;
	u32	count = 0;
	u32	bytes_returned = 0;
	u32	payload_size;
	u32	dest_port;
	struct	apr_hdr	voice_params;
	pr_debug("%s\n", __func__);

	if (copy_from_user(&count, (void *)buf, sizeof(count))) {
		pr_err("%s: Copy to user failed! buf = 0x%x\n",
		       __func__, (unsigned int)buf);
		result = -EFAULT;
		goto done;
	}

	if (count <= 0) {
		pr_err("%s: Invalid buffer size = %d\n", __func__, count);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(payload_size),
						sizeof(payload_size))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}

	if (payload_size > MAX_PAYLOAD_SIZE) {
		pr_err("%s: Invalid payload size = %d\n",
			__func__, payload_size);
		goto done;
	}

	if (copy_from_user(&dest_port, buf + 2 * sizeof(dest_port),
						sizeof(dest_port))) {
		pr_err("%s: Could not copy port id from user buffer\n",
			__func__);
		goto done;
	}

	if ((mode != RTAC_CVP) && (mode != RTAC_CVS)) {
		pr_err("%s: Invalid Mode for APR, mode = %d\n",
			__func__, mode);
		goto done;
	}

	mutex_lock(&rtac_voice_apr_mutex);
	if (rtac_voice_apr_data[mode].apr_handle == NULL) {
		pr_err("%s: APR not initialized\n", __func__);
		goto err;
	}

	
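	/* Save the user buffer size in a global for the response path */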
	rtac_voice_user_buf_size = count;

	
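	/* Copy the command payload from user space, leaving room for the APR header */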
	if (copy_from_user(rtac_voice_buffer + sizeof(voice_params),
		buf + 3 * sizeof(u32), payload_size)) {
		pr_err("%s: Could not copy payload from user buffer\n",
			__func__);
		goto err;
	}

	
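	/* Pack the APR header */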
	voice_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	voice_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		payload_size);
	voice_params.src_svc = 0;
	voice_params.src_domain = APR_DOMAIN_APPS;
	voice_params.src_port = voice_session_id[
					get_voice_index(mode, dest_port)];
	voice_params.dest_svc = 0;
	voice_params.dest_domain = APR_DOMAIN_MODEM;
	voice_params.dest_port = (u16)dest_port;
	voice_params.token = 0;
	voice_params.opcode = opcode;

	memcpy(rtac_voice_buffer, &voice_params, sizeof(voice_params));
	atomic_set(&rtac_voice_apr_data[mode].cmd_state, 1);

	pr_debug("%s: Sending RTAC command size = %d, opcode = %x\n",
		__func__, voice_params.pkt_size, opcode);

	result = apr_send_pkt(rtac_voice_apr_data[mode].apr_handle,
					(uint32_t *)rtac_voice_buffer);
	if (result < 0) {
		pr_err("%s: apr_send_pkt failed opcode = %x\n",
			__func__, opcode);
		goto err;
	}
	
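	/* Wait for the DSP to signal completion via the APR callback */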
	result = wait_event_timeout(rtac_voice_apr_data[mode].cmd_wait,
		(atomic_read(&rtac_voice_apr_data[mode].cmd_state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	mutex_unlock(&rtac_voice_apr_mutex);
	if (!result) {
		pr_err("%s: apr_send_pkt timed out opcode = %x\n",
			__func__, opcode);
		goto done;
	}

	if (rtac_voice_payload_size != 0) {
		if (copy_to_user(buf, rtac_voice_buffer,
				rtac_voice_payload_size + sizeof(u32))) {
			pr_err("%s: Could not copy buffer to user,size = %d\n",
						 __func__, payload_size);
			goto done;
		}
	}

	
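	/* Return the payload size: data read back for GET, data sent otherwise */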
	if (opcode == VOICE_CMD_GET_PARAM)
		bytes_returned = rtac_voice_payload_size;
	else
		bytes_returned = payload_size;
done:
	return bytes_returned;
err:
	mutex_unlock(&rtac_voice_apr_mutex);
	return bytes_returned;
}
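Example #29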
/**
 * fm_event_wait_timeout - sleep until a condition gets true or a timeout elapses
 * @thiz: pointer to the current event object
 * @mask: bitmask of fm_u32 flag bits to wait for
 * @timeout: timeout, in seconds (converted to jiffies internally)
 *
 * fm_event_set() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the condition evaluated to true before the timeout elapsed.
 */
long fm_event_wait_timeout(struct fm_flag_event *thiz, fm_u32 mask, long timeout)
{
	return wait_event_timeout(*((wait_queue_head_t *)(thiz->priv)),
				  ((thiz->flag & mask) == mask), timeout * HZ);
}
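For reference, fm_event_set() is named by the comment above but not included in this listing. A minimal sketch of the setter half, under the assumption that thiz->priv really stores a wait_queue_head_t pointer and thiz->flag is a plain fm_u32 (as the wait side implies), would raise the flag bits first and only then wake the queue:

/* Hypothetical sketch of the setter side, not the driver's actual
 * implementation: raise the requested flag bits, then wake any task
 * sleeping in fm_event_wait_timeout() so it re-evaluates the condition. */
void fm_event_set(struct fm_flag_event *thiz, fm_u32 mask)
{
	thiz->flag |= mask;
	wake_up((wait_queue_head_t *)(thiz->priv));
}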