Example #1
0
int adm_matrix_map(int session_id, int path, int num_copps,
			unsigned int *port_id, int copp_id, bool perf_mode)
{
	struct adm_cmd_matrix_map_routings_v5	*route;
	struct adm_session_map_node_v5 *node;
	uint16_t *copps_list;
	int cmd_size = 0;
	int ret = 0, i = 0;
	void *payload = NULL;
	void *matrix_map = NULL;

	/* Assumes port_ids have already been validated during adm_open */
	int index = q6audio_get_port_index(copp_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d token %d\n",
					__func__, index, copp_id);
		return 0;
	}
	cmd_size = (sizeof(struct adm_cmd_matrix_map_routings_v5) +
			sizeof(struct adm_session_map_node_v5) +
			(sizeof(uint32_t) * num_copps));
	matrix_map = kzalloc(cmd_size, GFP_KERNEL);
	if (matrix_map == NULL) {
		pr_err("%s: Mem alloc failed\n", __func__);
		ret = -ENOMEM;
		return ret;
	}
	route = (struct adm_cmd_matrix_map_routings_v5 *)matrix_map;

	pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%#x coppid[%d]\n",
		 __func__, session_id, path, num_copps, port_id[0], copp_id);

	route->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	route->hdr.pkt_size = cmd_size;
	route->hdr.src_svc = 0;
	route->hdr.src_domain = APR_DOMAIN_APPS;
	route->hdr.src_port = copp_id;
	route->hdr.dest_svc = APR_SVC_ADM;
	route->hdr.dest_domain = APR_DOMAIN_ADSP;
	if (perf_mode) {
		route->hdr.dest_port =
			atomic_read(&this_adm.copp_low_latency_id[index]);
	} else {
		route->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	}
	route->hdr.token = copp_id;
	route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
	route->num_sessions = 1;

	switch (path) {
	case 0x1:
		route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
		break;
	case 0x2:
	case 0x3:
		route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
		break;
	default:
		pr_err("%s: Wrong path set[%d]\n", __func__, path);
		break;
	}
	payload = ((u8 *)matrix_map +
			sizeof(struct adm_cmd_matrix_map_routings_v5));
	node = (struct adm_session_map_node_v5 *)payload;

	node->session_id = session_id;
	node->num_copps = num_copps;
	payload = (u8 *)node + sizeof(struct adm_session_map_node_v5);
	copps_list = (uint16_t *)payload;
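	/* Fill the COPP id list: map each routed port to its active COPP id;
	 * ports that do not resolve to a valid index are skipped.
	 */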
	for (i = 0; i < num_copps; i++) {
		int tmp;
		port_id[i] = q6audio_convert_virtual_to_portid(port_id[i]);

		tmp = q6audio_get_port_index(port_id[i]);

		if (tmp >= 0 && tmp < AFE_MAX_PORTS) {
			if (perf_mode)
				copps_list[i] =
				atomic_read(&this_adm.copp_low_latency_id[tmp]);
			else
				copps_list[i] =
					atomic_read(&this_adm.copp_id[tmp]);
		} else {
			continue;
		}
		pr_debug("%s: port_id[%d]: %#x, index: %d act coppid[0x%x]\n",
			__func__, i, port_id[i], tmp, copps_list[i]);
	}
	atomic_set(&this_adm.copp_stat[index], 0);

	ret = apr_send_pkt(this_adm.apr, (uint32_t *)matrix_map);
	if (ret < 0) {
		pr_err("%s: ADM routing for port %#x failed\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_adm.wait[index],
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: ADM cmd Route failed for port %#x\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}

	for (i = 0; i < num_copps; i++)
		send_adm_cal(port_id[i], path, perf_mode);

	for (i = 0; i < num_copps; i++) {
		int tmp, copp_id;
		tmp = afe_get_port_index(port_id[i]);
		if (tmp >= 0 && tmp < AFE_MAX_PORTS) {
			if (!perf_mode)
				copp_id = atomic_read(&this_adm.copp_id[tmp]);
			else
				copp_id =
				atomic_read(&this_adm.copp_low_latency_id[tmp]);
			rtac_add_adm_device(port_id[i],
						copp_id, path, session_id);
			pr_debug("%s, copp_id: %d\n", __func__, copp_id);
		} else {
			pr_debug("%s: Invalid port index %d", __func__, tmp);
		}
	}

fail_cmd:
	kfree(matrix_map);
	return ret;
}
Example #2
0
int adm_multi_ch_copp_open_v2(int port_id, int path, int rate, int channel_mode,
				int topology, int perfmode, uint16_t bit_width)
{
	struct adm_multi_ch_copp_open_command_v2 open;
	int ret = 0;
	int index;

	pr_debug("%s: port %d path:%d rate:%d channel :%d\n", __func__,
				port_id, path, rate, channel_mode);

	port_id = afe_convert_virtual_to_portid(port_id);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	
	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {

		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);

		open.hdr.pkt_size =
			sizeof(struct adm_multi_ch_copp_open_command_v2);

		if (perfmode) {
			pr_debug("%s Performance mode", __func__);
			open.hdr.opcode = ADM_CMD_MULTI_CHANNEL_COPP_OPEN_V3;
			open.flags = ADM_MULTI_CH_COPP_OPEN_PERF_MODE_BIT;
		} else {
			open.hdr.opcode = ADM_CMD_MULTI_CHANNEL_COPP_OPEN_V2;
		}

		memset(open.dev_channel_mapping, 0, 8);

		if (channel_mode == 1)	{
			open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
		} else if (channel_mode == 2) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
		} else if (channel_mode == 4) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_RB;
			open.dev_channel_mapping[3] = PCM_CHANNEL_LB;
		} else if (channel_mode == 6) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
			open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
			open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
		} else if (channel_mode == 8) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
			open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
			open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
			open.dev_channel_mapping[6] = PCM_CHANNEL_FLC;
			open.dev_channel_mapping[7] = PCM_CHANNEL_FRC;
		} else {
			pr_err("%s invalid num_chan %d\n", __func__,
					channel_mode);
			return -EINVAL;
		}
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = port_id;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = port_id;
		open.hdr.token = port_id;

		open.mode = path;
		open.endpoint_id1 = port_id;

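		/* endpoint_id2 carries the echo-cancellation reference Rx port
		 * for capture paths; 0xFFFF means no second endpoint.
		 */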
		if (this_adm.ec_ref_rx == 0) {
			open.endpoint_id2 = 0xFFFF;
		} else if (this_adm.ec_ref_rx && (path != 1)) {
				open.endpoint_id2 = this_adm.ec_ref_rx;
				this_adm.ec_ref_rx = 0;
		}

		pr_debug("%s open.endpoint_id1:%d open.endpoint_id2:%d",
			__func__, open.endpoint_id1, open.endpoint_id2);
		
		if (path == ADM_PATH_PLAYBACK)
			open.topology_id = get_adm_rx_topology();
		else {
			open.topology_id = get_adm_tx_topology();
			if ((open.topology_id ==
				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;
		}

		if (open.topology_id  == 0)
			open.topology_id = topology;

		open.channel_config = channel_mode & 0x00FF;
		open.bit_width = bit_width;
		open.rate  = rate;

		pr_debug("%s: channel_config=%d port_id=%d\n",
			__func__, open.channel_config,
			open.endpoint_id1);
		pr_debug("%s: rate=%d topology_id=0x%X\n",
			__func__, open.rate, open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d failed\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		
		ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d\n", __func__,
								port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:

	return ret;
}
Example #3
0
int adm_matrix_map(int session_id, int path, int num_copps,
			unsigned int *port_id, int copp_id)
{
	struct adm_routings_command	route;
	int ret = 0, i = 0;
	/* Assumes port_ids have already been validated during adm_open */
	int index = afe_get_port_index(copp_id);

	pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n",
		 __func__, session_id, path, num_copps, port_id[0]);

	route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	route.hdr.pkt_size = sizeof(route);
	route.hdr.src_svc = 0;
	route.hdr.src_domain = APR_DOMAIN_APPS;
	route.hdr.src_port = copp_id;
	route.hdr.dest_svc = APR_SVC_ADM;
	route.hdr.dest_domain = APR_DOMAIN_ADSP;
	route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	route.hdr.token = copp_id;
	route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
	route.num_sessions = 1;
	route.session[0].id = session_id;
	route.session[0].num_copps = num_copps;

	for (i = 0; i < num_copps; i++) {
		int tmp;
		tmp = afe_get_port_index(port_id[i]);

		pr_debug("%s: port_id[%d]: %d, index: %d\n", __func__, i,
			 port_id[i], tmp);

		route.session[0].copp_id[i] =
					atomic_read(&this_adm.copp_id[tmp]);
	}
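	/* Zero the unused trailing copp_id slot when an odd number of COPPs
	 * is routed.
	 */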
	if (num_copps % 2)
		route.session[0].copp_id[i] = 0;

	switch (path) {
	case 0x1:
		route.path = AUDIO_RX;
		break;
	case 0x2:
	case 0x3:
		route.path = AUDIO_TX;
		break;
	default:
		pr_err("%s: Wrong path set[%d]\n", __func__, path);
		break;
	}
	atomic_set(&this_adm.copp_stat[index], 0);

	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
	if (ret < 0) {
		pr_err("%s: ADM routing for port %d failed\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_adm.wait,
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: ADM cmd Route failed for port %d\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}

	for (i = 0; i < num_copps; i++)
		send_adm_cal(port_id[i], path);

	return 0;

fail_cmd:

	return ret;
}
Example #4
0
static int send_adm_cal_block(int port_id, struct acdb_cal_block *aud_cal)
{
	s32				result = 0;
	struct adm_set_params_command	adm_params;
	int index = afe_get_port_index(port_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d portid %d\n",
				__func__, index, port_id);
		return 0;
	}

	pr_debug("%s: Port id %d, index %d\n", __func__, port_id, index);

	if (!aud_cal || aud_cal->cal_size == 0) {
		pr_debug("%s: No ADM cal to send for port_id = %d!\n",
			__func__, port_id);
		result = -EINVAL;
		goto done;
	}

	adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		sizeof(adm_params));
	adm_params.hdr.src_svc = APR_SVC_ADM;
	adm_params.hdr.src_domain = APR_DOMAIN_APPS;
	adm_params.hdr.src_port = port_id;
	adm_params.hdr.dest_svc = APR_SVC_ADM;
	adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_params.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	adm_params.hdr.token = port_id;
	adm_params.hdr.opcode = ADM_CMD_SET_PARAMS;
	adm_params.payload = aud_cal->cal_paddr;
	adm_params.payload_size = aud_cal->cal_size;

	atomic_set(&this_adm.copp_stat[index], 0);
	pr_debug("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n",
		__func__, adm_params.payload, adm_params.payload_size);
	result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
	if (result < 0) {
		pr_err("%s: Set params failed port = %d payload = 0x%x\n",
			__func__, port_id, aud_cal->cal_paddr);
		result = -EINVAL;
		goto done;
	}
	
	result = wait_event_timeout(this_adm.wait,
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		pr_err("%s: Set params timed out port = %d, payload = 0x%x\n",
			__func__, port_id, aud_cal->cal_paddr);
		HTC_Q6_BUG();
		result = -EINVAL;
		goto done;
	}

	result = 0;
done:
	return result;
}
Example #5
0
int adm_disconnect_afe_port(int mode, int session_id, int port_id)
{
	struct adm_cmd_connect_afe_port	cmd;
	int ret = 0;
	int index;

	pr_info("%s: port %d session id:%d mode:%d\n", __func__,
				port_id, session_id, mode);

	port_id = afe_convert_virtual_to_portid(port_id);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}
	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	cmd.hdr.pkt_size = sizeof(cmd);
	cmd.hdr.src_svc = APR_SVC_ADM;
	cmd.hdr.src_domain = APR_DOMAIN_APPS;
	cmd.hdr.src_port = port_id;
	cmd.hdr.dest_svc = APR_SVC_ADM;
	cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
	cmd.hdr.dest_port = port_id;
	cmd.hdr.token = port_id;
	cmd.hdr.opcode = ADM_CMD_DISCONNECT_AFE_PORT;

	cmd.mode = mode;
	cmd.session_id = session_id;
	cmd.afe_port_id = port_id;

	atomic_set(&this_adm.copp_stat[index], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
	if (ret < 0) {
		pr_err("%s:ADM enable for port %d failed\n",
					__func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	
	ret = wait_event_timeout(this_adm.wait,
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s ADM connect AFE failed for port %d\n", __func__,
							port_id);
		HTC_Q6_BUG();
		ret = -EINVAL;
		goto fail_cmd;
	}
	atomic_dec(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:

	return ret;
}
Example #6
0
int adm_matrix_map(int session_id, int path, int num_copps,
			unsigned int *port_id, int copp_id)
{
	struct adm_routings_command	route;
	int ret = 0, i = 0;
	
	int index = afe_get_port_index(copp_id);
	int copp_cnt;

	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d token %d\n",
					__func__, index, copp_id);
		return 0;
	}

	pr_info("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n",
		 __func__, session_id, path, num_copps, port_id[0]);

	route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	route.hdr.pkt_size = sizeof(route);
	route.hdr.src_svc = 0;
	route.hdr.src_domain = APR_DOMAIN_APPS;
	route.hdr.src_port = copp_id;
	route.hdr.dest_svc = APR_SVC_ADM;
	route.hdr.dest_domain = APR_DOMAIN_ADSP;
	route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	route.hdr.token = copp_id;
	route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
	route.num_sessions = 1;
	route.session[0].id = session_id;

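	/* Clamp the number of routed COPPs to what the routing payload can hold. */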
	if (num_copps < ADM_MAX_COPPS) {
		copp_cnt = num_copps;
	} else {
		copp_cnt = ADM_MAX_COPPS;
		/* Warn only, since routing playback/capture to/from more COPPs
		 * than the maximum allowed is extremely unlikely.
		 */
		pr_warn("%s: max out routable COPPs\n", __func__);
	}

	route.session[0].num_copps = copp_cnt;
	for (i = 0; i < copp_cnt; i++) {
		int tmp;
		port_id[i] = afe_convert_virtual_to_portid(port_id[i]);

		tmp = afe_get_port_index(port_id[i]);

		pr_info("%s: port_id[%d]: %d, index: %d\n", __func__, i,
			 port_id[i], tmp);

		if (tmp >= 0 && tmp < AFE_MAX_PORTS)
			route.session[0].copp_id[i] =
					atomic_read(&this_adm.copp_id[tmp]);
	}

	if (copp_cnt % 2)
		route.session[0].copp_id[i] = 0;

	switch (path) {
	case 0x1:
		route.path = AUDIO_RX;
		break;
	case 0x2:
	case 0x3:
		route.path = AUDIO_TX;
		break;
	default:
		pr_err("%s: Wrong path set[%d]\n", __func__, path);
		break;
	}
	atomic_set(&this_adm.copp_stat[index], 0);

	ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
	if (ret < 0) {
		pr_err("%s: ADM routing for port %d failed\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_adm.wait,
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: ADM cmd Route failed for port %d\n",
					__func__, port_id[0]);
		HTC_Q6_BUG();
		ret = -EINVAL;
		goto fail_cmd;
	}

	for (i = 0; i < num_copps; i++)
		send_adm_cal(port_id[i], path);

	for (i = 0; i < num_copps; i++) {
		int tmp;
		tmp = afe_get_port_index(port_id[i]);
		if (tmp >= 0 && tmp < AFE_MAX_PORTS)
			rtac_add_adm_device(port_id[i],
				atomic_read(&this_adm.copp_id[tmp]),
				path, session_id);
		else
			pr_debug("%s: Invalid port index %d",
				__func__, tmp);
	}
	return 0;

fail_cmd:

	return ret;
}
Example #7
0
int adm_close(int port_id)
{
	struct apr_hdr close;

	int ret = 0;
	int index = 0;

	port_id = afe_convert_virtual_to_portid(port_id);

	index = afe_get_port_index(port_id);
	if (afe_validate_port(port_id) < 0)
		return -EINVAL;

	pr_info("%s port_id=%d index %d\n", __func__, port_id, index);

	if (!(atomic_read(&this_adm.copp_cnt[index]))) {
		pr_err("%s: copp count for port[%d]is 0\n", __func__, port_id);

		goto fail_cmd;
	}
	atomic_dec(&this_adm.copp_cnt[index]);
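	/* Issue COPP_CLOSE only when the last reference on this port is dropped. */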
	if (!(atomic_read(&this_adm.copp_cnt[index]))) {

		close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		close.pkt_size = sizeof(close);
		close.src_svc = APR_SVC_ADM;
		close.src_domain = APR_DOMAIN_APPS;
		close.src_port = port_id;
		close.dest_svc = APR_SVC_ADM;
		close.dest_domain = APR_DOMAIN_ADSP;
		close.dest_port = atomic_read(&this_adm.copp_id[index]);
		close.token = port_id;
		close.opcode = ADM_CMD_COPP_CLOSE;

		atomic_set(&this_adm.copp_id[index], RESET_COPP_ID);
		atomic_set(&this_adm.copp_stat[index], 0);


		pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n",
				__func__,
				atomic_read(&this_adm.copp_id[index]),
				port_id, index,
				atomic_read(&this_adm.copp_cnt[index]));

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
		if (ret < 0) {
			pr_err("%s ADM close failed\n", __func__);
			ret = -EINVAL;
			goto fail_cmd;
		}

		ret = wait_event_timeout(this_adm.wait,
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s: ADM cmd Route failed for port %d\n",
						__func__, port_id);
			HTC_Q6_BUG();
			ret = -EINVAL;
			goto fail_cmd;
		}

		rtac_remove_adm_device(port_id);
	}

fail_cmd:
	return ret;
}
Example #8
0
int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
			bool perf_mode, uint16_t bits_per_sample)
{
	struct adm_cmd_device_open_v5	open;
	int ret = 0;
	int index;
	int tmp_port = q6audio_get_port_id(port_id);

	pr_debug("%s: port %#x path:%d rate:%d mode:%d perf_mode:%d\n",
		 __func__, port_id, path, rate, channel_mode, perf_mode);

	port_id = q6audio_convert_virtual_to_portid(port_id);

	if (q6audio_validate_port(port_id) < 0) {
		pr_err("%s port idi[%#x] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = q6audio_get_port_index(port_id);
	pr_debug("%s: Port ID %#x, index %d\n", __func__, port_id, index);

	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	if (!perf_mode) {
		atomic_set(&this_adm.copp_perf_mode[index], 0);
		send_adm_custom_topology(port_id);
	} else {
		atomic_set(&this_adm.copp_perf_mode[index], 1);
	}

	/* Create a COPP if the port is not already enabled */
	if ((!perf_mode && (atomic_read(&this_adm.copp_cnt[index]) == 0)) ||
		(perf_mode &&
		(atomic_read(&this_adm.copp_low_latency_cnt[index]) == 0))) {
		pr_debug("%s:opening ADM: perf_mode: %d\n", __func__,
			perf_mode);
		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		open.hdr.pkt_size = sizeof(open);
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = tmp_port;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = tmp_port;
		open.hdr.token = port_id;
		open.hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;
		open.flags = 0x00;
		if (perf_mode) {
			open.flags |= ADM_LOW_LATENCY_DEVICE_SESSION <<
				ADM_BIT_SHIFT_DEVICE_PERF_MODE_FLAG;
		} else {
			open.flags |= ADM_LEGACY_DEVICE_SESSION <<
				ADM_BIT_SHIFT_DEVICE_PERF_MODE_FLAG;
		}

		open.mode_of_operation = path;
		open.endpoint_id_1 = tmp_port;

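		/* endpoint_id_2 carries the echo-cancellation reference Rx port
		 * for capture paths; 0xFFFF means no second endpoint.
		 */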
		if (this_adm.ec_ref_rx == -1) {
			open.endpoint_id_2 = 0xFFFF;
		} else if (this_adm.ec_ref_rx && (path != 1)) {
			open.endpoint_id_2 = this_adm.ec_ref_rx;
			this_adm.ec_ref_rx = -1;
		}

		open.topology_id = topology;
		if ((open.topology_id == VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			(open.topology_id == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;

		open.dev_num_channel = channel_mode & 0x00FF;
		open.bit_width = bits_per_sample;
		open.sample_rate  = rate;
		memset(open.dev_channel_mapping, 0, 8);

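		/* Default positional channel map per channel count; a client
		 * supplied map overrides this below for more than two channels.
		 */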
		if (channel_mode == 1)	{
			open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
		} else if (channel_mode == 2) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
		} else if (channel_mode == 3)	{
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_FC;
		} else if (channel_mode == 4) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_RB;
			open.dev_channel_mapping[3] = PCM_CHANNEL_LB;
		} else if (channel_mode == 5) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[3] = PCM_CHANNEL_LB;
			open.dev_channel_mapping[4] = PCM_CHANNEL_RB;
		} else if (channel_mode == 6) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[3] = PCM_CHANNEL_LFE;
			open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
			open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
		} else if (channel_mode == 8) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
			open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
			open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
			open.dev_channel_mapping[6] = PCM_CHANNEL_RLC;
			open.dev_channel_mapping[7] = PCM_CHANNEL_RRC;
		} else {
			pr_err("%s invalid num_chan %d\n", __func__,
					channel_mode);
			return -EINVAL;
		}
		if ((open.dev_num_channel > 2) &&
			multi_ch_map.set_channel_map)
			memcpy(open.dev_channel_mapping,
				multi_ch_map.channel_mapping,
				PCM_FORMAT_MAX_NUM_CHANNEL);

		pr_debug("%s: port_id=%#x rate=%d topology_id=0x%X\n",
			__func__, open.endpoint_id_1, open.sample_rate,
			open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %#x for[%d] failed\n",
						__func__, tmp_port, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait[index],
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %#x for [%d]\n",
						__func__, tmp_port, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	if (perf_mode) {
		atomic_inc(&this_adm.copp_low_latency_cnt[index]);
		pr_debug("%s: index: %d coppid: %d", __func__, index,
			atomic_read(&this_adm.copp_low_latency_id[index]));
	} else {
		atomic_inc(&this_adm.copp_cnt[index]);
		pr_debug("%s: index: %d coppid: %d", __func__, index,
			atomic_read(&this_adm.copp_id[index]));
	}
	return 0;

fail_cmd:

	return ret;
}
Example #9
0
int afe_open(u16 port_id, union afe_port_config *afe_config, int rate)
{
	struct afe_port_start_command start;
	struct afe_audioif_config_command config;
	int ret = 0;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		ret = -EINVAL;
		return ret;
	}

	pr_info("%s: %d %d\n", __func__, port_id, rate);

	if (this_afe.apr == NULL) {
		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
					0xFFFFFFFF, &this_afe);
		pr_info("%s: Register AFE\n", __func__);
		if (this_afe.apr == NULL) {
			pr_err("%s: Unable to register AFE\n", __func__);
			ret = -ENODEV;
			return ret;
		}
	}

	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	config.hdr.pkt_size = sizeof(config);
	config.hdr.src_port = 0;
	config.hdr.dest_port = 0;
	config.hdr.token = 0;
	config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;

	if (afe_validate_port(port_id) < 0) {

		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	config.port_id = port_id;
	config.port = *afe_config;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PORT_CMD_START;
	start.port_id = port_id;
	start.gain = 0x2000;
	start.sample_rate = rate;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
			this_afe.task->comm, this_afe.task->pid);
	return 0;
fail_cmd:
	return ret;
}
Example #10
0
void send_adm_custom_topology(int port_id)
{
	struct acdb_cal_block		cal_block;
	struct cmd_set_topologies	adm_top;
	int				index;
	int				result;
	int				size = 4096;

	get_adm_custom_topology(&cal_block);
	if (cal_block.cal_size == 0) {
		pr_debug("%s: no cal to send addr= 0x%x\n",
				__func__, cal_block.cal_paddr);
		goto done;
	}

	index = afe_get_port_index(port_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d portid %#x\n",
				__func__, index, port_id);
		goto done;
	}

	if (this_adm.set_custom_topology) {
		/* specific index 4 for adm topology memory */
		atomic_set(&this_adm.mem_map_cal_index, ADM_CUSTOM_TOP_CAL);

		/* Only call this once */
		this_adm.set_custom_topology = 0;

		result = adm_memory_map_regions(GLOBAL_CFG,
				&cal_block.cal_paddr, 0, &size, 1);
		if (result < 0) {
			pr_err("%s: mmap did not work! addr = 0x%x, size = %d\n",
				__func__, cal_block.cal_paddr,
			       cal_block.cal_size);
			goto done;
		}
	}


	adm_top.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_top.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		sizeof(adm_top));
	adm_top.hdr.src_svc = APR_SVC_ADM;
	adm_top.hdr.src_domain = APR_DOMAIN_APPS;
	adm_top.hdr.src_port = port_id;
	adm_top.hdr.dest_svc = APR_SVC_ADM;
	adm_top.hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_top.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	adm_top.hdr.token = port_id;
	adm_top.hdr.opcode = ADM_CMD_ADD_TOPOLOGIES;
	adm_top.payload_addr_lsw = cal_block.cal_paddr;
	adm_top.payload_addr_msw = 0;
	adm_top.mem_map_handle =
		atomic_read(&this_adm.mem_map_cal_handles[ADM_CUSTOM_TOP_CAL]);
	adm_top.payload_size = cal_block.cal_size;

	atomic_set(&this_adm.copp_stat[index], 0);
	pr_debug("%s: Sending ADM_CMD_ADD_TOPOLOGIES payload = 0x%x, size = %d\n",
		__func__, adm_top.payload_addr_lsw,
		adm_top.payload_size);
	result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_top);
	if (result < 0) {
		pr_err("%s: Set topologies failed port = 0x%x payload = 0x%x\n",
			__func__, port_id, cal_block.cal_paddr);
		goto done;
	}
	/* Wait for the callback */
	result = wait_event_timeout(this_adm.wait[index],
		atomic_read(&this_adm.copp_stat[index]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		pr_err("%s: Set topologies timed out port = 0x%x, payload = 0x%x\n",
			__func__, port_id, cal_block.cal_paddr);
		goto done;
	}

done:
	return;
}
Example #11
0
int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params)
{
	struct adm_cmd_set_pp_params_inband_v5 *adm_params = NULL;
	int ret = 0, sz = 0;
	int index;

	pr_debug("SRS - %s", __func__);
	switch (srs_tech_id) {
	case SRS_ID_GLOBAL: {
		struct srs_trumedia_params_GLOBAL *glb_params = NULL;
		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_GLOBAL);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			pr_err("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_GLOBAL) +
			sizeof(struct adm_param_data_v5);
		adm_params->params.param_id = SRS_TRUMEDIA_PARAMS;
		adm_params->params.param_size =
				sizeof(struct srs_trumedia_params_GLOBAL);
		glb_params = (struct srs_trumedia_params_GLOBAL *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(glb_params, srs_params,
			sizeof(struct srs_trumedia_params_GLOBAL));
		pr_debug("SRS - %s: Global params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x\n",
				__func__, (int)glb_params->v1,
				(int)glb_params->v2, (int)glb_params->v3,
				(int)glb_params->v4, (int)glb_params->v5,
				(int)glb_params->v6, (int)glb_params->v7,
				(int)glb_params->v8);
		break;
	}
	case SRS_ID_WOWHD: {
		struct srs_trumedia_params_WOWHD *whd_params = NULL;
		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_WOWHD);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			pr_err("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_WOWHD) +
			sizeof(struct adm_param_data_v5);
		adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_WOWHD;
		adm_params->params.param_size =
				sizeof(struct srs_trumedia_params_WOWHD);
		whd_params = (struct srs_trumedia_params_WOWHD *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(whd_params, srs_params,
				sizeof(struct srs_trumedia_params_WOWHD));
		pr_debug("SRS - %s: WOWHD params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x, 9 = %x, 10 = %x, 11 = %x\n",
			 __func__, (int)whd_params->v1,
			(int)whd_params->v2, (int)whd_params->v3,
			(int)whd_params->v4, (int)whd_params->v5,
			(int)whd_params->v6, (int)whd_params->v7,
			(int)whd_params->v8, (int)whd_params->v9,
			(int)whd_params->v10, (int)whd_params->v11);
		break;
	}
	case SRS_ID_CSHP: {
		struct srs_trumedia_params_CSHP *chp_params = NULL;
		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_CSHP);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			pr_err("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_CSHP) +
			sizeof(struct adm_param_data_v5);
		adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_CSHP;
		adm_params->params.param_size =
				sizeof(struct srs_trumedia_params_CSHP);
		chp_params = (struct srs_trumedia_params_CSHP *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(chp_params, srs_params,
				sizeof(struct srs_trumedia_params_CSHP));
		pr_debug("SRS - %s: CSHP params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x, 9 = %x\n",
				__func__, (int)chp_params->v1,
				(int)chp_params->v2, (int)chp_params->v3,
				(int)chp_params->v4, (int)chp_params->v5,
				(int)chp_params->v6, (int)chp_params->v7,
				(int)chp_params->v8, (int)chp_params->v9);
		break;
	}
	case SRS_ID_HPF: {
		struct srs_trumedia_params_HPF *hpf_params = NULL;
		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_HPF);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			pr_err("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_HPF) +
			sizeof(struct adm_param_data_v5);
		adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_HPF;
		adm_params->params.param_size =
				sizeof(struct srs_trumedia_params_HPF);
		hpf_params = (struct srs_trumedia_params_HPF *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(hpf_params, srs_params,
			sizeof(struct srs_trumedia_params_HPF));
		pr_debug("SRS - %s: HPF params - 1 = %x\n", __func__,
				(int)hpf_params->v1);
		break;
	}
	case SRS_ID_PEQ: {
		struct srs_trumedia_params_PEQ *peq_params = NULL;
		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_PEQ);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			pr_err("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
				sizeof(struct srs_trumedia_params_PEQ) +
				sizeof(struct adm_param_data_v5);
		adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_PEQ;
		adm_params->params.param_size =
				sizeof(struct srs_trumedia_params_PEQ);
		peq_params = (struct srs_trumedia_params_PEQ *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(peq_params, srs_params,
				sizeof(struct srs_trumedia_params_PEQ));
		pr_debug("SRS - %s: PEQ params - 1 = %x 2 = %x, 3 = %x, 4 = %x\n",
			__func__, (int)peq_params->v1,
			(int)peq_params->v2, (int)peq_params->v3,
			(int)peq_params->v4);
		break;
	}
	case SRS_ID_HL: {
		struct srs_trumedia_params_HL *hl_params = NULL;
		sz = sizeof(struct adm_cmd_set_pp_params_inband_v5) +
			sizeof(struct srs_trumedia_params_HL);
		adm_params = kzalloc(sz, GFP_KERNEL);
		if (!adm_params) {
			pr_err("%s, adm params memory alloc failed\n",
				__func__);
			return -ENOMEM;
		}
		adm_params->payload_size =
			sizeof(struct srs_trumedia_params_HL) +
			sizeof(struct adm_param_data_v5);
		adm_params->params.param_id = SRS_TRUMEDIA_PARAMS_HL;
		adm_params->params.param_size =
			sizeof(struct srs_trumedia_params_HL);
		hl_params = (struct srs_trumedia_params_HL *)
			((u8 *)adm_params +
			sizeof(struct adm_cmd_set_pp_params_inband_v5));
		memcpy(hl_params, srs_params,
				sizeof(struct srs_trumedia_params_HL));
		pr_debug("SRS - %s: HL params - 1 = %x, 2 = %x, 3 = %x, 4 = %x, 5 = %x, 6 = %x, 7 = %x\n",
				__func__, (int)hl_params->v1,
				(int)hl_params->v2, (int)hl_params->v3,
				(int)hl_params->v4, (int)hl_params->v5,
				(int)hl_params->v6, (int)hl_params->v7);
		break;
	}
	default:
		goto fail_cmd;
	}

	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	adm_params->hdr.pkt_size = sz;
	adm_params->hdr.src_svc = APR_SVC_ADM;
	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
	adm_params->hdr.src_port = port_id;
	adm_params->hdr.dest_svc = APR_SVC_ADM;
	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
	index = afe_get_port_index(port_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d portid %#x\n",
				__func__, index, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	adm_params->hdr.token = port_id;
	adm_params->hdr.opcode = ADM_CMD_SET_PP_PARAMS_V5;
	adm_params->payload_addr_lsw = 0;
	adm_params->payload_addr_msw = 0;
	adm_params->mem_map_handle = 0;

	adm_params->params.module_id = SRS_TRUMEDIA_MODULE_ID;
	adm_params->params.reserved = 0;

	pr_debug("SRS - %s: Command was sent now check Q6 - port id = %d, size %d, module id %x, param id %x.\n",
			__func__, adm_params->hdr.dest_port,
			adm_params->payload_size, adm_params->params.module_id,
			adm_params->params.param_id);

	ret = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
	if (ret < 0) {
		pr_err("SRS - %s: ADM enable for port %d failed\n", __func__,
			port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	/* Note: the wait condition is constant, so this returns without
	 * actually blocking on an ADM ack.
	 */
	ret = wait_event_timeout(this_adm.wait[index], 1,
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: SRS set params timed out port = %d\n",
			__func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

fail_cmd:
	kfree(adm_params);
	return ret;
}
Example #12
0
int adm_dolby_dap_get_params(int port_id, uint32_t module_id, uint32_t param_id,
				uint32_t params_length, char *params)
{
	struct adm_cmd_get_pp_params_v5	*adm_params = NULL;
	int sz, rc = 0, i = 0, index = afe_get_port_index(port_id);
	int *params_data = (int *)params;

	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d portid %#x\n",
			__func__, index, port_id);
		return -EINVAL;
	}
	sz = sizeof(struct adm_cmd_get_pp_params_v5) + params_length;
	adm_params = kzalloc(sz, GFP_KERNEL);
	if (!adm_params) {
		pr_err("%s, adm params memory alloc failed", __func__);
		return -ENOMEM;
	}

	memcpy(((u8 *)adm_params + sizeof(struct adm_cmd_get_pp_params_v5)),
			params, params_length);
	adm_params->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	adm_params->hdr.pkt_size = sz;
	adm_params->hdr.src_svc = APR_SVC_ADM;
	adm_params->hdr.src_domain = APR_DOMAIN_APPS;
	adm_params->hdr.src_port = port_id;
	adm_params->hdr.dest_svc = APR_SVC_ADM;
	adm_params->hdr.dest_domain = APR_DOMAIN_ADSP;
	adm_params->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	adm_params->hdr.token = port_id;
	adm_params->hdr.opcode = ADM_CMD_GET_PP_PARAMS_V5;
	adm_params->data_payload_addr_lsw = 0;
	adm_params->data_payload_addr_msw = 0;
	adm_params->mem_map_handle = 0;
	adm_params->module_id = module_id;
	adm_params->param_id = param_id;
	adm_params->param_max_size = params_length;
	adm_params->reserved = 0;

	atomic_set(&this_adm.copp_stat[index], 0);
	rc = apr_send_pkt(this_adm.apr, (uint32_t *)adm_params);
	if (rc < 0) {
		pr_err("%s: Failed to Get DOLBY Params on port %d\n", __func__,
			port_id);
		rc = -EINVAL;
		goto dolby_dap_get_param_return;
	}
	/* Wait for the callback with copp id */
	rc = wait_event_timeout(this_adm.wait[index],
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
	if (!rc) {
		pr_err("%s: DOLBY get params timed out port = %d\n", __func__,
			port_id);
		rc = -EINVAL;
		goto dolby_dap_get_param_return;
	}
	if (params_data) {
		for (i = 0; i < adm_dolby_get_parameters[0]; i++)
			params_data[i] = adm_dolby_get_parameters[1+i];
	}
	rc = 0;
dolby_dap_get_param_return:
	kfree(adm_params);
	return rc;
}
Example #13
0
int adm_close(int port_id, bool perf_mode)
{
	struct apr_hdr close;

	int ret = 0;
	int index = 0;
	int copp_id = RESET_COPP_ID;

	port_id = q6audio_convert_virtual_to_portid(port_id);

	index = q6audio_get_port_index(port_id);
	if (q6audio_validate_port(port_id) < 0)
		return -EINVAL;

	pr_debug("%s port_id=%#x index %d perf_mode: %d\n", __func__, port_id,
		index, perf_mode);

	if (perf_mode) {
		if (!(atomic_read(&this_adm.copp_low_latency_cnt[index]))) {
			pr_err("%s: copp count for port[%#x]is 0\n", __func__,
				port_id);
			goto fail_cmd;
		}
		atomic_dec(&this_adm.copp_low_latency_cnt[index]);
	} else {
		if (!(atomic_read(&this_adm.copp_cnt[index]))) {
			pr_err("%s: copp count for port[%#x]is 0\n", __func__,
				port_id);
			goto fail_cmd;
		}
		atomic_dec(&this_adm.copp_cnt[index]);
	}
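	/* Send DEVICE_CLOSE only when the last reference on this port's
	 * COPP (legacy or low-latency, depending on perf_mode) is dropped.
	 */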
	if ((!perf_mode && !(atomic_read(&this_adm.copp_cnt[index]))) ||
		(perf_mode &&
		!(atomic_read(&this_adm.copp_low_latency_cnt[index])))) {

		pr_debug("%s:Closing ADM: perf_mode: %d\n", __func__,
				perf_mode);
		close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		close.pkt_size = sizeof(close);
		close.src_svc = APR_SVC_ADM;
		close.src_domain = APR_DOMAIN_APPS;
		close.src_port = port_id;
		close.dest_svc = APR_SVC_ADM;
		close.dest_domain = APR_DOMAIN_ADSP;
		if (perf_mode)
			close.dest_port =
			     atomic_read(&this_adm.copp_low_latency_id[index]);
		else
			close.dest_port = atomic_read(&this_adm.copp_id[index]);
		close.token = port_id;
		close.opcode = ADM_CMD_DEVICE_CLOSE_V5;

		atomic_set(&this_adm.copp_stat[index], 0);

		if (perf_mode) {
			copp_id = atomic_read(&this_adm.copp_low_latency_id[index]);
			pr_debug("%s:coppid %d portid=%#x index=%d coppcnt=%d\n",
				__func__,
				copp_id,
				port_id, index,
				atomic_read(&this_adm.copp_low_latency_cnt[index]));
			atomic_set(&this_adm.copp_low_latency_id[index],
				RESET_COPP_ID);
		} else {
			copp_id = atomic_read(&this_adm.copp_id[index]);
			pr_debug("%s:coppid %d portid=%#x index=%d coppcnt=%d\n",
				__func__,
				copp_id,
				port_id, index,
				atomic_read(&this_adm.copp_cnt[index]));
			atomic_set(&this_adm.copp_id[index],
				RESET_COPP_ID);
		}

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
		if (ret < 0) {
			pr_err("%s ADM close failed\n", __func__);
			ret = -EINVAL;
			goto fail_cmd;
		}

		ret = wait_event_timeout(this_adm.wait[index],
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s: ADM cmd Route failed for port %#x\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}

	pr_debug("%s: remove adm device from rtac\n", __func__);
	rtac_remove_adm_device(port_id, copp_id);

fail_cmd:
	return ret;
}
Example #14
0
int adm_memory_map_regions(int port_id,
		uint32_t *buf_add, uint32_t mempool_id,
		uint32_t *bufsz, uint32_t bufcnt)
{
	struct  avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
	struct  avs_shared_map_region_payload *mregions = NULL;
	void    *mmap_region_cmd = NULL;
	void    *payload = NULL;
	int     ret = 0;
	int     i = 0;
	int     cmd_size = 0;
	int     index = 0;

	pr_debug("%s\n", __func__);
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	port_id = q6audio_convert_virtual_to_portid(port_id);

	if (q6audio_validate_port(port_id) < 0) {
		pr_err("%s port id[%#x] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = q6audio_get_port_index(port_id);

	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
			+ sizeof(struct avs_shared_map_region_payload)
			* bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!mmap_region_cmd) {
		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
								APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	mmap_regions->hdr.token = port_id;
	mmap_regions->hdr.opcode = ADM_CMD_SHARED_MEM_MAP_REGIONS;
	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
	mmap_regions->num_regions = bufcnt & 0x00ff;
	mmap_regions->property_flag = 0x00;

	pr_debug("%s: map_regions->num_regions = %d\n", __func__,
				mmap_regions->num_regions);
	payload = ((u8 *) mmap_region_cmd +
				sizeof(struct avs_cmd_shared_mem_map_regions));
	mregions = (struct avs_shared_map_region_payload *)payload;

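	/* One region descriptor per buffer; only 32-bit physical addresses
	 * are passed here, so the MSW is left at zero.
	 */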
	for (i = 0; i < bufcnt; i++) {
		mregions->shm_addr_lsw = buf_add[i];
		mregions->shm_addr_msw = 0x00;
		mregions->mem_size_bytes = bufsz[i];
		++mregions;
	}

	atomic_set(&this_adm.copp_stat[index], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
					mmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait[index],
			atomic_read(&this_adm.copp_stat[index]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_map\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(mmap_region_cmd);
	return ret;
}
Example #15
0
int afe_open(u16 port_id, union afe_port_config *afe_config, int rate)
{
	struct afe_port_start_command start;
	struct afe_audioif_config_command config;
	int ret = 0;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		ret = -EINVAL;
		return ret;
	}

	pr_info("%s: %d %d\n", __func__, port_id, rate);

	if ((port_id == RT_PROXY_DAI_001_RX) ||
		(port_id == RT_PROXY_DAI_002_TX))
		return -EINVAL;
	if ((port_id == RT_PROXY_DAI_002_RX) ||
		(port_id == RT_PROXY_DAI_001_TX))
		port_id = VIRTUAL_ID_TO_PORTID(port_id);

	ret = afe_q6_interface_prepare();
	if (ret != 0)
		return ret;

	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
	config.hdr.src_port = 0;
	config.hdr.dest_port = 0;
	config.hdr.token = 0;
	config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;

	if (afe_validate_port(port_id) < 0) {

		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	config.port_id = port_id;
	config.port = *afe_config;

	atomic_set(&this_afe.state, 1);
	atomic_set(&this_afe.status, 0);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	if (atomic_read(&this_afe.status) != 0) {
		pr_err("%s: config cmd failed\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PORT_CMD_START;
	start.port_id = port_id;
	start.gain = 0x2000;
	start.sample_rate = rate;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
			this_afe.task->comm, this_afe.task->pid);
	return 0;
fail_cmd:
	return ret;
}
Example #16
0
u32 send_adm_apr(void *buf, u32 opcode)
{
	s32	result;
	u32	count = 0;
	u32	bytes_returned = 0;
	u32	port_index = 0;
	u32	copp_id;
	u32	payload_size;
	struct apr_hdr	adm_params;
	pr_debug("%s\n", __func__);

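	/* Expected user buffer layout: total size, payload size, copp id,
	 * then the raw APR payload.
	 */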
	if (copy_from_user(&count, (void *)buf, sizeof(count))) {
		pr_err("%s: Copy to user failed! buf = 0x%x\n",
		       __func__, (unsigned int)buf);
		result = -EFAULT;
		goto done;
	}

	if (count <= 0) {
		pr_err("%s: Invalid buffer size = %d\n", __func__, count);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}


	if (payload_size > MAX_PAYLOAD_SIZE) {
		pr_err("%s: Invalid payload size = %d\n",
			__func__, payload_size);
		goto done;
	}

	if (copy_from_user(&copp_id, buf + 2 * sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy port id from user buffer\n",
			__func__);
		goto done;
	}

	for (port_index = 0; port_index < AFE_MAX_PORTS; port_index++) {
		if (adm_get_copp_id(port_index) == copp_id)
			break;
	}
	if (port_index >= AFE_MAX_PORTS) {
		pr_err("%s: Could not find port index for copp = %d\n",
		       __func__, copp_id);
		goto done;
	}

	mutex_lock(&rtac_adm_apr_mutex);
	if (rtac_adm_apr_data.apr_handle == NULL) {
		pr_err("%s: APR not initialized\n", __func__);
		goto err;
	}

	
	rtac_adm_user_buf_size = count;
	
	if (copy_from_user(rtac_adm_buffer + sizeof(adm_params),
			buf + 3 * sizeof(u32), payload_size)) {
		pr_err("%s: Could not copy payload from user buffer\n",
			__func__);
		goto err;
	}

	
	adm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	adm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		payload_size);
	adm_params.src_svc = APR_SVC_ADM;
	adm_params.src_domain = APR_DOMAIN_APPS;
	adm_params.src_port = copp_id;
	adm_params.dest_svc = APR_SVC_ADM;
	adm_params.dest_domain = APR_DOMAIN_ADSP;
	adm_params.dest_port = copp_id;
	adm_params.token = copp_id;
	adm_params.opcode = opcode;

	memcpy(rtac_adm_buffer, &adm_params, sizeof(adm_params));
	atomic_set(&rtac_adm_apr_data.cmd_state, 1);

	pr_debug("%s: Sending RTAC command size = %d\n",
		__func__, adm_params.pkt_size);

	result = apr_send_pkt(rtac_adm_apr_data.apr_handle,
				(uint32_t *)rtac_adm_buffer);
	if (result < 0) {
		pr_err("%s: Set params failed port = %d, copp = %d\n",
			__func__, port_index, copp_id);
		goto err;
	}
	
	result = wait_event_timeout(rtac_adm_apr_data.cmd_wait,
		(atomic_read(&rtac_adm_apr_data.cmd_state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	mutex_unlock(&rtac_adm_apr_mutex);
	if (!result) {
		pr_err("%s: Set params timed out port = %d, copp = %d\n",
			__func__, port_index, copp_id);
		goto done;
	}

	if (rtac_adm_payload_size != 0) {
		if (copy_to_user(buf, rtac_adm_buffer,
			rtac_adm_payload_size + sizeof(u32))) {
			pr_err("%s: Could not copy buffer to user, size = %d\n",
				 __func__, payload_size);
			goto done;
		}
	}

	/* Return data written for SET & data read for GET */
	if (opcode == ADM_CMD_GET_PARAMS)
		bytes_returned = rtac_adm_payload_size;
	else
		bytes_returned = payload_size;
done:
	return bytes_returned;
err:
	mutex_unlock(&rtac_adm_apr_mutex);
	return bytes_returned;
}
Example #17
0
int afe_loopback_gain(u16 port_id, u16 volume)
{
	struct afe_port_cmd_set_param set_param;
	int ret = 0;

	if (this_afe.apr == NULL) {
		this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
					0xFFFFFFFF, &this_afe);
		pr_debug("%s: Register AFE\n", __func__);
		if (this_afe.apr == NULL) {
			pr_err("%s: Unable to register AFE\n", __func__);
			ret = -ENODEV;
			return ret;
		}
	}

	if (afe_validate_port(port_id) < 0) {

		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* RX port numbers are even; TX port numbers are odd. */
	if (port_id % 2 == 0) {
		pr_err("%s: Failed : afe loopback gain only for TX ports."
			" port_id %d\n", __func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	pr_debug("%s: %d %hX\n", __func__, port_id, volume);

	set_param.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	set_param.hdr.pkt_size = sizeof(set_param);
	set_param.hdr.src_port = 0;
	set_param.hdr.dest_port = 0;
	set_param.hdr.token = 0;
	set_param.hdr.opcode = AFE_PORT_CMD_SET_PARAM;

	set_param.port_id		= port_id;
	set_param.payload_size		= sizeof(struct afe_param_payload);
	set_param.payload_address	= 0;

	set_param.payload.module_id	= AFE_MODULE_ID_PORT_INFO;
	set_param.payload.param_id	= AFE_PARAM_ID_LOOPBACK_GAIN;
	set_param.payload.param_size = sizeof(struct afe_param_loopback_gain);
	set_param.payload.reserved	= 0;

	set_param.payload.param.loopback_gain.gain		= volume;
	set_param.payload.param.loopback_gain.reserved	= 0;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &set_param);
	if (ret < 0) {
		pr_err("%s: AFE param set failed for port %d\n",
					__func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
		(atomic_read(&this_afe.state) == 0),
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	return 0;
fail_cmd:
	return ret;
}
Example #18
0
u32 send_rtac_asm_apr(void *buf, u32 opcode)
{
	s32	result;
	u32	count = 0;
	u32	bytes_returned = 0;
	u32	session_id = 0;
	u32	payload_size;
	struct apr_hdr	asm_params;
	pr_debug("%s\n", __func__);

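	/* Expected user buffer layout: total size, payload size, ASM session
	 * id, then the raw APR payload.
	 */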
	if (copy_from_user(&count, (void *)buf, sizeof(count))) {
		pr_err("%s: Copy to user failed! buf = 0x%x\n",
		       __func__, (unsigned int)buf);
		result = -EFAULT;
		goto done;
	}

	if (count <= 0) {
		pr_err("%s: Invalid buffer size = %d\n", __func__, count);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}

	if (payload_size > MAX_PAYLOAD_SIZE) {
		pr_err("%s: Invalid payload size = %d\n",
			__func__, payload_size);
		goto done;
	}

	if (copy_from_user(&session_id, buf + 2 * sizeof(u32), sizeof(u32))) {
		pr_err("%s: Could not copy session id from user buffer\n",
			__func__);
		goto done;
	}
	if (session_id > SESSION_MAX) {
		pr_err("%s: Invalid Session = %d\n", __func__, session_id);
		goto done;
	}

	mutex_lock(&rtac_asm_apr_mutex);
	if (session_id < SESSION_MAX+1) {
		if (rtac_asm_apr_data[session_id].apr_handle == NULL) {
			pr_err("%s: APR not initialized\n", __func__);
			goto err;
		}
	}

	
	rtac_asm_user_buf_size = count;

	
	if (copy_from_user(rtac_asm_buffer + sizeof(asm_params),
		buf + 3 * sizeof(u32), payload_size)) {
		pr_err("%s: Could not copy payload from user buffer\n",
			__func__);
		goto err;
	}

	
	asm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	asm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		payload_size);
	asm_params.src_svc = q6asm_get_apr_service_id(session_id);
	asm_params.src_domain = APR_DOMAIN_APPS;
	asm_params.src_port = (session_id << 8) | 0x0001;
	asm_params.dest_svc = APR_SVC_ASM;
	asm_params.dest_domain = APR_DOMAIN_ADSP;
	asm_params.dest_port = (session_id << 8) | 0x0001;
	asm_params.token = session_id;
	asm_params.opcode = opcode;

	memcpy(rtac_asm_buffer, &asm_params, sizeof(asm_params));
	if (session_id < SESSION_MAX+1)
		atomic_set(&rtac_asm_apr_data[session_id].cmd_state, 1);

	pr_debug("%s: Sending RTAC command size = %d, session_id=%d\n",
		__func__, asm_params.pkt_size, session_id);

	result = apr_send_pkt(rtac_asm_apr_data[session_id].apr_handle,
				(uint32_t *)rtac_asm_buffer);
	if (result < 0) {
		pr_err("%s: Set params failed session = %d\n",
			__func__, session_id);
		goto err;
	}

	
	result = wait_event_timeout(rtac_asm_apr_data[session_id].cmd_wait,
		(atomic_read(&rtac_asm_apr_data[session_id].cmd_state) == 0),
		5 * HZ);
	mutex_unlock(&rtac_asm_apr_mutex);
	if (!result) {
		pr_err("%s: Set params timed out session = %d\n",
			__func__, session_id);
		goto done;
	}

	if (rtac_asm_payload_size != 0) {
		if (copy_to_user(buf, rtac_asm_buffer,
			rtac_asm_payload_size + sizeof(u32))) {
			pr_err("%s: Could not copy buffer to user,size = %d\n",
				 __func__, payload_size);
			goto done;
		}
	}

	/* Return data written for SET & data read for GET */
	if (opcode == ASM_STREAM_CMD_GET_PP_PARAMS)
		bytes_returned = rtac_asm_payload_size;
	else
		bytes_returned = payload_size;
done:
	return bytes_returned;
err:
	mutex_unlock(&rtac_asm_apr_mutex);
	return bytes_returned;
}
Example #19
0
int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz,
						uint32_t bufcnt)
{
	struct  adm_cmd_memory_unmap_regions *unmap_regions = NULL;
	struct  adm_memory_unmap_regions *mregions = NULL;
	void    *unmap_region_cmd = NULL;
	void    *payload = NULL;
	int     ret = 0;
	int     i = 0;
	int     cmd_size = 0;

	pr_debug("%s\n", __func__);

	if (this_adm.apr == NULL) {
		pr_err("%s APR handle NULL\n", __func__);
		return -EINVAL;
	}

	cmd_size = sizeof(struct adm_cmd_memory_unmap_regions)
			+ sizeof(struct adm_memory_unmap_regions) * bufcnt;

	unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!unmap_region_cmd) {
		pr_err("%s: allocate unmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	unmap_regions = (struct adm_cmd_memory_unmap_regions *)
						unmap_region_cmd;
	unmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
							APR_PKT_VER);
	unmap_regions->hdr.pkt_size = cmd_size;
	unmap_regions->hdr.src_port = 0;
	unmap_regions->hdr.dest_port = 0;
	unmap_regions->hdr.token = 0;
	unmap_regions->hdr.opcode = ADM_CMD_MEMORY_UNMAP_REGIONS;
	unmap_regions->nregions = bufcnt & 0x00ff;
	unmap_regions->reserved = 0;
	pr_debug("%s: unmap_regions->nregions = %d\n", __func__,
				unmap_regions->nregions);
	payload = ((u8 *) unmap_region_cmd +
			sizeof(struct adm_cmd_memory_unmap_regions));
	mregions = (struct adm_memory_unmap_regions *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->phys = buf_add[i];
		++mregions;
	}
	atomic_set(&this_adm.copp_stat[0], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) unmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
				unmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_unmap\n", __func__);
		HTC_Q6_BUG();
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(unmap_region_cmd);
	return ret;
}
Example #20
u32 send_voice_apr(u32 mode, void *buf, u32 opcode)
{
	s32	result;
	u32	count = 0;
	u32	bytes_returned = 0;
	u32	payload_size;
	u32	dest_port;
	struct	apr_hdr	voice_params;
	pr_debug("%s\n", __func__);

	if (copy_from_user(&count, (void *)buf, sizeof(count))) {
		pr_err("%s: Copy to user failed! buf = 0x%x\n",
		       __func__, (unsigned int)buf);
		result = -EFAULT;
		goto done;
	}

	if (count <= 0) {
		pr_err("%s: Invalid buffer size = %d\n", __func__, count);
		goto done;
	}

	if (copy_from_user(&payload_size, buf + sizeof(payload_size),
						sizeof(payload_size))) {
		pr_err("%s: Could not copy payload size from user buffer\n",
			__func__);
		goto done;
	}

	if (payload_size > MAX_PAYLOAD_SIZE) {
		pr_err("%s: Invalid payload size = %d\n",
			__func__, payload_size);
		goto done;
	}

	if (copy_from_user(&dest_port, buf + 2 * sizeof(dest_port),
						sizeof(dest_port))) {
		pr_err("%s: Could not copy port id from user buffer\n",
			__func__);
		goto done;
	}

	if ((mode != RTAC_CVP) && (mode != RTAC_CVS)) {
		pr_err("%s: Invalid Mode for APR, mode = %d\n",
			__func__, mode);
		goto done;
	}

	mutex_lock(&rtac_voice_apr_mutex);
	if (rtac_voice_apr_data[mode].apr_handle == NULL) {
		pr_err("%s: APR not initialized\n", __func__);
		goto err;
	}

	
	rtac_voice_user_buf_size = count;

	
	/* Copy the payload from user space, leaving room for the APR header */
	if (copy_from_user(rtac_voice_buffer + sizeof(voice_params),
		buf + 3 * sizeof(u32), payload_size)) {
		pr_err("%s: Could not copy payload from user buffer\n",
			__func__);
		goto err;
	}

	
	/* Fill in the APR header for the voice command */
	voice_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(20), APR_PKT_VER);
	voice_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
		payload_size);
	voice_params.src_svc = 0;
	voice_params.src_domain = APR_DOMAIN_APPS;
	voice_params.src_port = voice_session_id[
					get_voice_index(mode, dest_port)];
	voice_params.dest_svc = 0;
	voice_params.dest_domain = APR_DOMAIN_MODEM;
	voice_params.dest_port = (u16)dest_port;
	voice_params.token = 0;
	voice_params.opcode = opcode;

	memcpy(rtac_voice_buffer, &voice_params, sizeof(voice_params));
	atomic_set(&rtac_voice_apr_data[mode].cmd_state, 1);

	pr_debug("%s: Sending RTAC command size = %d, opcode = %x\n",
		__func__, voice_params.pkt_size, opcode);

	result = apr_send_pkt(rtac_voice_apr_data[mode].apr_handle,
					(uint32_t *)rtac_voice_buffer);
	if (result < 0) {
		pr_err("%s: apr_send_pkt failed opcode = %x\n",
			__func__, opcode);
		goto err;
	}
	
	/* Wait for the command response (cmd_state returns to 0) */
	result = wait_event_timeout(rtac_voice_apr_data[mode].cmd_wait,
		(atomic_read(&rtac_voice_apr_data[mode].cmd_state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	mutex_unlock(&rtac_voice_apr_mutex);
	if (!result) {
		pr_err("%s: apr_send_pkt timed out opcode = %x\n",
			__func__, opcode);
		goto done;
	}

	if (rtac_voice_payload_size != 0) {
		if (copy_to_user(buf, rtac_voice_buffer,
				rtac_voice_payload_size + sizeof(u32))) {
			pr_err("%s: Could not copy buffer to user,size = %d\n",
						 __func__, payload_size);
			goto done;
		}
	}

	/* Return data written for SET & data read for GET */
	if (opcode == VOICE_CMD_GET_PARAM)
		bytes_returned = rtac_voice_payload_size;
	else
		bytes_returned = payload_size;
done:
	return bytes_returned;
err:
	mutex_unlock(&rtac_voice_apr_mutex);
	return bytes_returned;
}
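The voice variant reads the same three-word prefix, except that the third word is the CVS/CVP destination port rather than an ASM session id, and mode must be RTAC_CVP or RTAC_CVS. A matching user-space packing sketch, with the same caveats (and the same includes) as the ASM sketch above:

/* Hypothetical: word 3 carries the voice destination port that
 * send_voice_apr() copies into dest_port. */
static size_t pack_voice_rtac_buf(uint8_t *dst, uint32_t dest_port,
				  const void *payload, uint32_t payload_size)
{
	uint32_t count = 3 * sizeof(uint32_t) + payload_size;

	memcpy(dst, &count, sizeof(count));
	memcpy(dst + sizeof(uint32_t), &payload_size, sizeof(payload_size));
	memcpy(dst + 2 * sizeof(uint32_t), &dest_port, sizeof(dest_port));
	memcpy(dst + 3 * sizeof(uint32_t), payload, payload_size);
	return count;
}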
Example #21
int q6adm_enable_effect(int port_id, uint32_t module_id, uint32_t param_id,
		uint32_t payload_size, void *payload)
{
	void *q6_cmd = NULL;
	void *data = NULL;
	struct asm_pp_params_command *cmd = NULL;
	int ret = 0, sz = 0;

	pr_info("%s: param_id 0x%x, payload size %d\n",
			__func__, param_id, payload_size);
	sz = sizeof(struct asm_pp_params_command) + payload_size;
	q6_cmd = kzalloc(sz, GFP_KERNEL);
	if (q6_cmd == NULL) {
		pr_err("%s[%d]: Mem alloc failed\n",
			   __func__, port_id);
		return -ENOMEM;
	}

	cmd = (struct asm_pp_params_command *)q6_cmd;
	cmd->payload = NULL;
	cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) + payload_size;

	cmd->params.module_id = module_id;
	cmd->params.param_id = param_id;
	cmd->params.param_size = payload_size;
	cmd->params.reserved = 0;

	cmd->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	cmd->hdr.pkt_size = sz;
	cmd->hdr.src_svc = APR_SVC_ADM;
	cmd->hdr.src_domain = APR_DOMAIN_APPS;
	cmd->hdr.src_port = port_id;
	cmd->hdr.dest_svc = APR_SVC_ADM;
	cmd->hdr.dest_domain = APR_DOMAIN_ADSP;
	cmd->hdr.dest_port = atomic_read(&this_adm.copp_id[port_id]);
	cmd->hdr.token = port_id;
	cmd->hdr.opcode = ADM_CMD_SET_PARAMS;

	data = (u8 *)(q6_cmd + sizeof(struct asm_pp_params_command));
	memcpy(data, payload, payload_size);

	ret = apr_send_pkt(this_adm.apr, (uint32_t *)q6_cmd);
	if (ret < 0) {
		pr_err("%s: ADM enable for port %d failed\n",
			__func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_adm.wait,
		atomic_read(&this_adm.copp_stat[port_id]),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: ADM open failed for port %d\n",
			__func__, port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = 0;

fail_cmd:
	kfree(q6_cmd);
	pr_info("%s: return %d\n", __func__, ret);
	return ret;
}
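q6adm_enable_effect() wraps an arbitrary module/parameter blob in an ADM_CMD_SET_PARAMS packet for the COPP on port_id. A hedged in-kernel sketch of a caller; the module id, parameter id and payload struct below are made-up placeholders, not real ADSP identifiers, and the driver's prototype is assumed to be in scope:

#include <linux/types.h>

#define MY_EFFECT_MODULE_ID		0x00012345	/* placeholder only */
#define MY_EFFECT_ENABLE_PARAM_ID	0x00012346	/* placeholder only */

struct my_effect_enable {
	u32 enable;
};

static int my_enable_effect(int port_id)
{
	struct my_effect_enable cfg = { .enable = 1 };

	/* The payload is copied after the asm_pp_params_command header and
	 * sent to the COPP as ADM_CMD_SET_PARAMS. */
	return q6adm_enable_effect(port_id, MY_EFFECT_MODULE_ID,
				   MY_EFFECT_ENABLE_PARAM_ID,
				   sizeof(cfg), &cfg);
}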
Example #22
int afe_close(int port_id)
{
	struct afe_port_stop_command stop;
	int ret = 0;

	if (this_afe.apr == NULL) {
		pr_err("AFE is already closed\n");
		ret = -EINVAL;
		goto fail_cmd;
	}
	pr_debug("%s: port_id=%d\n", __func__, port_id);

	if ((port_id == RT_PROXY_DAI_001_RX) ||
		(port_id == RT_PROXY_DAI_002_TX)) {
		pr_debug("%s: before decrementing pcm_afe_instance %d\n",
				__func__, pcm_afe_instance[port_id & 0x1]);
		port_id = VIRTUAL_ID_TO_PORTID(port_id);
		pcm_afe_instance[port_id & 0x1]--;
		if (!(pcm_afe_instance[port_id & 0x1] == 0 &&
			proxy_afe_instance[port_id & 0x1] == 0))
			return 0;
		else
			afe_close_done[port_id & 0x1] = true;
	}

	if ((port_id == RT_PROXY_DAI_002_RX) ||
		(port_id == RT_PROXY_DAI_001_TX)) {
		pr_debug("%s: before decrementing proxy_afe_instance %d\n",
				__func__, proxy_afe_instance[port_id & 0x1]);
		port_id = VIRTUAL_ID_TO_PORTID(port_id);
		proxy_afe_instance[port_id & 0x1]--;
		if (!(pcm_afe_instance[port_id & 0x1] == 0 &&
			proxy_afe_instance[port_id & 0x1] == 0))
			return 0;
		else
			afe_close_done[port_id & 0x1] = true;
	}

	port_id = afe_convert_virtual_to_portid(port_id);

	stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	stop.hdr.pkt_size = sizeof(stop);
	stop.hdr.src_port = 0;
	stop.hdr.dest_port = 0;
	stop.hdr.token = 0;
	stop.hdr.opcode = AFE_PORT_CMD_STOP;
	stop.port_id = port_id;
	stop.reserved = 0;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop);

	if (ret == -ENETRESET) {
		pr_info("%s: Need to reset, calling APR deregister", __func__);
		return apr_deregister(this_afe.apr);
	}

	if (ret < 0) {
		pr_err("%s: AFE close failed\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
					msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	return ret;
}
Example #23
int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params)
{
	struct asm_pp_params_command *open = NULL;
	int ret = 0, sz = 0;
	int index;

	pr_debug("SRS - %s", __func__);

	index = afe_get_port_index(port_id);

	if (IS_ERR_VALUE(index)) {
		pr_err("%s: invald port id\n", __func__);
		return index;
	}

	switch (srs_tech_id) {
	case SRS_ID_GLOBAL: {
		struct srs_trumedia_params_GLOBAL *glb_params = NULL;
		sz = sizeof(struct asm_pp_params_command) +
			sizeof(struct srs_trumedia_params_GLOBAL);
		open = kzalloc(sz, GFP_KERNEL);
		if (!open)
			return -ENOMEM;
		open->payload_size = sizeof(struct srs_trumedia_params_GLOBAL) +
					sizeof(struct asm_pp_param_data_hdr);
		open->params.param_id = SRS_TRUMEDIA_PARAMS;
		open->params.param_size =
				sizeof(struct srs_trumedia_params_GLOBAL);
		glb_params = (struct srs_trumedia_params_GLOBAL *)((u8 *)open +
				sizeof(struct asm_pp_params_command));
		memcpy(glb_params, srs_params,
			sizeof(struct srs_trumedia_params_GLOBAL));
		pr_debug("SRS - %s: Global params - 1 = %x, 2 = %x, 3 = %x,"
				" 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x\n",
				__func__, (int)glb_params->v1,
				(int)glb_params->v2, (int)glb_params->v3,
				(int)glb_params->v4, (int)glb_params->v5,
				(int)glb_params->v6, (int)glb_params->v7,
				(int)glb_params->v8);
		break;
	}
	case SRS_ID_WOWHD: {
		struct srs_trumedia_params_WOWHD *whd_params = NULL;
		sz = sizeof(struct asm_pp_params_command) +
			sizeof(struct srs_trumedia_params_WOWHD);
		open = kzalloc(sz, GFP_KERNEL);
		if (!open)
			return -ENOMEM;
		open->payload_size = sizeof(struct srs_trumedia_params_WOWHD) +
					sizeof(struct asm_pp_param_data_hdr);
		open->params.param_id = SRS_TRUMEDIA_PARAMS_WOWHD;
		open->params.param_size =
				sizeof(struct srs_trumedia_params_WOWHD);
		whd_params = (struct srs_trumedia_params_WOWHD *)((u8 *)open +
				sizeof(struct asm_pp_params_command));
		memcpy(whd_params, srs_params,
				sizeof(struct srs_trumedia_params_WOWHD));
		pr_debug("SRS - %s: WOWHD params - 1 = %x, 2 = %x, 3 = %x,"
			" 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x, 9 = %x,"
			" 10 = %x, 11 = %x\n", __func__, (int)whd_params->v1,
			(int)whd_params->v2, (int)whd_params->v3,
			(int)whd_params->v4, (int)whd_params->v5,
			(int)whd_params->v6, (int)whd_params->v7,
			(int)whd_params->v8, (int)whd_params->v9,
			(int)whd_params->v10, (int)whd_params->v11);
		break;
	}
	case SRS_ID_CSHP: {
		struct srs_trumedia_params_CSHP *chp_params = NULL;
		sz = sizeof(struct asm_pp_params_command) +
			sizeof(struct srs_trumedia_params_CSHP);
		open = kzalloc(sz, GFP_KERNEL);
		if (!open)
			return -ENOMEM;
		open->payload_size = sizeof(struct srs_trumedia_params_CSHP) +
					sizeof(struct asm_pp_param_data_hdr);
		open->params.param_id = SRS_TRUMEDIA_PARAMS_CSHP;
		open->params.param_size =
					sizeof(struct srs_trumedia_params_CSHP);
		chp_params = (struct srs_trumedia_params_CSHP *)((u8 *)open +
				sizeof(struct asm_pp_params_command));
		memcpy(chp_params, srs_params,
				sizeof(struct srs_trumedia_params_CSHP));
		pr_debug("SRS - %s: CSHP params - 1 = %x, 2 = %x, 3 = %x,"
				" 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x,"
				" 9 = %x\n", __func__, (int)chp_params->v1,
				(int)chp_params->v2, (int)chp_params->v3,
				(int)chp_params->v4, (int)chp_params->v5,
				(int)chp_params->v6, (int)chp_params->v7,
				(int)chp_params->v8, (int)chp_params->v9);
		break;
	}
	case SRS_ID_HPF: {
		struct srs_trumedia_params_HPF *hpf_params = NULL;
		sz = sizeof(struct asm_pp_params_command) +
			sizeof(struct srs_trumedia_params_HPF);
		open = kzalloc(sz, GFP_KERNEL);
		if (!open)
			return -ENOMEM;
		open->payload_size = sizeof(struct srs_trumedia_params_HPF) +
					sizeof(struct asm_pp_param_data_hdr);
		open->params.param_id = SRS_TRUMEDIA_PARAMS_HPF;
		open->params.param_size =
					sizeof(struct srs_trumedia_params_HPF);
		hpf_params = (struct srs_trumedia_params_HPF *)((u8 *)open +
				sizeof(struct asm_pp_params_command));
		memcpy(hpf_params, srs_params,
			sizeof(struct srs_trumedia_params_HPF));
		pr_debug("SRS - %s: HPF params - 1 = %x\n", __func__,
				(int)hpf_params->v1);
		break;
	}
	case SRS_ID_PEQ: {
		struct srs_trumedia_params_PEQ *peq_params = NULL;
		sz = sizeof(struct asm_pp_params_command) +
			sizeof(struct srs_trumedia_params_PEQ);
		open = kzalloc(sz, GFP_KERNEL);
		if (!open)
			return -ENOMEM;
		open->payload_size = sizeof(struct srs_trumedia_params_PEQ) +
					sizeof(struct asm_pp_param_data_hdr);
		open->params.param_id = SRS_TRUMEDIA_PARAMS_PEQ;
		open->params.param_size =
					sizeof(struct srs_trumedia_params_PEQ);
		peq_params = (struct srs_trumedia_params_PEQ *)((u8 *)open +
				sizeof(struct asm_pp_params_command));
		memcpy(peq_params, srs_params,
				sizeof(struct srs_trumedia_params_PEQ));
		pr_debug("SRS - %s: PEQ params - 1 = %x 2 = %x, 3 = %x,"
			" 4 = %x\n", __func__, (int)peq_params->v1,
			(int)peq_params->v2, (int)peq_params->v3,
			(int)peq_params->v4);
		break;
	}
	case SRS_ID_HL: {
		struct srs_trumedia_params_HL *hl_params = NULL;
		sz = sizeof(struct asm_pp_params_command) +
			sizeof(struct srs_trumedia_params_HL);
		open = kzalloc(sz, GFP_KERNEL);
		if (!open)
			return -ENOMEM;
		open->payload_size = sizeof(struct srs_trumedia_params_HL) +
					sizeof(struct asm_pp_param_data_hdr);
		open->params.param_id = SRS_TRUMEDIA_PARAMS_HL;
		open->params.param_size = sizeof(struct srs_trumedia_params_HL);
		hl_params = (struct srs_trumedia_params_HL *)((u8 *)open +
				sizeof(struct asm_pp_params_command));
		memcpy(hl_params, srs_params,
				sizeof(struct srs_trumedia_params_HL));
		pr_debug("SRS - %s: HL params - 1 = %x, 2 = %x, 3 = %x, 4 = %x,"
				" 5 = %x, 6 = %x, 7 = %x\n", __func__,
				(int)hl_params->v1, (int)hl_params->v2,
				(int)hl_params->v3, (int)hl_params->v4,
				(int)hl_params->v5, (int)hl_params->v6,
				(int)hl_params->v7);
		break;
	}
	default:
		goto fail_cmd;
	}

	open->payload = NULL;
	open->params.module_id = SRS_TRUMEDIA_MODULE_ID;
	open->params.reserved = 0;
	open->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	open->hdr.pkt_size = sz;
	open->hdr.src_svc = APR_SVC_ADM;
	open->hdr.src_domain = APR_DOMAIN_APPS;
	open->hdr.src_port = port_id;
	open->hdr.dest_svc = APR_SVC_ADM;
	open->hdr.dest_domain = APR_DOMAIN_ADSP;
	open->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	open->hdr.token = port_id;
	open->hdr.opcode = ADM_CMD_SET_PARAMS;
	pr_debug("SRS - %s: Command was sent now check Q6 - port id = %d,"
			" size %d, module id %x, param id %x.\n", __func__,
			open->hdr.dest_port, open->payload_size,
			open->params.module_id, open->params.param_id);

	ret = apr_send_pkt(this_adm.apr, (uint32_t *)open);
	if (ret < 0) {
		pr_err("SRS - %s: ADM enable for port %d failed\n", __func__,
			port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	
	ret = wait_event_timeout(this_adm.wait, 1,
			msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("SRS - %s: ADM open failed for port %d\n", __func__,
			port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

fail_cmd:
	kfree(open);
	return ret;
}
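A sketch of driving srs_trumedia_open() for the GLOBAL technology block. It assumes struct srs_trumedia_params_GLOBAL exposes the v1..v8 members that the pr_debug() above prints; the values written here are arbitrary demo values, not tuned SRS settings:

#include <linux/string.h>

static int my_srs_set_global(int port_id)
{
	struct srs_trumedia_params_GLOBAL glb;

	memset(&glb, 0, sizeof(glb));
	glb.v1 = 1;	/* arbitrary demo value */
	glb.v2 = 0;	/* arbitrary demo value */

	/* srs_trumedia_open() copies sizeof(struct srs_trumedia_params_GLOBAL)
	 * bytes from this pointer for SRS_ID_GLOBAL. */
	return srs_trumedia_open(port_id, SRS_ID_GLOBAL, &glb);
}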
Example #24
static int afe_send_hw_delay(u16 port_id, u32 rate)
{
	struct hw_delay_entry delay_entry;
	struct afe_port_cmd_set_param config;
	int index = 0;
	int ret = -EINVAL;

	pr_debug("%s\n", __func__);

	delay_entry.sample_rate = rate;
	if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_TX)
		ret = get_hw_delay(TX_CAL, &delay_entry);
	else if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_RX)
		ret = get_hw_delay(RX_CAL, &delay_entry);

	if (ret != 0) {
		pr_debug("%s: Failed to get hw delay info\n", __func__);
		goto done;
	}
	index = port_id;
	if (index < 0) {
		pr_debug("%s: AFE port index invalid!\n", __func__);
		goto done;
	}

	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	config.hdr.pkt_size = sizeof(config);
	config.hdr.src_port = 0;
	config.hdr.dest_port = 0;
	config.hdr.token = index;
	config.hdr.opcode = AFE_PORT_CMD_SET_PARAM;

	config.port_id = port_id;
	config.payload_size = sizeof(struct afe_param_payload_base)+
				sizeof(struct afe_param_id_device_hw_delay_cfg);
	config.payload_address = 0;

	config.payload.base.module_id = AFE_MODULE_ID_PORT_INFO;
	config.payload.base.param_id = AFE_PARAM_ID_DEVICE_HW_DELAY;
	config.payload.base.param_size = sizeof(struct afe_param_id_device_hw_delay_cfg);
	config.payload.base.reserved = 0;

	config.payload.param.hw_delay.delay_in_us = delay_entry.delay_usec;
	config.payload.param.hw_delay.device_hw_delay_minor_version =
				AFE_API_VERSION_DEVICE_HW_DELAY;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
			port_id);
		ret = -EINVAL;
		goto done;
	}

	ret = wait_event_timeout(this_afe.wait,
				(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(TIMEOUT_MS));

	if (!ret) {
		pr_err("%s: wait_event timeout IF CONFIG\n", __func__);
		ret = -EINVAL;
		goto done;
	}
	if (atomic_read(&this_afe.status) != 0) {
		pr_err("%s: config cmd failed\n", __func__);
		ret = -EINVAL;
		goto done;
	}

done:
	pr_debug("%s port_id %u rate %u delay_usec %d status %d\n",
		__func__, port_id, rate, delay_entry.delay_usec, ret);
	return ret;
}
Example #25
int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
{
	struct adm_copp_open_command	open;
	int ret = 0;
	int index;

	pr_info("%s: port %d path:%d rate:%d mode:%d\n", __func__,
				port_id, path, rate, channel_mode);

	port_id = afe_convert_virtual_to_portid(port_id);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}


	
	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {

		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		open.hdr.pkt_size = sizeof(open);
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = port_id;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = port_id;
		open.hdr.token = port_id;
		open.hdr.opcode = ADM_CMD_COPP_OPEN;

		open.mode = path;
		open.endpoint_id1 = port_id;

		open.endpoint_id2 = 0xFFFF;
		if (this_adm.ec_ref_rx && (path != 1)) {
			open.endpoint_id2 = this_adm.ec_ref_rx;
			this_adm.ec_ref_rx = 0;
		}

		pr_debug("%s open.endpoint_id1:%d open.endpoint_id2:%d",
			__func__, open.endpoint_id1, open.endpoint_id2);
		
		/* Pick the Rx/Tx topology for this path from the ACDB */
		if (path == ADM_PATH_PLAYBACK)
			open.topology_id = get_adm_rx_topology();
		else {
			open.topology_id = get_adm_tx_topology();
			if ((open.topology_id ==
				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;
		}

		if ((open.topology_id == 0) ||
		    (port_id == VOICE_RECORD_RX) ||
		    (port_id == VOICE_RECORD_TX))
			open.topology_id = topology;

		open.channel_config = channel_mode & 0x00FF;
		open.rate  = rate;

		pr_debug("%s: channel_config=%d port_id=%d rate=%d"
			"topology_id=0x%X\n", __func__, open.channel_config,\
			open.endpoint_id1, open.rate,\
			open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d failed\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d\n", __func__,
								port_id);
			ret = -EINVAL;
			HTC_Q6_BUG();
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:

	return ret;
}
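A sketch of a typical caller of this adm_open() variant: open a playback COPP on the primary I2S RX port at 48 kHz stereo. DEMO_TOPOLOGY_ID is a placeholder; a real caller passes the COPP topology chosen by its platform data or ACDB, and per the code above the passed-in value is only used when the ACDB lookup returns 0 or for the voice-record ports:

#define DEMO_TOPOLOGY_ID	0x00010000	/* placeholder, not a real topology id */

static int my_open_playback_copp(void)
{
	/* 48 kHz, 2-channel playback COPP on the primary I2S RX port. */
	return adm_open(PRIMARY_I2S_RX, ADM_PATH_PLAYBACK, 48000, 2,
			DEMO_TOPOLOGY_ID);
}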
Example #26
/* This function should be used by 8660 exclusively */
int afe_open(u16 port_id, union afe_port_config *afe_config, int rate)
{
	struct afe_port_start_command start;
	struct afe_audioif_config_command config;
	int ret = 0;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		ret = -EINVAL;
		return ret;
	}

	pr_debug("%s: %d %d\n", __func__, port_id, rate);

	if ((port_id == RT_PROXY_DAI_001_RX) ||
		(port_id == RT_PROXY_DAI_002_TX))
		return 0;
	if ((port_id == RT_PROXY_DAI_002_RX) ||
		(port_id == RT_PROXY_DAI_001_TX))
		port_id = VIRTUAL_ID_TO_PORTID(port_id);

	ret = afe_q6_interface_prepare();
	if (ret != 0)
		return ret;

	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
	config.hdr.src_port = 0;
	config.hdr.dest_port = 0;
	config.hdr.token = 0;
	switch (port_id) {
	case SLIMBUS_0_RX:
	case SLIMBUS_0_TX:
	case SLIMBUS_1_RX:
	case SLIMBUS_1_TX:
	case SLIMBUS_2_RX:
	case SLIMBUS_2_TX:
	case SLIMBUS_3_RX:
	case SLIMBUS_3_TX:
	case SLIMBUS_4_RX:
	case SLIMBUS_4_TX:
		config.hdr.opcode = AFE_PORT_AUDIO_SLIM_SCH_CONFIG;
	break;
	case MI2S_TX:
	case MI2S_RX:
	case SECONDARY_I2S_RX:
	case SECONDARY_I2S_TX:
	case PRIMARY_I2S_RX:
	case PRIMARY_I2S_TX:
		/* The AFE_PORT_CMD_I2S_CONFIG command is not supported
		 * in LPASS EL 1.0, so we have to distinguish which AFE
		 * command to use: AFE_PORT_CMD_I2S_CONFIG or
		 * AFE_PORT_AUDIO_IF_CONFIG. If the format is L-PCM,
		 * AFE_PORT_AUDIO_IF_CONFIG is used for backward
		 * compatibility.
		 */
		pr_debug("%s: afe_config->mi2s.format = %d\n", __func__,
				 afe_config->mi2s.format);
		if (afe_config->mi2s.format == MSM_AFE_I2S_FORMAT_LPCM)
			config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
		else
			config.hdr.opcode = AFE_PORT_CMD_I2S_CONFIG;
	break;
	default:
		config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
	break;
	}

	if (afe_validate_port(port_id) < 0) {

		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	config.port_id = port_id;
	config.port = *afe_config;

	atomic_set(&this_afe.state, 1);
	atomic_set(&this_afe.status, 0);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	if (atomic_read(&this_afe.status) != 0) {
		pr_err("%s: config cmd failed\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PORT_CMD_START;
	start.port_id = port_id;
	start.gain = 0x2000;
	start.sample_rate = rate;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_afe.wait,
			(atomic_read(&this_afe.state) == 0),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
			this_afe.task->comm, this_afe.task->pid);
	return 0;
fail_cmd:
	return ret;
}
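A sketch pairing this afe_open() with the afe_close() from example #22 for the primary I2S RX port. Only the mi2s.format member is referenced by the code above; zeroing the rest of union afe_port_config is a simplification, and a real caller fills it from its DAI configuration:

#include <linux/string.h>

static int my_afe_bringup(void)
{
	union afe_port_config cfg;
	int ret;

	memset(&cfg, 0, sizeof(cfg));
	cfg.mi2s.format = MSM_AFE_I2S_FORMAT_LPCM;	/* take the IF_CONFIG path */

	ret = afe_open(PRIMARY_I2S_RX, &cfg, 48000);
	if (ret < 0)
		return ret;

	/* ... audio runs on the port ... */

	return afe_close(PRIMARY_I2S_RX);
}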
Example #27
int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
{
	struct adm_copp_open_command	open;
	int ret = 0;
	int index;

	pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
				port_id, path, rate, channel_mode);
	pr_err("Topology = %x\n", topology);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = afe_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
#ifdef CONFIG_MSM8X60_RTAC
		rtac_set_adm_handle(this_adm.apr);
#endif
	}


	/* Create a COPP if port id are not enabled */
	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {

		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		open.hdr.pkt_size = sizeof(open);
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = port_id;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = port_id;
		open.hdr.token = port_id;
		open.hdr.opcode = ADM_CMD_COPP_OPEN;

		open.mode = path;
		open.endpoint_id1 = port_id;
		open.endpoint_id2 = 0xFFFF;
		open.topology_id  = topology;

		open.channel_config = channel_mode & 0x00FF;
		open.rate  = rate;

		pr_debug("%s: channel_config=%d port_id=%d rate=%d\
			topology_id=0x%X\n", __func__, open.channel_config,\
			open.endpoint_id1, open.rate,\
			open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d failed\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d\n", __func__,
								port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:

	return ret;
}
Example #28
int afe_port_start_nowait(u16 port_id, union afe_port_config *afe_config,
	u32 rate) /* This function is non-blocking */
{
	struct afe_port_start_command start;
	struct afe_audioif_config_command config;
	int ret;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		ret = -EINVAL;
		return ret;
	}
	pr_debug("%s: %d %d\n", __func__, port_id, rate);

	if ((port_id == RT_PROXY_DAI_001_RX) ||
		(port_id == RT_PROXY_DAI_002_TX))
		return -EINVAL;
	if ((port_id == RT_PROXY_DAI_002_RX) ||
		(port_id == RT_PROXY_DAI_001_TX))
		port_id = VIRTUAL_ID_TO_PORTID(port_id);

	if (this_afe.apr == NULL) {
		pr_err("%s: AFE APR is not registered\n", __func__);
		ret = -ENODEV;
		return ret;
	}

	if (port_id == HDMI_RX) {
		config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
		config.hdr.src_port = 0;
		config.hdr.dest_port = 0;
		config.hdr.token = 0;
		config.hdr.opcode = AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG;
	} else {

		config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
		config.hdr.src_port = 0;
		config.hdr.dest_port = 0;
		config.hdr.token = 0;
		config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
	}

	if (afe_validate_port(port_id) < 0) {

		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	config.port_id = port_id;
	config.port = *afe_config;

	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* send AFE cal */
	afe_send_cal(port_id);

	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PORT_CMD_START;
	start.port_id = port_id;
	start.gain = 0x2000;
	start.sample_rate = rate;

	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);

	if (IS_ERR_VALUE(ret)) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
				port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
	this_afe.task->comm, this_afe.task->pid);
	return 0;

fail_cmd:
	return ret;
}
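Unlike afe_open() above, afe_port_start_nowait() only queues the CONFIG and START packets and never sleeps on this_afe.wait for the DSP acknowledgement, so it suits callers that cannot block on the AFE response. A thin sketch of such a caller; the wrapper name is mine:

static int my_afe_start_fast_path(u16 port_id, union afe_port_config *cfg,
				  u32 rate)
{
	int ret;

	/* No wait_event_timeout() in this path; errors reported by the DSP
	 * later are not seen here. */
	ret = afe_port_start_nowait(port_id, cfg, rate);
	if (ret < 0)
		pr_err("%s: non-blocking AFE start failed for port %d\n",
		       __func__, port_id);
	return ret;
}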
Example #29
int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id,
				uint32_t *bufsz, uint32_t bufcnt)
{
	struct  adm_cmd_memory_map_regions *mmap_regions = NULL;
	struct  adm_memory_map_regions *mregions = NULL;
	void    *mmap_region_cmd = NULL;
	void    *payload = NULL;
	int     ret = 0;
	int     i = 0;
	int     cmd_size = 0;

	pr_info("%s\n", __func__);
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
#ifdef CONFIG_MSM8X60_RTAC
		rtac_set_adm_handle(this_adm.apr);
#endif
	}

	cmd_size = sizeof(struct adm_cmd_memory_map_regions)
			+ sizeof(struct adm_memory_map_regions) * bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!mmap_region_cmd) {
		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	mmap_regions = (struct adm_cmd_memory_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
								APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = 0;
	mmap_regions->hdr.token = 0;
	mmap_regions->hdr.opcode = ADM_CMD_MEMORY_MAP_REGIONS;
	mmap_regions->mempool_id = mempool_id & 0x00ff;
	mmap_regions->nregions = bufcnt & 0x00ff;
	pr_debug("%s: map_regions->nregions = %d\n", __func__,
				mmap_regions->nregions);
	payload = ((u8 *) mmap_region_cmd +
				sizeof(struct adm_cmd_memory_map_regions));
	mregions = (struct adm_memory_map_regions *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->phys = buf_add[i];
		mregions->buf_size = bufsz[i];
		++mregions;
	}

	atomic_set(&this_adm.copp_stat[0], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
					mmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_map\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(mmap_region_cmd);
	return ret;
}
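adm_memory_map_regions() and adm_memory_unmap_regions() (example #19) are used as a pair around DSP access to shared buffers. A sketch of mapping a single physical region and unmapping it again; the address, size and mempool id are placeholders, and per the structures above each must fit in 32 bits:

static int my_map_unmap_one_buffer(uint32_t phys_addr, uint32_t size)
{
	uint32_t addrs[1] = { phys_addr };
	uint32_t sizes[1] = { size };
	int ret;

	ret = adm_memory_map_regions(addrs, 0 /* mempool id, placeholder */,
				     sizes, 1);
	if (ret < 0)
		return ret;

	/* ... the DSP reads/writes the mapped buffer here ... */

	return adm_memory_unmap_regions(addrs, sizes, 1);
}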
Example #30
/* This function instantiates a mixer in QDSP6 audio path for
 * given audio hardware port. Topology should be made part
 * of audio calibration
 */
int adm_open_mixer(int port_id, int path, int rate,
	int channel_mode, int topology) {
	struct adm_copp_open_command open;
	int ret = 0;
	u32 i;
	int index;

	pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
				port_id, path, rate, channel_mode);

	if (afe_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = afe_get_port_index(port_id);
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
	}

	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {

		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		open.hdr.pkt_size = sizeof(open);
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = port_id;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = port_id;
		open.hdr.token = port_id;
		open.hdr.opcode = ADM_CMD_COPP_OPEN;

		open.mode = path;
		open.endpoint_id1 = port_id;
		open.endpoint_id2 = 0xFFFF;

		/* convert path to acdb path */
		if (path == PLAYBACK)
			open.topology_id = get_adm_rx_topology();
		else {
			open.topology_id = get_adm_tx_topology();
			if ((open.topology_id ==
				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;
		}

		if (open.topology_id  == 0)
			open.topology_id = topology;

		open.channel_config = channel_mode & 0x00FF;
		open.rate  = rate;

		pr_debug("%s: channel_config=%d port_id=%d rate=%d\
			topology_id=0x%X\n", __func__, open.channel_config,\
			open.endpoint_id1, open.rate,\
			open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d failed\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait,
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d\n", __func__,
								port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);

	/* Set up routing for cached session */
	for (i = find_first_bit(&this_adm.sessions[index], ASM_MAX_SESSION);
	     i < ASM_MAX_SESSION; i = find_next_bit(&this_adm.sessions[index],
	     ASM_MAX_SESSION, i + 1))
		adm_cmd_map(port_id, i); /* Not thread safe */

fail_cmd:
	return ret;
}