int adm_open(int port_id, int path, int rate, int channel_mode, int topology,
			bool perf_mode, uint16_t bits_per_sample)
{
	struct adm_cmd_device_open_v5	open;
	int ret = 0;
	int index;
	int tmp_port = q6audio_get_port_id(port_id);

	pr_info("%s: port %#x path:%d rate:%d mode:%d perf_mode:%d\n",
		 __func__, port_id, path, rate, channel_mode, perf_mode);

	port_id = q6audio_convert_virtual_to_portid(port_id);

	if (q6audio_validate_port(port_id) < 0) {
		pr_err("%s port idi[%#x] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = q6audio_get_port_index(port_id);
	pr_debug("%s: Port ID %#x, index %d\n", __func__, port_id, index);

	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}
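
	/*
	 * Legacy (non-perf) sessions push the custom topology to the ADSP;
	 * low-latency sessions only record the perf mode for this port.
	 */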

	if (!perf_mode) {
		atomic_set(&this_adm.copp_perf_mode[index], 0);
		send_adm_custom_topology(port_id);
	} else {
		atomic_set(&this_adm.copp_perf_mode[index], 1);
	}

	/* Create a COPP if this port is not already enabled */
	if ((!perf_mode && (atomic_read(&this_adm.copp_cnt[index]) == 0)) ||
		(perf_mode &&
		(atomic_read(&this_adm.copp_low_latency_cnt[index]) == 0))) {
		pr_debug("%s:opening ADM: perf_mode: %d\n", __func__,
			perf_mode);
		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		open.hdr.pkt_size = sizeof(open);
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = tmp_port;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = tmp_port;
		open.hdr.token = port_id;
		open.hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;
		open.flags = 0x00;
		if (perf_mode) {
			open.flags |= ADM_LOW_LATENCY_DEVICE_SESSION <<
				ADM_BIT_SHIFT_DEVICE_PERF_MODE_FLAG;
		} else {
			open.flags |= ADM_LEGACY_DEVICE_SESSION <<
				ADM_BIT_SHIFT_DEVICE_PERF_MODE_FLAG;
		}

		open.mode_of_operation = path;
		open.endpoint_id_1 = tmp_port;

		if (this_adm.ec_ref_rx == -1) {
			open.endpoint_id_2 = 0xFFFF;
		} else if (this_adm.ec_ref_rx && (path != 1)) {
			open.endpoint_id_2 = this_adm.ec_ref_rx;
			this_adm.ec_ref_rx = -1;
		} else {
			/* No usable echo reference: mark endpoint 2 invalid
			 * instead of leaving it uninitialised.
			 */
			open.endpoint_id_2 = 0xFFFF;
		}

		open.topology_id = topology;
		if ((open.topology_id == VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			(open.topology_id == VPM_TX_DM_FLUENCE_COPP_TOPOLOGY)||
			  (open.topology_id == VPM_TX_SM_LVVE_COPP_TOPOLOGY) ||
			  /* LVVE for Barge-in */
			  (open.topology_id == 0x1000BFF0) ||
			  (open.topology_id == 0x1000BFF1))
				rate = 16000;

		if (perf_mode)
			open.topology_id = NULL_COPP_TOPOLOGY;

		open.dev_num_channel = channel_mode & 0x00FF;
		open.bit_width = bits_per_sample;
		open.sample_rate  = rate;
		memset(open.dev_channel_mapping, 0, 8);
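		/*
		 * Default channel maps keyed off the channel count; for more
		 * than two channels a caller-provided multi_ch_map, when set,
		 * overrides this mapping further below.
		 */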

		if (channel_mode == 1)	{
			open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
		} else if (channel_mode == 2) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
		} else if (channel_mode == 3)	{
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_FC;
		} else if (channel_mode == 4) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_RB;
			open.dev_channel_mapping[3] = PCM_CHANNEL_LB;
		} else if (channel_mode == 5) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[3] = PCM_CHANNEL_LB;
			open.dev_channel_mapping[4] = PCM_CHANNEL_RB;
		} else if (channel_mode == 6) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[2] = PCM_CHANNEL_LB;
			open.dev_channel_mapping[3] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[4] = PCM_CHANNEL_RB;
			open.dev_channel_mapping[5] = PCM_CHANNEL_LFE;
		} else if (channel_mode == 8) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
			open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
			open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
			open.dev_channel_mapping[6] = PCM_CHANNEL_RLC;
			open.dev_channel_mapping[7] = PCM_CHANNEL_RRC;
		} else {
			pr_err("%s invalid num_chan %d\n", __func__,
					channel_mode);
			return -EINVAL;
		}
		if ((open.dev_num_channel > 2) &&
			multi_ch_map.set_channel_map)
			memcpy(open.dev_channel_mapping,
				multi_ch_map.channel_mapping,
				PCM_FORMAT_MAX_NUM_CHANNEL);

		pr_debug("%s: port_id=%#x rate=%d topology_id=0x%X\n",
			__func__, open.endpoint_id_1, open.sample_rate,
			open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %#x for[%d] failed\n",
						__func__, tmp_port, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait[index],
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %#x for [%d]\n",
						__func__, tmp_port, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
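	/* COPP is up (newly opened or shared): take a per-port reference */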
	if (perf_mode) {
		atomic_inc(&this_adm.copp_low_latency_cnt[index]);
		pr_debug("%s: index: %d coppid: %d", __func__, index,
			atomic_read(&this_adm.copp_low_latency_id[index]));
	} else {
		atomic_inc(&this_adm.copp_cnt[index]);
		pr_debug("%s: index: %d coppid: %d", __func__, index,
			atomic_read(&this_adm.copp_id[index]));
	}
	return 0;

fail_cmd:
	return ret;
}
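
/*
 * adm_close() - drop one reference on the COPP behind @port_id.
 *
 * When the last legacy or low-latency reference for the port goes away,
 * ADM_CMD_DEVICE_CLOSE_V5 is sent to the ADSP, the cached COPP id is
 * reset and the call waits for the close acknowledgement.
 */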
int adm_close(int port_id, bool perf_mode)
{
	struct apr_hdr close;

	int ret = 0;
	int index = 0;

	port_id = q6audio_convert_virtual_to_portid(port_id);

	index = q6audio_get_port_index(port_id);
	if (q6audio_validate_port(port_id) < 0)
		return -EINVAL;

	pr_debug("%s port_id=%#x index %d perf_mode: %d\n", __func__, port_id,
		index, perf_mode);

	if (perf_mode) {
		if (!(atomic_read(&this_adm.copp_low_latency_cnt[index]))) {
			pr_err("%s: copp count for port[%#x]is 0\n", __func__,
				port_id);
			goto fail_cmd;
		}
		atomic_dec(&this_adm.copp_low_latency_cnt[index]);
	} else {
		if (!(atomic_read(&this_adm.copp_cnt[index]))) {
			pr_err("%s: copp count for port[%#x]is 0\n", __func__,
				port_id);
			goto fail_cmd;
		}
		atomic_dec(&this_adm.copp_cnt[index]);
	}
	if ((!perf_mode && !(atomic_read(&this_adm.copp_cnt[index]))) ||
		(perf_mode &&
		!(atomic_read(&this_adm.copp_low_latency_cnt[index])))) {

		pr_debug("%s:Closing ADM: perf_mode: %d\n", __func__,
				perf_mode);
		close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		close.pkt_size = sizeof(close);
		close.src_svc = APR_SVC_ADM;
		close.src_domain = APR_DOMAIN_APPS;
		close.src_port = port_id;
		close.dest_svc = APR_SVC_ADM;
		close.dest_domain = APR_DOMAIN_ADSP;
		if (perf_mode)
			close.dest_port =
			     atomic_read(&this_adm.copp_low_latency_id[index]);
		else
			close.dest_port = atomic_read(&this_adm.copp_id[index]);
		close.token = port_id;
		close.opcode = ADM_CMD_DEVICE_CLOSE_V5;

		atomic_set(&this_adm.copp_stat[index], 0);

		if (perf_mode) {
			pr_debug("%s:coppid %d portid=%#x index=%d coppcnt=%d\n",
			    __func__,
			    atomic_read(&this_adm.copp_low_latency_id[index]),
			    port_id, index,
			    atomic_read(&this_adm.copp_low_latency_cnt[index]));
			atomic_set(&this_adm.copp_low_latency_id[index],
				RESET_COPP_ID);
		} else {
			pr_debug("%s:coppid %d portid=%#x index=%d coppcnt=%d\n",
				__func__,
				atomic_read(&this_adm.copp_id[index]),
				port_id, index,
				atomic_read(&this_adm.copp_cnt[index]));
			atomic_set(&this_adm.copp_id[index],
				RESET_COPP_ID);
		}

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
		if (ret < 0) {
			pr_err("%s ADM close failed\n", __func__);
			ret = -EINVAL;
			goto fail_cmd;
		}

		ret = wait_event_timeout(this_adm.wait[index],
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s: ADM cmd Route failed for port %#x\n",
						__func__, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}

	if (!perf_mode) {
		pr_debug("%s: remove adm device from rtac\n", __func__);
		rtac_remove_adm_device(port_id);
	}

fail_cmd:
	return ret;
}
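
/*
 * adm_matrix_map() - route an ASM session to one or more COPPs.
 *
 * Builds a variable-length ADM_CMD_MATRIX_MAP_ROUTINGS_V5 packet: routing
 * header, one adm_session_map_node_v5 for @session_id, then the list of
 * COPP ids resolved from @port_id[]. Calibration and RTAC registration
 * are only performed for legacy (non-perf) sessions.
 */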
int adm_matrix_map(int session_id, int path, int num_copps,
			unsigned int *port_id, int copp_id, bool perf_mode)
{
	struct adm_cmd_matrix_map_routings_v5	*route;
	struct adm_session_map_node_v5 *node;
	uint16_t *copps_list;
	int cmd_size = 0;
	int ret = 0, i = 0;
	void *payload = NULL;
	void *matrix_map = NULL;

	/* Assumes port_ids have already been validated during adm_open */
	int index = q6audio_get_port_index(copp_id);
	if (index < 0 || index >= AFE_MAX_PORTS) {
		pr_err("%s: invalid port idx %d token %d\n",
					__func__, index, copp_id);
		return 0;
	}
	cmd_size = (sizeof(struct adm_cmd_matrix_map_routings_v5) +
			sizeof(struct adm_session_map_node_v5) +
			(sizeof(uint32_t) * num_copps));
	matrix_map = kzalloc(cmd_size, GFP_KERNEL);
	if (matrix_map == NULL) {
		pr_err("%s: Mem alloc failed\n", __func__);
		return -ENOMEM;
	}
	route = (struct adm_cmd_matrix_map_routings_v5 *)matrix_map;

	pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%#x coppid[%d]\n",
		 __func__, session_id, path, num_copps, port_id[0], copp_id);

	route->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	route->hdr.pkt_size = cmd_size;
	route->hdr.src_svc = 0;
	route->hdr.src_domain = APR_DOMAIN_APPS;
	route->hdr.src_port = copp_id;
	route->hdr.dest_svc = APR_SVC_ADM;
	route->hdr.dest_domain = APR_DOMAIN_ADSP;
	if (perf_mode) {
		route->hdr.dest_port =
			atomic_read(&this_adm.copp_low_latency_id[index]);
	} else {
		route->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	}
	route->hdr.token = copp_id;
	route->hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS_V5;
	route->num_sessions = 1;

	switch (path) {
	case 0x1:
		route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
		break;
	case 0x2:
	case 0x3:
		route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
		break;
	default:
		pr_err("%s: Wrong path set[%d]\n", __func__, path);
		break;
	}
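	/*
	 * The session map node and the COPP id list follow the routing
	 * header inside the same kzalloc'd packet.
	 */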
	payload = ((u8 *)matrix_map +
			sizeof(struct adm_cmd_matrix_map_routings_v5));
	node = (struct adm_session_map_node_v5 *)payload;

	node->session_id = session_id;
	node->num_copps = num_copps;
	payload = (u8 *)node + sizeof(struct adm_session_map_node_v5);
	copps_list = (uint16_t *)payload;
	for (i = 0; i < num_copps; i++) {
		int tmp;
		port_id[i] = q6audio_convert_virtual_to_portid(port_id[i]);

		tmp = q6audio_get_port_index(port_id[i]);
		if (tmp < 0 || tmp >= AFE_MAX_PORTS)
			continue;

		if (perf_mode)
			copps_list[i] =
			      atomic_read(&this_adm.copp_low_latency_id[tmp]);
		else
			copps_list[i] =
				atomic_read(&this_adm.copp_id[tmp]);
		pr_debug("%s: port_id[%d]: %#x, index: %d act coppid[0x%x]\n",
			__func__, i, port_id[i], tmp,
			atomic_read(&this_adm.copp_id[tmp]));
	}
	atomic_set(&this_adm.copp_stat[index], 0);

	ret = apr_send_pkt(this_adm.apr, (uint32_t *)matrix_map);
	if (ret < 0) {
		pr_err("%s: ADM routing for port %#x failed\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_adm.wait[index],
				atomic_read(&this_adm.copp_stat[index]),
				msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: ADM cmd Route failed for port %#x\n",
					__func__, port_id[0]);
		ret = -EINVAL;
		goto fail_cmd;
	}
	if (!perf_mode) {
		for (i = 0; i < num_copps; i++)
			send_adm_cal(port_id[i], path);

		for (i = 0; i < num_copps; i++) {
			int tmp;
			tmp = afe_get_port_index(port_id[i]);
			if (tmp >= 0 && tmp < AFE_MAX_PORTS) {
				rtac_add_adm_device(port_id[i],
					atomic_read(&this_adm.copp_id[tmp]),
					path, session_id);
				pr_debug("%s, copp_id: %d\n", __func__,
					atomic_read(&this_adm.copp_id[tmp]));
			} else {
				pr_debug("%s: Invalid port index %d",
					__func__, tmp);
			}
		}
	}
fail_cmd:
	kfree(matrix_map);
	return ret;
}
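
/*
 * adm_memory_map_regions() - map shared memory buffers into the ADSP.
 *
 * Sends ADM_CMD_SHARED_MEM_MAP_REGIONS with one region descriptor per
 * entry in @buf_add/@bufsz and waits for the map acknowledgement. Note
 * that the SHMEM8_4K pool is always used, regardless of @mempool_id.
 */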
int adm_memory_map_regions(int port_id,
		uint32_t *buf_add, uint32_t mempool_id,
		uint32_t *bufsz, uint32_t bufcnt)
{
	struct  avs_cmd_shared_mem_map_regions *mmap_regions = NULL;
	struct  avs_shared_map_region_payload *mregions = NULL;
	void    *mmap_region_cmd = NULL;
	void    *payload = NULL;
	int     ret = 0;
	int     i = 0;
	int     cmd_size = 0;
	int     index = 0;

	pr_debug("%s\n", __func__);
	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}

	port_id = q6audio_convert_virtual_to_portid(port_id);

	if (q6audio_validate_port(port_id) < 0) {
		pr_err("%s port id[%#x] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = q6audio_get_port_index(port_id);

	cmd_size = sizeof(struct avs_cmd_shared_mem_map_regions)
			+ sizeof(struct avs_shared_map_region_payload)
			* bufcnt;

	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
	if (!mmap_region_cmd) {
		pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
		return -ENOMEM;
	}
	mmap_regions = (struct avs_cmd_shared_mem_map_regions *)mmap_region_cmd;
	mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
						APR_HDR_LEN(APR_HDR_SIZE),
								APR_PKT_VER);
	mmap_regions->hdr.pkt_size = cmd_size;
	mmap_regions->hdr.src_port = 0;
	mmap_regions->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
	mmap_regions->hdr.token = port_id;
	mmap_regions->hdr.opcode = ADM_CMD_SHARED_MEM_MAP_REGIONS;
	mmap_regions->mem_pool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL & 0x00ff;
	mmap_regions->num_regions = bufcnt & 0x00ff;
	mmap_regions->property_flag = 0x00;

	pr_debug("%s: map_regions->num_regions = %d\n", __func__,
				mmap_regions->num_regions);
	payload = ((u8 *) mmap_region_cmd +
				sizeof(struct avs_cmd_shared_mem_map_regions));
	mregions = (struct avs_shared_map_region_payload *)payload;

	for (i = 0; i < bufcnt; i++) {
		mregions->shm_addr_lsw = buf_add[i];
		mregions->shm_addr_msw = 0x00;
		mregions->mem_size_bytes = bufsz[i];
		++mregions;
	}

	atomic_set(&this_adm.copp_stat[index], 0);
	ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
	if (ret < 0) {
		pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
					mmap_regions->hdr.opcode, ret);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_adm.wait[index],
			atomic_read(&this_adm.copp_stat[index]), 5 * HZ);
	if (!ret) {
		pr_err("%s: timeout. waited for memory_map\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
fail_cmd:
	kfree(mmap_region_cmd);
	return ret;
}
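
/*
 * Variant of adm_open() without perf-mode or bit-width arguments: it
 * always opens a 16-bit COPP and takes the topology from the ACDB
 * helpers (get_adm_rx_topology()/get_adm_tx_topology()), falling back
 * to the caller-supplied @topology when the ACDB returns 0.
 */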
int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
{
	struct adm_cmd_device_open_v5	open;
	int ret = 0;
	int index;
	int tmp_port = q6audio_get_port_id(port_id);

	pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
				port_id, path, rate, channel_mode);

	port_id = q6audio_convert_virtual_to_portid(port_id);

	if (q6audio_validate_port(port_id) < 0) {
		pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
		return -ENODEV;
	}

	index = q6audio_get_port_index(port_id);
	pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);

	if (this_adm.apr == NULL) {
		this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
						0xFFFFFFFF, &this_adm);
		if (this_adm.apr == NULL) {
			pr_err("%s: Unable to register ADM\n", __func__);
			ret = -ENODEV;
			return ret;
		}
		rtac_set_adm_handle(this_adm.apr);
	}


	/* Create a COPP if this port is not already enabled */
	if (atomic_read(&this_adm.copp_cnt[index]) == 0) {

		open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
				APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		open.hdr.pkt_size = sizeof(open);
		open.hdr.src_svc = APR_SVC_ADM;
		open.hdr.src_domain = APR_DOMAIN_APPS;
		open.hdr.src_port = tmp_port;
		open.hdr.dest_svc = APR_SVC_ADM;
		open.hdr.dest_domain = APR_DOMAIN_ADSP;
		open.hdr.dest_port = tmp_port;
		open.hdr.token = port_id;
		open.hdr.opcode = ADM_CMD_DEVICE_OPEN_V5;

		open.mode_of_operation = path;
		/* Reserved for future use, need to set this to 0 */
		open.flags = 0x00;
		open.endpoint_id_1 = tmp_port;
		open.endpoint_id_2 = 0xFFFF;

		/* convert path to acdb path */
		if (path == ADM_PATH_PLAYBACK)
			open.topology_id = get_adm_rx_topology();
		else {
			open.topology_id = get_adm_tx_topology();
			if ((open.topology_id ==
				VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
			    (open.topology_id ==
				VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
				rate = 16000;
		}

		if (open.topology_id  == 0)
			open.topology_id = topology;

		open.dev_num_channel = channel_mode & 0x00FF;
		open.bit_width = 16;
		open.sample_rate  = rate;
		memset(open.dev_channel_mapping, 0, 8);

		if (channel_mode == 1)	{
			open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
		} else if (channel_mode == 2) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
		} else if (channel_mode == 6) {
			open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
			open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
			open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
			open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
			open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
			open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
		} else {
			pr_err("%s invalid num_chan %d\n", __func__,
					channel_mode);
			return -EINVAL;
		}

		pr_debug("%s: port_id=%d rate=%d"
			"topology_id=0x%X\n", __func__,	open.endpoint_id_1, \
				  open.sample_rate, open.topology_id);

		atomic_set(&this_adm.copp_stat[index], 0);

		ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
		if (ret < 0) {
			pr_err("%s:ADM enable for port %d for[%d] failed\n",
						__func__, tmp_port, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
		/* Wait for the callback with copp id */
		ret = wait_event_timeout(this_adm.wait[index],
			atomic_read(&this_adm.copp_stat[index]),
			msecs_to_jiffies(TIMEOUT_MS));
		if (!ret) {
			pr_err("%s ADM open failed for port %d"
			"for [%d]\n", __func__, tmp_port, port_id);
			ret = -EINVAL;
			goto fail_cmd;
		}
	}
	atomic_inc(&this_adm.copp_cnt[index]);
	return 0;

fail_cmd:
	return ret;
}