/**
 * ipa_mhi_wait_for_bam_empty_timeout() - wait for uplink channels to drain
 * @msecs: timeout to wait
 *
 * This function polls until no packets are pending in the uplink channels
 * or the timeout occurs.
 *
 * Return code: true - no pending packets in uplink channels
 *		false - timeout occurred
 */
static bool ipa_mhi_wait_for_bam_empty_timeout(unsigned int msecs)
{
	unsigned long jiffies_timeout = msecs_to_jiffies(msecs);
	unsigned long jiffies_start = jiffies;
	bool empty = false;
	bool pending;
	int i;
	u32 pipe_idx;

	IPA_MHI_FUNC_ENTRY();
	while (!empty) {
		empty = true;
		for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
			if (!ipa_mhi_ctx->ul_channels[i].valid)
				continue;
			pipe_idx = ipa_get_ep_mapping(
				ipa_mhi_ctx->ul_channels[i].client);
			if (sps_pipe_pending_desc(ipa_ctx->bam_handle,
						pipe_idx, &pending)) {
				IPA_MHI_ERR("sps_pipe_pending_desc failed\n");
				WARN_ON(1);
				return false;
			}
			empty &= !pending;
		}

		if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
			IPA_MHI_DBG("timeout waiting for BAM empty\n");
			break;
		}
	}
	IPA_MHI_DBG("Bam is %s\n", (empty) ? "empty" : "not empty");
	IPA_MHI_FUNC_EXIT();
	return empty;
}
Example #2
static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
		struct ipa_rt_entry *entry, u8 *buf)
{
	struct ipa_rt_rule_hw_hdr *rule_hdr;
	const struct ipa_rt_rule *rule =
		(const struct ipa_rt_rule *)&entry->rule;
	u16 en_rule = 0;
	u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
	u8 *start;
	int pipe_idx;

	if (buf == NULL) {
		memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
		buf = (u8 *)tmp;
	}

	start = buf;
	rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
	pipe_idx = ipa_get_ep_mapping(ipa_ctx->mode,
			entry->rule.dst);
	if (pipe_idx == -1) {
		IPAERR("Wrong destination pipe specified in RT rule\n");
		WARN_ON(1);
		return -EPERM;
	}
	rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
	rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl;
	if (entry->hdr) {
		rule_hdr->u.hdr.hdr_offset =
			entry->hdr->offset_entry->offset >> 2;
	} else {
		rule_hdr->u.hdr.hdr_offset = 0;
	}

	/*
	 * NOTE: the source listing truncates this example here. The original
	 * function goes on to generate the rule body from rule->attrib into
	 * buf and to record entry->hw_len before returning.
	 */
	return 0;
}
Example #3
static int ipa_mhi_enable_force_clear(u32 request_id, bool throttle_source)
{
	struct ipa_enable_force_clear_datapath_req_msg_v01 req;
	int i;
	int res;

	IPA_MHI_FUNC_ENTRY();
	memset(&req, 0, sizeof(req));
	req.request_id = request_id;
	req.source_pipe_bitmask = 0;
	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
		if (!ipa_mhi_ctx->ul_channels[i].valid)
			continue;
		/* valid channels are assumed to have an EP mapping here */
		req.source_pipe_bitmask |= 1 << ipa_get_ep_mapping(
					ipa_mhi_ctx->ul_channels[i].client);
	}
	if (throttle_source) {
		req.throttle_source_valid = 1;
		req.throttle_source = 1;
	}
	IPA_MHI_DBG("req_id=0x%x src_pipe_btmk=0x%x throt_src=%d\n",
		req.request_id, req.source_pipe_bitmask,
		req.throttle_source);
	res = qmi_enable_force_clear_datapath_send(&req);
	if (res) {
		IPA_MHI_ERR("qmi_enable_force_clear_datapath_send failed %d\n",
			res);
		return res;
	}

	IPA_MHI_FUNC_EXIT();
	return 0;
}
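/*
 * The reset path below also calls ipa_mhi_disable_force_clear(), which is
 * missing from this listing. A minimal sketch, assuming the QMI helper
 * qmi_disable_force_clear_datapath_send() mirrors the enable variant above:
 */
static int ipa_mhi_disable_force_clear(u32 request_id)
{
	struct ipa_disable_force_clear_datapath_req_msg_v01 req;
	int res;

	IPA_MHI_FUNC_ENTRY();
	memset(&req, 0, sizeof(req));
	req.request_id = request_id;
	res = qmi_disable_force_clear_datapath_send(&req);
	if (res) {
		IPA_MHI_ERR("qmi_disable_force_clear_datapath_send failed %d\n",
			res);
		return res;
	}

	IPA_MHI_FUNC_EXIT();
	return 0;
}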
Example #4
/* send a filter-installed-notify request to the modem */
int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req)
{
	struct ipa_fltr_installed_notif_resp_msg_v01 resp;
	struct msg_desc req_desc, resp_desc;
	int rc = 0, i = 0;

	/* check whether the filter rules from IPACM are valid */
	if (req->filter_index_list_len == 0) {
		IPAWANERR(" delete UL filter rule for pipe %d\n",
		req->source_pipe_index);
		return -EINVAL;
	} else if (req->filter_index_list[0].filter_index == 0 &&
		req->source_pipe_index !=
		ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD)) {
		IPAWANERR(" get index wrong for pipe %d\n",
			req->source_pipe_index);
		for (i = 0; i < req->filter_index_list_len; i++)
			IPAWANERR(" %d-st handle %d index %d\n",
				i,
				req->filter_index_list[i].filter_handle,
				req->filter_index_list[i].filter_index);
		return -EINVAL;
	}

	mutex_lock(&ipa_qmi_lock);
	if (ipa_qmi_ctx != NULL) {
		/* cache the qmi_filter_request */
		memcpy(&(ipa_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
			ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
			req,
			sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
		ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
		/* the cache is a ring of 10 entries; wrap and overwrite the oldest */
		ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %= 10;
	}
	mutex_unlock(&ipa_qmi_lock);
	req_desc.max_msg_len =
	QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01;
	req_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01;
	req_desc.ei_array = ipa_fltr_installed_notif_req_msg_data_v01_ei;

	memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
	resp_desc.max_msg_len =
		QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01;
	resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
	resp_desc.ei_array = ipa_fltr_installed_notif_resp_msg_data_v01_ei;

	rc = qmi_send_req_wait(ipa_q6_clnt,
			&req_desc,
			req,
			sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
			&resp_desc, &resp, sizeof(resp),
			QMI_SEND_REQ_TIMEOUT_MS);
	return ipa_check_qmi_response(rc,
		QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, resp.resp.result,
		resp.resp.error, "ipa_fltr_installed_notif_resp");
}
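/*
 * Hypothetical caller sketch (not part of the original driver): notify the
 * modem that a single UL filter rule was installed on the LAN/WAN producer
 * pipe. Field values here are illustrative only.
 */
static int example_notify_one_filter(u32 filter_handle)
{
	struct ipa_fltr_installed_notif_req_msg_v01 req;

	memset(&req, 0, sizeof(req));
	req.source_pipe_index =
		ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
	req.filter_index_list_len = 1;
	req.filter_index_list[0].filter_handle = filter_handle;
	req.filter_index_list[0].filter_index = 1;

	return qmi_filter_notify_send(&req);
}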
Example #5
/**
 * ipa_mhi_get_channel_context_by_clnt_hdl() - Get corresponding channel context
 * @clnt_hdl: client handle as provided in ipa_mhi_connect_pipe()
 *
 * This function returns the corresponding channel context, or NULL if no
 * such channel exists.
 */
static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context_by_clnt_hdl(
	u32 clnt_hdl)
{
	int ch_idx;

	for (ch_idx = 0; ch_idx < IPA_MHI_MAX_UL_CHANNELS; ch_idx++) {
		if (ipa_mhi_ctx->ul_channels[ch_idx].valid &&
		    ipa_get_ep_mapping(
		    ipa_mhi_ctx->ul_channels[ch_idx].client) == clnt_hdl)
			return &ipa_mhi_ctx->ul_channels[ch_idx];
	}

	for (ch_idx = 0; ch_idx < IPA_MHI_MAX_DL_CHANNELS; ch_idx++) {
		if (ipa_mhi_ctx->dl_channels[ch_idx].valid &&
		    ipa_get_ep_mapping(
		    ipa_mhi_ctx->dl_channels[ch_idx].client) == clnt_hdl)
			return &ipa_mhi_ctx->dl_channels[ch_idx];
	}

	return NULL;
}
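/*
 * Hypothetical caller sketch (not part of the original driver): a disconnect
 * or reset path would first resolve the client handle back to its channel
 * context using the helper above.
 */
static int example_lookup_channel(u32 clnt_hdl)
{
	struct ipa_mhi_channel_ctx *channel;

	channel = ipa_mhi_get_channel_context_by_clnt_hdl(clnt_hdl);
	if (!channel) {
		IPA_MHI_ERR("invalid clnt_hdl %u\n", clnt_hdl);
		return -EINVAL;
	}

	IPA_MHI_DBG("clnt_hdl %u -> channel index %d\n", clnt_hdl, channel->id);
	return 0;
}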
Example #6
static int ipa_mhi_reset_dl_channel(struct ipa_mhi_channel_ctx *channel)
{
	int res;

	IPA_MHI_FUNC_ENTRY();
	res = ipa_disable_data_path(ipa_get_ep_mapping(channel->client));
	if (res) {
		IPA_MHI_ERR("ipa_disable_data_path failed %d\n", res);
		return res;
	}

	res = ipa_uc_mhi_reset_channel(channel->hdl);
	if (res) {
		IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n", res);
		goto fail_reset_channel;
	}
	IPA_MHI_FUNC_EXIT();

	return 0;

fail_reset_channel:
	ipa_enable_data_path(ipa_get_ep_mapping(channel->client));
	return res;
}
Example #7
static int ipa_mhi_reset_ul_channel(struct ipa_mhi_channel_ctx *channel)
{
	int res;
	int i;
	int ep_idx;
	struct ipa_ep_cfg_holb ep_holb;
	struct ipa_ep_cfg_holb old_ep_holb[IPA_MHI_MAX_DL_CHANNELS];
	bool empty;

	IPA_MHI_FUNC_ENTRY();
	res = ipa_uc_mhi_reset_channel(channel->hdl);
	if (res) {
		IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n", res);
		return res;
	}
	empty = ipa_mhi_wait_for_bam_empty_timeout(
		IPA_MHI_BAM_EMPTY_TIMEOUT_MSEC);
	if (!empty) {
		IPA_MHI_DBG("BAM not empty\n");
		res = ipa_mhi_enable_force_clear(ipa_mhi_ctx->qmi_req_id,
			true);
		if (res) {
			IPA_MHI_ERR("ipa_mhi_enable_force_clear failed %d\n",
				res);
			BUG();
			return res;
		}

		/* enable packet drop on all DL channels */
		for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
			if (!ipa_mhi_ctx->dl_channels[i].valid)
				continue;
			if (ipa_mhi_ctx->dl_channels[i].state ==
			    IPA_HW_MHI_CHANNEL_STATE_INVALID)
				continue;
			ep_idx = ipa_get_ep_mapping(
				ipa_mhi_ctx->dl_channels[i].client);
			if (-1 == ep_idx) {
				IPA_MHI_ERR("Client %u is not mapped\n",
					ipa_mhi_ctx->dl_channels[i].client);
				BUG();
				return -EFAULT;
			}
			memset(&ep_holb, 0, sizeof(ep_holb));
			ep_holb.en = 1;
			ep_holb.tmr_val = 0;
			old_ep_holb[i] = ipa_ctx->ep[ep_idx].holb;
			res = ipa_cfg_ep_holb(ep_idx, &ep_holb);
			if (res) {
				IPA_MHI_ERR("ipa_cfg_ep_holb failed %d\n", res);
				BUG();
				return res;
			}
		}

		res = ipa_tag_process(NULL, 0, HZ);
		if (res)
			IPAERR("TAG process failed\n");

		/* disable packet drop on all DL channels */
		for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
			if (!ipa_mhi_ctx->dl_channels[i].valid)
				continue;
			if (ipa_mhi_ctx->dl_channels[i].state ==
				IPA_HW_MHI_CHANNEL_STATE_INVALID)
				continue;
			ep_idx = ipa_get_ep_mapping(
				ipa_mhi_ctx->dl_channels[i].client);
			res = ipa_cfg_ep_holb(ep_idx, &old_ep_holb[i]);
			if (res) {
				IPA_MHI_ERR("ipa_cfg_ep_holb failed %d\n", res);
				BUG();
				return res;
			}
		}

		res = sps_pipe_disable(ipa_ctx->bam_handle,
			ipa_get_ep_mapping(channel->client));
		if (res) {
			IPA_MHI_ERR("sps_pipe_disable failed %d\n", res);
			BUG();
			return res;
		}

		res = ipa_mhi_disable_force_clear(ipa_mhi_ctx->qmi_req_id);
		if (res) {
			IPA_MHI_ERR("ipa_mhi_disable_force_clear failed %d\n",
				res);
			BUG();
			return res;
		}
		ipa_mhi_ctx->qmi_req_id++;
	}

	res = ipa_disable_data_path(ipa_get_ep_mapping(channel->client));
	if (res) {
		IPA_MHI_ERR("ipa_disable_data_path failed %d\n", res);
		return res;
	}
	IPA_MHI_FUNC_EXIT();

	return 0;
}
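/*
 * Sketch only: a plausible dispatcher over the two reset helpers above,
 * assuming IPA_CLIENT_IS_PROD() identifies uplink producers as it does
 * elsewhere in this listing. Not necessarily the driver's exact code.
 */
static int ipa_mhi_reset_channel(struct ipa_mhi_channel_ctx *channel)
{
	int res;

	if (IPA_CLIENT_IS_PROD(channel->client))
		res = ipa_mhi_reset_ul_channel(channel);
	else
		res = ipa_mhi_reset_dl_channel(channel);
	if (res) {
		IPA_MHI_ERR("failed to reset channel %d\n", res);
		return res;
	}

	channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
	return 0;
}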
Example #8
/**
 * ipa_mhi_connect_pipe() - Connect pipe to IPA and start corresponding
 * MHI channel
 * @in: connect parameters
 * @clnt_hdl: [out] client handle for this pipe
 *
 * This function is called by the MHI client driver on MHI channel start,
 * after the MHI engine has been started. It does the following:
 *	- Sends a command to the uC to start the corresponding MHI channel
 *	- Configures the IPA EP control
 *
 * Return codes: 0	  : success
 *		 negative : error
 */
int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
{
	struct ipa_ep_context *ep;
	int ipa_ep_idx;
	int res;
	struct ipa_mhi_channel_ctx *channel = NULL;
	unsigned long flags;

	IPA_MHI_FUNC_ENTRY();

	if (!in || !clnt_hdl) {
		IPA_MHI_ERR("NULL args\n");
		return -EINVAL;
	}

	if (in->sys.client >= IPA_CLIENT_MAX) {
		IPA_MHI_ERR("bad parm client:%d\n", in->sys.client);
		return -EINVAL;
	}

	if (unlikely(!ipa_mhi_ctx)) {
		IPA_MHI_ERR("IPA MHI was not initialized\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&ipa_mhi_ctx->state_lock, flags);
	if (ipa_mhi_ctx->state != IPA_MHI_STATE_STARTED) {
		IPA_MHI_ERR("IPA MHI was not started\n");
		spin_unlock_irqrestore(&ipa_mhi_ctx->state_lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&ipa_mhi_ctx->state_lock, flags);

	ipa_ep_idx = ipa_get_ep_mapping(in->sys.client);
	if (ipa_ep_idx == -1) {
		IPA_MHI_ERR("Invalid client.\n");
		return -EINVAL;
	}

	ep = &ipa_ctx->ep[ipa_ep_idx];

	channel = ipa_mhi_get_channel_context(in->sys.client,
		in->channel_id);
	if (!channel) {
		IPA_MHI_ERR("ipa_mhi_get_channel_context failed\n");
		return -EINVAL;
	}

	IPA_MHI_DBG("client %d channelHandle %d channelIndex %d\n",
		channel->client, channel->hdl, channel->id);

	ipa_inc_client_enable_clks();

	if (ep->valid == 1) {
		IPA_MHI_ERR("EP already allocated.\n");
		goto fail_ep_exists;
	}

	memset(ep, 0, offsetof(struct ipa_ep_context, sys));
	ep->valid = 1;
	ep->skip_ep_cfg = in->sys.skip_ep_cfg;
	ep->client = in->sys.client;
	ep->client_notify = in->sys.notify;
	ep->priv = in->sys.priv;
	ep->keep_ipa_awake = in->sys.keep_ipa_awake;

	/* start channel in uC */
	if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
		IPA_MHI_DBG("Initializing channel\n");
		res = ipa_uc_mhi_init_channel(ipa_ep_idx, channel->hdl,
			channel->id, (IPA_CLIENT_IS_PROD(ep->client) ? 1 : 2));
		if (res) {
			IPA_MHI_ERR("init_channel failed %d\n", res);
			goto fail_init_channel;
		}
	} else if (channel->state == IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
		if (channel->client != ep->client) {
			IPA_MHI_ERR("previous channel client was %d\n",
				channel->client);
			goto fail_init_channel;
		}
		IPA_MHI_DBG("Starting channel\n");
		res = ipa_uc_mhi_resume_channel(channel->hdl, false);
		if (res) {
			IPA_MHI_ERR("resume_channel failed %d\n", res);
			goto fail_init_channel;
		}
	} else {
		IPA_MHI_ERR("Invalid channel state %d\n", channel->state);
		goto fail_init_channel;
	}

	channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;

	res = ipa_enable_data_path(ipa_ep_idx);
	if (res) {
		IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
			ipa_ep_idx);
		goto fail_enable_dp;
	}

	if (!ep->skip_ep_cfg) {
		if (ipa_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
			IPAERR("fail to configure EP.\n");
			goto fail_ep_cfg;
		}
		if (ipa_cfg_ep_status(ipa_ep_idx, &ep->status)) {
			IPAERR("fail to configure status of EP.\n");
			goto fail_ep_cfg;
		}
		IPA_MHI_DBG("ep configuration successful\n");
	} else {
		IPA_MHI_DBG("skipping ep configuration\n");
	}

	*clnt_hdl = ipa_ep_idx;

	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
		ipa_install_dflt_flt_rules(ipa_ep_idx);

	if (!ep->keep_ipa_awake)
		ipa_dec_client_disable_clks();

	ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
	IPA_MHI_DBG("client %d (ep: %d) connected\n", in->sys.client,
		ipa_ep_idx);

	IPA_MHI_FUNC_EXIT();

	return 0;

fail_ep_cfg:
	ipa_disable_data_path(ipa_ep_idx);
fail_enable_dp:
	ipa_uc_mhi_reset_channel(channel->hdl);
	channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
fail_init_channel:
	memset(ep, 0, offsetof(struct ipa_ep_context, sys));
fail_ep_exists:
	ipa_dec_client_disable_clks();
	return -EPERM;
}
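/*
 * Hypothetical call-site sketch (not part of the original driver): an MHI
 * client driver connecting one channel. IPA_CLIENT_MHI_PROD is an assumed
 * client enum value and may differ per target.
 */
static int example_start_channel(u32 mhi_channel_id)
{
	struct ipa_mhi_connect_params params;
	u32 clnt_hdl;
	int res;

	memset(&params, 0, sizeof(params));
	params.sys.client = IPA_CLIENT_MHI_PROD;	/* assumption */
	params.channel_id = mhi_channel_id;

	res = ipa_mhi_connect_pipe(&params, &clnt_hdl);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_connect_pipe failed %d\n", res);
		return res;
	}

	IPA_MHI_DBG("connected, clnt_hdl=%u\n", clnt_hdl);
	return 0;
}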
Example #9
static int qmi_init_modem_send_sync_msg(void)
{
	struct ipa_init_modem_driver_req_msg_v01 req;
	struct ipa_init_modem_driver_resp_msg_v01 resp;
	struct msg_desc req_desc, resp_desc;
	int rc;
	u16 smem_restr_bytes = ipa_get_smem_restr_bytes();

	memset(&req, 0, sizeof(struct ipa_init_modem_driver_req_msg_v01));
	memset(&resp, 0, sizeof(struct ipa_init_modem_driver_resp_msg_v01));
	req.platform_type_valid = true;
	req.platform_type = ipa_wan_platform;
	req.hdr_tbl_info_valid = true;
	req.hdr_tbl_info.modem_offset_start =
		IPA_MEM_PART(modem_hdr_ofst) + smem_restr_bytes;
	req.hdr_tbl_info.modem_offset_end = IPA_MEM_PART(modem_hdr_ofst) +
		smem_restr_bytes + IPA_MEM_PART(modem_hdr_size) - 1;
	req.v4_route_tbl_info_valid = true;
	req.v4_route_tbl_info.route_tbl_start_addr = IPA_MEM_PART(v4_rt_ofst) +
		smem_restr_bytes;
	req.v4_route_tbl_info.num_indices = IPA_MEM_PART(v4_modem_rt_index_hi);
	req.v6_route_tbl_info_valid = true;
	req.v6_route_tbl_info.route_tbl_start_addr = IPA_MEM_PART(v6_rt_ofst) +
		smem_restr_bytes;
	req.v6_route_tbl_info.num_indices = IPA_MEM_PART(v6_modem_rt_index_hi);
	req.v4_filter_tbl_start_addr_valid = true;
	req.v4_filter_tbl_start_addr =
		IPA_MEM_PART(v4_flt_ofst) + smem_restr_bytes;
	req.v6_filter_tbl_start_addr_valid = true;
	req.v6_filter_tbl_start_addr =
		IPA_MEM_PART(v6_flt_ofst) + smem_restr_bytes;
	req.modem_mem_info_valid = true;
	req.modem_mem_info.block_start_addr =
		IPA_MEM_PART(modem_ofst) + smem_restr_bytes;
	req.modem_mem_info.size = IPA_MEM_PART(modem_size);
	req.ctrl_comm_dest_end_pt_valid = true;
	req.ctrl_comm_dest_end_pt =
		ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
	req.hdr_proc_ctx_tbl_info_valid = true;
	req.hdr_proc_ctx_tbl_info.modem_offset_start =
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + smem_restr_bytes;
	req.hdr_proc_ctx_tbl_info.modem_offset_end =
		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) +
		IPA_MEM_PART(modem_hdr_proc_ctx_size) + smem_restr_bytes - 1;
	if (is_load_uc) {  /* First time boot */
		req.is_ssr_bootup_valid = false;
		req.is_ssr_bootup = 0;
	} else {  /* After SSR boot */
		req.is_ssr_bootup_valid = true;
		req.is_ssr_bootup = 1;
	}

	IPAWANDBG("platform_type %d\n", req.platform_type);
	IPAWANDBG("hdr_tbl_info.modem_offset_start %d\n",
			req.hdr_tbl_info.modem_offset_start);
	IPAWANDBG("hdr_tbl_info.modem_offset_end %d\n",
			req.hdr_tbl_info.modem_offset_end);
	IPAWANDBG("v4_route_tbl_info.route_tbl_start_addr %d\n",
			req.v4_route_tbl_info.route_tbl_start_addr);
	IPAWANDBG("v4_route_tbl_info.num_indices %d\n",
			req.v4_route_tbl_info.num_indices);
	IPAWANDBG("v6_route_tbl_info.route_tbl_start_addr %d\n",
			req.v6_route_tbl_info.route_tbl_start_addr);
	IPAWANDBG("v6_route_tbl_info.num_indices %d\n",
			req.v6_route_tbl_info.num_indices);
	IPAWANDBG("v4_filter_tbl_start_addr %d\n",
			req.v4_filter_tbl_start_addr);
	IPAWANDBG("v6_filter_tbl_start_addr %d\n",
			req.v6_filter_tbl_start_addr);
	IPAWANDBG("modem_mem_info.block_start_addr %d\n",
			req.modem_mem_info.block_start_addr);
	IPAWANDBG("modem_mem_info.size %d\n",
			req.modem_mem_info.size);
	IPAWANDBG("ctrl_comm_dest_end_pt %d\n",
			req.ctrl_comm_dest_end_pt);
	IPAWANDBG("is_ssr_bootup %d\n",
			req.is_ssr_bootup);

	req_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01;
	req_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_REQ_V01;
	req_desc.ei_array = ipa_init_modem_driver_req_msg_data_v01_ei;

	resp_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01;
	resp_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_RESP_V01;
	resp_desc.ei_array = ipa_init_modem_driver_resp_msg_data_v01_ei;

	pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
			&resp_desc, &resp, sizeof(resp),
			QMI_SEND_REQ_TIMEOUT_MS);
	pr_info("QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 response received\n");
	return ipa_check_qmi_response(rc,
		QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, resp.resp.result,
		resp.resp.error, "ipa_init_modem_driver_resp_msg_v01");
}
Example #10
/**
 * ipa_connect() - low-level IPA client connect
 * @in:	[in] input parameters from client
 * @sps:	[out] sps output from IPA needed by client for sps_connect
 * @clnt_hdl:	[out] opaque client handle assigned by IPA to client
 *
 * Should be called by the driver of a peripheral that wants to connect to
 * IPA in BAM-BAM mode. These peripherals are A2, USB and HSIC. This API
 * expects the caller to take responsibility for adding any needed headers,
 * routing and filtering tables, and rules.
 *
 * Returns:	0 on success, negative on failure
 *
 * Note:	Should not be called from atomic context
 */
int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
		u32 *clnt_hdl)
{
	int ipa_ep_idx;
	int result = -EFAULT;
	struct ipa_ep_context *ep;

	ipa_inc_client_enable_clks();

	if (in == NULL || sps == NULL || clnt_hdl == NULL ||
	    in->client >= IPA_CLIENT_MAX ||
	    in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
		IPAERR("bad parm.\n");
		result = -EINVAL;
		goto fail;
	}

	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, in->client);
	if (ipa_ep_idx == -1) {
		IPAERR("fail to alloc EP.\n");
		goto fail;
	}

	ep = &ipa_ctx->ep[ipa_ep_idx];

	if (ep->valid) {
		IPAERR("EP already allocated.\n");
		goto fail;
	}

	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
	ipa_enable_data_path(ipa_ep_idx);

	ep->valid = 1;
	ep->client = in->client;
	ep->client_notify = in->notify;
	ep->priv = in->priv;

	if (ipa_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
		IPAERR("fail to configure EP.\n");
		goto ipa_cfg_ep_fail;
	}

	result = ipa_connect_configure_sps(in, ep, ipa_ep_idx);
	if (result) {
		IPAERR("fail to configure SPS.\n");
		goto ipa_cfg_ep_fail;
	}

	if (in->desc.base == NULL) {
		result = ipa_connect_allocate_fifo(in, &ep->connect.desc,
						  &ep->desc_fifo_in_pipe_mem,
						  &ep->desc_fifo_pipe_mem_ofst,
						  in->desc_fifo_sz, ipa_ep_idx);
		if (result) {
			IPAERR("fail to allocate DESC FIFO.\n");
			goto desc_mem_alloc_fail;
		}
	} else {
		IPADBG("client allocated DESC FIFO\n");
		ep->connect.desc = in->desc;
		ep->desc_fifo_client_allocated = 1;
	}
	IPADBG("Descriptor FIFO pa=0x%x, size=%d\n", ep->connect.desc.phys_base,
	       ep->connect.desc.size);

	if (in->data.base == NULL) {
		result = ipa_connect_allocate_fifo(in, &ep->connect.data,
						&ep->data_fifo_in_pipe_mem,
						&ep->data_fifo_pipe_mem_ofst,
						in->data_fifo_sz, ipa_ep_idx);
		if (result) {
			IPAERR("fail to allocate DATA FIFO.\n");
			goto data_mem_alloc_fail;
		}
	} else {
		IPADBG("client allocated DATA FIFO\n");
		ep->connect.data = in->data;
		ep->data_fifo_client_allocated = 1;
	}
	IPADBG("Data FIFO pa=0x%x, size=%d\n", ep->connect.data.phys_base,
	       ep->connect.data.size);

	ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
	ep->connect.options = SPS_O_AUTO_ENABLE;    /* BAM-to-BAM */

	if (IPA_CLIENT_IS_CONS(in->client))
		ep->connect.options |= SPS_O_NO_DISABLE;

	result = sps_connect(ep->ep_hdl, &ep->connect);
	if (result) {
		IPAERR("sps_connect fails.\n");
		goto sps_connect_fail;
	}

	sps->ipa_bam_hdl = ipa_ctx->bam_handle;
	sps->ipa_ep_idx = ipa_ep_idx;
	*clnt_hdl = ipa_ep_idx;
	memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
	memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));

	ipa_program_holb(ep, ipa_ep_idx);

	IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);

	return 0;

sps_connect_fail:
	if (!ep->data_fifo_in_pipe_mem)
		dma_free_coherent(NULL,
				  ep->connect.data.size,
				  ep->connect.data.base,
				  ep->connect.data.phys_base);
	else
		ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
				  ep->connect.data.size);

data_mem_alloc_fail:
	if (!ep->desc_fifo_in_pipe_mem)
		dma_free_coherent(NULL,
				  ep->connect.desc.size,
				  ep->connect.desc.base,
				  ep->connect.desc.phys_base);
	else
		ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
				  ep->connect.desc.size);

desc_mem_alloc_fail:
	sps_free_endpoint(ep->ep_hdl);
ipa_cfg_ep_fail:
	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
fail:
	ipa_dec_client_disable_clks();
	return result;
}
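/*
 * Sketch only (not part of the original driver): how a peripheral driver
 * might consume the ipa_connect() outputs; fill_my_connect_params() is an
 * illustrative placeholder for the peripheral's own parameter setup.
 */
static int example_peripheral_connect(void)
{
	struct ipa_connect_params params;
	struct ipa_sps_params sps;
	u32 clnt_hdl;
	int res;

	memset(&params, 0, sizeof(params));
	fill_my_connect_params(&params);	/* illustrative helper */

	res = ipa_connect(&params, &sps, &clnt_hdl);
	if (res)
		return res;

	/*
	 * The peripheral now uses sps.ipa_bam_hdl and sps.ipa_ep_idx to set
	 * up its own side of the BAM-to-BAM link via the SPS driver, and
	 * keeps clnt_hdl for the matching disconnect call on teardown.
	 */
	return 0;
}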
Example #11
static int setup_bridge_to_ipa(enum ipa_bridge_dir dir)
{
	struct ipa_bridge_pipe_context *sys;
	struct ipa_ep_cfg_mode mode;
	dma_addr_t dma_addr;
	int ipa_ep_idx;
	int ret;
	int i;

	if (dir == IPA_DL) {
		ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode,
				IPA_CLIENT_A2_TETHERED_PROD);
		if (ipa_ep_idx == -1) {
			IPAERR("Invalid client.\n");
			ret = -EINVAL;
			goto tx_alloc_endpoint_failed;
		}

		sys = &bridge[IPA_DL_TO_IPA];
		sys->pipe = sps_alloc_endpoint();
		if (sys->pipe == NULL) {
			IPAERR("tx alloc endpoint failed\n");
			ret = -ENOMEM;
			goto tx_alloc_endpoint_failed;
		}
		ret = sps_get_config(sys->pipe, &sys->connection);
		if (ret) {
			IPAERR("tx get config failed %d\n", ret);
			goto tx_get_config_failed;
		}

		sys->connection.source = SPS_DEV_HANDLE_MEM;
		sys->connection.src_pipe_index = ipa_ctx->a5_pipe_index++;
		sys->connection.destination = ipa_ctx->bam_handle;
		sys->connection.dest_pipe_index = ipa_ep_idx;
		sys->connection.mode = SPS_MODE_DEST;
		sys->connection.options =
		   SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_POLL;
		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
				sys->desc_mem_buf.size,
				&dma_addr,
				0);
		if (sys->desc_mem_buf.base == NULL) {
			IPAERR("tx memory alloc failed\n");
			ret = -ENOMEM;
			goto tx_get_config_failed;
		}
		sys->desc_mem_buf.phys_base = dma_addr;
		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
		sys->connection.desc = sys->desc_mem_buf;
		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;

		ret = sps_connect(sys->pipe, &sys->connection);
		if (ret < 0) {
			IPAERR("tx connect error %d\n", ret);
			goto tx_connect_failed;
		}

		INIT_LIST_HEAD(&sys->head_desc_list);
		INIT_LIST_HEAD(&sys->free_desc_list);
		spin_lock_init(&sys->spinlock);

		ipa_ctx->ep[ipa_ep_idx].valid = 1;

		mode.mode = IPA_DMA;
		mode.dst = IPA_CLIENT_USB_CONS;
		ret = ipa_cfg_ep_mode(ipa_ep_idx, &mode);
		if (ret < 0) {
			IPAERR("DMA mode set error %d\n", ret);
			goto tx_mode_set_failed;
		}

		return 0;

tx_mode_set_failed:
		sps_disconnect(sys->pipe);
tx_connect_failed:
		dma_free_coherent(NULL, sys->desc_mem_buf.size,
				sys->desc_mem_buf.base,
				sys->desc_mem_buf.phys_base);
tx_get_config_failed:
		sps_free_endpoint(sys->pipe);
tx_alloc_endpoint_failed:
		return ret;
	} else {

		ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode,
				IPA_CLIENT_A2_TETHERED_CONS);
		if (ipa_ep_idx == -1) {
			IPAERR("Invalid client.\n");
			ret = -EINVAL;
			goto rx_alloc_endpoint_failed;
		}

		sys = &bridge[IPA_UL_FROM_IPA];
		sys->pipe = sps_alloc_endpoint();
		if (sys->pipe == NULL) {
			IPAERR("rx alloc endpoint failed\n");
			ret = -ENOMEM;
			goto rx_alloc_endpoint_failed;
		}
		ret = sps_get_config(sys->pipe, &sys->connection);
		if (ret) {
			IPAERR("rx get config failed %d\n", ret);
			goto rx_get_config_failed;
		}

		sys->connection.source = ipa_ctx->bam_handle;
		sys->connection.src_pipe_index = 7; /* hard-coded IPA pipe index */
		sys->connection.destination = SPS_DEV_HANDLE_MEM;
		sys->connection.dest_pipe_index = ipa_ctx->a5_pipe_index++;
		sys->connection.mode = SPS_MODE_SRC;
		sys->connection.options = SPS_O_AUTO_ENABLE | SPS_O_EOT |
		      SPS_O_ACK_TRANSFERS;
		sys->desc_mem_buf.size = IPA_SYS_DESC_FIFO_SZ; /* 2k */
		sys->desc_mem_buf.base = dma_alloc_coherent(NULL,
				sys->desc_mem_buf.size,
				&dma_addr,
				0);
		if (sys->desc_mem_buf.base == NULL) {
			IPAERR("rx memory alloc failed\n");
			ret = -ENOMEM;
			goto rx_get_config_failed;
		}
		sys->desc_mem_buf.phys_base = dma_addr;
		memset(sys->desc_mem_buf.base, 0x0, sys->desc_mem_buf.size);
		sys->connection.desc = sys->desc_mem_buf;
		sys->connection.event_thresh = IPA_EVENT_THRESHOLD;

		ret = sps_connect(sys->pipe, &sys->connection);
		if (ret < 0) {
			IPAERR("rx connect error %d\n", ret);
			goto rx_connect_failed;
		}

		sys->register_event.options = SPS_O_EOT;
		sys->register_event.mode = SPS_TRIGGER_CALLBACK;
		sys->register_event.xfer_done = NULL;
		sys->register_event.callback = ipa_sps_irq_rx_notify;
		sys->register_event.user = NULL;
		ret = sps_register_event(sys->pipe, &sys->register_event);
		if (ret < 0) {
			IPAERR("tx register event error %d\n", ret);
			goto rx_event_reg_failed;
		}

		INIT_LIST_HEAD(&sys->head_desc_list);
		INIT_LIST_HEAD(&sys->free_desc_list);
		spin_lock_init(&sys->spinlock);

		for (i = 0; i < IPA_RX_POOL_CEIL; i++) {
			ret = queue_rx_single(dir);
			if (ret < 0)
				IPAERR("queue fail %d %d\n", dir, i);
		}

		return 0;

rx_event_reg_failed:
		sps_disconnect(sys->pipe);
rx_connect_failed:
		dma_free_coherent(NULL,
				sys->desc_mem_buf.size,
				sys->desc_mem_buf.base,
				sys->desc_mem_buf.phys_base);
rx_get_config_failed:
		sps_free_endpoint(sys->pipe);
rx_alloc_endpoint_failed:
		return ret;
	}
}
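/*
 * Sketch only (not part of the original driver): setup_bridge_to_ipa()
 * handles one direction per call, so bringing the tethered bridge up takes
 * two calls. IPA_UL is assumed to be the opposite direction value of IPA_DL
 * in enum ipa_bridge_dir.
 */
static int example_bridge_setup(void)
{
	int ret;

	ret = setup_bridge_to_ipa(IPA_DL);
	if (ret) {
		IPAERR("DL bridge setup failed %d\n", ret);
		return ret;
	}

	ret = setup_bridge_to_ipa(IPA_UL);
	if (ret) {
		IPAERR("UL bridge setup failed %d\n", ret);
		/* a full driver would tear down the DL side here */
		return ret;
	}

	return 0;
}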