Example #1
int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t cmd)
{
	int res;

	if (!ipa_uc_mhi_ctx) {
		IPAERR("Not initialized\n");
		return -EFAULT;
	}

	IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n",
		cmd.params.isDlUlSyncEnabled, cmd.params.UlAccmVal);
	IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
		cmd.params.ulMsiEventThreshold, cmd.params.dlMsiEventThreshold);

	ipa_inc_client_enable_clks();

	res = ipa_uc_send_cmd(cmd.raw32b,
		IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
	if (res) {
		IPAERR("ipa_uc_send_cmd failed %d\n", res);
		goto disable_clks;
	}

	res = 0;
disable_clks:
	ipa_dec_client_disable_clks();
	return res;
}
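A minimal usage sketch for the command above: the caller fills the bit-field view of the union and passes it by value. The field names come from the debug prints in the function; the values themselves are hypothetical.

/* Hedged usage sketch: the values below are illustrative, not from the
 * driver; only the union fields and the call signature are taken from
 * the code above. */
static int example_send_dl_ul_sync(void)
{
	union IpaHwMhiDlUlSyncCmdData_t cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.params.isDlUlSyncEnabled = 0x1;	/* hypothetical: enable sync */
	cmd.params.UlAccmVal = 0x8;		/* hypothetical accumulation */
	cmd.params.ulMsiEventThreshold = 0x10;	/* hypothetical thresholds */
	cmd.params.dlMsiEventThreshold = 0x10;

	return ipa_uc_mhi_send_dl_ul_sync_info(cmd);
}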
int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name)
{
	struct ipa_rm_resource *resource;
	int result;

	IPADBG("IPA RM ::ipa_rm_delete_resource num[%d] ENTER\n",
			resource_name);
	write_lock(&ipa_rm_ctx->lock);
	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
					resource_name,
						&resource) != 0) {
		IPADBG("ipa_rm_delete_resource param are bad********\n");
		result = -EINVAL;
		goto bail;
	}
	result = ipa_rm_resource_delete(resource);
	if (result) {
		IPADBG("error in ipa_rm_resource_delete\n");
		goto bail;
	}
	result = ipa_rm_dep_graph_remove(ipa_rm_ctx->dep_graph,
								resource_name);
	IPADBG("IPA RM ::ipa_rm_delete_resource [%d] SUCCESS\n",
								resource_name);
bail:
	write_unlock(&ipa_rm_ctx->lock);
	return result;
}
Example #3
int ipa_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void))
{
	struct ipa_uc_hdlrs hdlrs;

	if (ipa_uc_mhi_ctx) {
		IPAERR("Already initialized\n");
		return -EFAULT;
	}

	ipa_uc_mhi_ctx = kzalloc(sizeof(*ipa_uc_mhi_ctx), GFP_KERNEL);
	if (!ipa_uc_mhi_ctx) {
		IPAERR("no mem\n");
		return -ENOMEM;
	}

	ipa_uc_mhi_ctx->ready_cb = ready_cb;
	ipa_uc_mhi_ctx->wakeup_request_cb = wakeup_request_cb;

	memset(&hdlrs, 0, sizeof(hdlrs));
	hdlrs.ipa_uc_loaded_hdlr = ipa_uc_mhi_ctx->ready_cb;
	hdlrs.ipa_uc_response_hdlr = ipa_uc_mhi_response_hdlr;
	hdlrs.ipa_uc_event_hdlr = ipa_uc_mhi_event_hdlr;
	hdlrs.ipa_uc_event_log_info_hdlr = ipa_uc_mhi_event_log_info_hdlr;
	ipa_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs);

	IPADBG("Done\n");
	return 0;
}
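For context, a sketch of how an MHI driver might register with the uC interface above. Only the ipa_uc_mhi_init() signature is taken from the code; the callback names and bodies are placeholders.

/* Hypothetical client callbacks; names and bodies are illustrative. */
static void example_uc_ready_cb(void)
{
	pr_info("IPA uC loaded; MHI commands can now be issued\n");
}

static void example_wakeup_request_cb(void)
{
	pr_info("uC requested a channel wakeup\n");
}

static int example_register_mhi_with_uc(void)
{
	return ipa_uc_mhi_init(example_uc_ready_cb,
			       example_wakeup_request_cb);
}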
int ipa_rm_initialize(void)
{
	int result;

	ipa_rm_ctx = kzalloc(sizeof(*ipa_rm_ctx), GFP_KERNEL);
	if (!ipa_rm_ctx) {
		result = -ENOMEM;
		goto bail;
	}
	ipa_rm_ctx->ipa_rm_wq = create_singlethread_workqueue("ipa_rm_wq");
	if (!ipa_rm_ctx->ipa_rm_wq) {
		result = -ENOMEM;
		goto create_wq_fail;
	}
	result = ipa_rm_dep_graph_create(&(ipa_rm_ctx->dep_graph));
	if (result)
		goto graph_alloc_fail;
	rwlock_init(&ipa_rm_ctx->lock);
	IPADBG("IPA RM ipa_rm_initialize SUCCESS\n");
	return 0;

graph_alloc_fail:
	destroy_workqueue(ipa_rm_ctx->ipa_rm_wq);
create_wq_fail:
	kfree(ipa_rm_ctx);
bail:
	return result;
}
static void ipa_uc_event_handler(enum ipa_irq_type interrupt,
				 void *private_data,
				 void *interrupt_data)
{
	union IpaHwErrorEventData_t evt;
	u8 feature;

	WARN_ON(private_data != ipa_ctx);

	ipa_inc_client_enable_clks();

	IPADBG("uC evt opcode=%u\n",
		ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);

	feature = EXTRACT_UC_FEATURE(ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);

	/* feature is a u8, so only the upper bound needs checking */
	if (feature >= IPA_HW_FEATURE_MAX) {
		IPAERR("Invalid feature %u for event %u\n",
			feature, ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
		ipa_dec_client_disable_clks();
		return;
	}

	/* Feature specific handling */
	if (uc_hdlrs[feature].ipa_uc_event_hdlr)
		uc_hdlrs[feature].ipa_uc_event_hdlr
			(ipa_ctx->uc_ctx.uc_sram_mmio);

	/* General handling */
	if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
	    IPA_HW_2_CPU_EVENT_ERROR) {
		evt.raw32b = ipa_ctx->uc_ctx.uc_sram_mmio->eventParams;
		IPADBG("uC evt errorType=%u\n", evt.params.errorType);
		BUG();
	} else if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
		   IPA_HW_2_CPU_EVENT_LOG_INFO) {
		IPADBG("uC evt log info ofst=0x%x\n",
			ipa_ctx->uc_ctx.uc_sram_mmio->eventParams);
		ipa_log_evt_hdlr();
	} else {
		IPADBG("unsupported uC evt opcode=%u\n",
			ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
	}
	ipa_dec_client_disable_clks();
}
/**
 * ipa_mhi_notify_wakeup() - Schedule work to notify that data is available
 *
 * This function schedules work to send a data-available notification. If it
 * is called more than once, only one notification is sent to the MHI client
 * driver; no further notifications are sent until the IPA MHI state returns
 * to STARTED.
 */
static void ipa_mhi_notify_wakeup(void)
{
	IPA_MHI_FUNC_ENTRY();
	if (ipa_mhi_ctx->wakeup_notified) {
		IPADBG("wakeup already called\n");
		return;
	}
	queue_work(ipa_mhi_ctx->wq, &ipa_mhi_notify_wakeup_work);
	ipa_mhi_ctx->wakeup_notified = true;
	IPA_MHI_FUNC_EXIT();
}
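The queued work item runs in process context and is expected to deliver the notification to the MHI client. The worker itself is not part of this section, so the sketch below assumes hypothetical callback fields on ipa_mhi_ctx.

/* Sketch of the deferred worker, assuming hypothetical ipa_mhi_ctx fields
 * (cb_notify/cb_priv); the real ipa_mhi_notify_wakeup_work body is not
 * shown in this section. */
static void example_mhi_notify_wakeup_work(struct work_struct *work)
{
	IPA_MHI_FUNC_ENTRY();
	if (ipa_mhi_ctx->cb_notify)
		ipa_mhi_ctx->cb_notify(ipa_mhi_ctx->cb_priv);
	IPA_MHI_FUNC_EXIT();
}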
Example #7
static void ipa_uc_mhi_event_hdlr(struct IpaHwSharedMemCommonMapping_t
	*uc_sram_mmio)
{
	if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
	    IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR) {
		union IpaHwMhiChannelErrorEventData_t evt;
		IPAERR("Channel error\n");
		evt.raw32b = uc_sram_mmio->eventParams;
		IPAERR("errorType=%d channelHandle=%d reserved=%d\n",
			evt.params.errorType, evt.params.channelHandle,
			evt.params.reserved);
	} else if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
		   IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST) {
		union IpaHwMhiChannelWakeupEventData_t evt;
		IPADBG("WakeUp channel request\n");
		evt.raw32b = uc_sram_mmio->eventParams;
		IPADBG("channelHandle=%d reserved=%d\n",
			evt.params.channelHandle, evt.params.reserved);
		ipa_uc_mhi_ctx->wakeup_request_cb();
	}
}
Example #8
static int ipa_uc_mhi_response_hdlr(struct IpaHwSharedMemCommonMapping_t
	*uc_sram_mmio, u32 *uc_status)
{
	IPADBG("responseOp=%d\n", uc_sram_mmio->responseOp);
	if (uc_sram_mmio->responseOp == ipa_uc_mhi_ctx->expected_responseOp &&
	    uc_sram_mmio->responseParams ==
	    ipa_uc_mhi_ctx->expected_responseParams) {
		*uc_status = 0;
		return 0;
	}
	return -EINVAL;
}
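The handler above matches responses against expectations recorded by the sender. A hedged sketch of that pairing; the helper and its parameters are illustrative, and only the context fields plus the ipa_uc_send_cmd() argument shape are taken from this section.

/* Illustrative helper: record the expected response before sending, so
 * that ipa_uc_mhi_response_hdlr() can match the uC's reply. */
static int example_uc_mhi_send(u32 raw_cmd, u32 cmd_op,
	u32 expected_op, u32 expected_params)
{
	ipa_uc_mhi_ctx->expected_responseOp = expected_op;
	ipa_uc_mhi_ctx->expected_responseParams = expected_params;

	/* same timeout/polling arguments as ipa_uc_mhi_send_dl_ul_sync_info() */
	return ipa_uc_send_cmd(raw_cmd, cmd_op, 0, false, HZ);
}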
int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name)
{
	struct ipa_rm_resource *resource;
	int result;
	IPADBG("IPA RM ::ipa_rm_release_resource ENTER\n");

	if (!IPA_RM_RESORCE_IS_PROD(resource_name))
		return -EINVAL;
	read_lock(&ipa_rm_ctx->lock);
	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
					  resource_name,
					  &resource) != 0) {
		result = -EPERM;
		goto bail;
	}
	result = ipa_rm_resource_producer_release(
		    (struct ipa_rm_resource_prod *)resource);

bail:
	IPADBG("IPA RM ::ipa_rm_release_resource EXIT [%d]\n", result);
	read_unlock(&ipa_rm_ctx->lock);
	return result;
}
int ipa_nat_test009(int total_entries, u32 tbl_hdl, u8 sep)
{
	int ret;
	u32 rule_hdl, rule_hdl1;
	ipa_nat_ipv4_rule ipv4_rule, ipv4_rule1;

	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */

	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
	ipv4_rule.target_port = 1234;
	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
	ipv4_rule.private_port = 5678;
	ipv4_rule.protocol = IPPROTO_TCP;
	ipv4_rule.public_port = 9050;

	ipv4_rule1.target_ip = 0xC1171602; /* 193.23.22.2 */
	ipv4_rule1.target_port = 1234;
	ipv4_rule1.private_ip = 0xC2171602; /* 194.23.22.2 */
	ipv4_rule1.private_port = 5678;
	ipv4_rule1.protocol = IPPROTO_TCP;
	ipv4_rule1.public_port = 9050;

	IPADBG("%s():\n",__FUNCTION__);

	if(sep)
	{
		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
		CHECK_ERR(ret);
	}

	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
	CHECK_ERR(ret);

	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule1, &rule_hdl1);
	CHECK_ERR(ret);

	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
	CHECK_ERR(ret);

	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
	CHECK_ERR(ret);

	if (sep) {
		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
		CHECK_ERR(ret);
	}

	return 0;
}
/**
 * ipa_bridge_init()
 *
 * Return codes: 0: success, -ENOMEM: failure
 */
int ipa_bridge_init(void)
{
	int i;

	ipa_ctx->smem_pipe_mem = smem_alloc(SMEM_BAM_PIPE_MEMORY,
			IPA_SMEM_PIPE_MEM_SZ);
	if (!ipa_ctx->smem_pipe_mem) {
		IPAERR("smem alloc failed\n");
		return -ENOMEM;
	}
	IPADBG("smem_pipe_mem = %p\n", ipa_ctx->smem_pipe_mem);

	for (i = 0; i < IPA_BRIDGE_TYPE_MAX; i++)
		bridge[i].type = i;

	return 0;
}
static int ipa_disable_data_path(u32 clnt_hdl)
{
	DECLARE_COMPLETION_ONSTACK(tag_rsp);
	struct ipa_desc desc = {0};
	struct ipa_ip_packet_tag cmd;
	struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl];
	struct ipa_tree_node *node;
	int result = 0;

	if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_VIRTUAL) {
		/* IPA_HW_MODE_VIRTUAL lacks support for TAG IC & EP suspend */
		return 0;
	}

	node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
	if (!node) {
		IPAERR("failed to alloc tree node object\n");
		result = -ENOMEM;
		goto fail_alloc;
	}

	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
		ipa_write_reg(ipa_ctx->mmio,
			      IPA_ENDP_INIT_CTRL_n_OFST(clnt_hdl), 1);

		cmd.tag = (u32) &tag_rsp;

		desc.pyld = &cmd;
		desc.len = sizeof(struct ipa_ip_packet_tag);
		desc.type = IPA_IMM_CMD_DESC;
		desc.opcode = IPA_IP_PACKET_TAG;

		IPADBG("Wait on TAG %p clnt=%d\n", &tag_rsp, clnt_hdl);

		node->hdl = cmd.tag;
		mutex_lock(&ipa_ctx->lock);
		if (ipa_insert(&ipa_ctx->tag_tree, node)) {
			IPAERR("failed to add to tree\n");
			result = -EINVAL;
			mutex_unlock(&ipa_ctx->lock);
			goto fail_insert;
		}
		mutex_unlock(&ipa_ctx->lock);

		if (ipa_send_cmd(1, &desc)) {
			ipa_write_reg(ipa_ctx->mmio,
				      IPA_ENDP_INIT_CTRL_n_OFST(clnt_hdl), 0);
			IPAERR("fail to send TAG command\n");
			result = -EPERM;
			goto fail_send;
		}
		wait_for_completion(&tag_rsp);
		if (IPA_CLIENT_IS_CONS(ep->client) &&
		    ep->cfg.aggr.aggr_en == IPA_ENABLE_AGGR &&
		    ep->cfg.aggr.aggr_time_limit)
			msleep(ep->cfg.aggr.aggr_time_limit);
	}

	return 0;

fail_send:
	rb_erase(&node->node, &ipa_ctx->tag_tree);
fail_insert:
	kmem_cache_free(ipa_ctx->tree_node_cache, node);
fail_alloc:
	return result;
}
static int setup_dma_bam_bridge(enum ipa_bridge_dir dir,
			       enum ipa_bridge_type type,
			       struct ipa_sys_connect_params *props,
			       u32 *clnt_hdl)
{
	struct ipa_connect_params ipa_in_params;
	struct ipa_sps_params sps_out_params;
	int dma_a2_pipe;
	int dma_ipa_pipe;
	struct sps_pipe *pipe;
	struct sps_pipe *pipe_a2;
	struct sps_connect _connection;
	struct sps_connect *connection = &_connection;
	struct a2_mux_pipe_connection pipe_conn = {0};
	enum a2_mux_pipe_direction pipe_dir;
	u32 dma_hdl = sps_dma_get_bam_handle();
	u32 a2_hdl;
	u32 pa;
	int ret;

	memset(&ipa_in_params, 0, sizeof(ipa_in_params));
	memset(&sps_out_params, 0, sizeof(sps_out_params));

	pipe_dir = (dir == IPA_BRIDGE_DIR_UL) ? IPA_TO_A2 : A2_TO_IPA;

	ret = ipa_get_a2_mux_pipe_info(pipe_dir, &pipe_conn);
	if (ret) {
		IPAERR("ipa_get_a2_mux_pipe_info failed dir=%d type=%d\n",
				dir, type);
		goto fail_get_a2_prop;
	}

	pa = (dir == IPA_BRIDGE_DIR_UL) ? pipe_conn.dst_phy_addr :
					  pipe_conn.src_phy_addr;

	ret = sps_phy2h(pa, &a2_hdl);
	if (ret) {
		IPAERR("sps_phy2h failed (A2 BAM) %d dir=%d type=%d\n",
				ret, dir, type);
		goto fail_get_a2_prop;
	}

	ipa_get_dma_pipe_num(dir, type, &dma_a2_pipe, &dma_ipa_pipe);

	ipa_in_params.ipa_ep_cfg = props->ipa_ep_cfg;
	ipa_in_params.client = props->client;
	ipa_in_params.client_bam_hdl = dma_hdl;
	ipa_in_params.client_ep_idx = dma_ipa_pipe;
	ipa_in_params.priv = props->priv;
	ipa_in_params.notify = props->notify;
	ipa_in_params.desc_fifo_sz = ipa_get_desc_fifo_sz(dir, type);
	ipa_in_params.data_fifo_sz = ipa_get_data_fifo_sz(dir, type);

	if (ipa_connect(&ipa_in_params, &sps_out_params, clnt_hdl)) {
		IPAERR("ipa connect failed dir=%d type=%d\n", dir, type);
		ret = -EPERM;
		goto fail_get_a2_prop;
	}

	pipe = sps_alloc_endpoint();
	if (pipe == NULL) {
		IPAERR("sps_alloc_endpoint failed dir=%d type=%d\n", dir, type);
		ret = -ENOMEM;
		goto fail_sps_alloc;
	}

	memset(&_connection, 0, sizeof(_connection));
	ret = sps_get_config(pipe, connection);
	if (ret) {
		IPAERR("sps_get_config failed %d dir=%d type=%d\n", ret, dir,
				type);
		goto fail_sps_get_config;
	}

	if (dir == IPA_BRIDGE_DIR_DL) {
		connection->mode = SPS_MODE_SRC;
		connection->source = dma_hdl;
		connection->destination = sps_out_params.ipa_bam_hdl;
		connection->src_pipe_index = dma_ipa_pipe;
		connection->dest_pipe_index = sps_out_params.ipa_ep_idx;
	} else {
		connection->mode = SPS_MODE_DEST;
		connection->source = sps_out_params.ipa_bam_hdl;
		connection->destination = dma_hdl;
		connection->src_pipe_index = sps_out_params.ipa_ep_idx;
		connection->dest_pipe_index = dma_ipa_pipe;
	}

	connection->event_thresh = IPA_EVENT_THRESHOLD;
	connection->data = sps_out_params.data;
	connection->desc = sps_out_params.desc;
	connection->options = SPS_O_AUTO_ENABLE;

	ret = sps_connect(pipe, connection);
	if (ret) {
		IPAERR("sps_connect failed %d dir=%d type=%d\n", ret, dir,
				type);
		goto fail_sps_get_config;
	}

	if (dir == IPA_BRIDGE_DIR_DL) {
		bridge[type].pipe[IPA_DL_TO_IPA].pipe = pipe;
		bridge[type].pipe[IPA_DL_TO_IPA].ipa_facing = true;
		bridge[type].pipe[IPA_DL_TO_IPA].valid = true;
	} else {
		bridge[type].pipe[IPA_UL_FROM_IPA].pipe = pipe;
		bridge[type].pipe[IPA_UL_FROM_IPA].ipa_facing = true;
		bridge[type].pipe[IPA_UL_FROM_IPA].valid = true;
	}

	IPADBG("dir=%d type=%d (ipa) src(0x%x:%u)->dst(0x%x:%u)\n", dir, type,
			connection->source, connection->src_pipe_index,
			connection->destination, connection->dest_pipe_index);

	pipe_a2 = sps_alloc_endpoint();
	if (pipe_a2 == NULL) {
		IPAERR("sps_alloc_endpoint failed2 dir=%d type=%d\n", dir,
				type);
		ret = -ENOMEM;
		goto fail_sps_alloc_a2;
	}

	memset(&_connection, 0, sizeof(_connection));
	ret = sps_get_config(pipe_a2, connection);
	if (ret) {
		IPAERR("sps_get_config failed2 %d dir=%d type=%d\n", ret, dir,
				type);
		goto fail_sps_get_config_a2;
	}

	if (dir == IPA_BRIDGE_DIR_DL) {
		connection->mode = SPS_MODE_DEST;
		connection->source = a2_hdl;
		connection->destination = dma_hdl;
		connection->src_pipe_index = ipa_get_a2_pipe_num(dir, type);
		connection->dest_pipe_index = dma_a2_pipe;
	} else {
		connection->mode = SPS_MODE_SRC;
		connection->source = dma_hdl;
		connection->destination = a2_hdl;
		connection->src_pipe_index = dma_a2_pipe;
		connection->dest_pipe_index = ipa_get_a2_pipe_num(dir, type);
	}

	connection->event_thresh = IPA_EVENT_THRESHOLD;

	ret = ipa_setup_a2_dma_fifos(dir, type, &connection->desc,
				     &connection->data);
	if (ret) {
		IPAERR("fail to setup A2-DMA FIFOs dir=%d type=%d\n",
				dir, type);
		goto fail_sps_get_config_a2;
	}

	connection->options = SPS_O_AUTO_ENABLE;

	ret = sps_connect(pipe_a2, connection);
	if (ret) {
		IPAERR("sps_connect failed2 %d dir=%d type=%d\n", ret, dir,
				type);
		goto fail_sps_get_config_a2;
	}

	if (dir == IPA_BRIDGE_DIR_DL) {
		bridge[type].pipe[IPA_DL_FROM_A2].pipe = pipe_a2;
		bridge[type].pipe[IPA_DL_FROM_A2].valid = true;
	} else {
		bridge[type].pipe[IPA_UL_TO_A2].pipe = pipe_a2;
		bridge[type].pipe[IPA_UL_TO_A2].valid = true;
	}

	IPADBG("dir=%d type=%d (a2) src(0x%x:%u)->dst(0x%x:%u)\n", dir, type,
			connection->source, connection->src_pipe_index,
			connection->destination, connection->dest_pipe_index);

	return 0;

fail_sps_get_config_a2:
	sps_free_endpoint(pipe_a2);
fail_sps_alloc_a2:
	sps_disconnect(pipe);
fail_sps_get_config:
	sps_free_endpoint(pipe);
fail_sps_alloc:
	ipa_disconnect(*clnt_hdl);
fail_get_a2_prop:
	return ret;
}
int ipa_setup_a2_dma_fifos(enum ipa_bridge_dir dir,
		enum ipa_bridge_type type,
		struct sps_mem_buffer *desc,
		struct sps_mem_buffer *data)
{
	int ret;

	if (type == IPA_BRIDGE_TYPE_EMBEDDED) {
		if (dir == IPA_BRIDGE_DIR_UL) {
			desc->base = ipa_ctx->smem_pipe_mem +
				IPA_SMEM_UL_DESC_FIFO_OFST;
			desc->phys_base = smem_virt_to_phys(desc->base);
			desc->size = ipa_get_desc_fifo_sz(dir, type);
			data->base = ipa_ctx->smem_pipe_mem +
				IPA_SMEM_UL_DATA_FIFO_OFST;
			data->phys_base = smem_virt_to_phys(data->base);
			data->size = ipa_get_data_fifo_sz(dir, type);
		} else {
			desc->base = ipa_ctx->smem_pipe_mem +
				IPA_SMEM_DL_DESC_FIFO_OFST;
			desc->phys_base = smem_virt_to_phys(desc->base);
			desc->size = ipa_get_desc_fifo_sz(dir, type);
			data->base = ipa_ctx->smem_pipe_mem +
				IPA_SMEM_DL_DATA_FIFO_OFST;
			data->phys_base = smem_virt_to_phys(data->base);
			data->size = ipa_get_data_fifo_sz(dir, type);
		}
	} else {
		if (dir == IPA_BRIDGE_DIR_UL) {
			ret = sps_setup_bam2bam_fifo(data,
					IPA_OCIMEM_UL_DATA_FIFO_OFST,
					ipa_get_data_fifo_sz(dir, type), 1);
			if (ret) {
				IPAERR("DAFIFO setup fail %d dir %d type %d\n",
						ret, dir, type);
				return ret;
			}

			ret = sps_setup_bam2bam_fifo(desc,
					IPA_OCIMEM_UL_DESC_FIFO_OFST,
					ipa_get_desc_fifo_sz(dir, type), 1);
			if (ret) {
				IPAERR("DEFIFO setup fail %d dir %d type %d\n",
						ret, dir, type);
				return ret;
			}
		} else {
			ret = sps_setup_bam2bam_fifo(data,
					IPA_OCIMEM_DL_DATA_FIFO_OFST,
					ipa_get_data_fifo_sz(dir, type), 1);
			if (ret) {
				IPAERR("DAFIFO setup fail %d dir %d type %d\n",
						ret, dir, type);
				return ret;
			}

			ret = sps_setup_bam2bam_fifo(desc,
					IPA_OCIMEM_DL_DESC_FIFO_OFST,
					ipa_get_desc_fifo_sz(dir, type), 1);
			if (ret) {
				IPAERR("DEFIFO setup fail %d dir %d type %d\n",
						ret, dir, type);
				return ret;
			}
		}
	}

	IPADBG("dir=%d type=%d Dpa=%x Dsz=%u Dva=%p dpa=%x dsz=%u dva=%p\n",
			dir, type, data->phys_base, data->size, data->base,
			desc->phys_base, desc->size, desc->base);

	return 0;
}
/**
 * ipa_disconnect() - low-level IPA client disconnect
 * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
 *
 * Should be called by the driver of the peripheral that wants to disconnect
 * from IPA in BAM-BAM mode. This API expects the caller to take
 * responsibility for freeing any needed headers, routing and filtering
 * tables, and rules.
 *
 * Returns:	0 on success, negative on failure
 *
 * Note:	Should not be called from atomic context
 */
int ipa_disconnect(u32 clnt_hdl)
{
	int result;
	struct ipa_ep_context *ep;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	ep = &ipa_ctx->ep[clnt_hdl];

	if (ep->suspended) {
		ipa_inc_client_enable_clks();
		ep->suspended = false;
	}

	result = ipa_disable_data_path(clnt_hdl);
	if (result) {
		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
				clnt_hdl);
		return -EPERM;
	}

	result = sps_disconnect(ep->ep_hdl);
	if (result) {
		IPAERR("SPS disconnect failed.\n");
		return -EPERM;
	}

	if (!ep->desc_fifo_client_allocated &&
	     ep->connect.desc.base) {
		if (!ep->desc_fifo_in_pipe_mem)
			dma_free_coherent(NULL,
					  ep->connect.desc.size,
					  ep->connect.desc.base,
					  ep->connect.desc.phys_base);
		else
			ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
					  ep->connect.desc.size);
	}

	if (!ep->data_fifo_client_allocated &&
	     ep->connect.data.base) {
		if (!ep->data_fifo_in_pipe_mem)
			dma_free_coherent(NULL,
					  ep->connect.data.size,
					  ep->connect.data.base,
					  ep->connect.data.phys_base);
		else
			ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
					  ep->connect.data.size);
	}

	result = sps_free_endpoint(ep->ep_hdl);
	if (result) {
		IPAERR("SPS de-alloc EP failed.\n");
		return -EPERM;
	}

	memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));

	ipa_dec_client_disable_clks();

	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);

	return 0;
}
/**
 * ipa_mhi_suspend() - Suspend MHI accelerated channels
 * @force:
 *	false: if data is pending in IPA, MHI channels will not be
 *		suspended and the function will fail.
 *	true:  if data is pending in IPA, make sure no further access from
 *		IPA to PCIe is possible. In this case suspend cannot fail.
 *
 * This function is called by the MHI client driver on MHI suspend, after the
 * MHI channel has been started. When it returns, the device can move to the
 * M1/M2/M3/D3cold state. The function does the following:
 *	- Sends a command to the uC to suspend the corresponding MHI channel
 *	- Makes sure no further access is possible from IPA to PCIe
 *	- Releases MHI_PROD in IPA RM
 *
 * Return codes: 0	  : success
 *		 negative : error
 */
int ipa_mhi_suspend(bool force)
{
	int res;
	bool bam_empty;
	bool force_clear = false;

	IPA_MHI_FUNC_ENTRY();

	if (unlikely(!ipa_mhi_ctx)) {
		IPA_MHI_ERR("IPA MHI was not initialized\n");
		return -EINVAL;
	}

	res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPEND_IN_PROGRESS);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
		return res;
	}

	res = ipa_mhi_suspend_ul_channels();
	if (res) {
		IPA_MHI_ERR("ipa_mhi_suspend_ul_channels failed %d\n", res);
		goto fail_suspend_ul_channel;
	}

	bam_empty = ipa_mhi_wait_for_bam_empty_timeout(
		IPA_MHI_BAM_EMPTY_TIMEOUT_MSEC);
	if (!bam_empty) {
		if (force) {
			res = ipa_mhi_enable_force_clear(
				ipa_mhi_ctx->qmi_req_id, false);
			if (res) {
				IPA_MHI_ERR("failed to enable force clear\n");
				BUG();
				return res;
			}
			force_clear = true;
			IPA_MHI_DBG("force clear datapath enabled\n");

			bam_empty = ipa_mhi_wait_for_bam_empty_timeout(
				IPA_MHI_BAM_EMPTY_TIMEOUT_MSEC);
			IPADBG("bam_empty=%d\n", bam_empty);

		} else {
			IPA_MHI_DBG("BAM not empty\n");
			res = -EAGAIN;
			goto fail_suspend_ul_channel;
		}
	}

	res = ipa_mhi_stop_event_update_ul_channels();
	if (res) {
		IPA_MHI_ERR("ipa_mhi_stop_event_update_ul_channels failed %d\n",
			res);
		goto fail_suspend_ul_channel;
	}

	/*
	 * If the BAM is not empty, hold the IPA clocks and release them only
	 * after all IPA RM resources are released, to make sure the tag
	 * process will not start.
	 */
	if (!bam_empty)
		ipa_inc_client_enable_clks();

	IPA_MHI_DBG("release prod\n");
	res = ipa_mhi_release_prod();
	if (res) {
		IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
		goto fail_release_prod;
	}

	IPA_MHI_DBG("wait for cons release\n");
	res = ipa_mhi_wait_for_cons_release();
	if (res) {
		IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed %d\n", res);
		goto fail_release_cons;
	}

	usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);

	res = ipa_mhi_suspend_dl_channels();
	if (res) {
		IPA_MHI_ERR("ipa_mhi_suspend_dl_channels failed %d\n", res);
		goto fail_suspend_dl_channel;
	}

	res = ipa_mhi_stop_event_update_dl_channels();
	if (res) {
		IPA_MHI_ERR("failed to stop event update on DL %d\n", res);
		goto fail_stop_event_update_dl_channel;
	}

	if (force_clear) {
		res = ipa_mhi_disable_force_clear(ipa_mhi_ctx->qmi_req_id);
		if (res) {
			IPA_MHI_ERR("failed to disable force clear\n");
			BUG();
			return res;
		}
		IPA_MHI_DBG("force clear datapath disabled\n");
		ipa_mhi_ctx->qmi_req_id++;
	}

	if (!bam_empty) {
		ipa_ctx->tag_process_before_gating = false;
		ipa_dec_client_disable_clks();
	}

	res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
	if (res) {
		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
		goto fail_release_cons;
	}

	IPA_MHI_FUNC_EXIT();
	return 0;

fail_stop_event_update_dl_channel:
	ipa_mhi_resume_dl_channels(true);
fail_suspend_dl_channel:
fail_release_cons:
	ipa_mhi_request_prod();
fail_release_prod:
fail_suspend_ul_channel:
	ipa_mhi_resume_ul_channels(true);
	ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
	return res;
}
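Given the @force contract above, a non-forced suspend returns -EAGAIN while data is pending in IPA, so a caller can fall back to a forced suspend. A minimal sketch; the wrapper itself is illustrative.

/* Sketch of a caller honoring the force semantics documented above. */
static int example_mhi_suspend(void)
{
	int res;

	res = ipa_mhi_suspend(false);
	if (res == -EAGAIN) {
		/* data still pending in IPA; retry with force-clear */
		res = ipa_mhi_suspend(true);
	}
	return res;
}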
/**
 * ipa_connect() - low-level IPA client connect
 * @in:	[in] input parameters from client
 * @sps:	[out] sps output from IPA needed by client for sps_connect
 * @clnt_hdl:	[out] opaque client handle assigned by IPA to client
 *
 * Should be called by the driver of the peripheral that wants to connect to
 * IPA in BAM-BAM mode; these peripherals are A2, USB and HSIC. This API
 * expects the caller to take responsibility for adding any needed headers,
 * routing and filtering tables, and rules.
 *
 * Returns:	0 on success, negative on failure
 *
 * Note:	Should not be called from atomic context
 */
int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
		u32 *clnt_hdl)
{
	int ipa_ep_idx;
	int result = -EFAULT;
	struct ipa_ep_context *ep;

	ipa_inc_client_enable_clks();

	if (in == NULL || sps == NULL || clnt_hdl == NULL ||
	    in->client >= IPA_CLIENT_MAX ||
	    in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
		IPAERR("bad parm.\n");
		result = -EINVAL;
		goto fail;
	}

	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, in->client);
	if (ipa_ep_idx == -1) {
		IPAERR("fail to alloc EP.\n");
		goto fail;
	}

	ep = &ipa_ctx->ep[ipa_ep_idx];

	if (ep->valid) {
		IPAERR("EP already allocated.\n");
		goto fail;
	}

	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
	ipa_enable_data_path(ipa_ep_idx);

	ep->valid = 1;
	ep->client = in->client;
	ep->client_notify = in->notify;
	ep->priv = in->priv;

	if (ipa_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
		IPAERR("fail to configure EP.\n");
		goto ipa_cfg_ep_fail;
	}

	result = ipa_connect_configure_sps(in, ep, ipa_ep_idx);
	if (result) {
		IPAERR("fail to configure SPS.\n");
		goto ipa_cfg_ep_fail;
	}

	if (in->desc.base == NULL) {
		result = ipa_connect_allocate_fifo(in, &ep->connect.desc,
						  &ep->desc_fifo_in_pipe_mem,
						  &ep->desc_fifo_pipe_mem_ofst,
						  in->desc_fifo_sz, ipa_ep_idx);
		if (result) {
			IPAERR("fail to allocate DESC FIFO.\n");
			goto desc_mem_alloc_fail;
		}
	} else {
		IPADBG("client allocated DESC FIFO\n");
		ep->connect.desc = in->desc;
		ep->desc_fifo_client_allocated = 1;
	}
	IPADBG("Descriptor FIFO pa=0x%x, size=%d\n", ep->connect.desc.phys_base,
	       ep->connect.desc.size);

	if (in->data.base == NULL) {
		result = ipa_connect_allocate_fifo(in, &ep->connect.data,
						&ep->data_fifo_in_pipe_mem,
						&ep->data_fifo_pipe_mem_ofst,
						in->data_fifo_sz, ipa_ep_idx);
		if (result) {
			IPAERR("fail to allocate DATA FIFO.\n");
			goto data_mem_alloc_fail;
		}
	} else {
		IPADBG("client allocated DATA FIFO\n");
		ep->connect.data = in->data;
		ep->data_fifo_client_allocated = 1;
	}
	IPADBG("Data FIFO pa=0x%x, size=%d\n", ep->connect.data.phys_base,
	       ep->connect.data.size);

	ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
	ep->connect.options = SPS_O_AUTO_ENABLE;    /* BAM-to-BAM */

	if (IPA_CLIENT_IS_CONS(in->client))
		ep->connect.options |= SPS_O_NO_DISABLE;

	result = sps_connect(ep->ep_hdl, &ep->connect);
	if (result) {
		IPAERR("sps_connect fails.\n");
		goto sps_connect_fail;
	}

	sps->ipa_bam_hdl = ipa_ctx->bam_handle;
	sps->ipa_ep_idx = ipa_ep_idx;
	*clnt_hdl = ipa_ep_idx;
	memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
	memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));

	ipa_program_holb(ep, ipa_ep_idx);

	IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);

	return 0;

sps_connect_fail:
	if (!ep->data_fifo_in_pipe_mem)
		dma_free_coherent(NULL,
				  ep->connect.data.size,
				  ep->connect.data.base,
				  ep->connect.data.phys_base);
	else
		ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
				  ep->connect.data.size);

data_mem_alloc_fail:
	if (!ep->desc_fifo_in_pipe_mem)
		dma_free_coherent(NULL,
				  ep->connect.desc.size,
				  ep->connect.desc.base,
				  ep->connect.desc.phys_base);
	else
		ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
				  ep->connect.desc.size);

desc_mem_alloc_fail:
	sps_free_endpoint(ep->ep_hdl);
ipa_cfg_ep_fail:
	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
fail:
	ipa_dec_client_disable_clks();
	return result;
}