/**
 * ipa_suspend() - low-level IPA client suspend
 * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
 *
 * Called by a peripheral driver to suspend its IPA connection. The pipe
 * stays connected; only the clock vote taken at connect time is dropped,
 * which allows IPA clocks to be turned off once no other client is active.
 *
 * Returns:	0 on success, negative on failure
 *
 * Note:	Should not be called from atomic context
 */
int ipa_suspend(u32 clnt_hdl)
{
	struct ipa_ep_context *ep;
	bool drain_aggr;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm. clnt_hdl %d\n", clnt_hdl);
		return -EINVAL;
	}

	ep = &ipa_ctx->ep[clnt_hdl];

	if (ep->suspended) {
		IPAERR("EP already suspended. clnt_hdl %d\n", clnt_hdl);
		return -EPERM;
	}

	/*
	 * A consumer pipe with aggregation enabled may still hold an open
	 * aggregation frame; wait out the aggregation timer so it is closed
	 * before the clock vote is released.
	 */
	drain_aggr = IPA_CLIENT_IS_CONS(ep->client) &&
		     ep->cfg.aggr.aggr_en == IPA_ENABLE_AGGR &&
		     ep->cfg.aggr.aggr_time_limit != 0;
	if (drain_aggr)
		msleep(ep->cfg.aggr.aggr_time_limit);

	ipa_dec_client_disable_clks();
	ep->suspended = true;

	return 0;
}
/*
 * ipa_disable_data_path() - stop traffic through an endpoint before teardown
 * @clnt_hdl: index of the endpoint in ipa_ctx->ep[]
 *
 * NOTE(review): this file contains a second static definition of
 * ipa_disable_data_path() further down; two definitions of the same
 * static function in one translation unit will not compile, so one of
 * them must be removed or renamed.
 *
 * Returns 0 (the register write and the delays have no failure path here).
 */
static int ipa_disable_data_path(u32 clnt_hdl)
{
	struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl];

	if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_VIRTUAL) {
		/* IPA_HW_MODE_VIRTUAL lacks support for TAG IC & EP suspend */
		return 0;
	}

	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
		/* Suspend the endpoint via its CTRL register, then give
		 * in-flight packets time to flush out of the pipe. */
		ipa_write_reg(ipa_ctx->mmio,
				IPA_ENDP_INIT_CTRL_n_OFST(clnt_hdl), 1);
		udelay(IPA_PKT_FLUSH_TO_US);
		/* Let any open aggregation frame close before teardown. */
		if (IPA_CLIENT_IS_CONS(ep->client) &&
				ep->cfg.aggr.aggr_en == IPA_ENABLE_AGGR &&
				ep->cfg.aggr.aggr_time_limit)
			msleep(ep->cfg.aggr.aggr_time_limit);
	}

	return 0;
}
/*
 * ipa_connect_configure_sps() - allocate an SPS endpoint for @ep and fill in
 * the BAM-to-BAM connect parameters implied by the client's direction.
 * @in:		connection parameters supplied by the client
 * @ep:		IPA endpoint context being connected
 * @ipa_ep_idx:	index of @ep on the IPA BAM
 *
 * For a consumer client IPA is the source of the SPS connection; for a
 * producer client IPA is the destination.
 *
 * Returns 0 on success, -EFAULT on SPS failure. On failure no SPS endpoint
 * is left allocated in @ep.
 */
static int ipa_connect_configure_sps(const struct ipa_connect_params *in,
				     struct ipa_ep_context *ep, int ipa_ep_idx)
{
	int result;

	/* Default Config */
	ep->ep_hdl = sps_alloc_endpoint();
	if (ep->ep_hdl == NULL) {
		IPAERR("SPS EP alloc failed EP.\n");
		return -EFAULT;
	}

	result = sps_get_config(ep->ep_hdl, &ep->connect);
	if (result) {
		IPAERR("fail to get config.\n");
		/* Don't leak the endpoint: the caller's failure path for
		 * this function does not free it. */
		sps_free_endpoint(ep->ep_hdl);
		ep->ep_hdl = NULL;
		return -EFAULT;
	}

	/* Specific Config: the client's direction decides which side is IPA */
	if (IPA_CLIENT_IS_CONS(in->client)) {
		ep->connect.mode = SPS_MODE_SRC;
		ep->connect.source = ipa_ctx->bam_handle;
		ep->connect.destination = in->client_bam_hdl;
		ep->connect.src_pipe_index = ipa_ep_idx;
		ep->connect.dest_pipe_index = in->client_ep_idx;
	} else {
		ep->connect.mode = SPS_MODE_DEST;
		ep->connect.source = in->client_bam_hdl;
		ep->connect.destination = ipa_ctx->bam_handle;
		ep->connect.src_pipe_index = in->client_ep_idx;
		ep->connect.dest_pipe_index = ipa_ep_idx;
	}

	return 0;
}
/**
 * ipa_connect() - low-level IPA client connect
 * @in:	[in] input parameters from client
 * @sps:	[out] sps output from IPA needed by client for sps_connect
 * @clnt_hdl:	[out] opaque client handle assigned by IPA to client
 *
 * Should be called by the driver of the peripheral that wants to connect to
 * IPA in BAM-BAM mode. these peripherals are A2, USB and HSIC. this api
 * expects caller to take responsibility to add any needed headers, routing
 * and filtering tables and rules as needed.
 *
 * A FIFO supplied by the client (in->desc.base / in->data.base non-NULL)
 * remains owned by the client; this function only frees FIFOs it allocated
 * itself, both on the error paths here and later in ipa_disconnect().
 *
 * Returns:	0 on success, negative on failure
 *
 * Note:	Should not be called from atomic context
 */
int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
		u32 *clnt_hdl)
{
	int ipa_ep_idx;
	int result = -EFAULT;
	struct ipa_ep_context *ep;

	/* Hold a clock vote for the lifetime of the connection; dropped on
	 * failure below, or later by ipa_suspend()/ipa_disconnect(). */
	ipa_inc_client_enable_clks();

	if (in == NULL || sps == NULL || clnt_hdl == NULL ||
	    in->client >= IPA_CLIENT_MAX ||
	    in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
		IPAERR("bad parm.\n");
		result = -EINVAL;
		goto fail;
	}

	ipa_ep_idx = ipa_get_ep_mapping(ipa_ctx->mode, in->client);
	if (ipa_ep_idx == -1) {
		IPAERR("fail to alloc EP.\n");
		goto fail;
	}

	ep = &ipa_ctx->ep[ipa_ep_idx];

	if (ep->valid) {
		IPAERR("EP already allocated.\n");
		goto fail;
	}

	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
	ipa_enable_data_path(ipa_ep_idx);

	ep->valid = 1;
	ep->client = in->client;
	ep->client_notify = in->notify;
	ep->priv = in->priv;

	if (ipa_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
		IPAERR("fail to configure EP.\n");
		goto ipa_cfg_ep_fail;
	}

	result = ipa_connect_configure_sps(in, ep, ipa_ep_idx);
	if (result) {
		IPAERR("fail to configure SPS.\n");
		goto ipa_cfg_ep_fail;
	}

	/* Descriptor FIFO: allocate one unless the client brought its own. */
	if (in->desc.base == NULL) {
		result = ipa_connect_allocate_fifo(in, &ep->connect.desc,
						  &ep->desc_fifo_in_pipe_mem,
						  &ep->desc_fifo_pipe_mem_ofst,
						  in->desc_fifo_sz, ipa_ep_idx);
		if (result) {
			IPAERR("fail to allocate DESC FIFO.\n");
			goto desc_mem_alloc_fail;
		}
	} else {
		IPADBG("client allocated DESC FIFO\n");
		ep->connect.desc = in->desc;
		ep->desc_fifo_client_allocated = 1;
	}
	IPADBG("Descriptor FIFO pa=0x%x, size=%d\n", ep->connect.desc.phys_base,
	       ep->connect.desc.size);

	/* Data FIFO: same ownership rule as the descriptor FIFO. */
	if (in->data.base == NULL) {
		result = ipa_connect_allocate_fifo(in, &ep->connect.data,
						&ep->data_fifo_in_pipe_mem,
						&ep->data_fifo_pipe_mem_ofst,
						in->data_fifo_sz, ipa_ep_idx);
		if (result) {
			IPAERR("fail to allocate DATA FIFO.\n");
			goto data_mem_alloc_fail;
		}
	} else {
		IPADBG("client allocated DATA FIFO\n");
		ep->connect.data = in->data;
		ep->data_fifo_client_allocated = 1;
	}
	IPADBG("Data FIFO pa=0x%x, size=%d\n", ep->connect.data.phys_base,
	       ep->connect.data.size);

	ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
	ep->connect.options = SPS_O_AUTO_ENABLE;    /* BAM-to-BAM */

	if (IPA_CLIENT_IS_CONS(in->client))
		ep->connect.options |= SPS_O_NO_DISABLE;

	result = sps_connect(ep->ep_hdl, &ep->connect);
	if (result) {
		IPAERR("sps_connect fails.\n");
		goto sps_connect_fail;
	}

	sps->ipa_bam_hdl = ipa_ctx->bam_handle;
	sps->ipa_ep_idx = ipa_ep_idx;
	*clnt_hdl = ipa_ep_idx;
	memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
	memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));

	ipa_program_holb(ep, ipa_ep_idx);

	IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);

	return 0;

sps_connect_fail:
	/* Only release FIFOs this driver allocated; a client-supplied FIFO
	 * stays the client's to free (same rule as ipa_disconnect()). */
	if (!ep->data_fifo_client_allocated) {
		if (!ep->data_fifo_in_pipe_mem)
			dma_free_coherent(NULL,
					  ep->connect.data.size,
					  ep->connect.data.base,
					  ep->connect.data.phys_base);
		else
			ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
					  ep->connect.data.size);
	}

data_mem_alloc_fail:
	if (!ep->desc_fifo_client_allocated) {
		if (!ep->desc_fifo_in_pipe_mem)
			dma_free_coherent(NULL,
					  ep->connect.desc.size,
					  ep->connect.desc.base,
					  ep->connect.desc.phys_base);
		else
			ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
					  ep->connect.desc.size);
	}

desc_mem_alloc_fail:
	sps_free_endpoint(ep->ep_hdl);
ipa_cfg_ep_fail:
	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
fail:
	ipa_dec_client_disable_clks();
	return result;
}
/**
 * ipa_disconnect() - low-level IPA client disconnect
 * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
 *
 * Should be called by the driver of the peripheral that wants to disconnect
 * from IPA in BAM-BAM mode. this api expects caller to take responsibility to
 * free any needed headers, routing and filtering tables and rules as needed.
 *
 * Returns:	0 on success, negative on failure
 *
 * Note:	Should not be called from atomic context
 */
int ipa_disconnect(u32 clnt_hdl)
{
	int result;
	struct ipa_ep_context *ep;

	if (clnt_hdl >= IPA_NUM_PIPES || ipa_ctx->ep[clnt_hdl].valid == 0) {
		IPAERR("bad parm.\n");
		return -EINVAL;
	}

	ep = &ipa_ctx->ep[clnt_hdl];

	/* Re-take the clock vote ipa_suspend() dropped so teardown runs with
	 * clocks on; the vote is released at the end of this function. */
	if (ep->suspended) {
		ipa_inc_client_enable_clks();
		ep->suspended = false;
	}

	result = ipa_disable_data_path(clnt_hdl);
	if (result) {
		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
		       clnt_hdl);
		return -EPERM;
	}

	result = sps_disconnect(ep->ep_hdl);
	if (result) {
		IPAERR("SPS disconnect failed.\n");
		return -EPERM;
	}

	/* Free the FIFOs only if this driver allocated them at connect
	 * time; client-supplied FIFOs remain the client's to free. */
	if (!ep->desc_fifo_client_allocated &&
	    ep->connect.desc.base) {
		if (!ep->desc_fifo_in_pipe_mem)
			dma_free_coherent(NULL,
					  ep->connect.desc.size,
					  ep->connect.desc.base,
					  ep->connect.desc.phys_base);
		else
			ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
					  ep->connect.desc.size);
	}

	if (!ep->data_fifo_client_allocated &&
	    ep->connect.data.base) {
		if (!ep->data_fifo_in_pipe_mem)
			dma_free_coherent(NULL,
					  ep->connect.data.size,
					  ep->connect.data.base,
					  ep->connect.data.phys_base);
		else
			ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
					  ep->connect.data.size);
	}

	result = sps_free_endpoint(ep->ep_hdl);
	if (result) {
		IPAERR("SPS de-alloc EP failed.\n");
		return -EPERM;
	}

	/* NOTE(review): assumes ep->cmd / ep->dma_addr were set up elsewhere
	 * for consumer pipes -- confirm against the connect path. */
	if (IPA_CLIENT_IS_CONS(ep->client)) {
		dma_unmap_single(NULL, ep->dma_addr,
				 sizeof(struct ipa_ip_packet_init),
				 DMA_TO_DEVICE);
		kfree(ep->cmd);
	}

	/* NOTE(review): re-enabling the data path here presumably clears the
	 * suspend bit set by ipa_disable_data_path() before the EP context is
	 * recycled -- confirm against ipa_enable_data_path(). */
	ipa_enable_data_path(clnt_hdl);
	memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));

	ipa_dec_client_disable_clks();

	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);

	return 0;
}
/*
 * ipa_disable_data_path() - quiesce an endpoint with the TAG immediate
 * command before teardown.
 * @clnt_hdl: index of the endpoint in ipa_ctx->ep[]
 *
 * On IPA HW v1.1 the endpoint is suspended via its CTRL register and an
 * IP_PACKET_TAG immediate command carrying the address of an on-stack
 * completion is sent; the function then blocks until that completion fires
 * (presumably signaled by the command-response path that looks the tag up
 * in ipa_ctx->tag_tree -- not visible here, confirm).
 *
 * NOTE(review): this file contains an earlier static definition of
 * ipa_disable_data_path(); duplicate static definitions of the same name
 * in one translation unit will not compile, so one of the two must be
 * removed or renamed.
 *
 * NOTE(review): "cmd.tag = (u32) &tag_rsp" truncates the pointer on 64-bit
 * builds -- confirm this driver targets 32-bit only.
 *
 * Returns 0 on success or when no flush is needed, negative errno on
 * failure.
 */
static int ipa_disable_data_path(u32 clnt_hdl)
{
	DECLARE_COMPLETION_ONSTACK(tag_rsp);
	struct ipa_desc desc = {0};
	struct ipa_ip_packet_tag cmd;
	struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl];
	struct ipa_tree_node *node;
	int result = 0;

	if (ipa_ctx->ipa_hw_mode == IPA_HW_MODE_VIRTUAL) {
		/* IPA_HW_MODE_VIRTUAL lacks support for TAG IC & EP suspend */
		return 0;
	}

	/* Only HW v1.1 uses the TAG flush. Checking before the allocation
	 * also fixes a leak: the original allocated the tree node first and
	 * never freed it on other HW types. */
	if (ipa_ctx->ipa_hw_type != IPA_HW_v1_1)
		return 0;

	node = kmem_cache_zalloc(ipa_ctx->tree_node_cache, GFP_KERNEL);
	if (!node) {
		IPAERR("failed to alloc tree node object\n");
		result = -ENOMEM;
		goto fail_alloc;
	}

	/* Suspend the endpoint before flushing it with the TAG command. */
	ipa_write_reg(ipa_ctx->mmio,
		      IPA_ENDP_INIT_CTRL_n_OFST(clnt_hdl), 1);

	/* The completion's address doubles as the unique tag value. */
	cmd.tag = (u32) &tag_rsp;

	desc.pyld = &cmd;
	desc.len = sizeof(struct ipa_ip_packet_tag);
	desc.type = IPA_IMM_CMD_DESC;
	desc.opcode = IPA_IP_PACKET_TAG;

	IPADBG("Wait on TAG %p clnt=%d\n", &tag_rsp, clnt_hdl);

	/* Publish the tag so the response handler can find it. */
	node->hdl = cmd.tag;
	mutex_lock(&ipa_ctx->lock);
	if (ipa_insert(&ipa_ctx->tag_tree, node)) {
		IPAERR("failed to add to tree\n");
		result = -EINVAL;
		mutex_unlock(&ipa_ctx->lock);
		goto fail_insert;
	}
	mutex_unlock(&ipa_ctx->lock);

	if (ipa_send_cmd(1, &desc)) {
		/* Undo the suspend so the pipe is not left wedged. */
		ipa_write_reg(ipa_ctx->mmio,
			      IPA_ENDP_INIT_CTRL_n_OFST(clnt_hdl), 0);
		IPAERR("fail to send TAG command\n");
		result = -EPERM;
		goto fail_send;
	}

	/* Block until the tagged packet drains through the pipe. */
	wait_for_completion(&tag_rsp);

	/* Let any open aggregation frame close before teardown. */
	if (IPA_CLIENT_IS_CONS(ep->client) &&
	    ep->cfg.aggr.aggr_en == IPA_ENABLE_AGGR &&
	    ep->cfg.aggr.aggr_time_limit)
		msleep(ep->cfg.aggr.aggr_time_limit);

	return 0;

fail_send:
	rb_erase(&node->node, &ipa_ctx->tag_tree);
fail_insert:
	kmem_cache_free(ipa_ctx->tree_node_cache, node);
fail_alloc:
	return result;
}