Example #1
static int hb_voltage_change(unsigned int freq)
{
	u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
	struct mbox_client cl;
	int ret = -ETIMEDOUT;
	struct mbox_chan *chan;

	cl.rx_callback = NULL;
	cl.tx_done = NULL;
	cl.tx_block = true;
	cl.tx_tout = 1000; /* 1 sec */
	cl.link_data = NULL;
	cl.knows_txdone = false;
	cl.chan_name = "pl320:A9_to_M3";

	chan = mbox_request_channel(&cl);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (mbox_send_message(chan, (void *)msg) >= 0)
		ret = msg[1]; /* PL320 updates buffer with FIFO after ACK */

	mbox_free_channel(chan);

	return ret;
}
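This snippet predates the mainline client API: the channel is picked via cl.chan_name and mbox_request_channel() takes no index argument. Against the current linux/mailbox_client.h interface, a minimal blocking sender might look like the sketch below; the device pointer, channel index 0, and u32 message layout are assumptions, not part of the example above.

#include <linux/mailbox_client.h>
#include <linux/err.h>

/* Minimal sketch of a synchronous send with the current mailbox API.
 * Assumes the channel is the first entry of the device's "mboxes" DT
 * property; the message format is illustrative.
 */
static int example_blocking_send(struct device *dev, u32 *msg)
{
	struct mbox_client cl = {
		.dev = dev,
		.tx_block = true,	/* sleep until TX completes */
		.tx_tout = 1000,	/* give up after 1000 ms */
	};
	struct mbox_chan *chan;
	int ret;

	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, msg);	/* negative on error */
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}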
Example #2
/**
 * wkup_m3_ping - Send a dummy msg to wkup_m3 to tell it to check IPC regs
 *
 * Returns the result of sending mbox msg or -EIO if no mbox handle is present
 */
int wkup_m3_ping(void)
{
	int ret;
	mbox_msg_t dummy_msg = 0;

	if (!m3_rproc_static->mbox) {
		dev_err(&m3_rproc_static->pdev->dev,
			"No IPC channel to communicate with wkup_m3!\n");
		return -EIO;
	}

	/*
	 * Write a dummy message to the mailbox in order to trigger the RX
	 * interrupt to alert the M3 that data is available in the IPC
	 * registers. We must enable the IRQ here and disable it again in
	 * the RX callback to avoid multiple interrupts being received
	 * by the CM3.
	 */
	ret = mbox_send_message(m3_rproc_static->mbox, (void *)dummy_msg);
	if (ret < 0) {
		pr_err("%s: mbox_send_message() failed: %d\n", __func__, ret);
		return ret;
	}

	mbox_client_txdone(m3_rproc_static->mbox, 0);
	return 0;
}
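mbox_client_txdone() is only valid for a client registered with knows_txdone = true: the framework then relies on the client, rather than the controller, to declare each transfer finished before the next one may be submitted. A minimal sketch of that pairing, with illustrative names:

/* Sketch: client-acknowledged TX completion, assuming the client learns
 * through its own protocol that the remote consumed the message.
 */
static struct mbox_client example_cl = {
	.tx_block = false,
	.knows_txdone = true,	/* we call mbox_client_txdone() ourselves */
};

static int example_ping(struct mbox_chan *chan)
{
	int ret = mbox_send_message(chan, NULL);	/* payload-less doorbell */

	if (ret < 0)
		return ret;

	mbox_client_txdone(chan, 0);	/* report this TX as successful */
	return 0;
}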
Example #3
int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = cmdq_pkt_finalize(pkt);
	if (err < 0)
		return err;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	mbox_send_message(client->chan, pkt);
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
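The timeout bookkeeping above arms the client's timer only on the 0 -> 1 transition of pkt_cnt, so a single running timer covers every packet in flight. The same pattern in isolation, as a sketch with illustrative names (the timer is assumed to have been set up with timer_setup() elsewhere):

#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct example_client {
	spinlock_t lock;
	unsigned int pkt_cnt;		/* submitted but not yet completed */
	struct timer_list timer;	/* fires if completions stall */
	unsigned int timeout_ms;
};

static void example_track_packet(struct example_client *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	if (c->pkt_cnt++ == 0)	/* first in-flight packet arms the watchdog */
		mod_timer(&c->timer, jiffies + msecs_to_jiffies(c->timeout_ms));
	spin_unlock_irqrestore(&c->lock, flags);
}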
Example #4
static int send_scpi_cmd(struct scpi_data_buf *scpi_buf, bool high_priority)
{
	struct mbox_chan *chan;
	struct mbox_client cl = {0};
	struct mhu_data_buf *data = scpi_buf->data;
	u32 status;

	cl.dev = the_scpi_device;
	cl.rx_callback = scpi_rx_callback;

	chan = mbox_request_channel(&cl, high_priority);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	init_completion(&scpi_buf->complete);
	if (mbox_send_message(chan, (void *)data) < 0) {
		status = SCPI_ERR_TIMEOUT;
		goto free_channel;
	}

	wait_for_completion(&scpi_buf->complete);
	status = *(u32 *)(data->rx_buf); /* read first word */

free_channel:
	mbox_free_channel(chan);

	return scpi_to_linux_errno(status);
}
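The wait_for_completion() above only works because scpi_rx_callback completes scpi_buf->complete once the firmware replies. That callback is not shown in this example; a sketch of what it presumably looks like, where the back-pointer through cl_data is an assumption:

/* Sketch: receive side of the exchange above. The mailbox core invokes
 * rx_callback with the controller's received-data pointer.
 */
static void scpi_rx_callback(struct mbox_client *cl, void *mssg)
{
	struct mhu_data_buf *data = mssg;
	struct scpi_data_buf *scpi_buf = data->cl_data;	/* assumed link */

	complete(&scpi_buf->complete);	/* wake send_scpi_cmd() */
}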
Example #5
static ssize_t mbox_test_message_write(struct file *filp,
				       const char __user *userbuf,
				       size_t count, loff_t *ppos)
{
	struct mbox_test_device *tdev = filp->private_data;
	void *data;
	int ret;

	if (!tdev->tx_channel) {
		dev_err(tdev->dev, "Channel cannot do Tx\n");
		return -EINVAL;
	}

	if (count > MBOX_MAX_MSG_LEN) {
		dev_err(tdev->dev,
			"Message length %zd greater than max allowed %d\n",
			count, MBOX_MAX_MSG_LEN);
		return -EINVAL;
	}

	tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
	if (!tdev->message)
		return -ENOMEM;

	ret = copy_from_user(tdev->message, userbuf, count);
	if (ret) {
		ret = -EFAULT;
		goto out;
	}

	/*
	 * A separate signal is only of use if there is
	 * MMIO to subsequently pass the message through
	 */
	if (tdev->tx_mmio && tdev->signal) {
		print_hex_dump_bytes("Client: Sending: Signal: ", DUMP_PREFIX_ADDRESS,
				     tdev->signal, MBOX_MAX_SIG_LEN);

		data = tdev->signal;
	} else
		data = tdev->message;

	print_hex_dump_bytes("Client: Sending: Message: ", DUMP_PREFIX_ADDRESS,
			     tdev->message, MBOX_MAX_MSG_LEN);

	ret = mbox_send_message(tdev->tx_channel, data);
	if (ret < 0)
		dev_err(tdev->dev, "Failed to send message via mailbox\n");

out:
	kfree(tdev->signal);
	kfree(tdev->message);
	tdev->signal = NULL;

	return ret < 0 ? ret : count;
}
Example #6
static void tegra_tcu_write_one(struct tegra_tcu *tcu, u32 value,
				unsigned int count)
{
	void *msg;

	value |= TCU_MBOX_NUM_BYTES(count);
	msg = (void *)(unsigned long)value;
	mbox_send_message(tcu->tx, msg);
	mbox_flush(tcu->tx, 1000);
}
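tegra_tcu_write_one() pairs a non-blocking send with mbox_flush(), which spins until the controller has actually pushed the word out; that keeps the helper usable from atomic contexts such as a console write path. A sketch of how a caller might stream characters through it, assuming the TCU packs up to three bytes per mailbox word:

/* Sketch: feed a character buffer through tegra_tcu_write_one() above. */
static void example_tcu_write(struct tegra_tcu *tcu, const char *s,
			      unsigned int count)
{
	unsigned int packed = 0;
	u32 value = 0;

	while (count--) {
		value |= (u32)(u8)*s << (packed * 8);	/* pack next byte */
		s++;
		packed++;
		if (packed == 3) {
			tegra_tcu_write_one(tcu, value, 3);
			value = packed = 0;
		}
	}

	if (packed)	/* flush any partial word */
		tegra_tcu_write_one(tcu, value, packed);
}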
Example #7
static int hi6220_acpu_set_freq(struct hi6220_stub_clk *stub_clk,
				unsigned int freq)
{
	union hi6220_mbox_data data;

	/* set the frequency in sram */
	regmap_write(stub_clk->dfs_map, ACPU_DFS_FREQ_REQ, freq);

	/* compound mailbox message */
	data.msg.type = HI6220_MBOX_FREQ;
	data.msg.cmd  = HI6220_MBOX_CMD_SET;
	data.msg.obj  = HI6220_MBOX_OBJ_AP;
	data.msg.src  = HI6220_MBOX_OBJ_AP;

	mbox_send_message(stub_clk->mbox, &data);
	return 0;
}
Example #8
static int
scpi_send_message(u8 cmd, void *tx_buf, unsigned int len, void *rx_buf)
{
	int ret;
	u8 token, chan;
	struct scpi_xfer *msg;
	struct scpi_chan *scpi_chan;

	chan = atomic_inc_return(&scpi_info->next_chan) % scpi_info->num_chans;
	scpi_chan = scpi_info->channels + chan;

	msg = get_scpi_xfer(scpi_chan);
	if (!msg)
		return -ENOMEM;

	token = atomic_inc_return(&scpi_chan->token) & CMD_TOKEN_ID_MASK;

	msg->slot = BIT(SCPI_SLOT);
	msg->cmd = PACK_SCPI_CMD(cmd, token, len);
	msg->tx_buf = tx_buf;
	msg->tx_len = len;
	msg->rx_buf = rx_buf;
	init_completion(&msg->done);

	ret = mbox_send_message(scpi_chan->chan, msg);
	if (ret < 0 || !rx_buf)
		goto out;

	if (!wait_for_completion_timeout(&msg->done, MAX_RX_TIMEOUT))
		ret = -ETIMEDOUT;
	else
		/* first status word */
		ret = le32_to_cpu(msg->status);
out:
	if (ret < 0 && rx_buf) /* remove entry from the list if timed-out */
		scpi_process_cmd(scpi_chan, msg->cmd);

	put_scpi_xfer(msg, scpi_chan);
	/* SCPI error codes > 0, translate them to Linux scale */
	return ret > 0 ? scpi_to_linux_errno(ret) : ret;
}
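Two error domains meet in the final return: a negative ret is already a Linux errno (mailbox failure or -ETIMEDOUT), while a positive ret is the firmware's SCPI status word read back from msg->status, so only the latter goes through scpi_to_linux_errno() for translation.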
Example #9
static int send_pcc_cmd(u16 cmd)
{
	int retries, result = -EIO;
	struct acpi_pcct_hw_reduced *pcct_ss = pcc_channel->con_priv;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcc_comm_addr;
	u32 cmd_latency = pcct_ss->latency;

	/* Min time OS should wait before sending next command. */
	udelay(pcc_cmd_delay);

	/* Write to the shared comm region. */
	writew(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew(0, &generic_comm_base->status);

	/* Ring doorbell */
	result = mbox_send_message(pcc_channel, &cmd);
	if (result < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, result);
		return result;
	}

	/* Wait for a nominal time to let platform process command. */
	udelay(cmd_latency);

	/* Retry in case the remote processor was too slow to catch up. */
	for (retries = NUM_RETRIES; retries > 0; retries--) {
		if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) {
			result = 0;
			break;
		}
	}

	mbox_client_txdone(pcc_channel, result);
	return result;
}
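Ringing the doorbell only signals the platform; completion is detected separately by polling the CMD COMPLETE bit in the shared region, and that poll result is what gets reported through mbox_client_txdone(). The polling step in isolation, as a sketch that adds a conventional cpu_relax() to the busy loop (the other names come from the example above):

static int example_wait_cmd_complete(struct acpi_pcct_shared_memory *base)
{
	int retries;

	for (retries = NUM_RETRIES; retries > 0; retries--) {
		if (readw_relaxed(&base->status) & PCC_CMD_COMPLETE)
			return 0;	/* platform finished the command */
		cpu_relax();
	}

	return -EIO;	/* remote side never caught up */
}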
Example #10
/**
 * @brief ioctl communication function
 *        communication with the application
 * @param arg data passed from application memory space
 *
 * @return communication status
 */
static long driver_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	uint32_t mess;

	switch (cmd) {
	case SET_IMAGE_STATUS:
		if (copy_from_user(&mess, (void __user *)arg, sizeof(mess)))
			return COMMUNICATION_ERROR;

		/* Send message to nios */
		ret = mbox_send_message(chan_sender, (void *)&mess);
		if (ret < 0)
			pr_err("%s: sending image to nios failed: %d\n",
			       __func__, ret);
		else
			pr_info("%s: image is sent to nios, size: %u\n",
				__func__, mess);

		set_process_status(PROCESS_START);
		if (__put_user(ret, (unsigned int __user *)arg))
			return COMMUNICATION_ERROR;
		break;

	case GET_IMAGE_STATUS:
		mess = get_process_status();
		/*
		 * Return the image status. If process_status is 0,
		 * Nios has not finished processing the last request.
		 */
		if (__put_user(mess, (unsigned int __user *)arg))
			return COMMUNICATION_ERROR;
		break;
	default:
		break;
	}

	return ret;
}
Example #11
int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val)
{
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	u32 opplevel = 0;
#endif
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;
	struct cfg_hostres *resources = dev_context->resources;
	int status = 0;
	u32 temp;

	if (!dev_context->mbox)
		return 0;

	if (!resources)
		return -EPERM;

	if (dev_context->brd_state == BRD_DSP_HIBERNATION ||
	    dev_context->brd_state == BRD_HIBERNATION) {
#ifdef CONFIG_TIDSPBRIDGE_DVFS
		if (pdata->dsp_get_opp)
			opplevel = (*pdata->dsp_get_opp) ();
		if (opplevel == VDD1_OPP1) {
			if (pdata->dsp_set_min_opp)
				(*pdata->dsp_set_min_opp) (VDD1_OPP2);
		}
#endif
		/* Restart the peripheral clocks */
		dsp_clock_enable_all(dev_context->dsp_per_clks);
		dsp_wdt_enable(true);

		/*
		 * 2:0 AUTO_IVA2_DPLL - Enabling IVA2 DPLL auto control
		 *     in CM_AUTOIDLE_PLL_IVA2 register
		 */
		(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);

		/*
		 * 7:4 IVA2_DPLL_FREQSEL - IVA2 internal frq set to
		 *     0.75 MHz - 1.0 MHz
		 * 2:0 EN_IVA2_DPLL - Enable IVA2 DPLL in lock mode
		 */
		(*pdata->dsp_cm_rmw_bits)(OMAP3430_IVA2_DPLL_FREQSEL_MASK |
				OMAP3430_EN_IVA2_DPLL_MASK,
				0x3 << OMAP3430_IVA2_DPLL_FREQSEL_SHIFT |
				0x7 << OMAP3430_EN_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL);

		/* Restore mailbox settings */
		omap_mbox_restore_ctx(dev_context->mbox);

		/* Access MMU SYS CONFIG register to generate a short wakeup */
		temp = readl(resources->dmmu_base + 0x10);

		dev_context->brd_state = BRD_RUNNING;
	} else if (dev_context->brd_state == BRD_RETENTION) {
		/* Restart the peripheral clocks */
		dsp_clock_enable_all(dev_context->dsp_per_clks);
	}

	status = mbox_send_message(dev_context->mbox, (void *)((u32)mb_val));
	if (status < 0) {
		pr_err("mbox_send_message failed, status = %d\n", status);
		return -EPERM;
	}

	return 0;
}
Example #12
/*
 * This function transfers the ownership of the PCC to the platform
 * So it must be called while holding write_lock(pcc_lock)
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time(MRTT)
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should be ideally zero or large enough so that it can
	 * handle maximum number of requests that all the cores in the system can
	 * collectively generate. If it is not, we will follow the spec and just
	 * not send the request to the platform after hitting the MPAR limit in
	 * any 60s window
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd not sent due to MPAR limit");
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, ret);
		goto end;
	}

	/* wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);
				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}
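Note how completion is reported at the end: when the controller advertises txdone_irq, the transfer is acknowledged on the controller side with mbox_chan_txdone(); otherwise the client acknowledges it with mbox_client_txdone(). This matches the PCC subspace types, only some of which provide a platform completion interrupt.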
Example #13
static int flexrm_process_completions(struct flexrm_ring *ring)
{
	u64 desc;
	int err, count = 0;
	unsigned long flags;
	struct brcm_message *msg = NULL;
	u32 reqid, cmpl_read_offset, cmpl_write_offset;
	struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];

	spin_lock_irqsave(&ring->lock, flags);

	/* Check last_pending_msg */
	if (ring->last_pending_msg) {
		msg = ring->last_pending_msg;
		ring->last_pending_msg = NULL;
	}

	/*
	 * Get current completion read and write offset
	 *
	 * Note: We should read completion write pointer at least once
	 * after we get a MSI interrupt because HW maintains internal
	 * MSI status which will allow next MSI interrupt only after
	 * completion write pointer is read.
	 */
	cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
	cmpl_write_offset *= RING_DESC_SIZE;
	cmpl_read_offset = ring->cmpl_read_offset;
	ring->cmpl_read_offset = cmpl_write_offset;

	spin_unlock_irqrestore(&ring->lock, flags);

	/* If last_pending_msg was set then queue it back */
	if (msg)
		mbox_send_message(chan, msg);

	/* For each completed request notify mailbox clients */
	reqid = 0;
	while (cmpl_read_offset != cmpl_write_offset) {
		/* Dequeue next completion descriptor */
		desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));

		/* Next read offset */
		cmpl_read_offset += RING_DESC_SIZE;
		if (cmpl_read_offset == RING_CMPL_SIZE)
			cmpl_read_offset = 0;

		/* Decode error from completion descriptor */
		err = flexrm_cmpl_desc_to_error(desc);
		if (err < 0) {
			dev_warn(ring->mbox->dev,
				 "got completion desc=0x%lx with error %d",
				 (unsigned long)desc, err);
		}

		/* Determine request id from completion descriptor */
		reqid = flexrm_cmpl_desc_to_reqid(desc);

		/* Determine message pointer based on reqid */
		msg = ring->requests[reqid];
		if (!msg) {
			dev_warn(ring->mbox->dev,
				 "null msg pointer for completion desc=0x%lx",
				 (unsigned long)desc);
			continue;
		}

		/* Release reqid for recycling */
		ring->requests[reqid] = NULL;
		ida_simple_remove(&ring->requests_ida, reqid);

		/* Unmap DMA mappings */
		flexrm_dma_unmap(ring->mbox->dev, msg);

		/* Give-back message to mailbox client */
		msg->error = err;
		mbox_chan_received_data(chan, msg);

		/* Increment number of completions processed */
		count++;
	}

	return count;
}
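Unlike the client-side examples, this function sits in the controller driver: it re-queues a deferred message with mbox_send_message() and hands each completed request back to its owner with mbox_chan_received_data(), which the mailbox core routes into the client's rx_callback. A sketch of the client side of that hand-off, with illustrative names:

/* Sketch: the client's rx_callback receiving a completed brcm_message. */
static void example_rx_callback(struct mbox_client *cl, void *mssg)
{
	struct brcm_message *msg = mssg;

	if (msg->error)
		pr_err("request failed: %d\n", msg->error);
	/* ... complete whatever request owned this message ... */
}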
Example #14
static int send_pcc_cmd(u16 cmd)
{
	int ret = -EIO;
	struct acpi_pcct_shared_memory *generic_comm_base =
		(struct acpi_pcct_shared_memory *) pcc_comm_addr;
	static ktime_t last_cmd_cmpl_time, last_mpar_reset;
	static int mpar_count;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		ret = check_pcc_chan();
		if (ret)
			return ret;
	}

	/*
	 * Handle the Minimum Request Turnaround Time(MRTT)
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time);
		if (pcc_mrtt > time_delta)
			udelay(pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should be ideally zero or large enough so that it can
	 * handle maximum number of requests that all the cores in the system can
	 * collectively generate. If it is not, we will follow the spec and just
	 * not send the request to the platform after hitting the MPAR limit in
	 * any 60s window
	 */
	if (pcc_mpar) {
		if (mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset);
			if (time_delta < 60 * MSEC_PER_SEC) {
				pr_debug("PCC cmd not sent due to MPAR limit");
				return -EIO;
			}
			last_mpar_reset = ktime_get();
			mpar_count = pcc_mpar;
		}
		mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	/* Ring doorbell */
	ret = mbox_send_message(pcc_channel, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n",
				cmd, ret);
		return ret;
	}

	/*
	 * For READs we need to ensure the cmd completed so that
	 * the ensuing read()s can proceed. For WRITEs we don't care
	 * because the actual write()s are done before coming here
	 * and the next READ or WRITE will check if the channel
	 * is busy/free at the entry of this call.
	 *
	 * If Minimum Request Turnaround Time is non-zero, we need
	 * to record the completion time of both READ and WRITE
	 * command for proper handling of MRTT, so we need to check
	 * for pcc_mrtt in addition to CMD_READ
	 */
	if (cmd == CMD_READ || pcc_mrtt) {
		ret = check_pcc_chan();
		if (pcc_mrtt)
			last_cmd_cmpl_time = ktime_get();
	}

	mbox_client_txdone(pcc_channel, ret);
	return ret;
}