Example no. 1
0
static int exynos_read(struct hwrng *rng, void *buf,
					size_t max, bool wait)
{
	struct exynos_rng *exynos_rng = container_of(rng,
						struct exynos_rng, rng);
	u32 *data = buf;
	int retry = 100;
	int ret = 4;

	pm_runtime_get_sync(exynos_rng->dev);

	exynos_rng_writel(exynos_rng, PRNG_START, 0);

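	/* Poll the PRNG status register until it reports completion or the retry budget runs out */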
	while (!(exynos_rng_readl(exynos_rng,
			EXYNOS_PRNG_STATUS_OFFSET) & PRNG_DONE) && --retry)
		cpu_relax();
	if (!retry) {
		ret = -ETIMEDOUT;
		goto out;
	}

	exynos_rng_writel(exynos_rng, PRNG_DONE, EXYNOS_PRNG_STATUS_OFFSET);

	*data = exynos_rng_readl(exynos_rng, EXYNOS_PRNG_OUT1_OFFSET);

out:
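	/* drop the reference taken by pm_runtime_get_sync() and allow autosuspend */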
	pm_runtime_mark_last_busy(exynos_rng->dev);
	pm_runtime_put_sync_autosuspend(exynos_rng->dev);

	return ret;
}
Example no. 2
0
static ssize_t esa_write(struct file *file, const char *buffer,
					size_t size, loff_t *pos)
{
	int ret;

	mutex_lock(&esa_mutex);
	pm_runtime_get_sync(&si.pdev->dev);

	if (!si.fx_ext_on) {
		esa_debug("%s: fx ext not enabled\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	if (!si.fx_work_buf) {
		esa_debug("%s: fx buf not ready\n", __func__);
		ret = -EBUSY;
		goto out;
	}

	/* each fx work buffer slot is FX_BUF_SIZE bytes; refuse larger writes */
	if (size > FX_BUF_SIZE) {
		esa_err("%s: %zu bytes exceeds the fx buffer\n", __func__, size);
		ret = -EINVAL;
		goto out;
	}

	if (copy_from_user(si.fx_work_buf, buffer, size)) {
		esa_err("%s: failed to copy_from_user\n", __func__);
		ret = -EFAULT;
	} else {
		esa_debug("%s: %zu bytes\n", __func__, size);
		ret = FX_BUF_SIZE;
	}
out:
	pm_runtime_mark_last_busy(&si.pdev->dev);
	pm_runtime_put_sync_autosuspend(&si.pdev->dev);
	mutex_unlock(&esa_mutex);

	return ret;
}
Example no. 3
0
void escore_pm_enable(void)
{
	struct escore_priv *escore = &escore_priv;
	int ret = 0;

	dev_dbg(escore->dev, "%s()\n", __func__);

	if (escore->pm_enable) {
		pr_err("%s(): Already Enabled\n", __func__);
		return;
	}

	escore->pm_enable = ES_PM_ON;
	escore->pm_status = ES_PM_ON;
	pm_runtime_set_active(escore->dev);
	pm_runtime_mark_last_busy(escore->dev);
	pm_runtime_set_autosuspend_delay(escore->dev, ES_PM_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(escore->dev);
	pm_runtime_enable(escore->dev);
	device_init_wakeup(escore->dev, true);
	/* capture the return value so the error path reports the real PM status */
	ret = pm_runtime_get_sync(escore->dev);
	if (ret >= 0) {
		ret = pm_runtime_put_sync_autosuspend(escore->dev);
		if (ret < 0) {
			dev_err(escore->dev,
				"%s() escore PM put failed ret = %d\n",
				__func__, ret);
		}
	} else {
		dev_err(escore->dev,
			"%s() escore PM get failed ret = %d\n", __func__, ret);
	}
}
Example no. 4
0
void esa_dma_close(int ch)
{
	/* validate the channel index before reading per-channel state */
	if (ch >= DMA_CH_MAX || !si.dma_ch[ch])
		return;

	si.dma_ch[ch] = false;
	si.dma_cb[ch] = NULL;
	si.dma_cb_param[ch] = NULL;
	pm_runtime_mark_last_busy(&si.pdev->dev);
	pm_runtime_put_sync_autosuspend(&si.pdev->dev);
}
Example no. 5
0
static void dwc3_pci_resume_work(struct work_struct *work)
{
	struct dwc3_pci *dwc = container_of(work, struct dwc3_pci, wakeup_work);
	struct platform_device *dwc3 = dwc->dwc3;
	int ret;

	ret = pm_runtime_get_sync(&dwc3->dev);
	if (ret < 0) {
		/* drop the reference taken by get_sync even though resume failed */
		pm_runtime_put_sync_autosuspend(&dwc3->dev);
		return;
	}

	pm_runtime_mark_last_busy(&dwc3->dev);
	pm_runtime_put_sync_autosuspend(&dwc3->dev);
}
Example no. 6
0
int esa_effect_write(int type, int *value, int count)
{
	int effect_count = count;
	void __iomem *effect_addr;
	int i, *effect_value;
	int ret = 0;

	pm_runtime_get_sync(&si.pdev->dev);

	/* operate directly on the caller-provided values */
	effect_value = value;

	switch (type) {
	case SOUNDALIVE:
		effect_addr = si.effect_ram + SA_BASE;
		break;
	case MYSOUND:
		effect_addr = si.effect_ram + MYSOUND_BASE;
		break;
	case PLAYSPEED:
		effect_addr = si.effect_ram + VSP_BASE;
		break;
	case SOUNDBALANCE:
		effect_addr = si.effect_ram + LRSM_BASE;
		break;
	default:
		pr_err("Unsupported effect type: %d\n", type);
		ret = -EINVAL;
		goto out;
	}

	if ((effect_value[1] == 0) && (effect_value[15] == 2) &&
		(effect_value[16] == 2))
		si.effect_on = 0;
	else
		si.effect_on = 1;

	for (i = 0; i < effect_count; i++) {
		pr_debug("effect_value[%d] = %d\n", i, effect_value[i]);
		writel(effect_value[i], effect_addr + 0x10 + (i * 4));
	}

	writel(CHANGE_BIT, effect_addr);

	esa_update_qos();
out:
	pm_runtime_mark_last_busy(&si.pdev->dev);
	pm_runtime_put_sync_autosuspend(&si.pdev->dev);

	return ret;
}
Example no. 7
0
void escore_pm_put_autosuspend(void)
{
	struct escore_priv *escore = &escore_priv;
	int ret = 0;

	dev_dbg(escore->dev, "%s()\n", __func__);

	if (!escore->pm_enable)
		return;

	pm_runtime_mark_last_busy(escore->dev);

	ret = pm_runtime_put_sync_autosuspend(escore->dev);
	if (ret < 0)
		dev_err(escore->dev, "%s(): fail %d\n", __func__, ret);
}
Example no. 8
0
static ssize_t lpeff_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 elpe_cmd;
	u32 arg0, arg1, arg2, arg3, arg4, arg5, arg6;
	u32 arg7, arg8, arg9, arg10, arg11, arg12;

	mutex_lock(&esa_mutex);
	pm_runtime_get_sync(&g_si.pdev->dev);

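	/* read back the ELPE command and argument registers */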
	elpe_cmd = readl(g_effect_addr + ELPE_BASE + ELPE_CMD);
	arg0 = readl(g_effect_addr + ELPE_BASE + ELPE_ARG0);
	arg1 = readl(g_effect_addr + ELPE_BASE + ELPE_ARG1);
	arg2 = readl(g_effect_addr + ELPE_BASE + ELPE_ARG2);
	arg3 = readl(g_effect_addr + ELPE_BASE + ELPE_ARG3);
	arg4 = readl(g_effect_addr + ELPE_BASE + ELPE_ARG4);
	arg5 = readl(g_effect_addr + ELPE_BASE + ELPE_ARG5);
	arg6 = readl(g_effect_addr + ELPE_BASE + ELPE_ARG6);
	arg7 = readl(g_effect_addr + ELPE_BASE + ELPE_ARG7);
	arg8 = readl(g_effect_addr + ELPE_BASE + ELPE_ARG8);
	arg9 = readl(g_effect_addr + ELPE_BASE + ELPE_ARG9);
	arg10 = readl(g_effect_addr + ELPE_BASE + ELPE_ARG10);
	arg11 = readl(g_effect_addr + ELPE_BASE + ELPE_ARG11);
	arg12 = readl(g_effect_addr + ELPE_BASE + ELPE_ARG12);

	/* change src, dst address to offset value */
#if 0
	src = (src & 0xFFFFFF) - 0x400000;
	dst = (dst & 0xFFFFFF) - 0x400000;
#endif

	pm_runtime_mark_last_busy(&g_si.pdev->dev);
	pm_runtime_put_sync_autosuspend(&g_si.pdev->dev);
	mutex_unlock(&esa_mutex);

//	flush_cache_all();

	return scnprintf(buf, PAGE_SIZE, "%u %u %u %u %u %u %u %u %u %u %u %u %u %u\n",
			elpe_cmd, arg0, arg1, arg2, arg3, arg4, arg5, arg6,
			arg7, arg8, arg9, arg10, arg11, arg12);
}
Example no. 9
0
static ssize_t esa_read(struct file *file, char *buffer,
				size_t size, loff_t *pos)
{
	int ret;

	mutex_lock(&esa_mutex);
	pm_runtime_get_sync(&si.pdev->dev);

	if (!si.fx_ext_on) {
		esa_debug("%s: fx ext not enabled\n", __func__);
		ret = -EINVAL;
		goto out;
	}

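	/* wait up to one second (HZ) for the firmware to signal that FX data is ready */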
	ret = wait_event_interruptible_timeout(esa_fx_wq, si.fx_irq_done, HZ);
	if (ret < 0)
		goto out;
	if (!ret) {
		esa_err("%s: fx irq timeout\n", __func__);
		ret = -EBUSY;
		goto out;
	}

	si.fx_irq_done = false;
	si.fx_work_buf = (unsigned char *)(si.sram + FX_BUF_OFFSET);
	si.fx_work_buf += si.fx_next_idx * FX_BUF_SIZE;
	esa_debug("%s: buf_idx = %d\n", __func__, si.fx_next_idx);

	if (copy_to_user((void *)buffer, si.fx_work_buf, FX_BUF_SIZE)) {
		esa_err("%s: failed to copy_to_user\n", __func__);
		ret = -EFAULT;
	} else {
		ret = FX_BUF_SIZE;
	}
out:
	pm_runtime_mark_last_busy(&si.pdev->dev);
	pm_runtime_put_sync_autosuspend(&si.pdev->dev);
	mutex_unlock(&esa_mutex);

	return ret;
}
Example no. 10
0
static long esa_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret = 0;

	mutex_lock(&esa_mutex);
	pm_runtime_get_sync(&si.pdev->dev);

	switch (cmd) {
	case SEIREN_IOCTL_FX_EXT:
		si.fx_ext_on = arg ? true : false;
		writel(si.fx_ext_on ? 1 : 0, si.mailbox + EFFECT_EXT_ON);
		break;
	default:
		esa_err("%s: unknown cmd:%08X, arg:%08X\n",
			__func__, cmd, (unsigned int)arg);
		ret = -ENOTTY;
		break;
	}

	pm_runtime_mark_last_busy(&si.pdev->dev);
	pm_runtime_put_sync_autosuspend(&si.pdev->dev);
	mutex_unlock(&esa_mutex);

	return ret;
}
Example no. 11
0
static void request_pm_put_sync(void)
{
	pm_runtime_put_sync_autosuspend(&srp.pdev->dev);
}
Example no. 12
0
static void request_pm_put_sync(void)
{
	pm_runtime_use_autosuspend(&srp.pdev->dev);
	pm_runtime_mark_last_busy(&srp.pdev->dev);
	pm_runtime_put_sync_autosuspend(&srp.pdev->dev);
}
Example no. 13
0
static ssize_t esa_read(struct file *file, char *buffer,
				size_t size, loff_t *pos)
{
	struct esa_rtd *rtd = file->private_data;
	unsigned char *obuf;
	unsigned int *obuf_filled_size;
	bool *obuf_filled;

	unsigned char *obuf_;
	unsigned int *obuf_filled_size_;
	bool *obuf_filled_;

	mutex_lock(&esa_mutex);
	pm_runtime_get_sync(&si.pdev->dev);

	/* select OBUF0 or OBUF1 */
	if (rtd->select_obuf == 0) {
		obuf = rtd->obuf0;
		obuf_filled = &rtd->obuf0_filled;
		obuf_filled_size = &rtd->obuf0_filled_size;

		obuf_ = rtd->obuf1;
		obuf_filled_ = &rtd->obuf1_filled;
		obuf_filled_size_ = &rtd->obuf1_filled_size;
		esa_debug("%s: use obuf0\n", __func__);
	} else {
		obuf = rtd->obuf1;
		obuf_filled = &rtd->obuf1_filled;
		obuf_filled_size = &rtd->obuf1_filled_size;

		obuf_ = rtd->obuf0;
		obuf_filled_ = &rtd->obuf0_filled;
		obuf_filled_size_ = &rtd->obuf0_filled_size;
		esa_debug("%s: use obuf1\n", __func__);
	}

	/* select OBUF0 or OBUF1 for next reading */
	rtd->select_obuf = !rtd->select_obuf;

	/* later... invalidate obuf cache */

	/* send pcm data to user */
	if (copy_to_user((void *)buffer, obuf, *obuf_filled_size)) {
		esa_err("%s: failed to copy_to_user\n", __func__);
		goto err;
	}

	/* at EOS, the other buffer must also be drained; mark final EOS once it holds no data */
	if (rtd->get_eos && !*obuf_filled_)
		rtd->get_eos = EOS_FINAL;

	esa_debug("%s: handle_id[%x], idx:[%d], obuf:[%d], obuf_filled_size:[%d]\n",
			__func__, rtd->handle_id, rtd->idx, !rtd->select_obuf,
			(u32)*obuf_filled_size);
	*obuf_filled = false;

	pm_runtime_mark_last_busy(&si.pdev->dev);
	pm_runtime_put_sync_autosuspend(&si.pdev->dev);
	mutex_unlock(&esa_mutex);

	return *obuf_filled_size;
err:
	pm_runtime_mark_last_busy(&si.pdev->dev);
	pm_runtime_put_sync_autosuspend(&si.pdev->dev);
	mutex_unlock(&esa_mutex);
	return -EFAULT;
}
Example no. 14
0
static ssize_t esa_write(struct file *file, const char *buffer,
					size_t size, loff_t *pos)
{
	struct esa_rtd *rtd = file->private_data;
	unsigned char *ibuf;
	unsigned char *obuf;
	unsigned int *obuf_filled_size;
	bool *obuf_filled;
	int response, consumed_size = 0;

	mutex_lock(&esa_mutex);
	pm_runtime_get_sync(&si.pdev->dev);

	if (rtd->obuf0_filled && rtd->obuf1_filled) {
		esa_err("%s: There is no unfilled obuf\n", __func__);
		goto err;
	}

	/* select IBUF0 or IBUF1 */
	if (rtd->select_ibuf == 0) {
		ibuf = rtd->ibuf0;
		obuf = rtd->obuf0;
		obuf_filled = &rtd->obuf0_filled;
		obuf_filled_size = &rtd->obuf0_filled_size;
		esa_debug("%s: use ibuf0\n", __func__);
	} else {
		ibuf = rtd->ibuf1;
		obuf = rtd->obuf1;
		obuf_filled = &rtd->obuf1_filled;
		obuf_filled_size = &rtd->obuf1_filled_size;
		esa_debug("%s: use ibuf1\n", __func__);
	}

	/* receive stream data from user */
	if (copy_from_user(ibuf, buffer, size)) {
		esa_err("%s: failed to copy_from_user\n", __func__);
		goto err;
	}

	/* select IBUF0 or IBUF1 for next writing */
	rtd->select_ibuf = !rtd->select_ibuf;

	/* send execute command to FW for decoding */
	response = esa_send_cmd_exe(rtd, ibuf, obuf, size);

	/* filled size in OBUF */
	*obuf_filled_size = readl(si.mailbox + SIZE_OUT_DATA);

	/* consumed size */
	consumed_size = readl(si.mailbox + CONSUMED_BYTE_IN);

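	/* a successful decode with output marks the OBUF filled; otherwise fall back to the response code */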
	if (response == 0 && *obuf_filled_size > 0) {
		*obuf_filled = true;
	} else {
		if (consumed_size <= 0)
			consumed_size = response;
		if (rtd->need_config)
			rtd->need_config = false;
		else if (size != 0)
			esa_debug("%s: No output? response:%x\n", __func__, response);
	}

	pm_runtime_mark_last_busy(&si.pdev->dev);
	pm_runtime_put_sync_autosuspend(&si.pdev->dev);
	mutex_unlock(&esa_mutex);

	esa_debug("%s: handle_id[%x], idx:[%d], consumed:[%d], filled_size:[%d], ibuf:[%d]\n",
			__func__, rtd->handle_id, rtd->idx, consumed_size,
			*obuf_filled_size, !rtd->select_ibuf);

	return consumed_size;
err:
	pm_runtime_mark_last_busy(&si.pdev->dev);
	pm_runtime_put_sync_autosuspend(&si.pdev->dev);
	mutex_unlock(&esa_mutex);
	return -EFAULT;
}
Example no. 15
0
void esa_compr_close(void)
{
	pm_runtime_mark_last_busy(&si.pdev->dev);
	pm_runtime_put_sync_autosuspend(&si.pdev->dev);
	ptr_ap = NULL;
}
Example no. 16
0
static long esa_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct esa_rtd *rtd = file->private_data;
	int ret = 0;
	unsigned int param = cmd >> 16;

	cmd = cmd & 0xffff;

	esa_debug("%s: idx:%d, param:%x, cmd:%x\n", __func__,
				rtd->idx, param, cmd);

	mutex_lock(&esa_mutex);
	pm_runtime_get_sync(&si.pdev->dev);

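	/* dispatch the channel command to the firmware through the shared mailbox */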
	switch (cmd) {
	case SEIREN_IOCTL_CH_CREATE:
		rtd->ip_type = (unsigned int) arg;
		arg = arg << 16;
		writel(arg, si.mailbox + IP_TYPE);
		ret = esa_send_cmd(CMD_CREATE);
		if (ret == -EBUSY)
			break;
		ret = readl(si.mailbox + RETURN_CMD);
		if (ret != 0)
			break;
		rtd->handle_id = readl(si.mailbox + IP_ID);
		esa_buffer_init(file);
		esa_debug("CH_CREATE: ret_val:%x, handle_id:%x\n",
				readl(si.mailbox + RETURN_CMD),
				rtd->handle_id);
		esa_update_qos();
		break;
	case SEIREN_IOCTL_CH_DESTROY:
		writel(rtd->handle_id, si.mailbox + HANDLE_ID);
		ret = esa_send_cmd(CMD_DESTROY);
		if (ret == -EBUSY)
			break;
		ret = readl(si.mailbox + RETURN_CMD);
		if (ret != 0)
			break;
		esa_debug("CH_DESTROY: ret_val:%x, handle_id:%x\n",
				readl(si.mailbox + RETURN_CMD),
				rtd->handle_id);
		break;
	case SEIREN_IOCTL_CH_EXE:
		esa_debug("CH_EXE\n");
		ret = esa_exe(file, param, arg);
		break;
	case SEIREN_IOCTL_CH_SET_PARAMS:
		esa_debug("CH_SET_PARAMS\n");
		ret = esa_set_params(file, param, arg);
		break;
	case SEIREN_IOCTL_CH_GET_PARAMS:
		esa_debug("CH_GET_PARAMS\n");
		ret = esa_get_params(file, param, arg);
		break;
	case SEIREN_IOCTL_CH_RESET:
		esa_debug("CH_RESET\n");
		break;
	case SEIREN_IOCTL_CH_FLUSH:
		arg = arg << 16;
		writel(rtd->handle_id, si.mailbox + HANDLE_ID);
		esa_send_cmd(CMD_RESET);
		esa_debug("CH_FLUSH: val: %x, handle_id : %x\n",
				readl(si.mailbox + RETURN_CMD),
				rtd->handle_id);
		rtd->get_eos = EOS_NO;
		rtd->select_ibuf = 0;
		rtd->select_obuf = 0;
		rtd->obuf0_filled = false;
		rtd->obuf1_filled = false;
		break;
	case SEIREN_IOCTL_CH_CONFIG:
		esa_debug("CH_CONFIG\n");
		rtd->need_config = true;
		break;
	}

	pm_runtime_mark_last_busy(&si.pdev->dev);
	pm_runtime_put_sync_autosuspend(&si.pdev->dev);
	mutex_unlock(&esa_mutex);

	return ret;
}