Example #1
static inline int proc44x_start(struct device *dev, u32 start_addr)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_rproc *obj = platform_get_drvdata(pdev);
	int ret = 0;

	/* Enable the Timer that would be used by co-processor */
	if (obj->timer_id >= 0) {
		obj->dmtimer =
			omap_dm_timer_request_specific(obj->timer_id);
		if (!obj->dmtimer) {
			ret = -EBUSY;
			goto err_start;
		}
		omap_dm_timer_set_int_enable(obj->dmtimer,
						OMAP_TIMER_INT_OVERFLOW);
		omap_dm_timer_set_source(obj->dmtimer, OMAP_TIMER_SRC_SYS_CLK);
	}

	ret = omap_device_enable(pdev);
	if (ret)
		goto err_start;

	obj->state = OMAP_RPROC_RUNNING;
	return 0;

err_start:
	dev_err(dev, "%s error 0x%x\n", __func__, ret);
	return ret;
}
Example #2
static int _od_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_device *od = to_omap_device(pdev);

	if (od->flags & OMAP_DEVICE_SUSPENDED) {
		od->flags &= ~OMAP_DEVICE_SUSPENDED;
		omap_device_enable(pdev);

		if (od->flags & OMAP_DEVICE_SUSPEND_FORCED) {
			pm_runtime_set_active(dev);
			od->flags &= ~OMAP_DEVICE_SUSPEND_FORCED;
		}

		/*
		 * XXX: we run before core runtime pm has resumed itself. At
		 * this point in time, we just restore the runtime pm state and
		 * considering symmetric operations in resume, we donot expect
		 * to fail. If we failed, something changed in core runtime_pm
		 * framework OR some device driver messed things up, hence, WARN
		 */
		WARN(pm_runtime_set_active(dev),
		     "Could not set %s runtime state active\n", dev_name(dev));

		pm_generic_runtime_resume(dev);
	}

	return pm_generic_resume_noirq(dev);
}
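The XXX comment above is easier to follow with the registration context in mind: _od_resume_noirq() is not called by drivers directly, it is installed as the resume_noirq hook of the PM domain that omap_device assigns to its platform devices. A minimal sketch of that hookup follows, assuming the usual _od_runtime_* and _od_suspend_noirq counterparts exist; the member list is illustrative, not quoted from a specific kernel version.

/*
 * Sketch only: how a noirq callback like the one above is typically wired
 * into the per-device PM domain used by omap_device. Exact callbacks and
 * macros vary between kernel versions.
 */
static struct dev_pm_domain omap_device_pm_domain = {
	.ops = {
		SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
				   _od_runtime_idle),
		USE_PLATFORM_PM_SLEEP_OPS,
		.suspend_noirq = _od_suspend_noirq,
		.resume_noirq = _od_resume_noirq,
	}
};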
Example #3
/**
 * hsi_clocks_enable_channel - virtual wrapper for enabling HSI clocks for
 * a given channel
 * @dev - reference to the hsi device.
 * @channel_number - channel number which requests clock to be enabled
 *		    0xFF means no particular channel
 *
 * Returns: -EEXIST if clocks were already active
 *	    0 if clocks were previously inactive
 *
 * Note: there is no real HW clock management per HSI channel, this is only
 * virtual to keep track of active channels and ease debug
 *
 * Function to be called with lock
 */
int hsi_clocks_enable_channel(struct device *dev, u8 channel_number,
				const char *s)
{
	struct platform_device *pd = to_platform_device(dev);
	struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);

	if (channel_number != HSI_CH_NUMBER_NONE)
		dev_dbg(dev, "CLK: hsi_clocks_enable for "
			"channel %d: %s\n", channel_number, s);
	else
		dev_dbg(dev, "CLK: hsi_clocks_enable: %s\n", s);

	if (hsi_ctrl->clock_enabled) {
		dev_dbg(dev, "Clocks already enabled, skipping...\n");
		return -EEXIST;
	}

#ifdef CONFIG_PM
	/* Prevent Fclk change */
	if (dpll_cascading_blocker_hold(dev) < 0)
		dev_warn(dev, "Error holding DPLL cascading constraint\n");
#endif /* CONFIG_PM */

#ifndef USE_PM_RUNTIME_FOR_HSI
	omap_device_enable(pd);
	hsi_runtime_resume(dev);
	return 0;
#else
	return pm_runtime_get_sync(dev);
#endif
}
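Because the kernel-doc above requires the caller to hold the controller lock and treats -EEXIST as "already enabled", a caller would typically look like the sketch below. The wrapper name and the register-access placeholder are illustrative assumptions; only hsi_clocks_enable_channel() and HSI_CH_NUMBER_NONE come from the example itself.

/*
 * Illustrative caller only: example_hsi_io() is a made-up name and the
 * register access is a placeholder; hsi_ctrl->lock is assumed to be held
 * already, as the kernel-doc above requires.
 */
static int example_hsi_io(struct hsi_dev *hsi_ctrl, struct device *dev)
{
	int err;

	err = hsi_clocks_enable_channel(dev, HSI_CH_NUMBER_NONE, __func__);
	if (err < 0 && err != -EEXIST)
		return err;	/* real failure, e.g. from pm_runtime_get_sync() */

	/* ... safe to touch HSI registers here ... */

	return 0;
}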
Example #4
static inline int omap_rproc_start(struct rproc *rproc, u64 bootaddr)
{
	struct device *dev = rproc->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_rproc_pdata *pdata = dev->platform_data;
	struct omap_rproc_timers_info *timers = pdata->timers;
	struct omap_rproc_priv *rpp = rproc->priv;
	int i;
	int ret = 0;

	if (rproc->secure_mode) {
		rproc->secure_reset = true;
		ret = rproc_drm_invoke_service(rproc->secure_mode);
		if (ret) {
			dev_err(rproc->dev, "rproc_drm_invoke_service failed "
					"for secure_enable ret = 0x%x\n", ret);
			return -ENXIO;
		}
	}

#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
	ret = _init_pm_flags(rproc);
	if (ret)
		return ret;
#endif

	for (i = 0; i < pdata->timers_cnt; i++) {
		timers[i].odt = omap_dm_timer_request_specific(timers[i].id);
		if (!timers[i].odt) {
			ret = -EBUSY;
			goto out;
		}
		omap_dm_timer_set_source(timers[i].odt, OMAP_TIMER_SRC_SYS_CLK);
#ifdef CONFIG_REMOTEPROC_WATCHDOG
		/* GPT 9 & 11 (ipu); GPT 6 (dsp) are used as watchdog timers */
		if ((!strcmp(rproc->name, "dsp") && timers[i].id == 6) ||
		    (!strcmp(rproc->name, "ipu") &&
		     (timers[i].id == DUCATI_WDT_TIMER_1 ||
		      timers[i].id == DUCATI_WDT_TIMER_2))) {
			ret = request_irq(omap_dm_timer_get_irq(timers[i].odt),
					 omap_rproc_watchdog_isr, IRQF_DISABLED,
					"rproc-wdt", rproc);
			/* Clean counter, remoteproc proc will set the value */
			omap_dm_timer_set_load(timers[i].odt, 0, 0);
		}
#endif
	}

	rpp->bootaddr = bootaddr;
	ret = omap_device_enable(pdev);
out:
	if (ret) {
		while (i--) {
			omap_dm_timer_free(timers[i].odt);
			timers[i].odt = NULL;
		}
	}

	return ret;
}
Example #5
static int _od_runtime_resume(struct device *dev)
{
    struct platform_device *pdev = to_platform_device(dev);

    omap_device_enable(pdev);

    return pm_generic_runtime_resume(dev);
}
Example #6
static int rpres_iss_enable(struct platform_device *pdev)
{
	int ret;
	struct rpres_platform_data *pdata = pdev->dev.platform_data;

	ret = omap_device_enable(pdev);
	if (!ret)
		_enable_optional_clocks(pdata->oh);

	return ret;
}
Example #7
static int _od_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_device *od = to_omap_device(pdev);

	if (od->flags & OMAP_DEVICE_SUSPENDED) {
		od->flags &= ~OMAP_DEVICE_SUSPENDED;
		omap_device_enable(pdev);
		pm_generic_runtime_resume(dev);
	}

	return pm_generic_resume_noirq(dev);
}
Example #8
static int _od_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	ret = omap_device_enable(pdev);
	if (ret) {
		dev_err(dev, "use pm_runtime_put_sync_suspend() in driver?\n");
		return ret;
	}

	return pm_generic_runtime_resume(dev);
}
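The error message in this variant points at the suspend side of the pairing. For orientation, this is roughly what the matching runtime-suspend callback conventionally looks like; treat it as a sketch of the common pattern rather than a quote from any particular tree.

/*
 * Sketch of the conventional counterpart: suspend the driver first, then
 * idle the omap_device, mirroring the enable-then-resume order used in
 * _od_runtime_resume() above.
 */
static int _od_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	int ret;

	ret = pm_generic_runtime_suspend(dev);
	if (!ret)
		omap_device_idle(pdev);

	return ret;
}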
Example #9
static int sr_get(struct omap_sr *sr)
{
	int r;

	if (sr->suspended)
		r = omap_device_enable(sr->pdev);
	else
		r = pm_runtime_get_sync(&sr->pdev->dev);
	if (r < 0)
		dev_err(&sr->pdev->dev, "%s: failed:%d susp=%d\n",
			__func__, r, sr->suspended);
	return r;
}
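A release helper would normally mirror this suspended/runtime split. The sr_put() below is a hypothetical counterpart written only to illustrate the symmetry; its name and body are assumptions, not taken from the smartreflex driver.

/*
 * Hypothetical counterpart to sr_get() above, for illustration only:
 * idle the omap_device directly while suspended, otherwise drop the
 * runtime PM reference taken by pm_runtime_get_sync().
 */
static void sr_put(struct omap_sr *sr)
{
	if (sr->suspended)
		omap_device_idle(sr->pdev);
	else
		pm_runtime_put_sync(&sr->pdev->dev);
}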
Example #10
File: pm_bus.c Project: 250bpm/linux-2.6
int omap_pm_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	int r;

	dev_dbg(dev, "%s\n", __func__);

	if (dev->parent == &omap_device_parent) {
		r = omap_device_enable(pdev);
		WARN_ON(r);
	}

	return pm_generic_runtime_resume(dev);
};
Example #11
int omap2430_async_resume(struct musb *musb)
{
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *pdata = dev->platform_data;
	struct omap_musb_board_data *data = pdata->board_data;
	struct platform_device *pdev
		= to_platform_device(musb->controller->parent);
	unsigned long flags = 0;
	u32 val = 0;
	int ret = 0;
	if (!pdev) {
		pr_err("%s pdev is null error\n", __func__);
		return -ENODEV;
	}

	dev_info(&pdev->dev, "%s async_resume=%d +\n",
		__func__, musb->async_resume);

	mutex_lock(&musb->async_musb_lock);
	if (musb->async_resume > 0)
		musb->async_resume++;
	else {
		ret = omap_device_enable(pdev);
		if (ret < 0) {
			dev_err(&pdev->dev, "%s omap_device_enable error ret=%d\n",
				__func__, ret);
			mutex_unlock(&musb->async_musb_lock);
			return ret;
		}
		spin_lock_irqsave(&musb->lock, flags);
		otg_set_suspend(musb->xceiv, 0);
		omap2430_low_level_init(musb);
		val = musb_readl(musb->mregs, OTG_INTERFSEL);
		if (data->interface_type ==
			MUSB_INTERFACE_UTMI) {
			val &= ~ULPI_12PIN;
			val |= UTMI_8BIT;
		} else {
			val |= ULPI_12PIN;
		}
		musb_writel(musb->mregs, OTG_INTERFSEL, val);
		musb_async_resume(musb);
		spin_unlock_irqrestore(&musb->lock, flags);
		musb->async_resume++;
	}
	mutex_unlock(&musb->async_musb_lock);
	dev_info(&pdev->dev, "%s async_resume %d -\n",
		__func__, musb->async_resume);
	return 0;
}
Example #12
static int omap2_iommu_enable(struct iommu *obj)
{
	u32 l, pa;
	unsigned long timeout;
	int ret = 0;

	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd,  SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	ret = omap_device_enable(obj->pdev);
	if (ret)
		return ret;

	iommu_write_reg(obj, MMU_SYS_SOFTRESET, MMU_SYSCONFIG);

	timeout = jiffies + msecs_to_jiffies(20);
	do {
		l = iommu_read_reg(obj, MMU_SYSSTATUS);
		if (l & MMU_SYS_RESETDONE)
			break;
	} while (!time_after(jiffies, timeout));

	if (!(l & MMU_SYS_RESETDONE)) {
		dev_err(obj->dev, "can't take mmu out of reset\n");
		return -ENODEV;
	}

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	l = iommu_read_reg(obj, MMU_SYSCONFIG);
	l &= ~MMU_SYS_IDLE_MASK;
	l |= (MMU_SYS_IDLE_SMART | MMU_SYS_AUTOIDLE);
	iommu_write_reg(obj, l, MMU_SYSCONFIG);

	iommu_write_reg(obj, pa, MMU_TTB);

	omap2_iommu_set_twl(obj, true);

	if (cpu_is_omap44xx())
		iommu_write_reg(obj, 0x1, MMU_GP_REG);

	return 0;
}
Example #13
File: omap_device.c Project: mbgg/linux
static int _od_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_device *od = to_omap_device(pdev);

	if ((od->flags & OMAP_DEVICE_SUSPENDED) &&
	    !pm_runtime_status_suspended(dev)) {
		od->flags &= ~OMAP_DEVICE_SUSPENDED;
		if (!(od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND))
			omap_device_enable(pdev);
		pm_generic_runtime_resume(dev);
	}

	return pm_generic_resume_noirq(dev);
}
Example #14
static inline int proc44x_wakeup(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_rproc *obj = platform_get_drvdata(pdev);
	int ret = 0;

	if (obj->dmtimer)
		omap_dm_timer_start(obj->dmtimer);

	ret = omap_device_enable(pdev);
	if (ret)
		goto err_start;

	obj->state = OMAP_RPROC_RUNNING;
	return 0;

err_start:
	dev_err(dev, "%s error 0x%x\n", __func__, ret);
	return ret;
}
Example #15
/**
 * hsi_clocks_enable_channel - virtual wrapper for enabling HSI clocks for
 * a given channel
 * @dev - reference to the hsi device.
 * @channel_number - channel number which requests clock to be enabled
 *		    0xFF means no particular channel
 *
 * Note: there is no real HW clock management per HSI channel, this is only
 * virtual to keep track of active channels and ease debug
 *
 * Function to be called with lock
 */
int hsi_clocks_enable_channel(struct device *dev, u8 channel_number,
				const char *s)
{
	struct platform_device *pd = to_platform_device(dev);
	struct hsi_dev *hsi_ctrl = platform_get_drvdata(pd);

	if (channel_number != HSI_CH_NUMBER_NONE)
		dev_dbg(dev, "CLK: hsi_clocks_enable for "
			"channel %d: %s\n", channel_number, s);
	else
		dev_dbg(dev, "CLK: hsi_clocks_enable: %s\n", s);

	if (hsi_ctrl->clock_enabled) {
		dev_dbg(dev, "Clocks already enabled, skipping...\n");
		return -EEXIST;
	}
#ifndef USE_PM_RUNTIME_FOR_HSI
	omap_device_enable(pd);
	hsi_runtime_resume(dev);
	return 0;
#else
	return pm_runtime_get_sync(dev);
#endif
}
Example #16
/**
 * omap_device_build_from_dt - build an omap_device with multiple hwmods
 * @pdev: platform_device already registered from the device tree; its
 *        "ti,hwmods" property names the backing omap_hwmod(s)
 *
 * Function for building an omap_device already registered from device-tree
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int omap_device_build_from_dt(struct platform_device *pdev)
{
    struct omap_hwmod **hwmods;
    struct omap_device *od;
    struct omap_hwmod *oh;
    struct device_node *node = pdev->dev.of_node;
    const char *oh_name, *rst_name;
    int oh_cnt, dstr_cnt, i, ret = 0;
    bool device_active = false;

    oh_cnt = of_property_count_strings(node, "ti,hwmods");
    if (oh_cnt <= 0) {
        dev_dbg(&pdev->dev, "No 'hwmods' to build omap_device\n");
        return -ENODEV;
    }

    hwmods = kzalloc(sizeof(struct omap_hwmod *) * oh_cnt, GFP_KERNEL);
    if (!hwmods) {
        ret = -ENOMEM;
        goto odbfd_exit;
    }

    for (i = 0; i < oh_cnt; i++) {
        of_property_read_string_index(node, "ti,hwmods", i, &oh_name);
        oh = omap_hwmod_lookup(oh_name);
        if (!oh) {
            dev_err(&pdev->dev, "Cannot lookup hwmod '%s'\n",
                    oh_name);
            ret = -EINVAL;
            goto odbfd_exit1;
        }
        hwmods[i] = oh;
        if (oh->flags & HWMOD_INIT_NO_IDLE)
            device_active = true;
    }

    od = omap_device_alloc(pdev, hwmods, oh_cnt);
    if (IS_ERR(od)) {
        dev_err(&pdev->dev, "Cannot allocate omap_device for :%s\n",
                oh_name);
        ret = PTR_ERR(od);
        goto odbfd_exit1;
    }

    /* Fix up missing resource names */
    for (i = 0; i < pdev->num_resources; i++) {
        struct resource *r = &pdev->resource[i];

        if (r->name == NULL)
            r->name = dev_name(&pdev->dev);
    }

    pdev->dev.pm_domain = &omap_device_pm_domain;

    if (device_active) {
        omap_device_enable(pdev);
        pm_runtime_set_active(&pdev->dev);
    }
    dstr_cnt =
        of_property_count_strings(node, "ti,deassert-hard-reset");
    if (dstr_cnt > 0) {
        for (i = 0; i < dstr_cnt; i += 2) {
            of_property_read_string_index(
                node, "ti,deassert-hard-reset", i,
                &oh_name);
            of_property_read_string_index(
                node, "ti,deassert-hard-reset", i+1,
                &rst_name);
            oh = omap_hwmod_lookup(oh_name);
            if (!oh) {
                dev_warn(&pdev->dev,
                         "Cannot parse deassert property for '%s'\n",
                         oh_name);
                break;
            }
            omap_hwmod_deassert_hardreset(oh, rst_name);
        }
    }

odbfd_exit1:
    kfree(hwmods);
odbfd_exit:
    /* if data/we are at fault.. load up a fail handler */
    if (ret)
        pdev->dev.pm_domain = &omap_device_fail_pm_domain;

    return ret;
}