Example #1
int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device)
{
	struct bhi_ctxt_t *bhi_ctxt = &mhi_pcie_device->bhi_ctxt;
	enum MHI_STATUS ret_val = MHI_STATUS_SUCCESS;
	u32 pcie_word_val = 0;
	int r;

	if (NULL == mhi_pcie_device || 0 == mhi_pcie_device->core.bar0_base
	    || 0 == mhi_pcie_device->core.bar0_end)
		return -EIO;

	bhi_ctxt->bhi_base = mhi_pcie_device->core.bar0_base;
	pcie_word_val = mhi_reg_read(bhi_ctxt->bhi_base, BHIOFF);
	bhi_ctxt->bhi_base += pcie_word_val;

	mhi_log(MHI_MSG_INFO,
		"Successfully registered char dev. bhi base is: 0x%p.\n",
		bhi_ctxt->bhi_base);
	ret_val = alloc_chrdev_region(&bhi_ctxt->bhi_dev, 0, 1, "bhi");
	if (IS_ERR_VALUE(ret_val)) {
		mhi_log(MHI_MSG_CRITICAL,
			"Failed to alloc char device %d\n",
			ret_val);
		return -EIO;
	}
	bhi_ctxt->bhi_class = class_create(THIS_MODULE, "bhi");
	if (IS_ERR(bhi_ctxt->bhi_class)) {
		r = PTR_RET(bhi_ctxt->bhi_class);
		mhi_log(MHI_MSG_CRITICAL,
			"Failed to instantiate class %d\n",
			r);
		goto err_class_create;
	}
	cdev_init(&bhi_ctxt->cdev, &bhi_fops);
	bhi_ctxt->cdev.owner = THIS_MODULE;
	r = cdev_add(&bhi_ctxt->cdev, bhi_ctxt->bhi_dev, 1);
	if (r) {
		mhi_log(MHI_MSG_CRITICAL,
				"Failed to add bhi cdev %d\n", r);
		goto err_cdev_add;
	}
	bhi_ctxt->dev = device_create(bhi_ctxt->bhi_class, NULL,
					bhi_ctxt->bhi_dev, NULL,
					"bhi");
	if (IS_ERR(bhi_ctxt->dev)) {
		mhi_log(MHI_MSG_CRITICAL,
				"Failed to create bhi device\n");
		r = PTR_RET(bhi_ctxt->dev);
		goto err_dev_create;
	}
	return 0;
err_dev_create:
	cdev_del(&bhi_ctxt->cdev);
err_cdev_add:
	class_destroy(bhi_ctxt->bhi_class);
err_class_create:
	unregister_chrdev_region(MAJOR(bhi_ctxt->bhi_dev), 1);
	return r;
}
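Every example in this listing hinges on the same helper: PTR_RET() takes a pointer that may encode an error (an ERR_PTR value) and collapses it to an int, yielding the negative errno for an error pointer and 0 otherwise. Below is a minimal sketch of that behaviour, assuming only IS_ERR() and PTR_ERR() from <linux/err.h>; ptr_ret_sketch is an illustrative name, and later kernels expose the same helper as PTR_ERR_OR_ZERO():

#include <linux/err.h>

/* Sketch of the PTR_RET()/PTR_ERR_OR_ZERO() idiom: decode an ERR_PTR into
 * its negative errno, or report 0 for any pointer that does not encode an
 * error. A plain NULL is not an ERR_PTR, so it maps to 0 (success) here. */
static inline int ptr_ret_sketch(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}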
static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
{
	struct dentry *cpu_dir;
	char buffer[TMP_SIZE];
	void *rc;

	snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_info_type,
							    cpu_info));
	cpu_dir = hypfs_mkdir(cpus_dir, buffer);
	if (IS_ERR(cpu_dir))
		return PTR_ERR(cpu_dir);
	rc = hypfs_create_u64(cpu_dir, "mgmtime",
			      cpu_info__acc_time(diag204_info_type, cpu_info) -
			      cpu_info__lp_time(diag204_info_type, cpu_info));
	if (IS_ERR(rc))
		return PTR_ERR(rc);
	rc = hypfs_create_u64(cpu_dir, "cputime",
			      cpu_info__lp_time(diag204_info_type, cpu_info));
	if (IS_ERR(rc))
		return PTR_ERR(rc);
	if (diag204_info_type == DIAG204_INFO_EXT) {
		rc = hypfs_create_u64(cpu_dir, "onlinetime",
				      cpu_info__online_time(diag204_info_type,
							    cpu_info));
		if (IS_ERR(rc))
			return PTR_ERR(rc);
	}
	diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer);
	rc = hypfs_create_str(cpu_dir, "type", buffer);
	return PTR_RET(rc);
}
int tegra_bbc_proxy_bw_register(struct device *dev, u32 bw)
{
	int ret;
	struct tegra_bbc_proxy *bbc = dev_get_drvdata(dev);

	mutex_lock(&bbc->iso_lock);

	if (bbc->isomgr_handle)
		tegra_isomgr_unregister(bbc->isomgr_handle);

	bbc->isomgr_handle = tegra_isomgr_register(TEGRA_ISO_CLIENT_BBC_0,
		bw, NULL, NULL);
	ret = PTR_RET(bbc->isomgr_handle);
	if (ret) {
		dev_err(dev, "error registering bbc with isomgr\n");
		goto done;
	}

	ret = tegra_isomgr_set_margin(TEGRA_ISO_CLIENT_BBC_0,
		bw, true);
	if (ret) {
		dev_err(dev, "can't set margin for bbc bw\n");
		tegra_isomgr_unregister(bbc->isomgr_handle);
		bbc->isomgr_handle = NULL; /* avoid unregistering a stale handle next time */
		goto done;
	}
	bbc->margin = bw;

	dev_dbg(dev, "bbc iso client registered\n");

done:
	mutex_unlock(&bbc->iso_lock);
	return ret;
}
int shinano_wifi_set_power(int on)
{
	int gpio = qpnp_pin_map("pm8941-gpio", WIFI_POWER_PMIC_GPIO);
	if (!wifi_batfet) {
		wifi_batfet = regulator_get(NULL, "batfet");
		if (IS_ERR_OR_NULL(wifi_batfet)) {
			printk(KERN_ERR "unable to get batfet reg. rc=%d\n",
				PTR_RET(wifi_batfet));
			wifi_batfet = NULL;
		}
	}
	if (on) {
		if (!batfet_ena && wifi_batfet) {
			regulator_enable(wifi_batfet);
			batfet_ena = 1;
		}
	}
	gpio_set_value(gpio, on);
	if (!on) {
		if (batfet_ena && wifi_batfet) {
			regulator_disable(wifi_batfet);
			batfet_ena = 0;
		}
	}
	return 0;
}
Example #5
/* resume dev_replace procedure that was interrupted by unmount */
int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *task;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;

	btrfs_dev_replace_lock(dev_replace);
	switch (dev_replace->replace_state) {
	case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
	case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
		btrfs_dev_replace_unlock(dev_replace);
		return 0;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
		break;
	case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
		dev_replace->replace_state =
			BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED;
		break;
	}
	if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) {
		pr_info("btrfs: cannot continue dev_replace, tgtdev is missing\n"
			"btrfs: you may cancel the operation after 'mount -o degraded'\n");
		btrfs_dev_replace_unlock(dev_replace);
		return 0;
	}
	btrfs_dev_replace_unlock(dev_replace);

	WARN_ON(atomic_xchg(
		&fs_info->mutually_exclusive_operation_running, 1));
	task = kthread_run(btrfs_dev_replace_kthread, fs_info, "btrfs-devrepl");
	return PTR_RET(task);
}
Example #6
int mhi_esoc_register(struct mhi_device_ctxt *mhi_dev_ctxt)
{
	int ret_val = 0;
	struct device_node *np;
	struct pci_driver *mhi_driver;
	struct device *dev = &mhi_dev_ctxt->dev_info->pcie_device->dev;

	mhi_driver = mhi_dev_ctxt->dev_info->mhi_pcie_driver;
	np = dev->of_node;
	mhi_dev_ctxt->esoc_handle = devm_register_esoc_client(dev, "mdm");
	mhi_log(MHI_MSG_VERBOSE,
		"Of table of pcie struct device property is dev->of_node %p\n",
		np);
	if (IS_ERR_OR_NULL(mhi_dev_ctxt->esoc_handle)) {
		mhi_log(MHI_MSG_CRITICAL,
			"Failed to register for SSR, ret %lx\n",
			(uintptr_t)mhi_dev_ctxt->esoc_handle);
		return -EIO;
	}

	esoc_parse_link_type(mhi_dev_ctxt);

	mhi_dev_ctxt->esoc_ssr_handle = subsys_notif_register_notifier(
					mhi_dev_ctxt->esoc_handle->name,
					&mhi_ssr_nb);
	if (IS_ERR_OR_NULL(mhi_dev_ctxt->esoc_ssr_handle)) {
		ret_val = PTR_RET(mhi_dev_ctxt->esoc_ssr_handle);
		mhi_log(MHI_MSG_CRITICAL,
			"Can't find esoc desc ret 0x%lx\n",
			(uintptr_t)mhi_dev_ctxt->esoc_ssr_handle);
	}

	return ret_val;
}
Example #7
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
				  struct rtnl_link_ops *ops, char *devname)
{
	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
	struct ip_tunnel_parm parms;
	unsigned int i;

	for (i = 0; i < IP_TNL_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&itn->tunnels[i]);

	if (!ops) {
		itn->fb_tunnel_dev = NULL;
		return 0;
	}

	memset(&parms, 0, sizeof(parms));
	if (devname)
		strlcpy(parms.name, devname, IFNAMSIZ);

	rtnl_lock();
	itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing to move it to another netns is clearly unsafe.
	 */
	if (!IS_ERR(itn->fb_tunnel_dev)) {
		itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
		ip_tunnel_add(itn, netdev_priv(itn->fb_tunnel_dev));
	}
	rtnl_unlock();

	return PTR_RET(itn->fb_tunnel_dev);
}
Example #8
static int __init omap3_l3_init(void)
{
    struct omap_hwmod *oh;
    struct platform_device *pdev;
    char oh_name[L3_MODULES_MAX_LEN];

    /*
     * To avoid code running on other OMAPs in
     * multi-omap builds
     */
    if (!(cpu_is_omap34xx()))
        return -ENODEV;

    snprintf(oh_name, L3_MODULES_MAX_LEN, "l3_main");

    oh = omap_hwmod_lookup(oh_name);

    if (!oh)
        pr_err("could not look up %s\n", oh_name);

    pdev = omap_device_build("omap_l3_smx", 0, oh, NULL, 0);

    WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name);

    return PTR_RET(pdev);
}
Example #9
static int __init omap4_l3_init(void)
{
    int i;
    struct omap_hwmod *oh[3];
    struct platform_device *pdev;
    char oh_name[L3_MODULES_MAX_LEN];

    /* If dtb is there, the devices will be created dynamically */
    if (of_have_populated_dt())
        return -ENODEV;

    /*
     * To avoid code running on other OMAPs in
     * multi-omap builds
     */
    if (!cpu_is_omap44xx() && !soc_is_omap54xx())
        return -ENODEV;

    for (i = 0; i < L3_MODULES; i++) {
        snprintf(oh_name, L3_MODULES_MAX_LEN, "l3_main_%d", i+1);

        oh[i] = omap_hwmod_lookup(oh_name);
        if (!(oh[i]))
            pr_err("could not look up %s\n", oh_name);
    }

    pdev = omap_device_build_ss("omap_l3_noc", 0, oh, 3, NULL, 0);

    WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name);

    return PTR_RET(pdev);
}
Example #10
static int __init fail_iommu_debugfs(void)
{
	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
						       NULL, &fail_iommu);

	return PTR_RET(dir);
}
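The debugfs examples are the shortest form of the idiom: a create call returns either a valid dentry or an ERR_PTR, and the result feeds straight into PTR_RET(). As a hedged illustration only, the function above could be spelled out like this (fail_iommu_debugfs_expanded is a hypothetical name, not the kernel's):

static int __init fail_iommu_debugfs_expanded(void)
{
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_iommu", NULL, &fail_iommu);
	if (IS_ERR(dir))
		return PTR_ERR(dir);	/* propagate the encoded errno */
	return 0;			/* valid dentry: report success */
}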
Example #11
int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable)
{
	int i = 0, rc = 0;
	if (enable) {
		for (i = 0; i < num_vreg; i++) {
			rc = PTR_RET(in_vreg[i].vreg);
			if (rc) {
				DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
					__builtin_return_address(0), __func__,
					in_vreg[i].vreg_name, rc);
				goto disable_vreg;
			}
			rc = regulator_enable(in_vreg[i].vreg);
			if (rc < 0) {
				DEV_ERR("%pS->%s: %s enable failed\n",
					__builtin_return_address(0), __func__,
					in_vreg[i].vreg_name);
				goto disable_vreg;
			}
		}
	} else {
		for (i = num_vreg-1; i >= 0; i--)
			if (regulator_is_enabled(in_vreg[i].vreg))
				regulator_disable(in_vreg[i].vreg);
	}
	return rc;

disable_vreg:
	for (i--; i >= 0; i--)
		regulator_disable(in_vreg[i].vreg);
	return rc;
} /* msm_dss_enable_vreg */
Example #12
static int sunxi_ss_hw_init(sunxi_ss_t *sss)
{
#ifdef CONFIG_EVB_PLATFORM
	int ret = 0;
#endif
	struct clk *pclk = NULL;

	pclk = clk_get(&sss->pdev->dev, SS_PLL_CLK);
	if (IS_ERR_OR_NULL(pclk)) {
		SS_ERR("Unable to acquire module clock '%s', return %x\n",
				SS_PLL_CLK, PTR_RET(pclk));
		return PTR_RET(pclk);
	}

	sss->mclk = clk_get(&sss->pdev->dev, sss->dev_name);
	if (IS_ERR_OR_NULL(sss->mclk)) {
		SS_ERR("Unable to acquire module clock '%s', return %x\n",
				sss->dev_name, PTR_RET(sss->mclk));
		return PTR_RET(sss->mclk);
	}

#ifdef CONFIG_EVB_PLATFORM
	ret = clk_set_parent(sss->mclk, pclk);
	if (ret != 0) {
		SS_ERR("clk_set_parent() failed! return %d\n", ret);
		return ret;
	}

	ret = clk_set_rate(sss->mclk, SS_CLK_RATE);
	if (ret != 0) {
		SS_ERR("clk_set_rate(%d) failed! return %d\n", SS_CLK_RATE, ret);
		return ret;
	}
#endif
	SS_DBG("SS mclk %luMHz, pclk %luMHz\n", clk_get_rate(sss->mclk)/1000000,
			clk_get_rate(pclk)/1000000);

	if (clk_prepare_enable(sss->mclk)) {
		SS_ERR("Couldn't enable module clock\n");
		return -EBUSY;
	}

	clk_put(pclk);
	return 0;
}
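One caveat worth noting for the IS_ERR_OR_NULL checks above: PTR_RET() only decodes ERR_PTR values, so an error path reached because clk_get() returned NULL would hand the caller 0, i.e. success. Here is a hedged sketch of a stricter variant (ptr_ret_or_enodev is a hypothetical helper, not part of <linux/err.h>, and -ENODEV is an arbitrary choice of errno):

#include <linux/err.h>
#include <linux/errno.h>

/* Hypothetical helper: like PTR_RET(), but also treat a NULL pointer as a
 * failure so that IS_ERR_OR_NULL-style error paths cannot report success. */
static inline int ptr_ret_or_enodev(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	if (!ptr)
		return -ENODEV;
	return 0;
}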
Example #13
int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
{
	struct dentry *d;

	d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
			write, &dss_debug_fops);

	return PTR_RET(d);
}
Example #14
static int __init rtc_init(void)
{
	struct platform_device *pdev;

	if (!mach_hwclk)
		return -ENODEV;

	pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
	return PTR_RET(pdev);
}
Example #15
static __init int q40_add_kbd_device(void)
{
    struct platform_device *pdev;

    if (!MACH_IS_Q40)
        return -ENODEV;

    pdev = platform_device_register_simple("q40kbd", -1, NULL, 0);
    return PTR_RET(pdev);
}
Example #16
File: dhd_dbg.c Project: mbgg/linux
int brcmf_debugfs_attach(struct brcmf_pub *drvr)
{
	struct device *dev = drvr->bus_if->dev;

	if (!root_folder)
		return -ENODEV;

	drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder);
	return PTR_RET(drvr->dbgfs_dir);
}
static int __net_init iptable_nat_net_init(struct net *net)
{
	struct ipt_replace *repl;

	repl = ipt_alloc_initial_table(&nf_nat_ipv4_table);
	if (repl == NULL)
		return -ENOMEM;
	net->ipv4.nat_table = ipt_register_table(net, &nf_nat_ipv4_table, repl);
	kfree(repl);
	return PTR_RET(net->ipv4.nat_table);
}
Example #18
static int __init ps3_rtc_init(void)
{
	struct platform_device *pdev;

	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
		return -ENODEV;

	pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0);

	return PTR_RET(pdev);
}
static int __net_init iptable_mangle_net_init(struct net *net)
{
	struct ipt_replace *repl;

	repl = ipt_alloc_initial_table(&packet_mangler);
	if (repl == NULL)
		return -ENOMEM;
	net->ipv4.iptable_mangle =
		ipt_register_table(net, &packet_mangler, repl);
	kfree(repl);
	return PTR_RET(net->ipv4.iptable_mangle);
}
static int __net_init ip6table_security_net_init(struct net *net)
{
	struct ip6t_replace *repl;

	repl = ip6t_alloc_initial_table(&security_table);
	if (repl == NULL)
		return -ENOMEM;
	net->ipv6.ip6table_security =
		ip6t_register_table(net, &security_table, repl);
	kfree(repl);
	return PTR_RET(net->ipv6.ip6table_security);
}
static int __net_init iptable_filter_net_init(struct net *net)
{
	struct ipt_replace *repl;

	repl = ipt_alloc_initial_table(&packet_filter);
	if (repl == NULL)
		return -ENOMEM;
	/* Entry 1 is the FORWARD hook */
	((struct ipt_standard *)repl->entries)[1].target.verdict =
		forward ? -NF_ACCEPT - 1 : -NF_DROP - 1;

	net->ipv4.iptable_filter =
		ipt_register_table(net, &packet_filter, repl);
	kfree(repl);
	return PTR_RET(net->ipv4.iptable_filter);
}
static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info)
{
	struct dentry *cpu_dir;
	char buffer[TMP_SIZE];
	void *rc;

	snprintf(buffer, TMP_SIZE, "%i", phys_cpu__cpu_addr(diag204_info_type,
							    cpu_info));
	cpu_dir = hypfs_mkdir(cpus_dir, buffer);
	if (IS_ERR(cpu_dir))
		return PTR_ERR(cpu_dir);
	rc = hypfs_create_u64(cpu_dir, "mgmtime",
			      phys_cpu__mgm_time(diag204_info_type, cpu_info));
	if (IS_ERR(rc))
		return PTR_ERR(rc);
	diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer);
	rc = hypfs_create_str(cpu_dir, "type", buffer);
	return PTR_RET(rc);
}
Example #23
static int fan53555_regulator_register(struct fan53555_device_info *di,
			struct regulator_config *config)
{
	struct regulator_desc *rdesc = &di->desc;

	rdesc->name = "fan53555-reg";
	rdesc->ops = &fan53555_regulator_ops;
	rdesc->type = REGULATOR_VOLTAGE;
	rdesc->n_voltages = FAN53555_NVOLTAGES;
	rdesc->enable_reg = di->vol_reg;
	rdesc->enable_mask = VSEL_BUCK_EN;
	rdesc->min_uV = di->vsel_min;
	rdesc->uV_step = di->vsel_step;
	rdesc->vsel_reg = di->vol_reg;
	rdesc->vsel_mask = VSEL_NSEL_MASK;
	rdesc->owner = THIS_MODULE;

	di->rdev = regulator_register(&di->desc, config);
	return PTR_RET(di->rdev);
}
int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
{
	int i, rc = 0;

	for (i = 0; i < num_clk; i++) {
		clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
		rc = PTR_RET(clk_arry[i].clk);
		if (rc) {
			DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
				__builtin_return_address(0), __func__,
				clk_arry[i].clk_name, rc);
			goto error;
		}
	}

	return rc;

error:
	msm_dss_put_clk(clk_arry, num_clk);

	return rc;
} /* msm_dss_get_clk */
Example #25
File: i2c.c Project: AllenDou/linux
static inline int omap2_i2c_add_bus(int bus_id)
{
	int l;
	struct omap_hwmod *oh;
	struct platform_device *pdev;
	char oh_name[MAX_OMAP_I2C_HWMOD_NAME_LEN];
	struct omap_i2c_bus_platform_data *pdata;
	struct omap_i2c_dev_attr *dev_attr;

	omap2_i2c_mux_pins(bus_id);

	l = snprintf(oh_name, MAX_OMAP_I2C_HWMOD_NAME_LEN, "i2c%d", bus_id);
	WARN(l >= MAX_OMAP_I2C_HWMOD_NAME_LEN,
		"String buffer overflow in I2C%d device setup\n", bus_id);
	oh = omap_hwmod_lookup(oh_name);
	if (!oh) {
		pr_err("Could not look up %s\n", oh_name);
		return -EEXIST;
	}

	pdata = &i2c_pdata[bus_id - 1];
	/*
	 * pass the hwmod class's CPU-specific knowledge of I2C IP revision in
	 * use, and functionality implementation flags, up to the OMAP I2C
	 * driver via platform data
	 */
	pdata->rev = oh->class->rev;

	dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr;
	pdata->flags = dev_attr->flags;

	pdev = omap_device_build(name, bus_id, oh, pdata,
			sizeof(struct omap_i2c_bus_platform_data),
			NULL, 0, 0);
	WARN(IS_ERR(pdev), "Could not build omap_device for %s\n", name);

	return PTR_RET(pdev);
}
Example #26
/**
 * omap2_init_pmu - creates and registers PMU platform device
 * @oh_num:	Number of OMAP HWMODs required to create PMU device
 * @oh_names:	Array of OMAP HWMODS names required to create PMU device
 *
 * Uses OMAP HWMOD framework to create and register an ARM PMU device
 * from a list of HWMOD names passed. Currently supports OMAP2, OMAP3
 * and OMAP4 devices.
 */
static int __init omap2_init_pmu(unsigned oh_num, char *oh_names[])
{
	int i;
	struct omap_hwmod *oh[3];
	char *dev_name = "arm-pmu";

	if ((!oh_num) || (oh_num > 3))
		return -EINVAL;

	for (i = 0; i < oh_num; i++) {
		oh[i] = omap_hwmod_lookup(oh_names[i]);
		if (!oh[i]) {
			pr_err("Could not look up %s hwmod\n", oh_names[i]);
			return -ENODEV;
		}
	}

	omap_pmu_dev = omap_device_build_ss(dev_name, -1, oh, oh_num, NULL, 0);
	WARN(IS_ERR(omap_pmu_dev), "Can't build omap_device for %s.\n",
	     dev_name);

	return PTR_RET(omap_pmu_dev);
}
Example #27
static int da9052_rtc_probe(struct platform_device *pdev)
{
	struct da9052_rtc *rtc;
	int ret;

	rtc = devm_kzalloc(&pdev->dev, sizeof(struct da9052_rtc), GFP_KERNEL);
	if (!rtc)
		return -ENOMEM;

	rtc->da9052 = dev_get_drvdata(pdev->dev.parent);
	platform_set_drvdata(pdev, rtc);
	rtc->irq =  DA9052_IRQ_ALARM;
	ret = da9052_request_irq(rtc->da9052, rtc->irq, "ALM",
				da9052_rtc_irq, rtc);
	if (ret != 0) {
		rtc_err(rtc->da9052, "irq registration failed: %d\n", ret);
		return ret;
	}

	rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
				       &da9052_rtc_ops, THIS_MODULE);
	return PTR_RET(rtc->rtc);
}
Example #28
static int __init omap_gpmc_init(void)
{
	struct omap_hwmod *oh;
	struct platform_device *pdev;
	char *oh_name = "gpmc";

	/*
	 * if the board boots up with a populated DT, do not
	 * manually add the device from this initcall
	 */
	if (of_have_populated_dt())
		return -ENODEV;

	oh = omap_hwmod_lookup(oh_name);
	if (!oh) {
		pr_err("Could not look up %s\n", oh_name);
		return -ENODEV;
	}

	pdev = omap_device_build("omap-gpmc", -1, oh, NULL, 0);
	WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name);

	return PTR_RET(pdev);
}
Example #29
	/* avoid HT siblings if possible */
	if (cpumask_empty(tmp))
		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&round_robin_lock);
		return;
	}
	for_each_cpu(cpu, tmp) {
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}

	if (tsk_in_cpu[tsk_index] != -1)
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = preferred_cpu;
	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
	cpu_weight[preferred_cpu]++;
	mutex_unlock(&round_robin_lock);

	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
}

static void exit_round_robin(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = -1;
}

static unsigned int idle_pct = 5; /* percentage */
static unsigned int round_robin_time = 1; /* second */
static int power_saving_thread(void *data)
{
	struct sched_param param = {.sched_priority = 1};
	int do_sleep;
	unsigned int tsk_index = (unsigned long)data;
	u64 last_jiffies = 0;

	sched_setscheduler(current, SCHED_RR, &param);
	set_freezable();

	while (!kthread_should_stop()) {
		int cpu;
		u64 expire_time;

		try_to_freeze();

		/* round robin to cpus */
		if (last_jiffies + round_robin_time * HZ < jiffies) {
			last_jiffies = jiffies;
			round_robin_cpu(tsk_index);
		}

		do_sleep = 0;

		expire_time = jiffies + HZ * (100 - idle_pct) / 100;

		while (!need_resched()) {
			if (tsc_detected_unstable && !tsc_marked_unstable) {
				/* TSC could halt in idle, so notify users */
				mark_tsc_unstable("TSC halts in idle");
				tsc_marked_unstable = 1;
			}
			if (lapic_detected_unstable && !lapic_marked_unstable) {
				int i;
				/* LAPIC could halt in idle, so notify users */
				for_each_online_cpu(i)
					clockevents_notify(
						CLOCK_EVT_NOTIFY_BROADCAST_ON,
						&i);
				lapic_marked_unstable = 1;
			}
			local_irq_disable();
			cpu = smp_processor_id();
			if (lapic_marked_unstable)
				clockevents_notify(
					CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
			stop_critical_timings();

			__monitor((void *)&current_thread_info()->flags, 0, 0);
			smp_mb();
			if (!need_resched())
				__mwait(power_saving_mwait_eax, 1);

			start_critical_timings();
			if (lapic_marked_unstable)
				clockevents_notify(
					CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
			local_irq_enable();

			if (jiffies > expire_time) {
				do_sleep = 1;
				break;
			}
		}

		/*
		 * The current sched_rt code caps RT task runtime: once an RT
		 * task has used 95% of the CPU, it is scheduled out for 5% so
		 * that other tasks are not starved. The mechanism only works
		 * when every CPU has an RT task running; if one CPU has none,
		 * RT tasks from other CPUs borrow its CPU time and can exceed
		 * 95%. To keep 'avoid starvation' working, take a nap here.
		 */
		if (do_sleep)
			schedule_timeout_killable(HZ * idle_pct / 100);
	}

	exit_round_robin(tsk_index);
	return 0;
}

static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
static int create_power_saving_task(void)
{
	int rc = -ENOMEM;

	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
		(void *)(unsigned long)ps_tsk_num,
		"acpi_pad/%d", ps_tsk_num);
	rc = PTR_RET(ps_tsks[ps_tsk_num]);
	if (!rc)
		ps_tsk_num++;
	else
		ps_tsks[ps_tsk_num] = NULL;

	return rc;
}
Example #30
static int __net_init frame_filter_net_init(struct net *net)
{
	net->xt.frame_filter = ebt_register_table(net, &frame_filter);
	return PTR_RET(net->xt.frame_filter);
}