Example #1
/*
 * Platform driver:
 */
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (!drm)
		return -ENOMEM;

	drm->platformdev = to_platform_device(dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	drm->dev_private = priv;

	priv->wq = alloc_ordered_workqueue("etnaviv", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto out_wq;
	}

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;

	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_bind;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_register;

	return 0;

out_register:
	component_unbind_all(dev, drm);
out_bind:
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);
out_wq:
	kfree(priv);
out_unref:
	drm_dev_unref(drm);

	return ret;
}
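For contrast with the error unwinding above, a rough sketch of the matching unbind path, reconstructed from the bind order (the real etnaviv_unbind() may differ in detail):

static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	/* Undo etnaviv_bind() in reverse order. */
	component_unbind_all(dev, drm);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_unref(drm);
}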
Example #2
int
linux_workqueue_init(void)
{

	system_wq = alloc_ordered_workqueue("lnxsyswq", 0);
	if (system_wq == NULL)
		return ENOMEM;

	return 0;
}
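NetBSD convention shows through here: this Linux-emulation shim returns positive errno values (ENOMEM, not -ENOMEM), and the blank line after the brace marks a function with no locals. A minimal sketch of the matching teardown, assuming all users of system_wq are gone by then:

void
linux_workqueue_fini(void)
{

	destroy_workqueue(system_wq);
	system_wq = NULL;
}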
Example #3
static int __devinit cx18_create_in_workq(struct cx18 *cx)
{
	snprintf(cx->in_workq_name, sizeof(cx->in_workq_name), "%s-in",
		 cx->v4l2_dev.name);
	cx->in_work_queue = alloc_ordered_workqueue(cx->in_workq_name, 0);
	if (cx->in_work_queue == NULL) {
		CX18_ERR("Unable to create incoming mailbox handler thread\n");
		return -ENOMEM;
	}
	return 0;
}
Example #4
static int __init xusb_init(void)
{
	xusb_wq = alloc_ordered_workqueue("xusb", 0);
	if (xusb_wq == NULL)
		return -ENOMEM;

	return 0;
}
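The module-exit counterpart is not shown; assuming the usual pattern, it would simply be:

static void __exit xusb_exit(void)
{
	/* destroy_workqueue() drains any remaining work before freeing. */
	destroy_workqueue(xusb_wq);
}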
Example #5
void MTKPP_Init(void)
{
	int i;
	struct {
		MTKPP_ID uid;
		MTKPP_BUFFERTYPE type;
		int data_size;
		int line_size;
	} MTKPP_TABLE[] =
	{
		{MTKPP_ID_FW,       MTKPP_BUFFERTYPE_QUEUEBUFFER,  248 * 1024,  1 * 1024}, /* 256 KB */
		{MTKPP_ID_SYNC,     MTKPP_BUFFERTYPE_RINGBUFFER,    56 * 1024,  1 * 1024}, /*  64 KB */
	};

	for (i = 0; i < MTKPP_ID_SIZE; ++i)
	{
		if (i != MTKPP_TABLE[i].uid)
		{
			_MTKPP_DEBUG_LOG("%s: index(%d) != tabel_uid(%d)", __func__, i, MTKPP_TABLE[i].uid);
			goto err_out;
		}

		g_MTKPPdata[i] = MTKPP_AllocStruct(MTKPP_TABLE[i].type);

		if (g_MTKPPdata[i] == NULL)
		{
			_MTKPP_DEBUG_LOG("%s: alloc struct fail: flags = %d", __func__, MTKPP_TABLE[i].type);
			goto err_out;
		}

		if (MTKPP_TABLE[i].data_size > 0)
		{
			MTKPP_AllocData(g_MTKPPdata[i], MTKPP_TABLE[i].data_size, MTKPP_TABLE[i].line_size);
			MTKPP_CleanData(g_MTKPPdata[i]);
		}
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
	g_MTKPP_proc = proc_create("gpulog", 0, NULL, &g_MTKPP_proc_ops);
#else
	g_MTKPP_proc = create_proc_entry("gpulog", 0, NULL);
	if (g_MTKPP_proc)
		g_MTKPP_proc->proc_fops = &g_MTKPP_proc_ops;
#endif

#if defined(ENABLE_AEE_WHEN_LOCKUP)
	g_MTKPP_workqueue.psWorkQueue = alloc_ordered_workqueue("mwp", WQ_FREEZABLE | WQ_MEM_RECLAIM);
	INIT_WORK(&g_MTKPP_worker.sWork, MTKPP_WORKR_Handle);
#endif

	g_init_done = 1;

err_out:
	return;
}
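A sketch of how the queue allocated under ENABLE_AEE_WHEN_LOCKUP is presumably consumed; the trigger function below is an assumption for illustration, not part of this driver's listing:

#if defined(ENABLE_AEE_WHEN_LOCKUP)
/* Hypothetical trigger: run the AEE handler in workqueue context. */
void MTKPP_TriggerAEE(void)
{
	if (g_MTKPP_workqueue.psWorkQueue)
		queue_work(g_MTKPP_workqueue.psWorkQueue, &g_MTKPP_worker.sWork);
}
#endif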
Example #6
/* Create a swap chain work queue */
OMAPLFB_ERROR OMAPLFBCreateSwapQueue(OMAPLFB_SWAPCHAIN *psSwapChain)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
	/*
	 * Calling alloc_ordered_workqueue with the WQ_FREEZABLE and
	 * WQ_MEM_RECLAIM flags set, (currently) has the same effect as
	 * calling create_freezable_workqueue. None of the other WQ
	 * flags are valid. Setting WQ_MEM_RECLAIM should allow the
	 * workqueue to continue to service the swap chain in low memory
	 * conditions, preventing the driver from holding on to
	 * resources longer than it needs to.
	 */
#if (LINUX_VERSION_CODE == KERNEL_VERSION(2,6,37))
	psSwapChain->psWorkQueue = alloc_ordered_workqueue(DEVNAME, WQ_FREEZEABLE | WQ_MEM_RECLAIM);
#else
	psSwapChain->psWorkQueue = alloc_ordered_workqueue(DEVNAME, WQ_FREEZABLE | WQ_MEM_RECLAIM);
#endif

#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
	psSwapChain->psWorkQueue = create_freezable_workqueue(DEVNAME);
#else
	/*
	 * Create a single-threaded, freezable, rt-prio workqueue.
	 * Such workqueues are frozen with user threads when a system
	 * suspends, before driver suspend entry points are called.
	 * This ensures this driver will not call into the Linux
	 * framebuffer driver after the latter is suspended.
	 */
	psSwapChain->psWorkQueue = __create_workqueue(DEVNAME, 1, 1, 1);
#endif
#endif
	if (psSwapChain->psWorkQueue == NULL)
	{
		printk(KERN_ERR DRIVER_PREFIX ": %s: Device %u: Couldn't create workqueue\n", __FUNCTION__, psSwapChain->uiFBDevID);

		return (OMAPLFB_ERROR_INIT_FAILURE);
	}

	return (OMAPLFB_OK);
}
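The flags discussed in the comment above matter only at creation time; whichever branch ran, teardown is uniform. A sketch of the matching destroy routine, assumed to mirror the driver's actual cleanup:

void OMAPLFBDestroySwapQueue(OMAPLFB_SWAPCHAIN *psSwapChain)
{
	/* Drains outstanding swap work, then frees the queue. */
	destroy_workqueue(psSwapChain->psWorkQueue);
}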
Example #7
int __init pm_autosleep_init(void)
{
	autosleep_ws = wakeup_source_register("autosleep");
	if (!autosleep_ws)
		return -ENOMEM;

	autosleep_wq = alloc_ordered_workqueue("autosleep", 0);
	if (autosleep_wq)
		return 0;

	wakeup_source_unregister(autosleep_ws);
	return -ENOMEM;
}
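How the queue is then used, essentially as in kernel/power/autosleep.c: all suspend attempts go through a single work item, and the ordered queue guarantees that no two attempts ever run concurrently.

static DECLARE_WORK(suspend_work, try_to_suspend);

void queue_up_suspend_work(void)
{
	if (autosleep_state > PM_SUSPEND_ON)
		queue_work(autosleep_wq, &suspend_work);
}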
Example #8
/**
 * load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
static int dev_load(struct drm_device *dev, unsigned long flags)
{
	struct omap_drm_platform_data *pdata = dev->dev->platform_data;
	struct omap_drm_private *priv;
	int ret;

	DBG("load: dev=%p", dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->omaprev = pdata->omaprev;

	dev->dev_private = priv;

	priv->wq = alloc_ordered_workqueue("omapdrm", 0);
	if (!priv->wq) {
		dev->dev_private = NULL;
		kfree(priv);
		return -ENOMEM;
	}

	spin_lock_init(&priv->list_lock);
	INIT_LIST_HEAD(&priv->obj_list);

	omap_gem_init(dev);

	ret = omap_modeset_init(dev);
	if (ret) {
		dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
		destroy_workqueue(priv->wq);
		dev->dev_private = NULL;
		kfree(priv);
		return ret;
	}

	ret = drm_vblank_init(dev, priv->num_crtcs);
	if (ret)
		dev_warn(dev->dev, "could not init vblank\n");

	priv->fbdev = omap_fbdev_init(dev);
	if (!priv->fbdev) {
		dev_warn(dev->dev, "omap_fbdev_init failed\n");
		/* well, limp along without an fbdev.. maybe X11 will work? */
	}

	/* store off drm_device for use in pm ops */
	dev_set_drvdata(dev->dev, dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}
Example #9
/**
 * tb_domain_alloc() - Allocate a domain
 * @nhi: Pointer to the host controller
 * @privsize: Size of the connection manager private data
 *
 * Allocates and initializes a new Thunderbolt domain. Connection
 * managers are expected to call this and then fill in @cm_ops
 * accordingly.
 *
 * Call tb_domain_put() to release the domain before it has been added
 * to the system.
 *
 * Return: allocated domain structure on success or %NULL in case of error
 */
struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
{
	struct tb *tb;

	/*
	 * Make sure the structure sizes match what the hardware
	 * expects, because bit-fields are being used.
	 */
	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);

	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
	if (!tb)
		return NULL;

	tb->nhi = nhi;
	mutex_init(&tb->lock);

	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
	if (tb->index < 0)
		goto err_free;

	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
	if (!tb->wq)
		goto err_remove_ida;

	tb->dev.parent = &nhi->pdev->dev;
	tb->dev.bus = &tb_bus_type;
	tb->dev.type = &tb_domain_type;
	tb->dev.groups = domain_attr_groups;
	dev_set_name(&tb->dev, "domain%d", tb->index);
	device_initialize(&tb->dev);

	return tb;

err_remove_ida:
	ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
	kfree(tb);

	return NULL;
}
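This example also shows that the name argument is a printf-style format string ("thunderbolt%d" formatted with tb->index). That follows from the definition of alloc_ordered_workqueue, which in kernels of this era was roughly:

#define alloc_ordered_workqueue(fmt, flags, args...)			\
	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

The max_active limit of 1 is what makes the queue ordered: at most one work item executes at any time.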
Example #10
int __init pm_autosleep_init(void)
{
	int ret;

	ret = sysfs_create_group(power_kobj, &attr_group);
	if (ret) {
		pr_err("pm_autosleep_init: sysfs_create_group failed\n");
	}

	autosleep_ws = wakeup_source_register("autosleep");
	if (!autosleep_ws)
		return -ENOMEM;

	autosleep_wq = alloc_ordered_workqueue("autosleep", 0);
	if (autosleep_wq)
		return 0;

	wakeup_source_unregister(autosleep_ws);
	return -ENOMEM;
}
Example #11
void proc_mali_register(void)
{
    mali_pentry = proc_mkdir("mali", NULL);

    if (!mali_pentry)
        return;

    g_aee_workqueue = alloc_ordered_workqueue("mali_aeewp", WQ_FREEZABLE | WQ_MEM_RECLAIM);
    INIT_WORK(&g_aee_work, aee_Handle);

    proc_create("help", 0, mali_pentry, &kbasep_gpu_help_debugfs_fops);
    proc_create("memory_usage", 0, mali_pentry, &kbasep_gpu_memory_usage_debugfs_open);
    proc_create("utilization", 0, mali_pentry, &kbasep_gpu_utilization_debugfs_fops);
    proc_create("frequency", 0, mali_pentry, &kbasep_gpu_frequency_debugfs_fops);
    proc_create("dvfs_enable", S_IRUGO | S_IWUSR, mali_pentry, &kbasep_gpu_dvfs_enable_debugfs_fops);
    //    proc_create("input_boost", S_IRUGO | S_IWUSR, mali_pentry, &kbasep_gpu_input_boost_debugfs_fops);
    //    proc_create("dvfs_freq", S_IRUGO | S_IWUSR, mali_pentry, &kbasep_gpu_dvfs_freq_debugfs_fops);
    //    proc_create("dvfs_threshold", S_IRUGO | S_IWUSR, mali_pentry, &kbasep_gpu_dvfs_threshold_debugfs_fops);
    //    proc_create("dvfs_deferred_count", S_IRUGO | S_IWUSR, mali_pentry, &kbasep_gpu_dvfs_deferred_count_debugfs_fops);
}
Example #12
/* Create a swap chain work queue */
static DC_SUNXI_ERROR DC_SUNXICreateSwapQueue(DC_SUNXI_SWAPCHAIN *psSwapChain)
{
	/*
	 * Calling alloc_ordered_workqueue with the WQ_FREEZABLE and
	 * WQ_MEM_RECLAIM flags set, (currently) has the same effect as
	 * calling create_freezable_workqueue. None of the other WQ
	 * flags are valid. Setting WQ_MEM_RECLAIM should allow the
	 * workqueue to continue to service the swap chain in low memory
	 * conditions, preventing the driver from holding on to
	 * resources longer than it needs to.
	 */
	psSwapChain->psWorkQueue = alloc_ordered_workqueue(DEVNAME, WQ_FREEZABLE | WQ_MEM_RECLAIM);
	if (psSwapChain->psWorkQueue == NULL)
	{
		printk(KERN_ERR DRIVER_PREFIX ": %s: Device %u: Couldn't create workqueue\n", __FUNCTION__, psSwapChain->uiFBDevID);
		
		return (DC_SUNXI_ERROR_INIT_FAILURE);
	}

	return (DC_SUNXI_OK);
}
Example #13
static int __init nfcsim_init(void)
{
	int rc;

	/* We need an ordered wq to ensure that poll_work items are executed
	 * one at a time.
	 */
	wq = alloc_ordered_workqueue("nfcsim", 0);
	if (!wq) {
		rc = -ENOMEM;
		goto exit;
	}

	dev0 = nfcsim_init_dev();
	if (IS_ERR(dev0)) {
		rc = PTR_ERR(dev0);
		goto exit;
	}

	dev1 = nfcsim_init_dev();
	if (IS_ERR(dev1)) {
		kfree(dev0);

		rc = PTR_ERR(dev1);
		goto exit;
	}

	dev0->peer_dev = dev1;
	dev1->peer_dev = dev0;

	pr_debug("NFCsim " NFCSIM_VERSION " initialized\n");

	rc = 0;
exit:
	if (rc)
		pr_err("Failed to initialize nfcsim driver (%d)\n",
		       rc);

	return rc;
}
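A sketch of why ordering matters here; the helper and field names are assumptions for illustration. Both simulated devices poll through the same queue, so their poll work items can touch each other's state without extra locking:

static void nfcsim_start_poll(struct nfcsim *dev)
{
	/* dev->poll_work is assumed to be a delayed_work set up with
	 * INIT_DELAYED_WORK(); the ordered queue serializes it against
	 * the peer device's poll work.
	 */
	queue_delayed_work(wq, &dev->poll_work, msecs_to_jiffies(10));
}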
Example #14
OMAPLFB_ERROR OMAPLFBCreateSwapQueue(OMAPLFB_SWAPCHAIN *psSwapChain)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
	psSwapChain->psWorkQueue = alloc_ordered_workqueue(DEVNAME, WQ_FREEZABLE | WQ_MEM_RECLAIM);
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
	psSwapChain->psWorkQueue = create_freezable_workqueue(DEVNAME);
#else
	psSwapChain->psWorkQueue = __create_workqueue(DEVNAME, 1, 1, 1);
#endif
#endif
	if (psSwapChain->psWorkQueue == NULL)
	{
		printk(KERN_ERR DRIVER_PREFIX ": %s: Device %u: Couldn't create workqueue\n", __FUNCTION__, psSwapChain->uiFBDevID);

		return (OMAPLFB_ERROR_INIT_FAILURE);
	}

	return (OMAPLFB_OK);
}
Example #15
static int __init modem_init(void)
{
	static struct work_struct modem_init_work;
	struct proc_dir_entry *entry;

	/* added by zhuzheng for v8r2 */
	if (1 == check_acore_only())
		return 0;

	if (mmc_read_done()) {
		printk(KERN_ERR "%s: mmc_read_done failed, not loading modem\n", __func__);
		return -1;
	}

	modem_init_workqueue = alloc_ordered_workqueue("kmodeminitqd", 0);
	if (!modem_init_workqueue) {
		printk(KERN_ERR "%s: failed to create modem init workqueue\n", __func__);
		return -1;
	}

	entry = proc_create("modem_depend", 0660, NULL, &modem_work_depend_file_ops);
	if (!entry) {
		printk(KERN_ERR "modem_depend: failed to create proc entry\n");
		goto destroy_workqueue;
	}

	INIT_WORK(&modem_init_work, modem_init_work_func);

	queue_work(modem_init_workqueue, &modem_init_work);

	goto out;

destroy_workqueue:
	destroy_workqueue(modem_init_workqueue);
out:

	return 0;
}
Example #16
/* [---] Debug for active wakelock before entering suspend */
int __init pm_autosleep_init(void)
{
	/* [+++] Debug for active wakelock before entering suspend */
	int ret;

	pmsp_dev.name = "PowerManagerServicePrinter";
	pmsp_dev.index = 0;
	INIT_WORK(&pms_printer, pms_printer_func);
	ret = switch_dev_register(&pmsp_dev);
	if (ret < 0)
		printk("%s: failed to register switch power_manager_printer\n", __func__);
	else
		printk("%s: registered pmsp switch\n", __func__);
	/* [---] Debug for active wakelock before entering suspend */
	autosleep_ws = wakeup_source_register("autosleep");
	if (!autosleep_ws)
		return -ENOMEM;

	autosleep_wq = alloc_ordered_workqueue("autosleep", 0);
	if (autosleep_wq)
		return 0;

	wakeup_source_unregister(autosleep_ws);
	return -ENOMEM;
}
Example #17
/*
 * driver allocation handlers.
 */
int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
{
	int retval = -ENOMEM;

	/*
	 * Allocate the driver data memory, if necessary.
	 */
	if (rt2x00dev->ops->drv_data_size > 0) {
		rt2x00dev->drv_data = kzalloc(rt2x00dev->ops->drv_data_size,
			                      GFP_KERNEL);
		if (!rt2x00dev->drv_data) {
			retval = -ENOMEM;
			goto exit;
		}
	}

	spin_lock_init(&rt2x00dev->irqmask_lock);
	mutex_init(&rt2x00dev->csr_mutex);

	set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);

	/*
	 * Make room for rt2x00_intf inside the per-interface
	 * structure ieee80211_vif.
	 */
	rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf);

	/*
	 * Determine which operating modes are supported, all modes
	 * which require beaconing, depend on the availability of
	 * beacon entries.
	 */
	rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
	if (rt2x00dev->ops->bcn->entry_num > 0)
		rt2x00dev->hw->wiphy->interface_modes |=
		    BIT(NL80211_IFTYPE_ADHOC) |
		    BIT(NL80211_IFTYPE_AP) |
		    BIT(NL80211_IFTYPE_MESH_POINT) |
		    BIT(NL80211_IFTYPE_WDS);

	/*
	 * Initialize work.
	 */
	rt2x00dev->workqueue =
	    alloc_ordered_workqueue(wiphy_name(rt2x00dev->hw->wiphy), 0);
	if (!rt2x00dev->workqueue) {
		retval = -ENOMEM;
		goto exit;
	}

	INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
	INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup);
	INIT_WORK(&rt2x00dev->sleep_work, rt2x00lib_sleep);

	/*
	 * Let the driver probe the device to detect the capabilities.
	 */
	retval = rt2x00dev->ops->lib->probe_hw(rt2x00dev);
	if (retval) {
		ERROR(rt2x00dev, "Failed to allocate device.\n");
		goto exit;
	}

	/*
	 * Allocate queue array.
	 */
	retval = rt2x00queue_allocate(rt2x00dev);
	if (retval)
		goto exit;

	/*
	 * Initialize ieee80211 structure.
	 */
	retval = rt2x00lib_probe_hw(rt2x00dev);
	if (retval) {
		ERROR(rt2x00dev, "Failed to initialize hw.\n");
		goto exit;
	}

	/*
	 * Register extra components.
	 */
	rt2x00link_register(rt2x00dev);
	rt2x00leds_register(rt2x00dev);
	rt2x00debug_register(rt2x00dev);

	return 0;

exit:
	rt2x00lib_remove_dev(rt2x00dev);

	return retval;
}
Example #18
static int atmel_hlcdc_dc_load(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	const struct of_device_id *match;
	struct atmel_hlcdc_dc *dc;
	int ret;

	match = of_match_node(atmel_hlcdc_of_match, dev->dev->parent->of_node);
	if (!match) {
		dev_err(&pdev->dev, "invalid compatible string\n");
		return -ENODEV;
	}

	if (!match->data) {
		dev_err(&pdev->dev, "invalid hlcdc description\n");
		return -EINVAL;
	}

	dc = devm_kzalloc(dev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0);
	if (!dc->wq)
		return -ENOMEM;

	init_waitqueue_head(&dc->commit.wait);
	dc->desc = match->data;
	dc->hlcdc = dev_get_drvdata(dev->dev->parent);
	dev->dev_private = dc;

	ret = clk_prepare_enable(dc->hlcdc->periph_clk);
	if (ret) {
		dev_err(dev->dev, "failed to enable periph_clk\n");
		goto err_destroy_wq;
	}

	pm_runtime_enable(dev->dev);

	ret = drm_vblank_init(dev, 1);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto err_periph_clk_disable;
	}

	ret = atmel_hlcdc_dc_modeset_init(dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize mode setting\n");
		goto err_periph_clk_disable;
	}

	drm_mode_config_reset(dev);

	pm_runtime_get_sync(dev->dev);
	ret = drm_irq_install(dev, dc->hlcdc->irq);
	pm_runtime_put_sync(dev->dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to install IRQ handler\n");
		goto err_periph_clk_disable;
	}

	platform_set_drvdata(pdev, dev);

	drm_kms_helper_poll_init(dev);

	/* force connectors detection */
	drm_helper_hpd_irq_event(dev);

	return 0;

err_periph_clk_disable:
	pm_runtime_disable(dev->dev);
	clk_disable_unprepare(dc->hlcdc->periph_clk);

err_destroy_wq:
	destroy_workqueue(dc->wq);

	return ret;
}
Example #19
int whc_init(struct whc *whc)
{
	u32 whcsparams;
	int ret, i;
	resource_size_t start, len;

	spin_lock_init(&whc->lock);
	mutex_init(&whc->mutex);
	init_waitqueue_head(&whc->cmd_wq);
	init_waitqueue_head(&whc->async_list_wq);
	init_waitqueue_head(&whc->periodic_list_wq);
	whc->workqueue = alloc_ordered_workqueue(dev_name(&whc->umc->dev), 0);
	if (whc->workqueue == NULL) {
		ret = -ENOMEM;
		goto error;
	}
	INIT_WORK(&whc->dn_work, whc_dn_work);

	INIT_WORK(&whc->async_work, scan_async_work);
	INIT_LIST_HEAD(&whc->async_list);
	INIT_LIST_HEAD(&whc->async_removed_list);

	INIT_WORK(&whc->periodic_work, scan_periodic_work);
	for (i = 0; i < 5; i++)
		INIT_LIST_HEAD(&whc->periodic_list[i]);
	INIT_LIST_HEAD(&whc->periodic_removed_list);

	/* Map HC registers. */
	start = whc->umc->resource.start;
	len   = whc->umc->resource.end - start + 1;
	if (!request_mem_region(start, len, "whci-hc")) {
		dev_err(&whc->umc->dev, "can't request HC region\n");
		ret = -EBUSY;
		goto error;
	}
	whc->base_phys = start;
	whc->base = ioremap(start, len);
	if (!whc->base) {
		dev_err(&whc->umc->dev, "ioremap\n");
		ret = -ENOMEM;
		goto error;
	}

	whc_hw_reset(whc);

	/* Read maximum number of devices, keys and MMC IEs. */
	whcsparams = le_readl(whc->base + WHCSPARAMS);
	whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams);
	whc->n_keys    = WHCSPARAMS_TO_N_KEYS(whcsparams);
	whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams);

	dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n",
		whc->n_devices, whc->n_keys, whc->n_mmc_ies);

	whc->qset_pool = dma_pool_create("qset", &whc->umc->dev,
					 sizeof(struct whc_qset), 64, 0);
	if (whc->qset_pool == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	ret = asl_init(whc);
	if (ret < 0)
		goto error;
	ret = pzl_init(whc);
	if (ret < 0)
		goto error;

	/* Allocate and initialize a buffer for generic commands, the
	   Device Information buffer, and the Device Notification
	   buffer. */

	whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
					      &whc->gen_cmd_buf_dma, GFP_KERNEL);
	if (whc->gen_cmd_buf == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	whc->dn_buf = dma_alloc_coherent(&whc->umc->dev,
					 sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
					 &whc->dn_buf_dma, GFP_KERNEL);
	if (!whc->dn_buf) {
		ret = -ENOMEM;
		goto error;
	}
	whc_hw_init_dn_buf(whc);

	whc->di_buf = dma_alloc_coherent(&whc->umc->dev,
					 sizeof(struct di_buf_entry) * whc->n_devices,
					 &whc->di_buf_dma, GFP_KERNEL);
	if (!whc->di_buf) {
		ret = -ENOMEM;
		goto error;
	}
	whc_hw_init_di_buf(whc);

	return 0;

error:
	whc_clean_up(whc);
	return ret;
}
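whc_init() funnels every failure through one error label, which only works because whc_clean_up() tolerates partially-initialized state. A sketch of that idea, assuming the whc structure starts zeroed (the real function releases more resources than shown):

void whc_clean_up(struct whc *whc)
{
	if (whc->qset_pool)
		dma_pool_destroy(whc->qset_pool);
	if (whc->base)
		iounmap(whc->base);
	if (whc->base_phys)
		release_mem_region(whc->base_phys,
			whc->umc->resource.end - whc->umc->resource.start + 1);
	if (whc->workqueue)
		destroy_workqueue(whc->workqueue);
}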
Example #20
int qtnf_core_attach(struct qtnf_bus *bus)
{
	unsigned int i;
	int ret;

	qtnf_trans_init(bus);

	bus->fw_state = QTNF_FW_STATE_BOOT_DONE;
	qtnf_bus_data_rx_start(bus);

	bus->workqueue = alloc_ordered_workqueue("QTNF_BUS", 0);
	if (!bus->workqueue) {
		pr_err("failed to alloc main workqueue\n");
		ret = -ENOMEM;
		goto error;
	}

	INIT_WORK(&bus->event_work, qtnf_event_work_handler);

	ret = qtnf_cmd_send_init_fw(bus);
	if (ret) {
		pr_err("failed to init FW: %d\n", ret);
		goto error;
	}

	bus->fw_state = QTNF_FW_STATE_ACTIVE;

	ret = qtnf_cmd_get_hw_info(bus);
	if (ret) {
		pr_err("failed to get HW info: %d\n", ret);
		goto error;
	}

	if (bus->hw_info.ql_proto_ver != QLINK_PROTO_VER) {
		pr_err("qlink version mismatch %u != %u\n",
		       QLINK_PROTO_VER, bus->hw_info.ql_proto_ver);
		ret = -EPROTONOSUPPORT;
		goto error;
	}

	if (bus->hw_info.num_mac > QTNF_MAX_MAC) {
		pr_err("no support for number of MACs=%u\n",
		       bus->hw_info.num_mac);
		ret = -ERANGE;
		goto error;
	}

	for (i = 0; i < bus->hw_info.num_mac; i++) {
		ret = qtnf_core_mac_attach(bus, i);

		if (ret) {
			pr_err("MAC%u: attach failed: %d\n", i, ret);
			goto error;
		}
	}

	return 0;

error:
	qtnf_core_detach(bus);

	return ret;
}
Example #21
/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
	struct qca_data *qca;

	BT_DBG("hu %p qca_open", hu);

	qca = kzalloc(sizeof(struct qca_data), GFP_ATOMIC);
	if (!qca)
		return -ENOMEM;

	skb_queue_head_init(&qca->txq);
	skb_queue_head_init(&qca->tx_wait_q);
	spin_lock_init(&qca->hci_ibs_lock);
	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
	if (!qca->workqueue) {
		BT_ERR("QCA Workqueue not initialized properly");
		kfree(qca);
		return -ENOMEM;
	}

	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);

	qca->hu = hu;

	/* Assume we start with both sides asleep -- extra wakes OK */
	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;

	/* clocks actually on, but we start votes off */
	qca->tx_vote = false;
	qca->rx_vote = false;
	qca->flags = 0;

	qca->ibs_sent_wacks = 0;
	qca->ibs_sent_slps = 0;
	qca->ibs_sent_wakes = 0;
	qca->ibs_recv_wacks = 0;
	qca->ibs_recv_slps = 0;
	qca->ibs_recv_wakes = 0;
	qca->vote_last_jif = jiffies;
	qca->vote_on_ms = 0;
	qca->vote_off_ms = 0;
	qca->votes_on = 0;
	qca->votes_off = 0;
	qca->tx_votes_on = 0;
	qca->tx_votes_off = 0;
	qca->rx_votes_on = 0;
	qca->rx_votes_off = 0;

	hu->priv = qca;

	init_timer(&qca->wake_retrans_timer);
	qca->wake_retrans_timer.function = hci_ibs_wake_retrans_timeout;
	qca->wake_retrans_timer.data = (u_long)hu;
	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;

	init_timer(&qca->tx_idle_timer);
	qca->tx_idle_timer.function = hci_ibs_tx_idle_timeout;
	qca->tx_idle_timer.data = (u_long)hu;
	qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;

	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
	       qca->tx_idle_delay, qca->wake_retrans);

	return 0;
}
Example #22
int msm_edp_ctrl_init(struct msm_edp *edp)
{
	struct edp_ctrl *ctrl = NULL;
	struct device *dev;
	int ret;

	if (!edp) {
		pr_err("%s: edp is NULL!\n", __func__);
		return -EINVAL;
	}

	dev = &edp->pdev->dev;

	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	edp->ctrl = ctrl;
	ctrl->pdev = edp->pdev;

	ctrl->base = msm_ioremap(ctrl->pdev, "edp", "eDP");
	if (IS_ERR(ctrl->base))
		return PTR_ERR(ctrl->base);

	/* Get regulator, clock, gpio, pwm */
	ret = edp_regulator_init(ctrl);
	if (ret) {
		pr_err("%s:regulator init fail\n", __func__);
		return ret;
	}
	ret = edp_clk_init(ctrl);
	if (ret) {
		pr_err("%s:clk init fail\n", __func__);
		return ret;
	}
	ret = edp_gpio_config(ctrl);
	if (ret) {
		pr_err("%s:failed to configure GPIOs: %d", __func__, ret);
		return ret;
	}

	/* Init aux and phy */
	ctrl->aux = msm_edp_aux_init(dev, ctrl->base, &ctrl->drm_aux);
	if (!ctrl->aux || !ctrl->drm_aux) {
		pr_err("%s:failed to init aux\n", __func__);
		return -ENOMEM;
	}

	ctrl->phy = msm_edp_phy_init(dev, ctrl->base);
	if (!ctrl->phy) {
		pr_err("%s:failed to init phy\n", __func__);
		ret = -ENOMEM;
		goto err_destory_aux;
	}

	spin_lock_init(&ctrl->irq_lock);
	mutex_init(&ctrl->dev_mutex);
	init_completion(&ctrl->idle_comp);

	/* setup workqueue */
	ctrl->workqueue = alloc_ordered_workqueue("edp_drm_work", 0);
	if (!ctrl->workqueue) {
		pr_err("%s: failed to create workqueue\n", __func__);
		ret = -ENOMEM;
		goto err_destory_aux;
	}

	INIT_WORK(&ctrl->on_work, edp_ctrl_on_worker);
	INIT_WORK(&ctrl->off_work, edp_ctrl_off_worker);

	return 0;

err_destory_aux:
	msm_edp_aux_destroy(dev, ctrl->aux);
	ctrl->aux = NULL;
	return ret;
}
Example #23
int ieee80211_register_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);
	int result, i;
	enum ieee80211_band band;
	int channels, max_bitrates;
	bool supp_ht;
	static const u32 cipher_suites[] = {
		/* keep WEP first, it may be removed below */
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,

		/* keep last -- depends on hw flags! */
		WLAN_CIPHER_SUITE_AES_CMAC
	};

	if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns)
#ifdef CONFIG_PM
	    && (!local->ops->suspend || !local->ops->resume)
#endif
	    )
		return -EINVAL;

	if (hw->max_report_rates == 0)
		hw->max_report_rates = hw->max_rates;

	/*
	 * generic code guarantees at least one band,
	 * set this very early because much code assumes
	 * that hw.conf.channel is assigned
	 */
	channels = 0;
	max_bitrates = 0;
	supp_ht = false;
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		struct ieee80211_supported_band *sband;

		sband = local->hw.wiphy->bands[band];
		if (!sband)
			continue;
		if (!local->oper_channel) {
			/* init channel we're on */
			local->hw.conf.channel =
			local->oper_channel = &sband->channels[0];
			local->hw.conf.channel_type = NL80211_CHAN_NO_HT;
		}
		channels += sband->n_channels;

		if (max_bitrates < sband->n_bitrates)
			max_bitrates = sband->n_bitrates;
		supp_ht = supp_ht || sband->ht_cap.ht_supported;
	}

	local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
				      sizeof(void *) * channels, GFP_KERNEL);
	if (!local->int_scan_req)
		return -ENOMEM;

	/* if low-level driver supports AP, we also support VLAN */
	if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
		hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
		hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
	}

	/* mac80211 always supports monitor */
	hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
	hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);

	/*
	 * mac80211 doesn't support more than 1 channel, and also not more
	 * than one IBSS interface
	 */
	for (i = 0; i < hw->wiphy->n_iface_combinations; i++) {
		const struct ieee80211_iface_combination *c;
		int j;

		c = &hw->wiphy->iface_combinations[i];

		if (c->num_different_channels > 1)
			return -EINVAL;

		for (j = 0; j < c->n_limits; j++)
			if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) &&
			    c->limits[j].max > 1)
				return -EINVAL;
	}

#ifndef CONFIG_MAC80211_MESH
	/* mesh depends on Kconfig, but drivers should set it if they want */
	local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT);
#endif

	/* if the underlying driver supports mesh, mac80211 will (at least)
	 * provide routing of mesh authentication frames to userspace */
	if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
		local->hw.wiphy->flags |= WIPHY_FLAG_MESH_AUTH;

	/* mac80211 supports control port protocol changing */
	local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;

	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
		local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
	else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
		local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;

	WARN((local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
	     && (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
	     "U-APSD not supported with HW_PS_NULLFUNC_STACK\n");

	/*
	 * Calculate scan IE length -- we need this to alloc
	 * memory and to subtract from the driver limit. It
	 * includes the DS Params, (extended) supported rates, and HT
	 * information -- SSID is the driver's responsibility.
	 */
	local->scan_ies_len = 4 + max_bitrates /* (ext) supp rates */ +
		3 /* DS Params */;
	if (supp_ht)
		local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap);

	if (!local->ops->hw_scan) {
		/* For hw_scan, driver needs to set these up. */
		local->hw.wiphy->max_scan_ssids = 4;
		local->hw.wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
	}

	/*
	 * If the driver supports any scan IEs, then assume the
	 * limit includes the IEs mac80211 will add, otherwise
	 * leave it at zero and let the driver sort it out; we
	 * still pass our IEs to the driver but userspace will
	 * not be allowed to in that case.
	 */
	if (local->hw.wiphy->max_scan_ie_len)
		local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;

	/* Set up cipher suites unless driver already did */
	if (!local->hw.wiphy->cipher_suites) {
		local->hw.wiphy->cipher_suites = cipher_suites;
		local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
		if (!(local->hw.flags & IEEE80211_HW_MFP_CAPABLE))
			local->hw.wiphy->n_cipher_suites--;
	}
	if (IS_ERR(local->wep_tx_tfm) || IS_ERR(local->wep_rx_tfm)) {
		if (local->hw.wiphy->cipher_suites == cipher_suites) {
			local->hw.wiphy->cipher_suites += 2;
			local->hw.wiphy->n_cipher_suites -= 2;
		} else {
			u32 *suites;
			int r, w = 0;

			/* Filter out WEP */

			suites = kmemdup(
				local->hw.wiphy->cipher_suites,
				sizeof(u32) * local->hw.wiphy->n_cipher_suites,
				GFP_KERNEL);
			if (!suites)
				return -ENOMEM;
			for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
				u32 suite = local->hw.wiphy->cipher_suites[r];
				if (suite == WLAN_CIPHER_SUITE_WEP40 ||
				    suite == WLAN_CIPHER_SUITE_WEP104)
					continue;
				suites[w++] = suite;
			}
			local->hw.wiphy->cipher_suites = suites;
			local->hw.wiphy->n_cipher_suites = w;
			local->wiphy_ciphers_allocated = true;
		}
	}

	if (!local->ops->remain_on_channel)
		local->hw.wiphy->max_remain_on_channel_duration = 5000;

	if (local->ops->sched_scan_start)
		local->hw.wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;

	result = wiphy_register(local->hw.wiphy);
	if (result < 0)
		goto fail_wiphy_register;

	/*
	 * We use the number of queues for feature tests (QoS, HT) internally
	 * so restrict them appropriately.
	 */
	if (hw->queues > IEEE80211_MAX_QUEUES)
		hw->queues = IEEE80211_MAX_QUEUES;

	local->workqueue =
		alloc_ordered_workqueue(wiphy_name(local->hw.wiphy), 0);
	if (!local->workqueue) {
		result = -ENOMEM;
		goto fail_workqueue;
	}

	/*
	 * The hardware needs headroom for sending the frame,
	 * and we need some headroom for passing the frame to monitor
	 * interfaces, but never both at the same time.
	 */
#ifndef __CHECKER__
	BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM !=
			sizeof(struct ieee80211_tx_status_rtap_hdr));
#endif
	local->tx_headroom = max_t(unsigned int, local->hw.extra_tx_headroom,
				   sizeof(struct ieee80211_tx_status_rtap_hdr));

	debugfs_hw_add(local);

	/*
	 * if the driver doesn't specify a max listen interval we
	 * use 5 which should be a safe default
	 */
	if (local->hw.max_listen_interval == 0)
		local->hw.max_listen_interval = 5;

	local->hw.conf.listen_interval = local->hw.max_listen_interval;

	local->dynamic_ps_forced_timeout = -1;

	result = ieee80211_wep_init(local);
	if (result < 0)
		wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
			    result);

	rtnl_lock();

	result = ieee80211_init_rate_ctrl_alg(local,
					      hw->rate_control_algorithm);
	if (result < 0) {
		wiphy_debug(local->hw.wiphy,
			    "Failed to initialize rate control algorithm\n");
		goto fail_rate;
	}

	/* add one default STA interface if supported */
	if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) {
		result = ieee80211_if_add(local, "wlan%d", NULL,
					  NL80211_IFTYPE_STATION, NULL);
		if (result)
			wiphy_warn(local->hw.wiphy,
				   "Failed to add default virtual iface\n");
	}

	rtnl_unlock();

	ieee80211_led_init(local);

	local->network_latency_notifier.notifier_call =
		ieee80211_max_network_latency;
	result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
				     &local->network_latency_notifier);
	if (result) {
		rtnl_lock();
		goto fail_pm_qos;
	}

#ifdef CONFIG_INET
	local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
	result = register_inetaddr_notifier(&local->ifa_notifier);
	if (result)
		goto fail_ifa;
#endif

	netif_napi_add(&local->napi_dev, &local->napi, ieee80211_napi_poll,
			local->hw.napi_weight);

	return 0;

#ifdef CONFIG_INET
 fail_ifa:
	pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
			       &local->network_latency_notifier);
	rtnl_lock();
#endif
 fail_pm_qos:
	ieee80211_led_exit(local);
	ieee80211_remove_interfaces(local);
 fail_rate:
	rtnl_unlock();
	ieee80211_wep_free(local);
	sta_info_stop(local);
	destroy_workqueue(local->workqueue);
 fail_workqueue:
	wiphy_unregister(local->hw.wiphy);
 fail_wiphy_register:
	if (local->wiphy_ciphers_allocated)
		kfree(local->hw.wiphy->cipher_suites);
	kfree(local->int_scan_req);
	return result;
}
Example #24
static int pdev_probe(struct platform_device *pdev)
{
	const struct soc_device_attribute *soc;
	struct omap_drm_private *priv;
	struct drm_device *ddev;
	unsigned int i;
	int ret;

	DBG("%s", pdev->name);

	if (omapdss_is_initialized() == false)
		return -EPROBE_DEFER;

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "Failed to set the DMA mask\n");
		return ret;
	}

	omap_crtc_pre_init();

	ret = omap_connect_dssdevs();
	if (ret)
		goto err_crtc_uninit;

	/* Allocate and initialize the driver private structure. */
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto err_disconnect_dssdevs;
	}

	priv->dispc_ops = dispc_get_ops();

	soc = soc_device_match(omapdrm_soc_devices);
	priv->omaprev = soc ? (unsigned int)soc->data : 0;
	priv->wq = alloc_ordered_workqueue("omapdrm", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		kfree(priv);
		goto err_disconnect_dssdevs;
	}

	spin_lock_init(&priv->list_lock);
	INIT_LIST_HEAD(&priv->obj_list);

	/* Allocate and initialize the DRM device. */
	ddev = drm_dev_alloc(&omap_drm_driver, &pdev->dev);
	if (IS_ERR(ddev)) {
		ret = PTR_ERR(ddev);
		goto err_free_priv;
	}

	ddev->dev_private = priv;
	platform_set_drvdata(pdev, ddev);

	/* Get memory bandwidth limits */
	if (priv->dispc_ops->get_memory_bandwidth_limit)
		priv->max_bandwidth =
				priv->dispc_ops->get_memory_bandwidth_limit();

	omap_gem_init(ddev);

	ret = omap_modeset_init(ddev);
	if (ret) {
		dev_err(&pdev->dev, "omap_modeset_init failed: ret=%d\n", ret);
		goto err_free_drm_dev;
	}

	/* Initialize vblank handling, start with all CRTCs disabled. */
	ret = drm_vblank_init(ddev, priv->num_crtcs);
	if (ret) {
		dev_err(&pdev->dev, "could not init vblank\n");
		goto err_cleanup_modeset;
	}

	for (i = 0; i < priv->num_crtcs; i++)
		drm_crtc_vblank_off(priv->crtcs[i]);

	priv->fbdev = omap_fbdev_init(ddev);

	drm_kms_helper_poll_init(ddev);
	omap_modeset_enable_external_hpd();

	/*
	 * Register the DRM device with the core and the connectors with
	 * sysfs.
	 */
	ret = drm_dev_register(ddev, 0);
	if (ret)
		goto err_cleanup_helpers;

	return 0;

err_cleanup_helpers:
	omap_modeset_disable_external_hpd();
	drm_kms_helper_poll_fini(ddev);
	if (priv->fbdev)
		omap_fbdev_free(ddev);
err_cleanup_modeset:
	drm_mode_config_cleanup(ddev);
	omap_drm_irq_uninstall(ddev);
err_free_drm_dev:
	omap_gem_deinit(ddev);
	drm_dev_unref(ddev);
err_free_priv:
	destroy_workqueue(priv->wq);
	kfree(priv);
err_disconnect_dssdevs:
	omap_disconnect_dssdevs();
err_crtc_uninit:
	omap_crtc_pre_uninit();
	return ret;
}
Example #25
/* construct hdmi at bind/probe time, grab all the resources.  If
 * we are to EPROBE_DEFER we want to do it here, rather than later
 * at modeset_init() time
 */
static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
{
	struct hdmi_platform_config *config = pdev->dev.platform_data;
	struct hdmi *hdmi = NULL;
	struct resource *res;
	int i, ret;

	hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
	if (!hdmi) {
		ret = -ENOMEM;
		goto fail;
	}

	hdmi->pdev = pdev;
	hdmi->config = config;
	spin_lock_init(&hdmi->reg_lock);

	hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
	if (IS_ERR(hdmi->mmio)) {
		ret = PTR_ERR(hdmi->mmio);
		goto fail;
	}

	/* HDCP needs physical address of hdmi register */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
		config->mmio_name);
	hdmi->mmio_phy_addr = res->start;

	hdmi->qfprom_mmio = msm_ioremap(pdev,
		config->qfprom_mmio_name, "HDMI_QFPROM");
	if (IS_ERR(hdmi->qfprom_mmio)) {
		dev_info(&pdev->dev, "can't find qfprom resource\n");
		hdmi->qfprom_mmio = NULL;
	}

	hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) *
			config->hpd_reg_cnt, GFP_KERNEL);
	if (!hdmi->hpd_regs) {
		ret = -ENOMEM;
		goto fail;
	}
	for (i = 0; i < config->hpd_reg_cnt; i++) {
		struct regulator *reg;

		reg = devm_regulator_get(&pdev->dev,
				config->hpd_reg_names[i]);
		if (IS_ERR(reg)) {
			ret = PTR_ERR(reg);
			dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
					config->hpd_reg_names[i], ret);
			goto fail;
		}

		hdmi->hpd_regs[i] = reg;
	}

	hdmi->pwr_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->pwr_regs[0]) *
			config->pwr_reg_cnt, GFP_KERNEL);
	if (!hdmi->pwr_regs) {
		ret = -ENOMEM;
		goto fail;
	}
	for (i = 0; i < config->pwr_reg_cnt; i++) {
		struct regulator *reg;

		reg = devm_regulator_get(&pdev->dev,
				config->pwr_reg_names[i]);
		if (IS_ERR(reg)) {
			ret = PTR_ERR(reg);
			dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
					config->pwr_reg_names[i], ret);
			goto fail;
		}

		hdmi->pwr_regs[i] = reg;
	}

	hdmi->hpd_clks = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_clks[0]) *
			config->hpd_clk_cnt, GFP_KERNEL);
	if (!hdmi->hpd_clks) {
		ret = -ENOMEM;
		goto fail;
	}
	for (i = 0; i < config->hpd_clk_cnt; i++) {
		struct clk *clk;

		clk = msm_clk_get(pdev, config->hpd_clk_names[i]);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
					config->hpd_clk_names[i], ret);
			goto fail;
		}

		hdmi->hpd_clks[i] = clk;
	}

	hdmi->pwr_clks = devm_kzalloc(&pdev->dev, sizeof(hdmi->pwr_clks[0]) *
			config->pwr_clk_cnt, GFP_KERNEL);
	if (!hdmi->pwr_clks) {
		ret = -ENOMEM;
		goto fail;
	}
	for (i = 0; i < config->pwr_clk_cnt; i++) {
		struct clk *clk;

		clk = msm_clk_get(pdev, config->pwr_clk_names[i]);
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
					config->pwr_clk_names[i], ret);
			goto fail;
		}

		hdmi->pwr_clks[i] = clk;
	}

	pm_runtime_enable(&pdev->dev);

	hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
	if (!hdmi->workq) {
		ret = -ENOMEM;
		goto fail;
	}

	hdmi->i2c = msm_hdmi_i2c_init(hdmi);
	if (IS_ERR(hdmi->i2c)) {
		ret = PTR_ERR(hdmi->i2c);
		dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
		hdmi->i2c = NULL;
		goto fail;
	}

	ret = msm_hdmi_get_phy(hdmi);
	if (ret) {
		dev_err(&pdev->dev, "failed to get phy\n");
		goto fail;
	}

	hdmi->hdcp_ctrl = msm_hdmi_hdcp_init(hdmi);
	if (IS_ERR(hdmi->hdcp_ctrl)) {
		dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
		hdmi->hdcp_ctrl = NULL;
	}

	return hdmi;

fail:
	if (hdmi)
		msm_hdmi_destroy(hdmi);

	return ERR_PTR(ret);
}
Example #26
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
		return -ENODEV;
	}

	/* UMS needs agp support. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
		return -EINVAL;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	gpu_perf_dev_priv = (void *)dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	if (i915_start_vgt(dev->pdev))
		i915_host_mediate = true;
	printk("i915_start_vgt: %s\n", i915_host_mediate ? "success" : "fail");

	i915_check_vgt(dev_priv);
	if (USES_VGT(dev))
		i915.enable_fbc = 0;

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_regs;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* WARNING: Apparently we must kick fbdev drivers before vgacon,
		 * otherwise the vga fbdev driver falls over. */
		ret = i915_kick_out_firmware_fb(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
			goto out_gtt;
		}

		ret = i915_kick_out_vgacon(dev_priv);
		if (ret) {
			DRM_ERROR("failed to remove conflicting VGA console\n");
			goto out_gtt;
		}
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->dp_wq == NULL) {
		DRM_ERROR("Failed to create our dp workqueue.\n");
		ret = -ENOMEM;
		goto out_freewq;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_device_info_runtime_init(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_power_well;
		}
#ifdef DRM_I915_VGT_SUPPORT
		if (USES_VGT(dev)) {
			/*
			 * Tell VGT that we have a valid surface to show
			 * after modesetting. We don't distinguish Dom0 from
			 * a Linux guest here; the PVINFO write handler takes
			 * care of that.
			 */
			I915_WRITE(vgt_info_off(display_ready), 1);
		}
#endif
	}

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->dp_wq);
out_freewq:
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_regs:
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
	kfree(dev_priv);
	return ret;
}
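The long comment above dev_priv->wq explains the design; in practice the ordered queue is fed like this (helper name invented, body condensed from i915 of the same era):

static void i915_queue_retire_work(struct drm_i915_private *dev_priv)
{
	/* Batched, low-priority retirement; the ordered queue ensures
	 * at most one instance runs at a time.
	 */
	queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
			   round_jiffies_up_relative(HZ));
}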
Example #27
/* Initialize protocol */
static int qca_open(struct hci_uart *hu)
{
	struct qca_serdev *qcadev;
	struct qca_data *qca;
	int ret;

	BT_DBG("hu %p qca_open", hu);

	qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
	if (!qca)
		return -ENOMEM;

	skb_queue_head_init(&qca->txq);
	skb_queue_head_init(&qca->tx_wait_q);
	spin_lock_init(&qca->hci_ibs_lock);
	qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
	if (!qca->workqueue) {
		BT_ERR("QCA Workqueue not initialized properly");
		kfree(qca);
		return -ENOMEM;
	}

	INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx);
	INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
	INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
	INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);

	qca->hu = hu;

	/* Assume we start with both sides asleep -- extra wakes OK */
	qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
	qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;

	/* clocks actually on, but we start votes off */
	qca->tx_vote = false;
	qca->rx_vote = false;
	qca->flags = 0;

	qca->ibs_sent_wacks = 0;
	qca->ibs_sent_slps = 0;
	qca->ibs_sent_wakes = 0;
	qca->ibs_recv_wacks = 0;
	qca->ibs_recv_slps = 0;
	qca->ibs_recv_wakes = 0;
	qca->vote_last_jif = jiffies;
	qca->vote_on_ms = 0;
	qca->vote_off_ms = 0;
	qca->votes_on = 0;
	qca->votes_off = 0;
	qca->tx_votes_on = 0;
	qca->tx_votes_off = 0;
	qca->rx_votes_on = 0;
	qca->rx_votes_off = 0;

	hu->priv = qca;

	if (hu->serdev) {
		qcadev = serdev_device_get_drvdata(hu->serdev);
		if (qcadev->btsoc_type != QCA_WCN3990) {
			gpiod_set_value_cansleep(qcadev->bt_en, 1);
		} else {
			hu->init_speed = qcadev->init_speed;
			hu->oper_speed = qcadev->oper_speed;
			ret = qca_power_setup(hu, true);
			if (ret) {
				destroy_workqueue(qca->workqueue);
				kfree_skb(qca->rx_skb);
				hu->priv = NULL;
				kfree(qca);
				return ret;
			}
		}
	}

	timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
	qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;

	timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
	qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;

	BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
	       qca->tx_idle_delay, qca->wake_retrans);

	return 0;
}
Example #28
void MTKPP_Init(void)
{
	int i;
	struct {
		MTKPP_ID uid;
		MTKPP_BUFFERTYPE type;
		int data_size;
		int max_line;
	} mtk_pp_register_tabls[] =
	{
		/* buffer is allocated in MTK_PP_4_SGXOSTimer_register */
		{MTKPP_ID_SGXDumpDebugInfo, MTKPP_BUFFERTYPE_QUEUEBUFFER,   0,                  0}, 
		{MTKPP_ID_DEVMEM,           MTKPP_BUFFERTYPE_RINGBUFFER,    1024 * 1024 * 2,    1024 * 64},
		{MTKPP_ID_SYNC,             MTKPP_BUFFERTYPE_RINGBUFFER,    1024 * 8,           128},
		{MTKPP_ID_MUTEX,            MTKPP_BUFFERTYPE_RINGBUFFER,    1024 * 32,          512},
	};

	for (i = 0; i < MTKPP_ID_SIZE; ++i)
	{
		if (i != mtk_pp_register_tabls[i].uid)
		{
			_MTKPP_DEBUG_LOG("%s: index(%d) != tabel_uid(%d)", __func__, i, mtk_pp_register_tabls[i].uid);
			goto err_out;
		}
		
		g_MTKPPdata[i] = MTKPP_AllocStruct(mtk_pp_register_tabls[i].type);

		if (g_MTKPPdata[i] == NULL)
		{
			_MTKPP_DEBUG_LOG("%s: alloc struct fail: flags = %d", __func__, mtk_pp_register_tabls[i].type);
			goto err_out;
		}

		if (mtk_pp_register_tabls[i].data_size > 0)
		{
			MTKPP_AllocData(
				g_MTKPPdata[i],
				mtk_pp_register_tabls[i].data_size,
				mtk_pp_register_tabls[i].max_line
				);
			
			MTKPP_CleanData(g_MTKPPdata[i]);
		}
	}
	
	g_MTKPP_proc = create_proc_entry("gpulog", 0, NULL);
	if (g_MTKPP_proc)
		g_MTKPP_proc->proc_fops = &g_MTKPP_proc_ops;
	
	g_MTKPP_4_SGXDumpDebugInfo_current = NULL;
	spin_lock_init(&g_MTKPP_4_SGXDumpDebugInfo_lock);
	
#if defined(ENABLE_AEE_WHEN_LOCKUP)
	g_MTKPP_workqueue.psWorkQueue = alloc_ordered_workqueue("mwp", WQ_FREEZABLE | WQ_MEM_RECLAIM);
	INIT_WORK(&g_MTKPP_worker.sWork, MTKPP_WORKR_Handle);
#endif

	return;
	
err_out:	
	return;
}