int gsmd_ctrl_setup(unsigned int count)
{
    int	i;
    int	ret;

    pr_debug("%s: requested ports:%d\n", __func__, count);

    if (!count || count > NR_CTRL_SMD_PORTS) {
        pr_err("%s: Invalid num of ports count:%d\n",
               __func__, count);
        return -EINVAL;
    }

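    /* Unbound, reclaim-safe workqueue; max_active of 1 keeps work items serialized. */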
    grmnet_ctrl_wq = alloc_workqueue("gsmd_ctrl",
                                     WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
    if (!grmnet_ctrl_wq) {
        pr_err("%s: Unable to create workqueue grmnet_ctrl\n",
               __func__);
        return -ENOMEM;
    }

    for (i = 0; i < count; i++) {
        n_rmnet_ctrl_ports++;
        ret = grmnet_ctrl_smd_port_alloc(i);
        if (ret) {
            pr_err("%s: Unable to alloc port:%d\n", __func__, i);
            n_rmnet_ctrl_ports--;
            goto free_ctrl_smd_ports;
        }
    }

    return 0;

free_ctrl_smd_ports:
    for (i = 0; i < n_rmnet_ctrl_ports; i++)
        grmnet_ctrl_smd_port_free(i);

    destroy_workqueue(grmnet_ctrl_wq);

    return ret;
}
Example #2
int cc2520_csma_init(void)
{
    csma_top->tx = cc2520_csma_tx;
    csma_bottom->tx_done = cc2520_csma_tx_done;
    csma_bottom->rx_done = cc2520_csma_rx_done;

    backoff_min = CC2520_DEF_MIN_BACKOFF;
    backoff_max_init = CC2520_DEF_INIT_BACKOFF;
    backoff_max_cong = CC2520_DEF_CONG_BACKOFF;
    csma_enabled = CC2520_DEF_CSMA_ENABLED;

    spin_lock_init(&state_sl);
    csma_state = CC2520_CSMA_IDLE;

    cur_tx_buf = kmalloc(PKT_BUFF_SIZE, GFP_KERNEL);
    if (!cur_tx_buf) {
        goto error;
    }

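    /* High-priority workqueue; max_active of 128 allows many CSMA work items in flight. */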
    wq = alloc_workqueue("csma_wq", WQ_HIGHPRI, 128);
    if (!wq) {
        goto error;
    }

    hrtimer_init(&backoff_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    backoff_timer.function = &cc2520_csma_timer_cb;

    return 0;

error:
    if (cur_tx_buf) {
        kfree(cur_tx_buf);
        cur_tx_buf = NULL;
    }

    if (wq) {
        destroy_workqueue(wq);
    }

    return -ENOMEM;
}
Example #3
static int __init crt_init(void)
{	
	int err = -EINVAL;

	printk(KERN_INFO "nkfs_crt: initializing\n");
	err = klog_init();
	if (err)
		goto out;

	err = crt_random_init();
	if (err) {
		goto rel_klog;
	}

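	/* Unbound workqueue safe to use on the memory-reclaim path; max_active of 1. */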
	crt_wq = alloc_workqueue("crt_wq",
			WQ_MEM_RECLAIM|WQ_UNBOUND, 1);
	if (!crt_wq) {
		KLOG(KL_ERR, "can't create wq");
		err = -ENOMEM;
		goto rel_rnd;
	}

	KLOG(KL_INF, "nk8 initing");	
	err = nk8_init();
	if (err) {
		KLOG(KL_ERR, "nk8 init err %d", err);
		goto del_wq;
	}

	KLOG(KL_INF, "inited");
	return 0;

del_wq:
	destroy_workqueue(crt_wq);
rel_rnd:
	crt_random_release();
rel_klog:
	klog_release();
out:
	return err;
}
Example #4
static int cpu_boost_init(void)
{
	int cpu, ret;
	struct cpu_sync *s;

	cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);

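	/* High-priority boost workqueue; max_active of 0 selects the default limit. */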
	cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
	if (!cpu_boost_wq)
		return -ENOMEM;

	INIT_WORK(&input_boost_work, do_input_boost);

	for_each_possible_cpu(cpu) {
		s = &per_cpu(sync_info, cpu);
		s->cpu = cpu;
		init_waitqueue_head(&s->sync_wq);
		spin_lock_init(&s->lock);
		INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem);
		INIT_DELAYED_WORK(&s->input_boost_rem, do_input_boost_rem);
		s->thread = kthread_run(boost_mig_sync_thread, (void *)cpu,
					"boost_sync/%d", cpu);
		set_cpus_allowed(s->thread, *cpumask_of(cpu));
	}
	atomic_notifier_chain_register(&migration_notifier_head,
					&boost_migration_nb);

	ret = input_register_handler(&cpuboost_input_handler);
	if (ret)
		pr_err("Cannot register cpuboost input handler.\n");

	ret = register_hotcpu_notifier(&cpu_nblk);
	if (ret)
		pr_err("Cannot register cpuboost hotplug handler.\n");

	notif.notifier_call = fb_notifier_callback;

	return ret;
}
Example #5
static int mem_rx_setup(struct link_device *ld)
{
	struct mem_link_device *mld = to_mem_link_device(ld);

	if (!zalloc_cpumask_var(&mld->dmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&mld->imask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&mld->tmask, GFP_KERNEL))
		return -ENOMEM;

#ifdef CONFIG_ARGOS
	/* Below hard-coded mask values should be removed later on.
	 * Like net-sysfs, argos module also should support sysfs knob,
	 * so that user layer must be able to control these cpu mask. */
#ifdef CONFIG_SCHED_HMP
	cpumask_copy(mld->dmask, &hmp_slow_cpu_mask);
#endif

	cpumask_or(mld->imask, mld->imask, cpumask_of(3));

	argos_irq_affinity_setup_label(217, "IPC", mld->imask, mld->dmask);
#endif

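	/* Single-threaded TX workqueue; RX gets a separate high-priority, CPU-intensive queue. */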
	ld->tx_wq = create_singlethread_workqueue("mem_tx_work");
	if (!ld->tx_wq) {
		mif_err("%s: ERR! fail to create tx_wq\n", ld->name);
		return -ENOMEM;
	}

	ld->rx_wq = alloc_workqueue(
			"mem_rx_work", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
	if (!ld->rx_wq) {
		mif_err("%s: ERR! fail to create rx_wq\n", ld->name);
		destroy_workqueue(ld->tx_wq);
		return -ENOMEM;
	}

	INIT_DELAYED_WORK(&ld->rx_delayed_work, link_to_demux_work);

	return 0;
}
Example #6
static int cpu_boost_init(void)
{
	int cpu, ret;
	struct cpu_sync *s;

	cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
	if (!cpu_boost_wq)
		return -ENOMEM;

	INIT_WORK(&input_boost_work, do_input_boost);
	INIT_DELAYED_WORK(&input_boost_rem, do_input_boost_rem);

	for_each_possible_cpu(cpu) {
		s = &per_cpu(sync_info, cpu);
		s->cpu = cpu;
	}
	cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
	ret = input_register_handler(&cpuboost_input_handler);

	return ret;
}
Example #7
static int init(void)
{
	int ret;
	input_boost_wq = alloc_workqueue("input_boost_wq", WQ_FREEZABLE | WQ_HIGHPRI, 1);

	if (!input_boost_wq)
		return -ENOMEM;

	INIT_WORK(&input_boost_work, do_input_boost);
	INIT_DELAYED_WORK(&rem_input_boost, do_rem_input_boost);

	ret = input_register_handler(&boost_input_handler);
	if (ret) {
		pr_err("Failed to register input handler, error: %d\n", ret);
		destroy_workqueue(input_boost_wq);
		return ret;
	}

	cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);

	return 0;
}
Example #8
int mmpfb_overlay_vsync_notify_init(struct mmpfb_info *fbi)
{
	int ret = 0;
	struct mmp_vsync_notifier_node *notifier_node;

	notifier_node = &fbi->vsync.notifier_node;
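	/* Per-framebuffer vsync workqueue: high-priority, unbound, reclaim-safe, max_active of 1. */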
	fbi->vsync.wq =
		alloc_workqueue(fbi->name, WQ_HIGHPRI |
				WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!fbi->vsync.wq) {
		dev_err(fbi->dev, "alloc_workqueue failed\n");
		return -ENOMEM;
	}

	INIT_WORK(&fbi->vsync.fence_work, mmpfb_overlay_fence_work);
	notifier_node->cb_notify = mmpfb_overlay_vsync_cb;
	notifier_node->cb_data = (void *)fbi;

	mmp_register_vsync_cb(&fbi->path->vsync,
			notifier_node);
	return ret;
}
Example #9
static int pcie_init_slot(struct controller *ctrl)
{
	struct slot *slot;

	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return -ENOMEM;

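	/* The workqueue name is a printf-style format; the trailing argument fills in "%u". */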
	slot->wq = alloc_workqueue("pciehp-%u", 0, 0, PSN(ctrl));
	if (!slot->wq)
		goto abort;

	slot->ctrl = ctrl;
	mutex_init(&slot->lock);
	mutex_init(&slot->hotplug_lock);
	INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
	ctrl->slot = slot;
	return 0;
abort:
	kfree(slot);
	return -ENOMEM;
}
Example #10
void _mali_osk_pm_dev_enable(void) /* @@@@ todo: change to init of some kind.. or change the way or where atomics are initialized? */
{
	_mali_osk_atomic_init(&mali_pm_ref_count, 0);
	_mali_osk_atomic_init(&mali_suspend_called, 0);
	pm_timer = _mali_osk_timer_init();
	_mali_osk_timer_setcallback(pm_timer, _mali_pm_callback, NULL);	
	
    pm_lock = _mali_osk_lock_init(_MALI_OSK_LOCKFLAG_NONINTERRUPTABLE | _MALI_OSK_LOCKFLAG_ORDERED, 0, 0);

#if MALI_LICENSE_IS_GPL
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
    mali_pm_wq = alloc_workqueue("mali_pm", WQ_UNBOUND, 0);
#else
    mali_pm_wq = create_workqueue("mali_pm");
#endif
    if(NULL == mali_pm_wq)
    {
        MALI_PRINT_ERROR(("Unable to create Mali pm workqueue\n"));
    }
#endif
    INIT_WORK( &mali_pm_wq_work_handle, mali_bottom_half_pm );
}
Example #11
static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
	struct i915_mmu_notifier *mn;

	mn = kmalloc(sizeof(*mn), GFP_KERNEL);
	if (mn == NULL)
		return ERR_PTR(-ENOMEM);

	mtx_init(&mn->lock);
	mn->mn.ops = &i915_gem_userptr_notifier;
	mn->objects = RB_ROOT_CACHED;
	mn->wq = alloc_workqueue("i915-userptr-release",
				 WQ_UNBOUND | WQ_MEM_RECLAIM,
				 0);
	if (mn->wq == NULL) {
		kfree(mn);
		return ERR_PTR(-ENOMEM);
	}

	return mn;
}
Example #12
int fdv_dev_init(struct fdv_dev *ptr_fdv_dev)
{
	spin_lock_init(&fdv_spin_lock);

	if (ptr_fdv_dev == NULL) {
		printk(KERN_WARNING "fdv: ptr_fdv_dev is NULL\n");
		return -EINVAL;
	}

	ptr_fdv_dev->fdv_dev_status = 0;
	ptr_fdv_dev->fdv_dev_step = 0;
	ptr_fdv_dev->ptr_iomap_addrs = &fd_devp->iomap_addrs;
	ptr_fdv_dev->ptr_mem_addr = &fd_devp->mem_addr;

	p_fdv_queue = alloc_workqueue("fdv_work_queue", 0, 0);
	if (!p_fdv_queue)
		return -ENOMEM;
	INIT_WORK(&fdv_work, fdv_work_handle);

	/* allocate and zero the detection buffer */
	ptr_fdv_dev->ptr_fdv_det_buffer = kzalloc(get_sizeof_fdv_buffer(), GFP_KERNEL);
	if (!ptr_fdv_dev->ptr_fdv_det_buffer) {
		destroy_workqueue(p_fdv_queue);
		return -ENOMEM;
	}

	return 0;
}
Example #13
static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* <1> timer */
	init_timer(&rtlpriv->works.watchdog_timer);
	setup_timer(&rtlpriv->works.watchdog_timer,
		    rtl_watch_dog_timer_callback, (unsigned long)hw);

	/* <2> work queue */
	rtlpriv->works.hw = hw;
	rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
	if (!rtlpriv->works.rtl_wq) {
		pr_err("Failed to allocate work queue\n");
		return;
	}
	INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
			  (void *)rtl_watchdog_wq_callback);
	INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
			  (void *)rtl_ips_nic_off_wq_callback);
	INIT_DELAYED_WORK(&rtlpriv->works.ps_work,
			  (void *)rtl_swlps_wq_callback);
	INIT_DELAYED_WORK(&rtlpriv->works.ps_rfon_wq,
			  (void *)rtl_swlps_rfon_wq_callback);

}
Example #14
void _mali_osk_pm_dev_enable(void)
{
	_mali_osk_atomic_init(&mali_pm_ref_count, 0);
	_mali_osk_atomic_init(&mali_suspend_called, 0);
	pm_timer = _mali_osk_timer_init();
	_mali_osk_timer_setcallback(pm_timer, _mali_pm_callback, NULL);	
	
    pm_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED, 0);

#if MALI_LICENSE_IS_GPL
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
    mali_pm_wq = alloc_workqueue("mali_pm", WQ_UNBOUND, 0);
#else
    mali_pm_wq = create_workqueue("mali_pm");
#endif
    if(NULL == mali_pm_wq)
    {
        MALI_PRINT_ERROR(("Unable to create Mali pm workqueue\n"));
    }
#endif
    INIT_WORK( &mali_pm_wq_work_handle, mali_bottom_half_pm );
}
Example #15
static int sdio_dmux_probe(struct platform_device *pdev)
{
	int rc;

	DBG("%s probe called\n", __func__);

	if (!sdio_mux_initialized) {
		sdio_mux_workqueue = alloc_workqueue("sdio_dmux", WQ_MEM_RECLAIM, 1);

		if (!sdio_mux_workqueue)
			return -ENOMEM;

		skb_queue_head_init(&sdio_mux_write_pool);
		spin_lock_init(&sdio_mux_write_lock);

		for (rc = 0; rc < SDIO_DMUX_NUM_CHANNELS; ++rc)
			spin_lock_init(&sdio_ch[rc].lock);


		wake_lock_init(&sdio_mux_ch_wakelock, WAKE_LOCK_SUSPEND,
				   "sdio_dmux");
	}

	rc = sdio_open("SDIO_RMNT", &sdio_mux_ch, NULL, sdio_mux_notify);
	if (rc < 0) {
		pr_err("%s: sdio open failed %d\n", __func__, rc);
		wake_lock_destroy(&sdio_mux_ch_wakelock);
		destroy_workqueue(sdio_mux_workqueue);
		sdio_mux_initialized = 0;
		return rc;
	}

	fatal_error = 0;
	sdio_mux_initialized = 1;
	return 0;
}
Example #16
int bam_data_setup(unsigned int no_bam2bam_port)
{
	int	i;
	int	ret;

	pr_debug("requested %d BAM2BAM ports", no_bam2bam_port);

	if (!no_bam2bam_port || no_bam2bam_port > BAM2BAM_DATA_N_PORTS) {
		pr_err("Invalid num of ports count:%d\n", no_bam2bam_port);
		return -EINVAL;
	}

	bam_data_wq = alloc_workqueue("k_bam_data",
				      WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!bam_data_wq) {
		pr_err("Failed to create workqueue\n");
		return -ENOMEM;
	}

	for (i = 0; i < no_bam2bam_port; i++) {
		n_bam2bam_data_ports++;
		ret = bam2bam_data_port_alloc(i);
		if (ret) {
			n_bam2bam_data_ports--;
			pr_err("Failed to alloc port:%d\n", i);
			goto free_bam_ports;
		}
	}

	return 0;

free_bam_ports:
	for (i = 0; i < n_bam2bam_data_ports; i++)
		bam2bam_data_port_free(i);
	destroy_workqueue(bam_data_wq);

	return ret;
}
Example #17
int tegra_auto_hotplug_init(struct mutex *cpu_lock)
{
	/*
	 * Not bound to the issuer CPU (=> high-priority), has rescue worker
	 * task, single-threaded, freezable.
	 */
	hotplug_wq = alloc_workqueue(
		"cpu-tegra3", WQ_UNBOUND | WQ_RESCUER | WQ_FREEZABLE, 1);
	if (!hotplug_wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&hotplug_work, tegra_auto_hotplug_work_func);
	INIT_DELAYED_WORK(&cpu_down_work, tegra_cpu_down_work_func);

	cpu_clk = clk_get_sys(NULL, "cpu");
	cpu_g_clk = clk_get_sys(NULL, "cpu_g");
	cpu_lp_clk = clk_get_sys(NULL, "cpu_lp");
	if (IS_ERR(cpu_clk) || IS_ERR(cpu_g_clk) || IS_ERR(cpu_lp_clk)) {
		destroy_workqueue(hotplug_wq);
		return -ENOENT;
	}

	idle_top_freq = clk_get_max_rate(cpu_lp_clk) / 1000;
	idle_bottom_freq = clk_get_min_rate(cpu_g_clk) / 1000;

	up2g0_delay = msecs_to_jiffies(UP2G0_DELAY_MS);
	up2gn_delay = msecs_to_jiffies(UP2Gn_DELAY_MS);
	down_delay = msecs_to_jiffies(DOWN_DELAY_MS);

	tegra3_cpu_lock = cpu_lock;
	hp_state = INITIAL_STATE;
	hp_init_stats();
	pr_info("Tegra auto-hotplug initialized: %s\n",
		(hp_state == TEGRA_HP_DISABLED) ? "disabled" : "enabled");

	if (pm_qos_add_notifier(PM_QOS_MIN_ONLINE_CPUS, &min_cpus_notifier))
		pr_err("%s: Failed to register min cpus PM QoS notifier\n",
			__func__);

	return 0;
}
Example #18
mali_error kbase_pm_policy_init(kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

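	/* High-priority, unbound, single-threaded queue for the GPU power-off work item. */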
	kbdev->pm.gpu_poweroff_wq = alloc_workqueue("kbase_pm_do_poweroff", WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (NULL == kbdev->pm.gpu_poweroff_wq)
		return MALI_ERROR_OUT_OF_MEMORY;
	INIT_WORK(&kbdev->pm.gpu_poweroff_work, kbasep_pm_do_gpu_poweroff_wq);

	hrtimer_init(&kbdev->pm.gpu_poweroff_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	kbdev->pm.gpu_poweroff_timer.function = kbasep_pm_do_gpu_poweroff_callback;

	kbdev->pm.pm_current_policy = policy_list[0];

	kbdev->pm.pm_current_policy->init(kbdev);

	kbdev->pm.gpu_poweroff_time = HR_TIMER_DELAY_NSEC(kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_PM_GPU_POWEROFF_TICK_NS));

	kbdev->pm.poweroff_shader_ticks = kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_PM_POWEROFF_TICK_SHADER);
	kbdev->pm.poweroff_gpu_ticks = kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_PM_POWEROFF_TICK_GPU);

	return MALI_ERROR_NONE;
}
Example #19
int ccci_subsys_bm_init(void)
{
    // init ccci_request
    ccci_req_queue_init(&req_pool);
    CCCI_INF_MSG(-1, BM, "MTU=%d/%d, pool size %d/%d/%d/%d\n", CCCI_MTU, CCMNI_MTU,
                 SKB_POOL_SIZE_4K, SKB_POOL_SIZE_1_5K, SKB_POOL_SIZE_16, req_pool.max_len);
    // init skb pool
    ccci_skb_queue_init(&skb_pool_4K, SKB_4K, SKB_POOL_SIZE_4K);
    ccci_skb_queue_init(&skb_pool_1_5K, SKB_1_5K, SKB_POOL_SIZE_1_5K);
    ccci_skb_queue_init(&skb_pool_16, SKB_16, SKB_POOL_SIZE_16);
    // init pool reload work
    pool_reload_work_queue = alloc_workqueue("pool_reload_work", WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
    if (!pool_reload_work_queue)
        return -ENOMEM;
    INIT_WORK(&skb_pool_4K.reload_work, __4K_reload_work);
    INIT_WORK(&skb_pool_1_5K.reload_work, __1_5K_reload_work);
    INIT_WORK(&skb_pool_16.reload_work, __16_reload_work);

#ifdef CCCI_STATISTIC
    init_timer(&ccci_bm_stat_timer);
    ccci_bm_stat_timer.function = ccci_bm_stat_timer_func;
    mod_timer(&ccci_bm_stat_timer, jiffies+10*HZ);
#endif
    return 0;
}
Example #20
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct timer_list *t;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		t = &per_cpu(cpu_timer, i);
		init_timer_deferrable(t);
		t->function = cpufreq_interactive_timer;
		t->data = i;
	}

	/* Scale up is high priority */
	up_wq = alloc_workqueue("kinteractive_up", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
	down_wq = create_workqueue("kinteractive_down");
	if (!up_wq || !down_wq)
		return -ENOMEM;

	INIT_WORK(&freq_scale_work, cpufreq_interactive_freq_change_time_work);

	pr_info("[imoseyon] interactive enter\n");
	return cpufreq_register_governor(&cpufreq_gov_interactive);
}
Example #21
int __init f2fs_init_crypto(void)
{
	int res = -ENOMEM;

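	/* High-priority queue used to complete reads that need post-read decryption. */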
	f2fs_read_workqueue = alloc_workqueue("f2fs_crypto", WQ_HIGHPRI, 0);
	if (!f2fs_read_workqueue)
		goto fail;

	f2fs_crypto_ctx_cachep = KMEM_CACHE(f2fs_crypto_ctx,
						SLAB_RECLAIM_ACCOUNT);
	if (!f2fs_crypto_ctx_cachep)
		goto fail;

	f2fs_crypt_info_cachep = KMEM_CACHE(f2fs_crypt_info,
						SLAB_RECLAIM_ACCOUNT);
	if (!f2fs_crypt_info_cachep)
		goto fail;

	return 0;
fail:
	f2fs_exit_crypto();
	return res;
}
Example #22
static int cpu_boost_init(void)
{
	int cpu;
	struct cpu_sync *s;

	cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);

	boost_rem_wq = alloc_workqueue("cpuboost_rem_wq", WQ_HIGHPRI, 0);
	if (!boost_rem_wq)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		s = &per_cpu(sync_info, cpu);
		s->cpu = cpu;
		init_waitqueue_head(&s->sync_wq);
		spin_lock_init(&s->lock);
		INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem);
		s->thread = kthread_run(boost_mig_sync_thread, (void *)cpu, "boost_sync/%d", cpu);
	}
	atomic_notifier_chain_register(&migration_notifier_head, &boost_migration_nb);

	return 0;
}
Example #23
static void gspi_intf_start(PADAPTER padapter)
{
	PGSPI_DATA pgspi;


	if (padapter == NULL) {
		DBG_871X(KERN_ERR "%s: padapter is NULL!\n", __FUNCTION__);
		return;
	}

	pgspi = &adapter_to_dvobj(padapter)->intf_data;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
	pgspi->priv_wq = alloc_workqueue("spi_wq", 0, 0);
#else
	pgspi->priv_wq = create_workqueue("spi_wq");
#endif
	if (pgspi->priv_wq == NULL) {
		DBG_871X(KERN_ERR "%s: alloc workqueue fail!\n", __FUNCTION__);
		return;
	}
	INIT_DELAYED_WORK(&pgspi->irq_work, (void*)spi_irq_work);

	enable_irq(oob_irq);
	//hal dep
	rtw_hal_enable_interrupt(padapter);
}
Example #24
int __init sleepy_plug_init(void)
{
	int rc;

	pr_info("sleepy_plug: version %d.%d by rmbq\n",
		 SLEEPY_PLUG_MAJOR_VERSION,
		 SLEEPY_PLUG_MINOR_VERSION);

	rc = input_register_handler(&sleepy_plug_input_handler);

	sleepy_plug_wq = alloc_workqueue("sleepyplug",
				WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!sleepy_plug_wq)
		return -ENOMEM;

#ifdef CONFIG_POWERSUSPEND
	register_power_suspend(&sleepy_plug_power_suspend_driver);
#endif

	INIT_DELAYED_WORK(&sleepy_plug_work, sleepy_plug_work_fn);
	queue_delayed_work_on(0, sleepy_plug_wq, &sleepy_plug_work,
		msecs_to_jiffies(10));

	return 0;
}
Example #25
int mmpfb_vsync_notify_init(struct mmpfb_info *fbi)
{
	int ret = 0;
	struct mmp_vsync_notifier_node *notifier_node;

	notifier_node = &fbi->vsync.notifier_node;
	ret = device_create_file(fbi->dev, &dev_attr_vsync);
	if (ret < 0) {
		dev_err(fbi->dev, "device attr create fail: %d\n", ret);
		return ret;
	}

	ret = device_create_file(fbi->dev, &dev_attr_vsync_ts);
	if (ret < 0) {
		dev_err(fbi->dev, "device attr create fail: %d\n", ret);
		return ret;
	}

	fbi->vsync.wq =
		alloc_workqueue(fbi->name, WQ_HIGHPRI |
				WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!fbi->vsync.wq) {
		dev_err(fbi->dev, "alloc_workqueue failed\n");
		return -ENOMEM;
	}

	INIT_WORK(&fbi->vsync.work, mmpfb_vsync_notify_work);
	INIT_WORK(&fbi->vsync.fence_work, mmpfb_overlay_fence_work);
	notifier_node->cb_notify = mmpfb_vsync_cb;
	notifier_node->cb_data = (void *)fbi;
	mmp_register_vsync_cb(&fbi->path->vsync,
				notifier_node);

	mmpfb_vcnt_clean(fbi);

	return ret;
}
Example #26
static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
				sizeof(struct tcm_loop_cmd),
				__alignof__(struct tcm_loop_cmd),
				0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for"
			" tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = tcm_loop_register_configfs();
	if (ret)
		goto out_release_core_bus;

	return 0;

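/* Error paths unwind in reverse order of the setup above. */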
out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}
Example #27
int __init
tfw_cache_init(void)
{
	int r = 0;

	if (!tfw_cfg.cache)
		return 0;

	db = tdb_open(tfw_cfg.c_path, tfw_cfg.c_size, 0);
	if (!db)
		return 1;

	cache_mgr_thr = kthread_run(tfw_cache_mgr, NULL, "tfw_cache_mgr");
	if (IS_ERR(cache_mgr_thr)) {
		r = PTR_ERR(cache_mgr_thr);
		TFW_ERR("Can't start cache manager, %d\n", r);
		goto err_thr;
	}

	c_cache = KMEM_CACHE(tfw_cache_work_t, 0);
	if (!c_cache) {
		r = -ENOMEM;
		goto err_cache;
	}

	cache_wq = alloc_workqueue("tfw_cache_wq", WQ_MEM_RECLAIM, 0);
	if (!cache_wq) {
		r = -ENOMEM;
		goto err_wq;
	}

	return 0;
err_wq:
	kmem_cache_destroy(c_cache);
err_cache:
	kthread_stop(cache_mgr_thr);
err_thr:
	tdb_close(db);
	return r;
}
Example #28
mali_error kbase_pm_policy_init(kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	kbdev->pm.gpu_poweroff_wq = alloc_workqueue("kbase_pm_do_poweroff", WQ_HIGHPRI, 1);
	if (NULL == kbdev->pm.gpu_poweroff_wq)
		return MALI_ERROR_OUT_OF_MEMORY;
	INIT_WORK(&kbdev->pm.gpu_poweroff_work, kbasep_pm_do_gpu_poweroff_wq);

	hrtimer_init(&kbdev->pm.shader_poweroff_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	kbdev->pm.shader_poweroff_timer.function = kbasep_pm_do_shader_poweroff_callback;

	kbdev->pm.pm_current_policy = policy_list[2];

	kbdev->pm.pm_current_policy->init(kbdev);

	kbdev->pm.shader_poweroff_time = HR_TIMER_DELAY_NSEC(kbasep_get_config_value(kbdev, kbdev->config_attributes, KBASE_CONFIG_ATTR_PM_SHADER_POWEROFF_TIME) * 1000);

#if SLSI_INTEGRATION
	kbdev->hwcnt.prev_policy = policy_list[2];
#endif

	return MALI_ERROR_NONE;
}
Example #29
/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
							WQ_HIGHPRI, 0);
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
Example #30
static int __init acpi_thermal_init(void)
{
	int result = 0;

	dmi_check_system(thermal_dmi_table);

	if (off) {
		pr_notice(PREFIX "thermal control disabled\n");
		return -ENODEV;
	}

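	/* High-priority, reclaim-safe queue for the driver's PM-related thermal work. */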
	acpi_thermal_pm_queue = alloc_workqueue("acpi_thermal_pm",
						WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
	if (!acpi_thermal_pm_queue)
		return -ENODEV;

	result = acpi_bus_register_driver(&acpi_thermal_driver);
	if (result < 0) {
		destroy_workqueue(acpi_thermal_pm_queue);
		return -ENODEV;
	}

	return 0;
}