/*
 * Set up the cross-CPU function-call infrastructure: give every possible
 * CPU an empty call_single_queue llist, then run the per-CPU prepare hook
 * for the boot CPU.
 */
void __init call_function_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		init_llist_head(&per_cpu(call_single_queue, cpu));

	smpcfd_prepare_cpu(smp_processor_id());
}
/*
 * Module init for the null block driver: validate/clamp module parameters,
 * set up the per-CPU softirq completion queues, register the block major
 * and create the requested number of null devices.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init null_init(void)
{
	unsigned int i;

#if !defined(CONFIG_SMP)
	/* Softirq completion needs SMP; silently fall back otherwise. */
	if (irqmode == NULL_IRQ_SOFTIRQ) {
		pr_warn("null_blk: softirq completions not available.\n");
		pr_warn("null_blk: using direct completions.\n");
		irqmode = NULL_IRQ_NONE;
	}
#endif

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			/* Fix: message previously lacked its trailing newline,
			 * which lets the next printk merge into this line. */
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	/* Initialize a separate list for each CPU for issuing softirqs */
	for_each_possible_cpu(i) {
		struct completion_queue *cq = &per_cpu(completion_queues, i);

		init_llist_head(&cq->list);

		if (irqmode != NULL_IRQ_TIMER)
			continue;

		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cq->timer.function = null_cmd_timer_expired;
	}

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			/*
			 * NOTE(review): devices created by earlier loop
			 * iterations are not torn down here — confirm whether
			 * a matching null_del_dev()-style unwind is needed
			 * before unregistering the major.
			 */
			unregister_blkdev(null_major, "nullb");
			return -EINVAL;
		}
	}

	pr_info("null: module loaded\n");
	return 0;
}
struct r92su *r92su_alloc(struct device *main_dev) { struct r92su *r92su = NULL; struct wiphy *wiphy; int err; wiphy = wiphy_new(&r92su_cfg80211_ops, sizeof(struct r92su)); if (!wiphy) { err = -ENOMEM; goto err_out; } r92su = wiphy_priv(wiphy); r92su->wdev.wiphy = wiphy; mutex_init(&r92su->lock); spin_lock_init(&r92su->rx_path); if (modparam_noht) r92su->disable_ht = true; INIT_LIST_HEAD(&r92su->sta_list); /* Note: The sta_lock is only needed, if an entry in the * station list is updated. The station data itself is * protected by RCU. */ spin_lock_init(&r92su->sta_lock); set_wiphy_dev(r92su->wdev.wiphy, main_dev); r92su->wdev.iftype = NL80211_IFTYPE_STATION; wiphy->privid = r92su_priv_id; wiphy->mgmt_stypes = r92su_default_mgmt_stypes; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MONITOR); wiphy->max_scan_ssids = 1; wiphy->max_scan_ie_len = 256; wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; wiphy->cipher_suites = r92su_chiper_suites; wiphy->n_cipher_suites = ARRAY_SIZE(r92su_chiper_suites); wiphy->bss_priv_size = sizeof(struct r92su_bss_priv); init_completion(&r92su->scan_done); init_llist_head(&r92su->add_bss_list); INIT_WORK(&r92su->add_bss_work, r92su_bss_add_work); INIT_WORK(&r92su->connect_bss_work, r92su_bss_connect_work); INIT_WORK(&r92su->disconnect_work, r92su_disconnect_work); INIT_DELAYED_WORK(&r92su->survey_done_work, r92su_survey_done_work); r92su_hw_init(r92su); r92su->wq = create_singlethread_workqueue(R92SU_DRVNAME); if (!r92su->wq) { err = -ENOMEM; goto err_out; } return r92su; err_out: r92su_unregister(r92su); r92su_free(r92su); return ERR_PTR(err); }
/*---------------------------------------------------------------------------*/ void *xio_ev_loop_init(unsigned long flags, struct xio_context *ctx, struct xio_loop_ops *loop_ops) { struct xio_ev_loop *loop; char queue_name[64]; loop = kzalloc(sizeof(struct xio_ev_loop), GFP_KERNEL); if (loop == NULL) { xio_set_error(ENOMEM); ERROR_LOG("kmalloc failed. %m\n"); goto cleanup0; } set_bit(XIO_EV_LOOP_STOP, &loop->states); init_llist_head(&loop->ev_llist); /* use default implementation */ loop->run = priv_ev_loop_run; loop->stop = priv_ev_loop_stop; loop->loop_object = loop; switch (flags) { case XIO_LOOP_USER_LOOP: /* override with user provided routines and object */ loop->run = loop_ops->run; loop->stop = loop_ops->stop; loop->add_event = loop_ops->add_event; loop->loop_object = loop_ops->ev_loop; break; case XIO_LOOP_GIVEN_THREAD: loop->add_event = priv_ev_add_thread; init_waitqueue_head(&loop->wait); break; case XIO_LOOP_TASKLET: loop->add_event = priv_ev_add_tasklet; tasklet_init(&loop->tasklet, priv_ev_loop_run_tasklet, (unsigned long)loop); break; case XIO_LOOP_WORKQUEUE: /* temp (also change to single thread) */ sprintf(queue_name, "xio-%p", loop); /* check flags and bw comp */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) loop->workqueue = create_workqueue(queue_name); #else loop->workqueue = alloc_workqueue(queue_name, WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); #endif if (!loop->workqueue) { ERROR_LOG("workqueue create failed.\n"); goto cleanup1; } loop->add_event = priv_ev_add_workqueue; break; default: ERROR_LOG("wrong type. %lu\n", flags); goto cleanup1; } loop->flags = flags; loop->ctx = ctx; return loop; cleanup1: clear_bit(XIO_EV_LOOP_STOP, &loop->states); kfree(loop); cleanup0: ERROR_LOG("event loop creation failed.\n"); return NULL; }
/*
 * Module init for the null block driver (LightNVM-capable variant):
 * validate/clamp module parameters, set up the per-CPU softirq completion
 * queues, register the block major, optionally create the LightNVM ppa
 * cache, and create the requested number of null devices.
 *
 * Returns 0 on success or a negative errno; all partially acquired
 * resources are unwound on failure.
 */
static int __init null_init(void)
{
	int ret = -EINVAL;
	unsigned int i;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	/* LightNVM constrains both the block size and the queue mode. */
	if (use_lightnvm && bs != 4096) {
		pr_warn("null_blk: LightNVM only supports 4k block size\n");
		pr_warn("null_blk: defaults block size to 4k\n");
		bs = 4096;
	}

	if (use_lightnvm && queue_mode != NULL_Q_MQ) {
		pr_warn("null_blk: LightNVM only supported for blk-mq\n");
		pr_warn("null_blk: defaults queue mode to blk-mq\n");
		queue_mode = NULL_Q_MQ;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			/* Fix: message previously lacked its trailing
			 * newline, letting the next printk merge in. */
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	/* Initialize a separate list for each CPU for issuing softirqs */
	for_each_possible_cpu(i) {
		struct completion_queue *cq = &per_cpu(completion_queues, i);

		init_llist_head(&cq->list);

		if (irqmode != NULL_IRQ_TIMER)
			continue;

		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cq->timer.function = null_cmd_timer_expired;
	}

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	if (use_lightnvm) {
		ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
								0, 0, NULL);
		if (!ppa_cache) {
			pr_err("null_blk: unable to create ppa cache\n");
			ret = -ENOMEM;
			/* Fix: this path used to return without calling
			 * unregister_blkdev(), leaking the major number. */
			goto err_blkdev;
		}
	}

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev())
			goto err_ppa;
	}

	pr_info("null: module loaded\n");
	return 0;

err_ppa:
	kmem_cache_destroy(ppa_cache);
err_blkdev:
	unregister_blkdev(null_major, "nullb");
	return ret;
}