Example #1
static void __init gic_pm_init(struct gic_chip_data *gic)
{
	/* One enable bit per PPI: all 32 interrupts fit in a single u32. */
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	/* Two configuration bits per PPI: 16 interrupts per u32. */
	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	cpu_pm_register_notifier(&gic_notifier_block);
}
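For context, a minimal sketch of the CPU PM notifier this routine registers, assuming illustrative gic_cpu_save()/gic_cpu_restore() helpers (the real driver walks its GIC instances): the saved_ppi_* buffers above are filled on CPU_PM_ENTER and replayed on CPU_PM_EXIT.

static int gic_notifier(struct notifier_block *self, unsigned long cmd,
			void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		gic_cpu_save();		/* stash PPI enable/conf in the percpu buffers */
		break;
	case CPU_PM_EXIT:
		gic_cpu_restore();	/* replay them once the core powers back up */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};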
Example #2
void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
{
	void __percpu *p;
	int i;

	/* Older kernels do not allow all GFP flags, in particular atomic
	 * allocation.
	 */
	if (gfp & ~(GFP_KERNEL | __GFP_ZERO))
		return NULL;
	p = __alloc_percpu(size, align);
	if (!p)
		return p;

	if (!(gfp & __GFP_ZERO))
		return p;

	for_each_possible_cpu(i) {
		void *d;

		d = per_cpu_ptr(p, i);
		memset(d, 0, size);
	}
	return p;
}
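A minimal usage sketch for this wrapper (the counter names are illustrative): request zero-initialized per-CPU storage; note that the zeroing is performed by the wrapper's explicit memset pass rather than by the allocator itself.

static u64 __percpu *pkt_counters;	/* hypothetical per-CPU counters */

static int pkt_counters_init(void)
{
	pkt_counters = __alloc_percpu_gfp(sizeof(u64), __alignof__(u64),
					  GFP_KERNEL | __GFP_ZERO);
	return pkt_counters ? 0 : -ENOMEM;
}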
Example #3
/*
 * Mempool for percpu memory.
 */
static void *percpu_mempool_alloc_fn(gfp_t gfp_mask, void *data)
{
	struct percpu_mempool *pcpu_pool = data;
	void __percpu *p;

	/*
	 * Percpu allocator doesn't do NOIO.  This makes percpu mempool
	 * always try reserved elements first, which isn't such a bad idea
	 * given that percpu allocator is pretty heavy and percpu areas are
	 * expensive.
	 */
	if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
		return NULL;

	p = __alloc_percpu(pcpu_pool->size, pcpu_pool->align);
	return (void __kernel __force *)p;
}
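A matching free callback, sketched under the same assumptions (the name mirrors the alloc callback above): mempool hands the element back as a plain kernel pointer, so it is cast back to __percpu before release.

static void percpu_mempool_free_fn(void *element, void *data)
{
	/* undo the __force cast performed by the alloc callback */
	free_percpu((void __percpu __force *)element);
}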
Example #4
/**
 * percpu_ida_init - initialize a percpu tag pool
 * @pool: pool to initialize
 * @nr_tags: number of tags that will be available for allocation
 *
 * Initializes @pool so that it can be used to allocate tags - integers in the
 * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
 * preallocated array of tag structures.
 *
 * Allocation is percpu, but sharding is limited by nr_tags - for best
 * performance, the workload should not span more cpus than nr_tags / 128.
 */
int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
	unsigned long max_size, unsigned long batch_size)
{
	unsigned i, cpu, order;

	memset(pool, 0, sizeof(*pool));

	init_waitqueue_head(&pool->wait);
	spin_lock_init(&pool->lock);
	pool->nr_tags = nr_tags;
	pool->percpu_max_size = max_size;
	pool->percpu_batch_size = batch_size;

	/* Guard against overflow */
	if (nr_tags > (unsigned) INT_MAX + 1) {
		pr_err("percpu_ida_init(): nr_tags too large\n");
		return -EINVAL;
	}

	order = get_order(nr_tags * sizeof(unsigned));
	pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!pool->freelist)
		return -ENOMEM;

	for (i = 0; i < nr_tags; i++)
		pool->freelist[i] = i;

	pool->nr_free = nr_tags;

	pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
				       pool->percpu_max_size * sizeof(unsigned),
				       sizeof(unsigned));
	if (!pool->tag_cpu)
		goto err;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);

	return 0;
err:
	percpu_ida_destroy(pool);
	return -ENOMEM;
}
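A hedged usage sketch: percpu_ida_init() is assumed here to be the convenience wrapper that calls __percpu_ida_init() with default max/batch sizes; the pool and tag count are illustrative.

static struct percpu_ida my_tag_pool;

static int my_driver_init(void)
{
	/* tags 0..127 become available for percpu allocation */
	return percpu_ida_init(&my_tag_pool, 128);
}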
Example #5
static void __init sve_efi_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI))
		return;

	/*
	 * alloc_percpu() warns and prints a backtrace if this goes wrong.
	 * This is evidence of a crippled system and we are returning void,
	 * so no attempt is made to handle this situation here.
	 */
	if (!sve_vl_valid(sve_max_vl))
		goto fail;

	efi_sve_state = __alloc_percpu(
		SVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);
	if (!efi_sve_state)
		goto fail;

	return;

fail:
	panic("Cannot allocate percpu memory for EFI SVE save/restore");
}
Example #6
static int
pcpu_alloc_test(void)
{
	int rv = 0;
#ifndef CONFIG_NEED_PER_CPU_KM
	void __percpu **pcpu;
	size_t size, align;
	int i;

	pcpu = vmalloc(sizeof(void __percpu *) * 35000);
	if (!pcpu)
		return -1;

	for (i = 0; i < 35000; i++) {
		unsigned int r;

		get_random_bytes(&r, sizeof(r));
		size = (r % (PAGE_SIZE / 4)) + 1;

		/*
		 * Power-of-two alignment, at most 1 << 11.
		 */
		get_random_bytes(&r, sizeof(r));
		align = 1 << ((r % 11) + 1);

		pcpu[i] = __alloc_percpu(size, align);
		if (!pcpu[i])
			rv = -1;
	}

	for (i = 0; i < 35000; i++)
		free_percpu(pcpu[i]);

	vfree(pcpu);
#endif
	return rv;
}
Example #7
static int __init xen_guest_init(void)
{
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page = NULL;
	struct device_node *node;
	int len;
	const char *s = NULL;
	const char *version = NULL;
	const char *xen_prefix = "xen,xen-";
	struct resource res;

	node = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!node) {
		pr_debug("No Xen support\n");
		return 0;
	}
	s = of_get_property(node, "compatible", &len);
	if (strlen(xen_prefix) + 3 < len &&
			!strncmp(xen_prefix, s, strlen(xen_prefix)))
		version = s + strlen(xen_prefix);
	if (version == NULL) {
		pr_debug("Xen version not found\n");
		return 0;
	}
	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
		return 0;
	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
	xen_events_irq = irq_of_parse_and_map(node, 0);
	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
			version, xen_events_irq, xen_hvm_resume_frames);
	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();
	if (xen_feature(XENFEAT_dom0))
		xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
	else
		xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			get_zeroed_page(GFP_KERNEL);
	if (!shared_info_page) {
		pr_err("not enough memory\n");
		return -ENOMEM;
	}
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page, we use it in the event channel upcall and in some pvclock
	 * related functions.
	 * The shared info contains exactly 1 CPU (the boot CPU). The guest
	 * is required to use VCPUOP_register_vcpu_info to place vcpu info
	 * for secondary CPUs as they are brought up.
	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
	 */
	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
				       sizeof(struct vcpu_info));
	if (xen_vcpu_info == NULL)
		return -ENOMEM;

	gnttab_init();
	if (!xen_initial_domain())
		xenbus_probe(NULL);

	return 0;
}
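Per the comment above, a hedged sketch of registering one CPU's slice of xen_vcpu_info with the hypervisor via VCPUOP_register_vcpu_info (the function name is illustrative and error handling is elided; the struct fields follow the public Xen interface).

static int xen_vcpu_register(int cpu)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *v = per_cpu_ptr(xen_vcpu_info, cpu);

	/* tell Xen where this CPU's vcpu_info lives */
	info.mfn = __pa(v) >> PAGE_SHIFT;
	info.offset = offset_in_page(v);

	return HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
}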
Example #8
struct net_device *batadv_softif_create(const char *name)
{
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;
	int ret;
	size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;

	soft_iface = alloc_netdev(sizeof(*bat_priv), name,
				  batadv_interface_setup);

	if (!soft_iface)
		goto out;

	bat_priv = netdev_priv(soft_iface);
	bat_priv->soft_iface = soft_iface;
	INIT_WORK(&bat_priv->cleanup_work, batadv_softif_destroy_finish);

	/* batadv_interface_stats() needs to be available as soon as
	 * register_netdevice() has been called
	 */
	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
	if (!bat_priv->bat_counters)
		goto free_soft_iface;

	ret = register_netdevice(soft_iface);
	if (ret < 0) {
		pr_err("Unable to register the batman interface '%s': %i\n",
		       name, ret);
		goto free_bat_counters;
	}

	atomic_set(&bat_priv->aggregated_ogms, 1);
	atomic_set(&bat_priv->bonding, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
	atomic_set(&bat_priv->bridge_loop_avoidance, 0);
#endif
#ifdef CONFIG_BATMAN_ADV_DAT
	atomic_set(&bat_priv->distributed_arp_table, 1);
#endif
	atomic_set(&bat_priv->ap_isolation, 0);
	atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE);
	atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF);
	atomic_set(&bat_priv->gw_sel_class, 20);
	atomic_set(&bat_priv->gw_bandwidth, 41);
	atomic_set(&bat_priv->orig_interval, 1000);
	atomic_set(&bat_priv->hop_penalty, 30);
#ifdef CONFIG_BATMAN_ADV_DEBUG
	atomic_set(&bat_priv->log_level, 0);
#endif
	atomic_set(&bat_priv->fragmentation, 1);
	atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
	atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
	atomic_set(&bat_priv->bcast_seqno, 1);
	atomic_set(&bat_priv->tt.vn, 0);
	atomic_set(&bat_priv->tt.local_changes, 0);
	atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
	atomic_set(&bat_priv->bla.num_requests, 0);
#endif
	bat_priv->tt.last_changeset = NULL;
	bat_priv->tt.last_changeset_len = 0;

	bat_priv->primary_if = NULL;
	bat_priv->num_ifaces = 0;

	ret = batadv_algo_select(bat_priv, batadv_routing_algo);
	if (ret < 0)
		goto unreg_soft_iface;

	ret = batadv_sysfs_add_meshif(soft_iface);
	if (ret < 0)
		goto unreg_soft_iface;

	ret = batadv_debugfs_add_meshif(soft_iface);
	if (ret < 0)
		goto unreg_sysfs;

	ret = batadv_mesh_init(soft_iface);
	if (ret < 0)
		goto unreg_debugfs;

	return soft_iface;

unreg_debugfs:
	batadv_debugfs_del_meshif(soft_iface);
unreg_sysfs:
	batadv_sysfs_del_meshif(soft_iface);
unreg_soft_iface:
	free_percpu(bat_priv->bat_counters);
	unregister_netdevice(soft_iface);
	return NULL;

free_bat_counters:
	free_percpu(bat_priv->bat_counters);
free_soft_iface:
	free_netdev(soft_iface);
out:
	return NULL;
}
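For the read side, a sketch of summing one per-CPU counter slot across CPUs, mirroring the pattern batman-adv pairs with bat_counters (sketched here, not copied from the tree).

static uint64_t batadv_sum_counter(struct batadv_priv *bat_priv, size_t idx)
{
	uint64_t *counters;
	uint64_t sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
		sum += counters[idx];
	}

	return sum;
}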
Example #9
/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		goto err;

	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	/* Old one-argument API: the commit cited in Example #10 later
	 * added the align parameter to __alloc_percpu(). */
	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root) {
			ret = -ENOMEM;
			goto err;
		}
	}

	dir = debugfs_create_dir(buts->name, blk_tree_root);

	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
				buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	bt->start_lba = buts->start_lba;
	bt->end_lba = buts->end_lba;
	if (!bt->end_lba)
		bt->end_lba = -1ULL;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	mutex_lock(&blk_probe_mutex);
	if (atomic_add_return(1, &blk_probes_ref) == 1) {
		ret = blk_register_tracepoints();
		if (ret)
			goto probe_err;
	}
	mutex_unlock(&blk_probe_mutex);

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	return 0;
probe_err:
	atomic_dec(&blk_probes_ref);
	mutex_unlock(&blk_probe_mutex);
err:
	if (bt) {
		if (bt->msg_file)
			debugfs_remove(bt->msg_file);
		if (bt->dropped_file)
			debugfs_remove(bt->dropped_file);
		free_percpu(bt->sequence);
		free_percpu(bt->msg_data);
		if (bt->rchan)
			relay_close(bt->rchan);
		kfree(bt);
	}
	return ret;
}
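A sketch of the ioctl-facing caller (the signature is assumed for this kernel vintage, and the real function also tears the trace down when the copy-out fails): copy the user's setup struct in, run the setup, and hand the adjusted struct back.

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	if (copy_from_user(&buts, arg, sizeof(buts)))
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, &buts);
	if (ret)
		return ret;

	/* return the sanitized name and filled-in defaults to userspace */
	if (copy_to_user(arg, &buts, sizeof(buts)))
		return -EFAULT;

	return 0;
}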
Example #10
/* kernel commit f2a8205c */
void foo(void)
{
	(void)__alloc_percpu(sizeof(int), 8);
}
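For completeness, a sketch that pairs the two-argument allocation with its release (the function name is illustrative): the returned __percpu pointer must be handed back with free_percpu().

void foo_balanced(void)
{
	int __percpu *p = __alloc_percpu(sizeof(int), 8);

	if (p)
		free_percpu(p);
}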