Example #1
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}
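For context, alloc_desc() is the worker for the CONFIG_SPARSE_IRQ allocation path. Below is a minimal sketch of its caller alloc_descs(), modeled on kernels of the same vintage as this example (sparse_irq_lock, irq_insert_desc() and the allocated_irqs bitmap are file-local helpers in kernel/irq/irqdesc.c of that era); treat it as an illustration, not the verbatim code of any one release:

/* Sketch of the alloc_desc() caller, modeled on 3.x kernel/irq/irqdesc.c. */
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		/* Publish the new descriptor in the sparse radix tree. */
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	/* Unwind the descriptors that were already allocated... */
	for (i--; i >= 0; i--)
		free_desc(start + i);

	/* ...and give the reserved irq number range back. */
	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}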
Example #2
static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
				   const struct cpumask *affinity,
				   struct module *owner)
{
	struct irq_desc *desc;

	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	mutex_init(&desc->request_mutex);
	init_rcu_head(&desc->rcu);

	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);
	kobject_init(&desc->kobj, &irq_kobj_type);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}
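The extra initialization in this newer variant (init_rcu_head(), kobject_init()) pairs with a teardown path that does not kfree() the descriptor directly: the sparse free path hands desc->rcu to call_rcu(), and the final kobject reference drop does the actual freeing, so lockless readers never see a freed descriptor. A sketch of those two helpers, roughly as they appear in kernels of this vintage:

/* Sketch of the matching teardown path, modeled on 4.x kernel/irq/irqdesc.c. */
static void irq_kobj_release(struct kobject *kobj)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	/* Undo everything alloc_desc() set up, in reverse order. */
	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	/* Drop the last reference; irq_kobj_release() frees the memory. */
	kobject_put(&desc->kobj);
}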
Example #3
int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

#ifdef CONFIG_DEBUG_PRINTK
	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
#else
	;
#endif

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node);
	}
	return arch_early_irq_init();
}
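This early_irq_init() variant belongs to the !CONFIG_SPARSE_IRQ build, where irq_desc is a statically sized array rather than a radix tree. For reference, the array is declared in the same file, approximately as:

/* Sketch of the static descriptor array for !CONFIG_SPARSE_IRQ builds. */
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};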
Example #4
/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
Example #5
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
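This free_desc() is again the !CONFIG_SPARSE_IRQ variant: with a static array nothing can actually be freed, so the descriptor is only reset to its defaults. The public entry point that loops over it is irq_free_descs(); a simplified sketch follows (the exact locking differs between kernel versions):

/* Sketch of the public free path that calls free_desc() per irq. */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	unsigned int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	mutex_lock(&sparse_irq_lock);
	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	/* Mark the irq numbers as available for allocation again. */
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}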
Example #6
int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		mutex_init(&desc[i].request_mutex);
		desc_set_defaults(i, &desc[i], node, NULL, NULL);
	}
	return arch_early_irq_init();
}
Example #7
// ARM10C 20141004
// i: 0, node: 0, null
// ARM10C 20141115
// 16, node: 0, owner: NULL
// ARM10C 20141115
// 64, node: 0, owner: NULL
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	// GFP_KERNEL: 0xD0
	gfp_t gfp = GFP_KERNEL;
	// gfp: GFP_KERNEL: 0xD0

	// sizeof(struct irq_desc): 156 bytes, gfp: GFP_KERNEL: 0xD0, node: 0
	// kzalloc_node(156, GFP_KERNEL: 0xD0, 0): kmem_cache#28-o0
	desc = kzalloc_node(sizeof(*desc), gfp, node);
	// desc: kmem_cache#28-o0

	// desc: kmem_cache#28-o0
	if (!desc)
		return NULL;

	/* allocate based on nr_cpu_ids */
	// desc->kstat_irqs: (kmem_cache#28-o0)->kstat_irqs
	// alloc_percpu(unsigned int): allocates a per-cpu 4-byte area
	desc->kstat_irqs = alloc_percpu(unsigned int);
	// desc->kstat_irqs: (kmem_cache#28-o0)->kstat_irqs: per-cpu 4-byte area

	// desc->kstat_irqs: (kmem_cache#28-o0)->kstat_irqs: per-cpu 4-byte area
	if (!desc->kstat_irqs)
		goto err_desc;

	// desc: kmem_cache#28-o0, gfp: GFP_KERNEL: 0xD0, node: 0
	// alloc_masks(kmem_cache#28-o0, GFP_KERNEL: 0xD0, 0): 0
	if (alloc_masks(desc, gfp, node))
		goto err_kstat;
	// what alloc_masks did:
	// (kmem_cache#28-o0)->irq_data.affinity.bits[0]: 0

	// desc->lock: (kmem_cache#28-o0)->lock
	raw_spin_lock_init(&desc->lock);
	// spinlock initialization performed using desc->lock: (kmem_cache#28-o0)->lock

	// desc->lock: (kmem_cache#28-o0)->lock
	lockdep_set_class(&desc->lock, &irq_desc_lock_class); // null function

	// irq: 0, desc: kmem_cache#28-o0, node: 0, owner: null
	desc_set_defaults(irq, desc, node, owner);

	// what desc_set_defaults did:
	// (kmem_cache#28-o0)->irq_data.irq: 0
	// (kmem_cache#28-o0)->irq_data.chip: &no_irq_chip
	// (kmem_cache#28-o0)->irq_data.chip_data: NULL
	// (kmem_cache#28-o0)->irq_data.handler_data: NULL
	// (kmem_cache#28-o0)->irq_data.msi_desc: NULL
	// (kmem_cache#28-o0)->status_use_accessors: 0xc00
	// (&(kmem_cache#28-o0)->irq_data)->state_use_accessors: 0x10000
	// (kmem_cache#28-o0)->handle_irq: handle_bad_irq
	// (kmem_cache#28-o0)->depth: 1
	// (kmem_cache#28-o0)->irq_count: 0
	// (kmem_cache#28-o0)->irqs_unhandled: 0
	// (kmem_cache#28-o0)->name: NULL
	// (kmem_cache#28-o0)->owner: null
	// [pcp0...3] (kmem_cache#28-o0)->kstat_irqs: 0
	// (kmem_cache#28-o0)->irq_data.node: 0
	// (kmem_cache#28-o0)->irq_data.affinity.bits[0]: 0xF

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}
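The annotation block above enumerates, field by field, what desc_set_defaults() does. For readability, here is a sketch of that helper as it looked in kernels of this vintage; it matches the annotated effects line by line, but is a reconstruction rather than the exact code of any one release:

/* Sketch of desc_set_defaults(), modeled on 3.x kernel/irq/irqdesc.c. */
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	/* Reset status/state accessors to the default init flags. */
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	/* Zero the per-cpu interrupt statistics on every possible CPU. */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}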