/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->activity = 0;
#ifdef CONFIG_BRCM_DEBUG_RWSEM
	sem->wr_owner = NULL;
#endif
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
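
For context, callers normally reach __init_rwsem() through the init_rwsem() wrapper, which supplies a unique lockdep class key per call site. A minimal sketch of that standard wrapper:

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)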
Example #2
File: em_sti.c Project: 7799/linux
static int em_sti_probe(struct platform_device *pdev)
{
	struct em_sti_priv *p;
	struct resource *res;
	int irq;

	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq\n");
		return -EINVAL;
	}

	/* map memory, let base point to the STI instance */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	p->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(p->base))
		return PTR_ERR(p->base);

	/* get hold of clock */
	p->clk = devm_clk_get(&pdev->dev, "sclk");
	if (IS_ERR(p->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		return PTR_ERR(p->clk);
	}

	if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt,
			     IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			     dev_name(&pdev->dev), p)) {
		dev_err(&pdev->dev, "failed to request low IRQ\n");
		return -ENOENT;
	}

	raw_spin_lock_init(&p->lock);
	em_sti_register_clockevent(p);
	em_sti_register_clocksource(p);
	return 0;
}
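
For reference, a probe routine like this is typically wired into the driver model roughly as follows; this is only a sketch (the real em_sti driver also supplies a remove callback and an OF match table):

static struct platform_driver em_sti_device_driver = {
	.probe		= em_sti_probe,
	.driver		= {
		.name	= "em_sti",
	}
};

module_platform_driver(em_sti_device_driver);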
Example #3
/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}
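
The static-initializer counterpart of this function looks roughly like the sketch below; the field layout is taken from the function above, and the kernel's real __RWSEM_INITIALIZER adds further fields under the debug and spin-on-owner configs:

#define __RWSEM_INITIALIZER(name)					\
	{ .count = RWSEM_UNLOCKED_VALUE,				\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED((name).wait_lock),	\
	  .wait_list = LIST_HEAD_INIT((name).wait_list) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)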
Example #4
static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
		events->events = per_cpu(hw_events, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		raw_spin_lock_init(&events->pmu_lock);
	}

	cpu_pmu->get_hw_events	= cpu_pmu_get_cpu_events;
	cpu_pmu->request_irq	= cpu_pmu_request_irq;
	cpu_pmu->free_irq	= cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu->reset)
		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
}
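
The get_hw_events callback installed above is normally just a per-CPU accessor; a plausible sketch, assuming the cpu_hw_events per-CPU variable from the loop:

static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
{
	return this_cpu_ptr(&cpu_hw_events);
}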
Example #5
/*
 * Functions related to boot-time initialization:
 */
static void __cpuinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int *lock_init = &per_cpu(hrtimer_base_lock_init, cpu);
	int i;

	if ((*lock_init) != cpu) {
		*lock_init = cpu;
		raw_spin_lock_init(&cpu_base->lock);
		pr_info("hrtimer base lock initialized for cpu%d\n", cpu);
	}

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		cpu_base->clock_base[i].cpu_base = cpu_base;
		timerqueue_init_head(&cpu_base->clock_base[i].active);
	}

	hrtimer_init_hres(cpu_base);
}
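
The hrtimer_init_hres() call at the end only resets the high-resolution bookkeeping; in kernels of this vintage it is roughly the following (a sketch, not necessarily this tree's exact code):

static void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}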
Example #6
/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	sp->level[0] = &sp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	rcu_for_each_node_breadth_first(sp, snp) {
		raw_spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &sp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == sp->level[level + 1])
			level++;
		snp->srcu_parent = sp->level[level - 1] +
				   (snp - sp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = sp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		raw_spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->sp = sp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}
Example #7
int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}
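
The irq_desc[] table walked above is the static descriptor array used by !SPARSE_IRQ builds; its declaration looks roughly like this (initializer details vary across kernel versions):

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};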
Example #8
static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	raw_spin_lock_init(&desc->lock);
	desc->irq = irq;
#ifdef CONFIG_SMP
	desc->node = node;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, node, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, node);
}
Example #9
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
    int err;
    int cpu;
    struct pmu_hw_events __percpu *cpu_hw_events;

    cpu_hw_events = alloc_percpu(struct pmu_hw_events);
    if (!cpu_hw_events)
        return -ENOMEM;

    cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
    err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
    if (err)
        goto out_hw_events;

    for_each_possible_cpu(cpu) {
        struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
        raw_spin_lock_init(&events->pmu_lock);
        events->percpu_pmu = cpu_pmu;
    }

    cpu_pmu->hw_events	= cpu_hw_events;
    cpu_pmu->request_irq	= cpu_pmu_request_irq;
    cpu_pmu->free_irq	= cpu_pmu_free_irq;

    /* Ensure the PMU has sane values out of reset. */
    if (cpu_pmu->reset)
        on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);

    /* If no interrupts available, set the corresponding capability flag */
    if (!platform_get_irq(cpu_pmu->plat_device, 0))
        cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

    return 0;

out_hw_events:
    free_percpu(cpu_hw_events);
    return err;
}
Example #10
static int init_rootdomain(struct root_domain *rd)
{
	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
		goto out;
	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
		goto free_span;
	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
		goto free_online;
	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
		goto free_dlo_mask;

#ifdef HAVE_RT_PUSH_IPI
	rd->rto_cpu = -1;
	raw_spin_lock_init(&rd->rto_lock);
	init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
#endif

	init_dl_bw(&rd->dl_bw);
	if (cpudl_init(&rd->cpudl) != 0)
		goto free_rto_mask;

	if (cpupri_init(&rd->cpupri) != 0)
		goto free_cpudl;
	return 0;

free_cpudl:
	cpudl_cleanup(&rd->cpudl);
free_rto_mask:
	free_cpumask_var(rd->rto_mask);
free_dlo_mask:
	free_cpumask_var(rd->dlo_mask);
free_online:
	free_cpumask_var(rd->online);
free_span:
	free_cpumask_var(rd->span);
out:
	return -ENOMEM;
}
Example #11
static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
		events->events = per_cpu(hw_events, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		per_cpu(percpu_pmu, cpu) = cpu_pmu;
	}

	cpu_pmu->get_hw_events	= cpu_pmu_get_cpu_events;
	cpu_pmu->request_irq	= cpu_pmu_request_irq;
	cpu_pmu->free_irq	= cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu->reset)
		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);

	/* If no interrupts available, set the corresponding capability flag */
	if (!platform_get_irq(cpu_pmu->plat_device, 0))
		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
}
Example #12
/*
 * Functions related to boot-time initialization:
 */
static void __cpuinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	static char __cpuinitdata cpu_base_done[NR_CPUS];
	int i;
	unsigned long flags;

	if (!cpu_base_done[cpu]) {
		raw_spin_lock_init(&cpu_base->lock);
		cpu_base_done[cpu] = 1;
	}

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		cpu_base->clock_base[i].cpu_base = cpu_base;
		timerqueue_init_head(&cpu_base->clock_base[i].active);
	}

	hrtimer_init_hres(cpu_base);

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}
Example #13
/*
 * cpudl_init - initialize the cpudl structure
 * @cp: the cpudl max-heap context
 */
int cpudl_init(struct cpudl *cp)
{
	int i;

	memset(cp, 0, sizeof(*cp));
	raw_spin_lock_init(&cp->lock);
	cp->size = 0;

	cp->elements = kcalloc(nr_cpu_ids,
			       sizeof(struct cpudl_item),
			       GFP_KERNEL);
	if (!cp->elements)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&cp->free_cpus, GFP_KERNEL)) {
		kfree(cp->elements);
		return -ENOMEM;
	}

	for_each_possible_cpu(i)
		cp->elements[i].idx = IDX_INVALID;

	return 0;
}
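
The matching teardown releases what cpudl_init() allocated, in reverse order; a minimal sketch consistent with the error path above:

void cpudl_cleanup(struct cpudl *cp)
{
	free_cpumask_var(cp->free_cpus);
	kfree(cp->elements);
}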
Example #14
File: sh_cmt.c Project: 7799/linux
static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res, *res2;
	int irq, ret;
	ret = -ENXIO;

	memset(p, 0, sizeof(*p));
	p->pdev = pdev;

	if (!cfg) {
		dev_err(&p->pdev->dev, "missing platform data\n");
		goto err0;
	}

	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	/* optional resource for the shared timer start/stop register */
	res2 = platform_get_resource(p->pdev, IORESOURCE_MEM, 1);

	irq = platform_get_irq(p->pdev, 0);
	if (irq < 0) {
		dev_err(&p->pdev->dev, "failed to get irq\n");
		goto err0;
	}

	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* map second resource for CMSTR */
	p->mapbase_str = ioremap_nocache(res2 ? res2->start :
					 res->start - cfg->channel_offset,
					 res2 ? resource_size(res2) : 2);
	if (p->mapbase_str == NULL) {
		dev_err(&p->pdev->dev, "failed to remap I/O second memory\n");
		goto err1;
	}

	/* request irq using setup_irq() (too early for request_irq()) */
	p->irqaction.name = dev_name(&p->pdev->dev);
	p->irqaction.handler = sh_cmt_interrupt;
	p->irqaction.dev_id = p;
	p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;

	/* get hold of clock */
	p->clk = clk_get(&p->pdev->dev, "cmt_fck");
	if (IS_ERR(p->clk)) {
		dev_err(&p->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto err2;
	}

	ret = clk_prepare(p->clk);
	if (ret < 0)
		goto err3;

	if (res2 && (resource_size(res2) == 4)) {
		/* assume both CMSTR and CMCSR to be 32-bit */
		p->read_control = sh_cmt_read32;
		p->write_control = sh_cmt_write32;
	} else {
		p->read_control = sh_cmt_read16;
		p->write_control = sh_cmt_write16;
	}

	if (resource_size(res) == 6) {
		p->width = 16;
		p->read_count = sh_cmt_read16;
		p->write_count = sh_cmt_write16;
		p->overflow_bit = 0x80;
		p->clear_bits = ~0x80;
	} else {
		p->width = 32;
		p->read_count = sh_cmt_read32;
		p->write_count = sh_cmt_write32;
		p->overflow_bit = 0x8000;
		p->clear_bits = ~0xc000;
	}

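	/*
	 * Shifting a 32-bit value by 32 bits is undefined behaviour in C,
	 * so the full-width case is handled separately below.
	 */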
	if (p->width == (sizeof(p->max_match_value) * 8))
		p->max_match_value = ~0;
	else
		p->max_match_value = (1 << p->width) - 1;

	p->match_value = p->max_match_value;
	raw_spin_lock_init(&p->lock);

	ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
			      cfg->clockevent_rating,
			      cfg->clocksource_rating);
	if (ret) {
		dev_err(&p->pdev->dev, "registration failed\n");
		goto err4;
	}
	p->cs_enabled = false;

	ret = setup_irq(irq, &p->irqaction);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
		goto err4;
	}

	platform_set_drvdata(pdev, p);

	return 0;
err4:
	clk_unprepare(p->clk);
err3:
	clk_put(p->clk);
err2:
	iounmap(p->mapbase_str);
err1:
	iounmap(p->mapbase);
err0:
	return ret;
}
Example #15
static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
{
	struct megamod_pic *pic;
	int i, irq;
	int mapping[NR_MUX_OUTPUTS];

	pr_info("Initializing C64x+ Megamodule PIC\n");

	pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
	if (!pic) {
		pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
		return NULL;
	}

	pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR,
				      NR_COMBINERS * 32, &megamod_host_ops,
				      IRQ_UNMAPPED);
	if (!pic->irqhost) {
		pr_err("%s: Could not alloc host.\n", np->full_name);
		goto error_free;
	}

	pic->irqhost->host_data = pic;

	raw_spin_lock_init(&pic->lock);

	pic->regs = of_iomap(np, 0);
	if (!pic->regs) {
		pr_err("%s: Could not map registers.\n", np->full_name);
		goto error_free;
	}

	/* Initialize MUX map */
	for (i = 0; i < ARRAY_SIZE(mapping); i++)
		mapping[i] = IRQ_UNMAPPED;

	parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));

	/*
	 * We can have up to 12 interrupts cascading to the core controller.
	 * These cascades can be from the combined interrupt sources or for
	 * individual interrupt sources. The "interrupts" property only
	 * deals with the cascaded combined interrupts. The individual
	 * interrupts muxed to the core controller use the core controller
	 * as their interrupt parent.
	 */
	for (i = 0; i < NR_COMBINERS; i++) {

		irq = irq_of_parse_and_map(np, i);
		if (irq == NO_IRQ)
			continue;

		/*
		 * We count on the core priority interrupts (4 - 15) being
		 * direct mapped. Check that device tree provided something
		 * in that range.
		 */
		if (irq < 4 || irq >= NR_PRIORITY_IRQS) {
			pr_err("%s: combiner-%d virq %d out of range!\n",
				 np->full_name, i, irq);
			continue;
		}

		/* record the mapping */
		mapping[irq - 4] = i;

		pr_debug("%s: combiner-%d cascading to virq %d\n",
			 np->full_name, i, irq);

		cascade_data[i].pic = pic;
		cascade_data[i].index = i;

		/* mask and clear all events in combiner */
		soc_writel(~0, &pic->regs->evtmask[i]);
		soc_writel(~0, &pic->regs->evtclr[i]);

		irq_set_handler_data(irq, &cascade_data[i]);
		irq_set_chained_handler(irq, megamod_irq_cascade);
	}

	/* Finally, set up the MUX registers */
	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
		if (mapping[i] != IRQ_UNMAPPED) {
			pr_debug("%s: setting mux %d to priority %d\n",
				 np->full_name, mapping[i], i + 4);
			set_megamod_mux(pic, mapping[i], i);
		}
	}

	return pic;

error_free:
	kfree(pic);

	return NULL;
}
Example #16
// ARM10C 20141004
// i: 0, node: 0, null
// ARM10C 20141115
// 16, node: 0, owner: NULL
// ARM10C 20141115
// 64, node: 0, owner: NULL
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	// GFP_KERNEL: 0xD0
	gfp_t gfp = GFP_KERNEL;
	// gfp: GFP_KERNEL: 0xD0

	// sizeof(struct irq_desc): 156 bytes, gfp: GFP_KERNEL: 0xD0, node: 0
	// kzalloc_node(156, GFP_KERNEL: 0xD0, 0): kmem_cache#28-o0
	desc = kzalloc_node(sizeof(*desc), gfp, node);
	// desc: kmem_cache#28-o0

	// desc: kmem_cache#28-o0
	if (!desc)
		return NULL;

	/* allocate based on nr_cpu_ids */
	// desc->kstat_irqs: (kmem_cache#28-o0)->kstat_irqs
	// alloc_percpu(unsigned int): allocates a 4-byte per-CPU area
	desc->kstat_irqs = alloc_percpu(unsigned int);
	// desc->kstat_irqs: (kmem_cache#28-o0)->kstat_irqs: 4-byte per-CPU area

	// desc->kstat_irqs: (kmem_cache#28-o0)->kstat_irqs: 4-byte per-CPU area
	if (!desc->kstat_irqs)
		goto err_desc;

	// desc: kmem_cache#28-o0, gfp: GFP_KERNEL: 0xD0, node: 0
	// alloc_masks(kmem_cache#28-o0, GFP_KERNEL: 0xD0, 0): 0
	if (alloc_masks(desc, gfp, node))
		goto err_kstat;
	// what alloc_masks did:
	// (kmem_cache#28-o0)->irq_data.affinity.bits[0]: 0

	// desc->lock: (kmem_cache#28-o0)->lock
	raw_spin_lock_init(&desc->lock);
	// desc->lock: spinlock initialized using (kmem_cache#28-o0)->lock

	// desc->lock: (kmem_cache#28-o0)->lock
	lockdep_set_class(&desc->lock, &irq_desc_lock_class); // null function

	// irq: 0, desc: kmem_cache#28-o0, node: 0, owner: null
	desc_set_defaults(irq, desc, node, owner);

	// what desc_set_defaults did:
	// (kmem_cache#28-o0)->irq_data.irq: 0
	// (kmem_cache#28-o0)->irq_data.chip: &no_irq_chip
	// (kmem_cache#28-o0)->irq_data.chip_data: NULL
	// (kmem_cache#28-o0)->irq_data.handler_data: NULL
	// (kmem_cache#28-o0)->irq_data.msi_desc: NULL
	// (kmem_cache#28-o0)->status_use_accessors: 0xc00
	// (&(kmem_cache#28-o0)->irq_data)->state_use_accessors: 0x10000
	// (kmem_cache#28-o0)->handle_irq: handle_bad_irq
	// (kmem_cache#28-o0)->depth: 1
	// (kmem_cache#28-o0)->irq_count: 0
	// (kmem_cache#28-o0)->irqs_unhandled: 0
	// (kmem_cache#28-o0)->name: NULL
	// (kmem_cache#28-o0)->owner: null
	// [pcp0...3] (kmem_cache#28-o0)->kstat_irqs: 0
	// (kmem_cache#28-o0)->irq_data.node: 0
	// (kmem_cache#28-o0)->irq_data.affinity.bits[0]: 0xF

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}
Example #17
static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
{
	struct megamod_pic *pic;
	int i, irq;
	int mapping[NR_MUX_OUTPUTS];

	pr_info("Initializing C64x+ Megamodule PIC\n");

	pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
	if (!pic) {
		pr_err("%s: Could not alloc PIC structure.\n", np->full_name);
		return NULL;
	}

	pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
					     &megamod_domain_ops, pic);
	if (!pic->irqhost) {
		pr_err("%s: Could not alloc host.\n", np->full_name);
		goto error_free;
	}

	pic->irqhost->host_data = pic;

	raw_spin_lock_init(&pic->lock);

	pic->regs = of_iomap(np, 0);
	if (!pic->regs) {
		pr_err("%s: Could not map registers.\n", np->full_name);
		goto error_free;
	}

	/* Initialize MUX map */
	for (i = 0; i < ARRAY_SIZE(mapping); i++)
		mapping[i] = IRQ_UNMAPPED;

	parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));

	/*
	 * We can have up to 12 interrupts cascading to the core controller.
	 * These cascades can be from the combined interrupt sources or for
	 * individual interrupt sources. The "interrupts" property only
	 * deals with the cascaded combined interrupts. The individual
	 * interrupts muxed to the core controller use the core controller
	 * as their interrupt parent.
	 */
	for (i = 0; i < NR_COMBINERS; i++) {

		irq = irq_of_parse_and_map(np, i);
		if (irq == NO_IRQ)
			continue;

		/*
		 * We count on the core priority interrupts (4 - 15) being
		 * direct mapped. Check that device tree provided something
		 * in that range.
		 */
		if (irq < 4 || irq >= NR_PRIORITY_IRQS) {
			pr_err("%s: combiner-%d virq %d out of range!\n",
				 np->full_name, i, irq);
			continue;
		}

		/* record the mapping */
		mapping[irq - 4] = i;

		pr_debug("%s: combiner-%d cascading to virq %d\n",
			 np->full_name, i, irq);

		cascade_data[i].pic = pic;
		cascade_data[i].index = i;

		/* mask and clear all events in combiner */
		soc_writel(~0, &pic->regs->evtmask[i]);
		soc_writel(~0, &pic->regs->evtclr[i]);

		irq_set_handler_data(irq, &cascade_data[i]);
		irq_set_chained_handler(irq, megamod_irq_cascade);
	}

	/* Finally, set up the MUX registers */
	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
		if (mapping[i] != IRQ_UNMAPPED) {
			pr_debug("%s: setting mux %d to priority %d\n",
				 np->full_name, mapping[i], i + 4);
			set_megamod_mux(pic, mapping[i], i);
		}
	}

	return pic;

error_free:
	kfree(pic);

	return NULL;
}
Example #18
static int __init crossbar_of_init(struct device_node *node)
{
	u32 max = 0, entry, reg_size;
	int i, size, reserved = 0;
	const __be32 *irqsr;
	int ret = -ENOMEM;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return ret;

	cb->crossbar_base = of_iomap(node, 0);
	if (!cb->crossbar_base)
		goto err_cb;

	of_property_read_u32(node, "ti,max-crossbar-sources",
			     &cb->max_crossbar_sources);
	if (!cb->max_crossbar_sources) {
		pr_err("missing 'ti,max-crossbar-sources' property\n");
		ret = -EINVAL;
		goto err_base;
	}

	of_property_read_u32(node, "ti,max-irqs", &max);
	if (!max) {
		pr_err("missing 'ti,max-irqs' property\n");
		ret = -EINVAL;
		goto err_base;
	}
	cb->irq_map = kcalloc(max, sizeof(int), GFP_KERNEL);
	if (!cb->irq_map)
		goto err_base;

	cb->int_max = max;

	for (i = 0; i < max; i++)
		cb->irq_map[i] = IRQ_FREE;

	/* Get and mark reserved irqs */
	irqsr = of_get_property(node, "ti,irqs-reserved", &size);
	if (irqsr) {
		size /= sizeof(__be32);

		for (i = 0; i < size; i++) {
			of_property_read_u32_index(node,
						   "ti,irqs-reserved",
						   i, &entry);
			if (entry >= max) {
				pr_err("Invalid reserved entry\n");
				ret = -EINVAL;
				goto err_irq_map;
			}
			cb->irq_map[entry] = IRQ_RESERVED;
		}
	}

	/* Skip irqs hardwired to bypass the crossbar */
	irqsr = of_get_property(node, "ti,irqs-skip", &size);
	if (irqsr) {
		size /= sizeof(__be32);

		for (i = 0; i < size; i++) {
			of_property_read_u32_index(node,
						   "ti,irqs-skip",
						   i, &entry);
			if (entry >= max) {
				pr_err("Invalid skip entry\n");
				ret = -EINVAL;
				goto err_irq_map;
			}
			cb->irq_map[entry] = IRQ_SKIP;
		}
	}


	cb->register_offsets = kcalloc(max, sizeof(int), GFP_KERNEL);
	if (!cb->register_offsets)
		goto err_irq_map;

	of_property_read_u32(node, "ti,reg-size", &reg_size);

	switch (reg_size) {
	case 1:
		cb->write = crossbar_writeb;
		break;
	case 2:
		cb->write = crossbar_writew;
		break;
	case 4:
		cb->write = crossbar_writel;
		break;
	default:
		pr_err("Invalid reg-size property\n");
		ret = -EINVAL;
		goto err_reg_offset;
		break;
	}

	/*
	 * Register offsets are not linear because of the
	 * reserved irqs. so find and store the offsets once.
	 */
	for (i = 0; i < max; i++) {
		if (cb->irq_map[i] == IRQ_RESERVED)
			continue;

		cb->register_offsets[i] = reserved;
		reserved += reg_size;
	}

	of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
	/* Initialize the crossbar with safe map to start with */
	for (i = 0; i < max; i++) {
		if (cb->irq_map[i] == IRQ_RESERVED ||
		    cb->irq_map[i] == IRQ_SKIP)
			continue;

		cb->write(i, cb->safe_map);
	}

	raw_spin_lock_init(&cb->lock);

	return 0;

err_reg_offset:
	kfree(cb->register_offsets);
err_irq_map:
	kfree(cb->irq_map);
err_base:
	iounmap(cb->crossbar_base);
err_cb:
	kfree(cb);

	cb = NULL;
	return ret;
}
Example #19
static int __devinit em_sti_probe(struct platform_device *pdev)
{
	struct em_sti_priv *p;
	struct resource *res;
	int irq, ret;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		ret = -ENOMEM;
		goto err0;
	}

	p->pdev = pdev;
	platform_set_drvdata(pdev, p);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get I/O memory\n");
		ret = -EINVAL;
		goto err0;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq\n");
		ret = -EINVAL;
		goto err0;
	}

	/* map memory, let base point to the STI instance */
	p->base = ioremap_nocache(res->start, resource_size(res));
	if (p->base == NULL) {
		dev_err(&pdev->dev, "failed to remap I/O memory\n");
		ret = -ENXIO;
		goto err0;
	}

	/* get hold of clock */
	p->clk = clk_get(&pdev->dev, "sclk");
	if (IS_ERR(p->clk)) {
		dev_err(&pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto err1;
	}

	if (request_irq(irq, em_sti_interrupt,
			IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			dev_name(&pdev->dev), p)) {
		dev_err(&pdev->dev, "failed to request low IRQ\n");
		ret = -ENOENT;
		goto err2;
	}

	raw_spin_lock_init(&p->lock);
	em_sti_register_clockevent(p);
	em_sti_register_clocksource(p);
	return 0;

err2:
	clk_put(p->clk);
err1:
	iounmap(p->base);
err0:
	kfree(p);
	return ret;
}
Example #20
/* The probe function for a single message register block.
 */
static int mpic_msgr_probe(struct platform_device *dev)
{
    void __iomem *msgr_block_addr;
    int block_number;
    struct resource rsrc;
    unsigned int i;
    unsigned int irq_index;
    struct device_node *np = dev->dev.of_node;
    unsigned int receive_mask;
    const unsigned int *prop;

    if (!np) {
        dev_err(&dev->dev, "Device OF-Node is NULL");
        return -EFAULT;
    }

    /* Allocate the message register array upon the first device
     * registered.
     */
    if (!mpic_msgrs) {
        mpic_msgr_count = mpic_msgr_number_of_registers();
        dev_info(&dev->dev, "Found %d message registers\n",
                 mpic_msgr_count);

        mpic_msgrs = kcalloc(mpic_msgr_count, sizeof(*mpic_msgrs),
                             GFP_KERNEL);
        if (!mpic_msgrs) {
            dev_err(&dev->dev,
                    "No memory for message register blocks\n");
            return -ENOMEM;
        }
    }
    dev_info(&dev->dev, "Of-device full name %s\n", np->full_name);

    /* IO map the message register block. */
    of_address_to_resource(np, 0, &rsrc);
    msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
    if (!msgr_block_addr) {
        dev_err(&dev->dev, "Failed to iomap MPIC message registers");
        return -EFAULT;
    }

    /* Ensure the block has a defined order. */
    block_number = mpic_msgr_block_number(np);
    if (block_number < 0) {
        dev_err(&dev->dev,
                "Failed to find message register block alias\n");
        return -ENODEV;
    }
    dev_info(&dev->dev, "Setting up message register block %d\n",
             block_number);

    /* Grab the receive mask which specifies what registers can receive
     * interrupts.
     */
    prop = of_get_property(np, "mpic-msgr-receive-mask", NULL);
    receive_mask = (prop) ? *prop : 0xF;

    /* Build up the appropriate message register data structures. */
    for (i = 0, irq_index = 0; i < MPIC_MSGR_REGISTERS_PER_BLOCK; ++i) {
        struct mpic_msgr *msgr;
        unsigned int reg_number;

        msgr = kzalloc(sizeof(struct mpic_msgr), GFP_KERNEL);
        if (!msgr) {
            dev_err(&dev->dev, "No memory for message register\n");
            return -ENOMEM;
        }

        reg_number = block_number * MPIC_MSGR_REGISTERS_PER_BLOCK + i;
        msgr->base = msgr_block_addr + i * MPIC_MSGR_STRIDE;
        msgr->mer = (u32 *)((u8 *)msgr->base + MPIC_MSGR_MER_OFFSET);
        msgr->in_use = MSGR_FREE;
        msgr->num = i;
        raw_spin_lock_init(&msgr->lock);

        if (receive_mask & (1 << i)) {
            msgr->irq = irq_of_parse_and_map(np, irq_index);
            if (msgr->irq == NO_IRQ) {
                dev_err(&dev->dev,
                        "Missing interrupt specifier");
                kfree(msgr);
                return -EFAULT;
            }
            irq_index += 1;
        } else {
            msgr->irq = NO_IRQ;
        }

        mpic_msgrs[reg_number] = msgr;
        mpic_msgr_disable(msgr);
        dev_info(&dev->dev, "Register %d initialized: irq %d\n",
                 reg_number, msgr->irq);

    }

    return 0;
}
Example #21
static int dio48e_probe(struct device *dev, unsigned int id)
{
	struct dio48e_gpio *dio48egpio;
	const char *const name = dev_name(dev);
	int err;

	dio48egpio = devm_kzalloc(dev, sizeof(*dio48egpio), GFP_KERNEL);
	if (!dio48egpio)
		return -ENOMEM;

	if (!devm_request_region(dev, base[id], DIO48E_EXTENT, name)) {
		dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
			base[id], base[id] + DIO48E_EXTENT);
		return -EBUSY;
	}

	dio48egpio->chip.label = name;
	dio48egpio->chip.parent = dev;
	dio48egpio->chip.owner = THIS_MODULE;
	dio48egpio->chip.base = -1;
	dio48egpio->chip.ngpio = DIO48E_NGPIO;
	dio48egpio->chip.names = dio48e_names;
	dio48egpio->chip.get_direction = dio48e_gpio_get_direction;
	dio48egpio->chip.direction_input = dio48e_gpio_direction_input;
	dio48egpio->chip.direction_output = dio48e_gpio_direction_output;
	dio48egpio->chip.get = dio48e_gpio_get;
	dio48egpio->chip.set = dio48e_gpio_set;
	dio48egpio->chip.set_multiple = dio48e_gpio_set_multiple;
	dio48egpio->base = base[id];

	raw_spin_lock_init(&dio48egpio->lock);

	err = devm_gpiochip_add_data(dev, &dio48egpio->chip, dio48egpio);
	if (err) {
		dev_err(dev, "GPIO registering failed (%d)\n", err);
		return err;
	}

	/* initialize all GPIO as output */
	outb(0x80, base[id] + 3);
	outb(0x00, base[id]);
	outb(0x00, base[id] + 1);
	outb(0x00, base[id] + 2);
	outb(0x00, base[id] + 3);
	outb(0x80, base[id] + 7);
	outb(0x00, base[id] + 4);
	outb(0x00, base[id] + 5);
	outb(0x00, base[id] + 6);
	outb(0x00, base[id] + 7);

	/* disable IRQ by default */
	inb(base[id] + 0xB);

	err = gpiochip_irqchip_add(&dio48egpio->chip, &dio48e_irqchip, 0,
		handle_edge_irq, IRQ_TYPE_NONE);
	if (err) {
		dev_err(dev, "Could not add irqchip (%d)\n", err);
		return err;
	}

	err = devm_request_irq(dev, irq[id], dio48e_irq_handler, 0, name,
		dio48egpio);
	if (err) {
		dev_err(dev, "IRQ handler registering failed (%d)\n", err);
		return err;
	}

	return 0;
}
Example #22
int fivm_init(void)
{
	printk("FIVM init\n");
	raw_spin_lock_init(&fivm_hook_lock);
	return 0;
}
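
For reference, the lock initialized above would be declared and used along these lines; fivm_hook_example() is a hypothetical caller, shown only to illustrate the raw-spinlock pattern:

static raw_spinlock_t fivm_hook_lock;	/* initialized in fivm_init() */

/* hypothetical caller, for illustration only */
static void fivm_hook_example(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&fivm_hook_lock, flags);
	/* ... inspect or update hook state ... */
	raw_spin_unlock_irqrestore(&fivm_hook_lock, flags);
}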
Example #23
/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;

    if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)){
        printk("[%d:%s] fork fail at cpp 1, clone_flags:0x%x\n", current->pid, current->comm, (unsigned int)clone_flags);
		return ERR_PTR(-EINVAL);
    }

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
    if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND)){
        printk("[%d:%s] fork fail at cpp 2, clone_flags:0x%x\n", current->pid, current->comm, (unsigned int)clone_flags);
		return ERR_PTR(-EINVAL);
    }
	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
    if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM)){
        printk("[%d:%s] fork fail at cpp 3, clone_flags:0x%x\n", current->pid, current->comm, (unsigned int)clone_flags);
		return ERR_PTR(-EINVAL);
    }
	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE){
        printk("[%d:%s] fork fail at cpp 4, clone_flags:0x%x\n", current->pid, current->comm, (unsigned int)clone_flags);
		return ERR_PTR(-EINVAL);
    }

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p){
        printk("[%d:%s] fork fail at dup_task_struct, p=0x%x\n", current->pid, current->comm, (unsigned int)p);
		goto fork_out;
    }

	ftrace_graph_init_task(p);

	rt_mutex_init_task(p);
#ifdef CONFIG_MTK_SCHED_CMP_TGS
	raw_spin_lock_init(&p->thread_group_info_lock);
#endif

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
	p->utimescaled = p->stimescaled = 0;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	p->prev_utime = p->prev_stime = 0;
#endif
#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_begin(current);
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	p->memcg_batch.do_batch = 0;
	p->memcg_batch.memcg = NULL;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_perf;
	/* copy all the process information */
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	if (clone_flags & CLONE_THREAD)
		p->exit_signal = -1;
	else if (clone_flags & CLONE_PARENT)
		p->exit_signal = current->group_leader->exit_signal;
	else
		p->exit_signal = (clone_flags & CSIGNAL);

	p->pdeath_signal = 0;
	p->exit_state = 0;

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We don't wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
    INIT_HLIST_HEAD(&p->task_works);
#ifdef CONFIG_MTK_SCHED_CMP_TGS
	mt_init_thread_group(p);
#endif

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	*/
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		if (thread_group_leader(p)) {
			if (is_child_reaper(pid))
				p->nsproxy->pid_ns->child_reaper = p;

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__this_cpu_inc(process_counts);
		} else {
			current->signal->nr_threads++;
			atomic_inc(&current->signal->live);
			atomic_inc(&current->signal->sigcnt);
			p->group_leader = current->group_leader;
			list_add_tail_rcu(&p->thread_group,
					  &p->group_leader->thread_group);
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);

	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	if (unlikely(clone_flags & CLONE_NEWPID))
		pid_ns_release_proc(p->nsproxy->pid_ns);
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_perf:
	perf_event_free_task(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	cgroup_exit(p, 0);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
    printk("[%d:%s] fork fail retval:0x%x\n", current->pid, current->comm, retval);
	return ERR_PTR(retval);
}
Example #24
static int pdc_intc_probe(struct platform_device *pdev)
{
	struct pdc_intc_priv *priv;
	struct device_node *node = pdev->dev.of_node;
	struct resource *res_regs;
	struct irq_chip_generic *gc;
	unsigned int i;
	int irq, ret;
	u32 val;

	if (!node)
		return -ENOENT;

	/* Get registers */
	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_regs == NULL) {
		dev_err(&pdev->dev, "cannot find registers resource\n");
		return -ENOENT;
	}

	/* Allocate driver data */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}
	raw_spin_lock_init(&priv->lock);
	platform_set_drvdata(pdev, priv);

	/* Ioremap the registers */
	priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start,
				      res_regs->end - res_regs->start);
	if (!priv->pdc_base)
		return -EIO;

	/* Get number of peripherals */
	ret = of_property_read_u32(node, "num-perips", &val);
	if (ret) {
		dev_err(&pdev->dev, "No num-perips node property found\n");
		return -EINVAL;
	}
	if (val > SYS0_HWIRQ) {
		dev_err(&pdev->dev, "num-perips (%u) out of range\n", val);
		return -EINVAL;
	}
	priv->nr_perips = val;

	/* Get number of syswakes */
	ret = of_property_read_u32(node, "num-syswakes", &val);
	if (ret) {
		dev_err(&pdev->dev, "No num-syswakes node property found\n");
		return -EINVAL;
	}
	if (val > SYS0_HWIRQ) {
		dev_err(&pdev->dev, "num-syswakes (%u) out of range\n", val);
		return -EINVAL;
	}
	priv->nr_syswakes = val;

	/* Get peripheral IRQ numbers */
	priv->perip_irqs = devm_kzalloc(&pdev->dev, 4 * priv->nr_perips,
					GFP_KERNEL);
	if (!priv->perip_irqs) {
		dev_err(&pdev->dev, "cannot allocate perip IRQ list\n");
		return -ENOMEM;
	}
	for (i = 0; i < priv->nr_perips; ++i) {
		irq = platform_get_irq(pdev, 1 + i);
		if (irq < 0) {
			dev_err(&pdev->dev, "cannot find perip IRQ #%u\n", i);
			return irq;
		}
		priv->perip_irqs[i] = irq;
	}
	/* check if too many were provided */
	if (platform_get_irq(pdev, 1 + i) >= 0) {
		dev_err(&pdev->dev, "surplus perip IRQs detected\n");
		return -EINVAL;
	}

	/* Get syswake IRQ number */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "cannot find syswake IRQ\n");
		return irq;
	}
	priv->syswake_irq = irq;

	/* Set up an IRQ domain */
	priv->domain = irq_domain_add_linear(node, 16, &irq_generic_chip_ops,
					     priv);
	if (unlikely(!priv->domain)) {
		dev_err(&pdev->dev, "cannot add IRQ domain\n");
		return -ENOMEM;
	}

	/*
	 * Set up 2 generic irq chips with 2 chip types.
	 * The first one for peripheral irqs (only 1 chip type used)
	 * The second one for syswake irqs (edge and level chip types)
	 */
	ret = irq_alloc_domain_generic_chips(priv->domain, 8, 2, "pdc",
					     handle_level_irq, 0, 0,
					     IRQ_GC_INIT_NESTED_LOCK);
	if (ret)
		goto err_generic;

	/* peripheral interrupt chip */

	gc = irq_get_domain_generic_chip(priv->domain, 0);
	gc->unused	= ~(BIT(priv->nr_perips) - 1);
	gc->reg_base	= priv->pdc_base;
	/*
	 * IRQ_ROUTE contains wake bits, so we can't use the generic versions as
	 * they cache the mask
	 */
	gc->chip_types[0].regs.mask		= PDC_IRQ_ROUTE;
	gc->chip_types[0].chip.irq_mask		= perip_irq_mask;
	gc->chip_types[0].chip.irq_unmask	= perip_irq_unmask;
	gc->chip_types[0].chip.irq_set_wake	= pdc_irq_set_wake;

	/* syswake interrupt chip */

	gc = irq_get_domain_generic_chip(priv->domain, 8);
	gc->unused	= ~(BIT(priv->nr_syswakes) - 1);
	gc->reg_base	= priv->pdc_base;

	/* edge interrupts */
	gc->chip_types[0].type			= IRQ_TYPE_EDGE_BOTH;
	gc->chip_types[0].handler		= handle_edge_irq;
	gc->chip_types[0].regs.ack		= PDC_IRQ_CLEAR;
	gc->chip_types[0].regs.mask		= PDC_IRQ_ENABLE;
	gc->chip_types[0].chip.irq_ack		= irq_gc_ack_set_bit;
	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_set_type	= syswake_irq_set_type;
	gc->chip_types[0].chip.irq_set_wake	= pdc_irq_set_wake;
	/* for standby we pass on to the shared syswake IRQ */
	gc->chip_types[0].chip.flags		= IRQCHIP_MASK_ON_SUSPEND;

	/* level interrupts */
	gc->chip_types[1].type			= IRQ_TYPE_LEVEL_MASK;
	gc->chip_types[1].handler		= handle_level_irq;
	gc->chip_types[1].regs.ack		= PDC_IRQ_CLEAR;
	gc->chip_types[1].regs.mask		= PDC_IRQ_ENABLE;
	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
	gc->chip_types[1].chip.irq_set_type	= syswake_irq_set_type;
	gc->chip_types[1].chip.irq_set_wake	= pdc_irq_set_wake;
	/* for standby we pass on to the shared syswake IRQ */
	gc->chip_types[1].chip.flags		= IRQCHIP_MASK_ON_SUSPEND;

	/* Set up the hardware to enable interrupt routing */
	pdc_intc_setup(priv);

	/* Setup chained handlers for the peripheral IRQs */
	for (i = 0; i < priv->nr_perips; ++i) {
		irq = priv->perip_irqs[i];
		irq_set_handler_data(irq, priv);
		irq_set_chained_handler(irq, pdc_intc_perip_isr);
	}

	/* Setup chained handler for the syswake IRQ */
	irq_set_handler_data(priv->syswake_irq, priv);
	irq_set_chained_handler(priv->syswake_irq, pdc_intc_syswake_isr);

	dev_info(&pdev->dev,
		 "PDC IRQ controller initialised (%u perip IRQs, %u syswake IRQs)\n",
		 priv->nr_perips,
		 priv->nr_syswakes);

	return 0;
err_generic:
	irq_domain_remove(priv->domain);
	return ret;
}
Example #25
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}
Example #26
static __devinit int mpic_msgr_probe(struct platform_device *dev)
{
	void __iomem *msgr_block_addr;
	int block_number;
	struct resource rsrc;
	unsigned int i;
	unsigned int irq_index;
	struct device_node *np = dev->dev.of_node;
	unsigned int receive_mask;
	const unsigned int *prop;

	if (!np) {
		dev_err(&dev->dev, "Device OF-Node is NULL");
		return -EFAULT;
	}

	if (!mpic_msgrs) {
		mpic_msgr_count = mpic_msgr_number_of_registers();
		dev_info(&dev->dev, "Found %d message registers\n",
				mpic_msgr_count);

		mpic_msgrs = kzalloc(sizeof(struct mpic_msgr) * mpic_msgr_count,
							 GFP_KERNEL);
		if (!mpic_msgrs) {
			dev_err(&dev->dev,
				"No memory for message register blocks\n");
			return -ENOMEM;
		}
	}
	dev_info(&dev->dev, "Of-device full name %s\n", np->full_name);

	/* IO map the message register block. */
	of_address_to_resource(np, 0, &rsrc);
	msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
	if (!msgr_block_addr) {
		dev_err(&dev->dev, "Failed to iomap MPIC message registers");
		return -EFAULT;
	}

	/* Ensure the block has a defined order. */
	block_number = mpic_msgr_block_number(np);
	if (block_number < 0) {
		dev_err(&dev->dev,
			"Failed to find message register block alias\n");
		return -ENODEV;
	}
	dev_info(&dev->dev, "Setting up message register block %d\n",
			block_number);

	prop = of_get_property(np, "mpic-msgr-receive-mask", NULL);
	receive_mask = (prop) ? *prop : 0xF;

	/* Build up the appropriate message register data structures. */
	for (i = 0, irq_index = 0; i < MPIC_MSGR_REGISTERS_PER_BLOCK; ++i) {
		struct mpic_msgr *msgr;
		unsigned int reg_number;

		msgr = kzalloc(sizeof(struct mpic_msgr), GFP_KERNEL);
		if (!msgr) {
			dev_err(&dev->dev, "No memory for message register\n");
			return -ENOMEM;
		}

		reg_number = block_number * MPIC_MSGR_REGISTERS_PER_BLOCK + i;
		msgr->base = msgr_block_addr + i * MPIC_MSGR_STRIDE;
		msgr->mer = (u32 *)((u8 *)msgr->base + MPIC_MSGR_MER_OFFSET);
		msgr->in_use = MSGR_FREE;
		msgr->num = i;
		raw_spin_lock_init(&msgr->lock);

		if (receive_mask & (1 << i)) {
			struct resource irq;

			if (of_irq_to_resource(np, irq_index, &irq) == NO_IRQ) {
				dev_err(&dev->dev,
						"Missing interrupt specifier");
				kfree(msgr);
				return -EFAULT;
			}
			msgr->irq = irq.start;
			irq_index += 1;
		} else {
			msgr->irq = NO_IRQ;
		}

		mpic_msgrs[reg_number] = msgr;
		mpic_msgr_disable(msgr);
		dev_info(&dev->dev, "Register %d initialized: irq %d\n",
				reg_number, msgr->irq);

	}

	return 0;
}
Example #27
File: rt.c Project: BozkurTR/kernel
// ARM10C 20140830
// [pcp0] &rq->rt: &(&runqueues)->rt, rq: (&runqueues)
void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
	struct rt_prio_array *array;
	int i;

	// &rt_rq->active: (&(&runqueues)->rt)->active
	array = &rt_rq->active;
	// array: (&(&runqueues)->rt)->active

	// MAX_RT_PRIO: 100
	for (i = 0; i < MAX_RT_PRIO; i++) {
		// i: 0, array->queue: (&(&(&runqueues)->rt)->active)->queue
		INIT_LIST_HEAD(array->queue + i);
		// list (&(&(&runqueues)->rt)->active)->queue[0] initialized

		// i: 0, array->bitmap: (&(&(&runqueues)->rt)->active)->bitmap
		__clear_bit(i, array->bitmap);
		// clear bit 0 of (&(&(&runqueues)->rt)->active)->bitmap

		// repeated for i = 1 ... 99
	}

	/* delimiter for bitsearch: */
	// MAX_RT_PRIO: 100, array->bitmap: (&(&(&runqueues)->rt)->active)->bitmap
	__set_bit(MAX_RT_PRIO, array->bitmap);
	// set bit 100 of (&(&(&runqueues)->rt)->active)->bitmap

#if defined CONFIG_SMP // CONFIG_SMP=y
	// &rt_rq->highest_prio.curr: (&(&runqueues)->rt)->highest_prio.curr, MAX_RT_PRIO: 100
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	// &rt_rq->highest_prio.curr: (&(&runqueues)->rt)->highest_prio.curr: 100

	// &rt_rq->highest_prio.next: (&(&runqueues)->rt)->highest_prio.next, MAX_RT_PRIO: 100
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	// &rt_rq->highest_prio.next: (&(&runqueues)->rt)->highest_prio.next: 100

	// &rt_rq->rt_nr_migratory: (&(&runqueues)->rt)->rt_nr_migratory
	rt_rq->rt_nr_migratory = 0;
	// &rt_rq->rt_nr_migratory: (&(&runqueues)->rt)->rt_nr_migratory: 0

	// &rt_rq->overloaded: (&(&runqueues)->rt)->overloaded
	rt_rq->overloaded = 0;
	// &rt_rq->overloaded: (&(&runqueues)->rt)->overloaded: 0

	// &rt_rq->pushable_tasks: &(&(&runqueues)->rt)->pushable_tasks
	plist_head_init(&rt_rq->pushable_tasks);
	// list (&(&(&runqueues)->rt)->pushable_tasks)->node_list initialized
#endif

	// &rt_rq->rt_time: (&(&runqueues)->rt)->rt_time
	rt_rq->rt_time = 0;
	// &rt_rq->rt_time: (&(&runqueues)->rt)->rt_time: 0

	// &rt_rq->rt_throttled: (&(&runqueues)->rt)->rt_throttled
	rt_rq->rt_throttled = 0;
	// &rt_rq->rt_throttled: (&(&runqueues)->rt)->rt_throttled: 0

	// &rt_rq->rt_runtime: (&(&runqueues)->rt)->rt_runtime
	rt_rq->rt_runtime = 0;
	// &rt_rq->rt_runtime: (&(&runqueues)->rt)->rt_runtime: 0

	// &rt_rq->rt_runtime_lock: (&(&runqueues)->rt)->rt_runtime_lock
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
	// spinlock initialized using (&(&runqueues)->rt)->rt_runtime_lock
}
Example #28
static void __init pistachio_clksrc_of_init(struct device_node *node)
{
    struct clk *sys_clk, *fast_clk;
    struct regmap *periph_regs;
    unsigned long rate;
    int ret;

    pcs_gpt.base = of_iomap(node, 0);
    if (!pcs_gpt.base) {
        pr_err("cannot iomap\n");
        return;
    }

    periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
    if (IS_ERR(periph_regs)) {
        pr_err("cannot get peripheral regmap (%lu)\n",
               PTR_ERR(periph_regs));
        return;
    }

    /* Switch to using the fast counter clock */
    ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL,
                             0xf, 0x0);
    if (ret)
        return;

    sys_clk = of_clk_get_by_name(node, "sys");
    if (IS_ERR(sys_clk)) {
        pr_err("clock get failed (%lu)\n", PTR_ERR(sys_clk));
        return;
    }

    fast_clk = of_clk_get_by_name(node, "fast");
    if (IS_ERR(fast_clk)) {
        pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk));
        return;
    }

    ret = clk_prepare_enable(sys_clk);
    if (ret < 0) {
        pr_err("failed to enable clock (%d)\n", ret);
        return;
    }

    ret = clk_prepare_enable(fast_clk);
    if (ret < 0) {
        pr_err("failed to enable clock (%d)\n", ret);
        clk_disable_unprepare(sys_clk);
        return;
    }

    rate = clk_get_rate(fast_clk);

    /* Disable irq's for clocksource usage */
    gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
    gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
    gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
    gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);

    /* Enable timer block */
    writel(TIMER_ME_GLOBAL, pcs_gpt.base);

    raw_spin_lock_init(&pcs_gpt.lock);
    sched_clock_register(pistachio_read_sched_clock, 32, rate);
    clocksource_register_hz(&pcs_gpt.cs, rate);
}
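
The sched-clock callback registered above just reads the free-running counter; a sketch, assuming the driver's gpt_readl() helper mirrors the gpt_writel() calls seen here and that a TIMER_CURRENT_VALUE register offset exists:

static u64 notrace pistachio_read_sched_clock(void)
{
	return gpt_readl(&pcs_gpt.base, TIMER_CURRENT_VALUE, 0);
}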