Example #1
static int __cpuinit gic_eoimode_init(struct vmm_devtree_node *node)
{
	int rc;

	if (vmm_smp_is_bootcpu()) {
		/* Boot CPU: probe the GIC from the device tree with
		 * EOImode handling enabled (third argument TRUE)
		 */
		rc = gic_devtree_init(node, NULL, TRUE);
	} else {
		/* Secondary CPUs: only the per-CPU GIC setup is needed */
		gic_secondary_init(0);
		rc = VMM_OK;
	}

	return rc;
}
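All of the examples on this page use vmm_smp_is_bootcpu() to split one-time initialization from per-CPU work. The sketch below restates that pattern as a generic helper; the helper and callback names are hypothetical and only illustrate what gic_eoimode_init() does by hand.

/* Hypothetical helper (names assumed, not part of the example above):
 * run the heavyweight probe exactly once on the boot CPU and a lighter
 * per-CPU step on every secondary CPU.
 */
static int run_once_then_per_cpu(int (*boot_setup)(void),
				 void (*percpu_setup)(void))
{
	if (vmm_smp_is_bootcpu()) {
		return boot_setup();	/* one-time setup, boot CPU only */
	}

	percpu_setup();			/* repeated on each secondary CPU */

	return VMM_OK;
}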
Example #2
static int __init fpga_init(struct vmm_devtree_node *node)
{
    int rc;
    virtual_addr_t base;
    u32 clear_mask;
    u32 valid_mask;
    u32 picen_mask;
    u32 irq_start;
    u32 parent_irq;

    BUG_ON(!vmm_smp_is_bootcpu());

    rc = vmm_devtree_request_regmap(node, &base, 0, "Versatile SIC");
    WARN(rc, "unable to map fpga irq registers\n");

    if (vmm_devtree_read_u32(node, "irq_start", &irq_start)) {
        irq_start = 0;
    }

    if (vmm_devtree_read_u32(node, "clear-mask", &clear_mask)) {
        clear_mask = 0;
    }

    if (vmm_devtree_read_u32(node, "valid-mask", &valid_mask)) {
        valid_mask = 0;
    }

    /* Some chips are cascaded from a parent IRQ */
    if (vmm_devtree_irq_get(node, &parent_irq, 0)) {
        parent_irq = 0xFFFFFFFF;
    }

    fpga_irq_init((void *)base, "FPGA",
                  irq_start, parent_irq,
                  valid_mask, node);

    vmm_writel(clear_mask, (void *)base + IRQ_ENABLE_CLEAR);
    vmm_writel(clear_mask, (void *)base + FIQ_ENABLE_CLEAR);

    /* For VersatilePB, we have interrupts from 21 to 31 capable
     * of being routed directly to the parent interrupt controller
     * (i.e. VIC). This is controlled by setting PIC_ENABLEx.
     */
    if (!vmm_devtree_read_u32(node, "picen-mask", &picen_mask)) {
        vmm_writel(picen_mask, (void *)base + PICEN_SET);
    }

    return 0;
}
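The three optional properties above ("irq_start", "clear-mask", "valid-mask") all follow the same "read a u32, fall back to a default if absent" pattern. A small helper like the hypothetical one below captures it; only vmm_devtree_read_u32() comes from the example, the helper name is an assumption.

/* Hypothetical helper: read an optional u32 property, returning a
 * default value when the property is missing.
 */
static u32 fpga_read_u32_or(struct vmm_devtree_node *node,
			    const char *name, u32 defval)
{
	u32 val;

	if (vmm_devtree_read_u32(node, name, &val)) {
		return defval;	/* property absent: use the default */
	}

	return val;
}

With it, the reads above reduce to, e.g., irq_start = fpga_read_u32_or(node, "irq_start", 0);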
Example #3
int __cpuinit arch_smp_ipi_init(void)
{
	int rc;

	if (vmm_smp_is_bootcpu()) {
		/* Register IPI1 interrupt handler */
		rc = vmm_host_irq_register(1, "IPI1", &smp_ipi_handler, NULL);
		if (rc) {
			return rc;
		}

		/* Mark IPI1 interrupt as per-cpu */
		rc = vmm_host_irq_mark_per_cpu(1);
		if (rc) {
			return rc;
		}
	}

	/* Explicitly enable IPI1 interrupt */
	gic_enable_ppi(1);

	return VMM_OK;
}
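Because vmm_host_irq_register() is global while gic_enable_ppi() only affects the calling CPU's interrupt interface, this routine is meant to be called on every CPU, with the registration guarded so it runs once. A hypothetical caller might look like the sketch below; the wrapper name is an assumption.

/* Hypothetical bring-up hook (name assumed): every CPU calls
 * arch_smp_ipi_init(), but only the boot CPU registers the shared IPI1
 * handler; each CPU still enables the per-CPU interrupt locally.
 */
static void __cpuinit cpu_ipi_bringup(void)
{
	int rc = arch_smp_ipi_init();

	if (rc) {
		vmm_printf("IPI init failed on CPU%u (error %d)\n",
			   vmm_smp_processor_id(), rc);
	}
}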
Example #4
static int __cpuinit twd_clockchip_init(struct vmm_devtree_node *node)
{
	int rc;
	u32 ref_cnt_freq;
	virtual_addr_t ref_cnt_addr;
	u32 cpu = vmm_smp_processor_id();
	struct twd_clockchip *cc = &this_cpu(twd_cc);

	if (!twd_base) {
		rc = vmm_devtree_regmap(node, &twd_base, 0);
		if (rc) {
			goto fail;
		}
	}

	if (!twd_ppi_irq) {
		rc = vmm_devtree_irq_get(node, &twd_ppi_irq, 0);
		if (rc) {
			goto fail_regunmap;
		}
	}

	if (!twd_freq_hz) {
		/* First try to find TWD clock */
		if (!twd_clk) {
			twd_clk = of_clk_get(node, 0);
		}
		if (!twd_clk) {
			twd_clk = clk_get_sys("smp_twd", NULL);
		}

		if (twd_clk) {
			/* Use TWD clock to find frequency */
			rc = clk_prepare_enable(twd_clk);
			if (rc) {
				clk_put(twd_clk);
				goto fail_regunmap;
			}
			twd_freq_hz = clk_get_rate(twd_clk);
		} else {
			/* No TWD clock found, hence calibrate using
			 * the reference counter
			 */
			rc = vmm_devtree_regmap(node, &ref_cnt_addr, 1);
			if (rc) {
				goto fail_regunmap;
			}
			if (vmm_devtree_read_u32(node, "ref-counter-freq",
						 &ref_cnt_freq)) {
				vmm_devtree_regunmap(node, ref_cnt_addr, 1);
				goto fail_regunmap;
			}
			twd_caliberate_freq(twd_base, 
					ref_cnt_addr, ref_cnt_freq);
			vmm_devtree_regunmap(node, ref_cnt_addr, 1);
		}
	}

	memset(cc, 0, sizeof(struct twd_clockchip));

	vmm_sprintf(cc->name, "twd/%d", cpu);

	cc->clkchip.name = cc->name;
	cc->clkchip.hirq = twd_ppi_irq;
	cc->clkchip.rating = 350;
	cc->clkchip.cpumask = vmm_cpumask_of(cpu);
	cc->clkchip.features = 
		VMM_CLOCKCHIP_FEAT_PERIODIC | VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&cc->clkchip.mult, &cc->clkchip.shift, 
				   VMM_NSEC_PER_SEC, twd_freq_hz, 10);
	cc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(0xF, &cc->clkchip);
	cc->clkchip.max_delta_ns = 
			vmm_clockchip_delta2ns(0xFFFFFFFF, &cc->clkchip);
	cc->clkchip.set_mode = &twd_clockchip_set_mode;
	cc->clkchip.set_next_event = &twd_clockchip_set_next_event;
	cc->clkchip.priv = cc;

	if (vmm_smp_is_bootcpu()) {
		/* Register interrupt handler */
		if ((rc = vmm_host_irq_register(twd_ppi_irq, "twd",
						&twd_clockchip_irq_handler,
						cc))) {
			goto fail_regunmap;
		}

		/* Mark interrupt as per-cpu */
		if ((rc = vmm_host_irq_mark_per_cpu(twd_ppi_irq))) {
			goto fail_unreg_irq;
		}
	}

	/* Explicitly enable the local timer PPI in the GIC.
	 * Note: the local timer uses a PPI, hence requires a GIC.
	 */
	gic_enable_ppi(twd_ppi_irq);

	rc = vmm_clockchip_register(&cc->clkchip);
	if (rc) {
		goto fail_unreg_irq;
	}

	return VMM_OK;

fail_unreg_irq:
	if (vmm_smp_is_bootcpu()) {
		vmm_host_irq_unregister(twd_ppi_irq, cc);
	}
fail_regunmap:
	vmm_devtree_regunmap(node, twd_base, 0);
fail:
	return rc;
}
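For reference, the mult/shift pair computed above with vmm_clocks_calc_mult_shift(..., VMM_NSEC_PER_SEC, twd_freq_hz, 10) converts nanoseconds to timer ticks as ticks = (ns * mult) >> shift, so converting a tick delta back to nanoseconds (what the min_delta_ns/max_delta_ns setup needs) is approximately ns = (delta << shift) / mult. The helper below is a hypothetical illustration of that arithmetic, not the actual vmm_clockchip_delta2ns() implementation.

/* Illustration only: convert a timer tick delta to nanoseconds using the
 * clockchip's mult/shift pair (which was sized for ns -> ticks).
 */
static u64 approx_delta2ns(u32 delta, u32 mult, u32 shift)
{
	u64 ns = (u64)delta << shift;

	return ns / mult;	/* plain 64-bit division, for clarity */
}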