Example #1
static int virtio_mmio_probe(struct vmm_guest *guest,
			     struct vmm_emudev *edev,
			     const struct vmm_devtree_nodeid *eid)
{
	int rc = VMM_OK;
	const char *attr;
	struct virtio_mmio_dev *m;

	m = vmm_zalloc(sizeof(struct virtio_mmio_dev));
	if (!m) {
		rc = VMM_EFAIL;
		goto virtio_mmio_probe_done;
	}

	m->guest = guest;

	vmm_snprintf(m->dev.name, VIRTIO_DEVICE_MAX_NAME_LEN, 
		     "%s/%s", guest->name, edev->node->name); 
	m->dev.edev = edev;
	m->dev.tra = &mmio_tra;
	m->dev.tra_data = m;
	m->dev.guest = guest;

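	/* Initial guest-visible virtio-mmio config space */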
	m->config = (struct virtio_mmio_config) {
		     .magic          = {'v', 'i', 'r', 't'},
		     .version        = 1,
		     .vendor_id      = 0x52535658, /* XVSR */
		     .queue_num_max  = 256,
	};

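	/* Read the virtio device type from the "virtio_type" DT attribute */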
	attr = vmm_devtree_attrval(edev->node, "virtio_type");
	if (attr) {
		m->config.device_id = *((u32 *)attr);
	} else {
		rc = VMM_EFAIL;
		goto virtio_mmio_probe_freestate_fail;
	}

	m->dev.id.type = m->config.device_id;

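	/* Read the guest IRQ for this device from the device tree */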
	rc = vmm_devtree_irq_get(edev->node, &m->irq, 0);
	if (rc) {
		goto virtio_mmio_probe_freestate_fail;
	}

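	/* Register the device with the virtio framework */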
	if ((rc = virtio_register_device(&m->dev))) {
		goto virtio_mmio_probe_freestate_fail;
	}

	edev->priv = m;

	goto virtio_mmio_probe_done;

virtio_mmio_probe_freestate_fail:
	vmm_free(m);
virtio_mmio_probe_done:
	return rc;
}
Example #2
int __cpuinit arch_clockchip_init(void)
{
	int rc;
	struct vmm_devtree_node *node;
	u32 val, cpu = vmm_smp_processor_id();

	if (!cpu) {
		/* Map timer0 registers */
		node = vmm_devtree_getnode(VMM_DEVTREE_PATH_SEPARATOR_STRING
					   "mct");
		if (!node) {
			goto skip_mct_timer_init;
		}

		if (!mct_timer_base) {
			rc = vmm_devtree_regmap(node, &mct_timer_base, 0);
			if (rc) {
				return rc;
			}
		}

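		/* Determine MCT clock frequency */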
		rc = vmm_devtree_clock_frequency(node, &mct_clk_rate);
		if (rc) {
			return rc;
		}

		/* Get MCT irq */
		rc = vmm_devtree_irq_get(node, &val, 0);
		if (rc) {
			return rc;
		}

		/* Initialize MCT as clockchip */
		rc = exynos4_clockchip_init(mct_timer_base, val, node->name,
					    300, mct_clk_rate, 0);
		if (rc) {
			return rc;
		}

	}
 skip_mct_timer_init:

#if CONFIG_SAMSUNG_MCT_LOCAL_TIMERS
	if (mct_timer_base) {
		exynos4_local_timer_init(mct_timer_base, 0, "mct_tick", 450,
					 mct_clk_rate);
	}
#endif

	return VMM_OK;
}
Example #3
static int __init fpga_init(struct vmm_devtree_node *node)
{
    int rc;
    virtual_addr_t base;
    u32 clear_mask;
    u32 valid_mask;
    u32 picen_mask;
    u32 irq_start;
    u32 parent_irq;

    BUG_ON(!vmm_smp_is_bootcpu());

    rc = vmm_devtree_request_regmap(node, &base, 0, "Versatile SIC");
    WARN(rc, "unable to map fpga irq registers\n");

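    /* Optional DT properties; default to zero when absent */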
    if (vmm_devtree_read_u32(node, "irq_start", &irq_start)) {
        irq_start = 0;
    }

    if (vmm_devtree_read_u32(node, "clear-mask", &clear_mask)) {
        clear_mask = 0;
    }

    if (vmm_devtree_read_u32(node, "valid-mask", &valid_mask)) {
        valid_mask = 0;
    }

    /* Some chips are cascaded from a parent IRQ */
    if (vmm_devtree_irq_get(node, &parent_irq, 0)) {
        parent_irq = 0xFFFFFFFF;
    }

    fpga_irq_init((void *)base, "FPGA",
                  irq_start, parent_irq,
                  valid_mask, node);

    vmm_writel(clear_mask, (void *)base + IRQ_ENABLE_CLEAR);
    vmm_writel(clear_mask, (void *)base + FIQ_ENABLE_CLEAR);

    /* For VersatilePB, we have interrupts from 21 to 31 capable
     * of being routed directly to the parent interrupt controller
     * (i.e. VIC). This is controlled by setting PIC_ENABLEx.
     */
    if (!vmm_devtree_read_u32(node, "picen-mask", &picen_mask)) {
        vmm_writel(picen_mask, (void *)base + PICEN_SET);
    }

    return 0;
}
Example #4
static int __cpuinit twd_clockchip_init(struct vmm_devtree_node *node)
{
	int rc;
	u32 ref_cnt_freq;
	virtual_addr_t ref_cnt_addr;
	u32 cpu = vmm_smp_processor_id();
	struct twd_clockchip *cc = &this_cpu(twd_cc);

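	/* Map TWD registers (only if not already mapped) */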
	if (!twd_base) {
		rc = vmm_devtree_regmap(node, &twd_base, 0);
		if (rc) {
			goto fail;
		}
	}

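	/* Read the TWD PPI from the device tree (only if not already known) */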
	if (!twd_ppi_irq) {
		rc = vmm_devtree_irq_get(node, &twd_ppi_irq, 0);
		if (rc) {
			goto fail_regunmap;
		}
	}

	if (!twd_freq_hz) {
		/* First try to find TWD clock */
		if (!twd_clk) {
			twd_clk = of_clk_get(node, 0);
		}
		if (!twd_clk) {
			twd_clk = clk_get_sys("smp_twd", NULL);
		}

		if (twd_clk) {
			/* Use TWD clock to find frequency */
			rc = clk_prepare_enable(twd_clk);
			if (rc) {
				clk_put(twd_clk);
				goto fail_regunmap;
			}
			twd_freq_hz = clk_get_rate(twd_clk);
		} else {
			/* No TWD clock found, hence calibrate */
			rc = vmm_devtree_regmap(node, &ref_cnt_addr, 1);
			if (rc) {
				vmm_devtree_regunmap(node, ref_cnt_addr, 1);
				goto fail_regunmap;
			}
			if (vmm_devtree_read_u32(node, "ref-counter-freq",
						 &ref_cnt_freq)) {
				vmm_devtree_regunmap(node, ref_cnt_addr, 1);
				goto fail_regunmap;
			}
			twd_caliberate_freq(twd_base, 
					ref_cnt_addr, ref_cnt_freq);
			vmm_devtree_regunmap(node, ref_cnt_addr, 1);
		}
	}

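	/* Set up this CPU's local timer clockchip */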
	memset(cc, 0, sizeof(struct twd_clockchip));

	vmm_sprintf(cc->name, "twd/%d", cpu);

	cc->clkchip.name = cc->name;
	cc->clkchip.hirq = twd_ppi_irq;
	cc->clkchip.rating = 350;
	cc->clkchip.cpumask = vmm_cpumask_of(cpu);
	cc->clkchip.features = 
		VMM_CLOCKCHIP_FEAT_PERIODIC | VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&cc->clkchip.mult, &cc->clkchip.shift, 
				   VMM_NSEC_PER_SEC, twd_freq_hz, 10);
	cc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(0xF, &cc->clkchip);
	cc->clkchip.max_delta_ns = 
			vmm_clockchip_delta2ns(0xFFFFFFFF, &cc->clkchip);
	cc->clkchip.set_mode = &twd_clockchip_set_mode;
	cc->clkchip.set_next_event = &twd_clockchip_set_next_event;
	cc->clkchip.priv = cc;

	if (vmm_smp_is_bootcpu()) {
		/* Register interrupt handler */
		if ((rc = vmm_host_irq_register(twd_ppi_irq, "twd",
						&twd_clockchip_irq_handler, 
						cc))) {
			
			goto fail_regunmap;
		}

		/* Mark interrupt as per-cpu */
		if ((rc = vmm_host_irq_mark_per_cpu(twd_ppi_irq))) {
			goto fail_unreg_irq;
		}
	}

	/* Explicitly enable the local timer PPI in the GIC.
	 * Note: the local timer uses a PPI, hence a GIC is required.
	 */
	gic_enable_ppi(twd_ppi_irq);

	rc = vmm_clockchip_register(&cc->clkchip);
	if (rc) {
		goto fail_unreg_irq;
	}

	return VMM_OK;

fail_unreg_irq:
	if (vmm_smp_is_bootcpu()) {
		vmm_host_irq_unregister(twd_ppi_irq, cc);
	}
fail_regunmap:
	vmm_devtree_regunmap(node, twd_base, 0);
fail:
	return rc;
}
Example #5
static int mmci_driver_probe(struct vmm_device *dev,
				   const struct vmm_devtree_nodeid *devid)
{
	int rc;
	u32 sdi;
	virtual_addr_t base;
	physical_addr_t basepa;
	struct mmc_host *mmc;
	struct mmci_host *host;

	mmc = mmc_alloc_host(sizeof(struct mmci_host), dev);
	if (!mmc) {
		rc = VMM_ENOMEM;
		goto free_nothing;
	}
	host = mmc_priv(mmc);

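	/* Map MMCI registers */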
	rc = vmm_devtree_regmap(dev->node, &base, 0);
	if (rc) {
		goto free_host;
	}
	host->base = (struct sdi_registers *)base;

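	/* Get the command IRQ and register its handler */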
	rc = vmm_devtree_irq_get(dev->node, &host->irq0, 0);
	if (rc) {
		goto free_reg;
	}
	if ((rc = vmm_host_irq_register(host->irq0, dev->name, 
					mmci_cmd_irq_handler, mmc))) {
		goto free_reg;
	}

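	/* The PIO IRQ is optional; without it operate in single-IRQ mode */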
	rc = vmm_devtree_irq_get(dev->node, &host->irq1, 1);
	if (!rc) {
		if ((rc = vmm_host_irq_register(host->irq1, dev->name, 
						mmci_pio_irq_handler, mmc))) {
			goto free_irq0;
		}
		host->singleirq = 0;
	} else {
		host->singleirq = 1;
	}

	/* Retrieve match data */
	host->pwr_init = ((const u32 *)devid->data)[0];
	host->clkdiv_init = ((const u32 *)devid->data)[1];
	host->voltages = ((const u32 *)devid->data)[2];
	host->caps = ((const u32 *)devid->data)[3];
	host->clock_in = ((const u32 *)devid->data)[4];
	host->clock_min = ((const u32 *)devid->data)[5];
	host->clock_max = ((const u32 *)devid->data)[6];
	host->b_max = ((const u32 *)devid->data)[7];
	host->version2 = ((const u32 *)devid->data)[8];

	/* Initialize power and clock divider */
	vmm_writel(host->pwr_init, &host->base->power);
	vmm_writel(host->clkdiv_init, &host->base->clock);
	vmm_udelay(CLK_CHANGE_DELAY);

	/* Disable interrupts */
	sdi = vmm_readl(&host->base->mask0) & ~SDI_MASK0_MASK;
	vmm_writel(sdi, &host->base->mask0);

	/* Setup mmc host configuration */
	mmc->caps = host->caps;
	mmc->voltages = host->voltages;
	mmc->f_min = host->clock_min;
	mmc->f_max = host->clock_max;
	mmc->b_max = host->b_max;

	/* Setup mmc host operations */
	mmc->ops.send_cmd = mmci_request;
	mmc->ops.set_ios = mmci_set_ios;
	mmc->ops.init_card = mmci_init_card;
	mmc->ops.get_cd = NULL;
	mmc->ops.get_wp = NULL;

	rc = mmc_add_host(mmc);
	if (rc) {
		goto free_irq1;
	}

	dev->priv = mmc;

	vmm_devtree_regaddr(dev->node, &basepa, 0);
	vmm_printf("%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n",
		   dev->name, amba_part(dev), amba_manf(dev),
		   amba_rev(dev), (unsigned long long)basepa,
		   host->irq0, host->irq1);

	return VMM_OK;

free_irq1:
	if (!host->singleirq) {
		vmm_host_irq_unregister(host->irq1, mmc);
	}
free_irq0:
	vmm_host_irq_unregister(host->irq0, mmc);
free_reg:
	vmm_devtree_regunmap(dev->node, (virtual_addr_t)host->base, 0);
free_host:
	mmc_free_host(mmc);
free_nothing:
	return rc;
}
Example #6
static int imx_driver_probe(struct vmm_device *dev,
			    const struct vmm_devtree_nodeid *devid)
{
	int rc = VMM_EFAIL;
	struct imx_port *port = NULL;

	port = vmm_zalloc(sizeof(struct imx_port));
	if (!port) {
		rc = VMM_ENOMEM;
		goto free_nothing;
	}

	if (strlcpy(port->cd.name, dev->name, sizeof(port->cd.name)) >=
	    sizeof(port->cd.name)) {
		rc = VMM_EOVERFLOW;
		goto free_port;
	}

	port->cd.dev.parent = dev;
	port->cd.ioctl = NULL;
	port->cd.read = imx_read;
	port->cd.write = imx_write;
	port->cd.priv = port;

	INIT_COMPLETION(&port->read_possible);
#if defined(UART_IMX_USE_TXINTR)
	INIT_COMPLETION(&port->write_possible);
#endif

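	/* Map UART registers */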
	rc = vmm_devtree_regmap(dev->node, &port->base, 0);
	if (rc) {
		goto free_port;
	}

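	/* Enable the UART and its receive-ready interrupt (TX-ready optional) */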
	port->mask = UCR1_RRDYEN | UCR1_UARTEN;

#if defined(UART_IMX_USE_TXINTR)
	port->mask |= UCR1_TRDYEN;
#endif

	vmm_writel(port->mask, (void *)port->base + UCR1);

	if (vmm_devtree_read_u32(dev->node, "baudrate", &port->baudrate)) {
		port->baudrate = 115200;
	}

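	/* Determine the UART input clock from the device tree */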
	rc = vmm_devtree_clock_frequency(dev->node, &port->input_clock);
	if (rc) {
		goto free_reg;
	}

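	/* Get the UART IRQ and register its handler */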
	rc = vmm_devtree_irq_get(dev->node, &port->irq, 0);
	if (rc) {
		goto free_reg;
	}

	if ((rc = vmm_host_irq_register(port->irq, dev->name,
					imx_irq_handler, port))) {
		goto free_reg;
	}

	/* Call low-level init function */
	imx_lowlevel_init(port->base, port->baudrate, port->input_clock);

	port->mask = vmm_readl((void *)port->base + UCR1);

	rc = vmm_chardev_register(&port->cd);
	if (rc) {
		goto free_irq;
	}

	dev->priv = port;

	return rc;

 free_irq:
	vmm_host_irq_unregister(port->irq, port);
 free_reg:
	vmm_devtree_regunmap(dev->node, port->base, 0);
 free_port:
	vmm_free(port);
 free_nothing:
	return rc;
}
Example #7
static int __cpuinit bcm2835_clockchip_init(struct vmm_devtree_node *node)
{
	int rc;
	u32 clock, hirq;
	struct bcm2835_clockchip *bcc;

	/* Read clock frequency */
	rc = vmm_devtree_clock_frequency(node, &clock);
	if (rc) {
		return rc;
	}

	/* Read irq attribute */
	rc = vmm_devtree_irq_get(node, &hirq, DEFAULT_TIMER);
	if (rc) {
		return rc;
	}

	bcc = vmm_zalloc(sizeof(struct bcm2835_clockchip));
	if (!bcc) {
		return VMM_ENOMEM;
	}

	/* Map timer registers */
	rc = vmm_devtree_regmap(node, &bcc->base, 0);
	if (rc) {
		vmm_free(bcc);
		return rc;
	}
	bcc->system_clock = (void *)(bcc->base + REG_COUNTER_LO);
	bcc->control = (void *)(bcc->base + REG_CONTROL);
	bcc->compare = (void *)(bcc->base + REG_COMPARE(DEFAULT_TIMER));
	bcc->match_mask = 1 << DEFAULT_TIMER;

	/* Setup clockchip */
	bcc->clkchip.name = "bcm2835-clkchip";
	bcc->clkchip.hirq = hirq;
	bcc->clkchip.rating = 300;
	bcc->clkchip.cpumask = vmm_cpumask_of(0);
	bcc->clkchip.features = VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&bcc->clkchip.mult, 
				   &bcc->clkchip.shift,
				   VMM_NSEC_PER_SEC, clock, 10);
	bcc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(MIN_REG_COMPARE, 
							   &bcc->clkchip);
	bcc->clkchip.max_delta_ns = vmm_clockchip_delta2ns(MAX_REG_COMPARE, 
							   &bcc->clkchip);
	bcc->clkchip.set_mode = &bcm2835_clockchip_set_mode;
	bcc->clkchip.set_next_event = &bcm2835_clockchip_set_next_event;
	bcc->clkchip.priv = bcc;

	/* Make sure compare register is set to zero */
	vmm_writel(0x0, bcc->compare);

	/* Make sure pending timer interrupts acknowledged */
	if (vmm_readl(bcc->control) & bcc->match_mask) {
		vmm_writel(bcc->match_mask, bcc->control);
	}

	/* Register interrupt handler */
	rc = vmm_host_irq_register(hirq, "bcm2835_timer",
				   &bcm2835_clockchip_irq_handler, bcc);
	if (rc) {
		vmm_devtree_regunmap(node, bcc->base, 0);
		vmm_free(bcc);
		return rc;
	}

	/* Register clockchip */
	rc = vmm_clockchip_register(&bcc->clkchip);
	if (rc) {
		vmm_host_irq_unregister(hirq, bcc);
		vmm_devtree_regunmap(node, bcc->base, 0);
		vmm_free(bcc);
		return rc;
	}

	return VMM_OK;
}
Example #8
int __cpuinit generic_timer_clockchip_init(void)
{
	int rc;
	u32 irq[3], num_irqs, val;
	struct vmm_clockchip *cc;
	struct vmm_devtree_node *node;

	/* Find generic timer device tree node */
	node = vmm_devtree_find_matching(NULL, generic_timer_match);
	if (!node) {
		return VMM_ENODEV;
	}

	/* Determine generic timer frequency */
	if (generic_timer_hz == 0) {
		rc =  vmm_devtree_clock_frequency(node, &generic_timer_hz);
		if (rc) {
			/* Use the preconfigured counter frequency
			 * when the DT node has no clock-frequency
			 * attribute
			 */
			generic_timer_hz = 
				generic_timer_reg_read(GENERIC_TIMER_REG_FREQ);
		} else if (generic_timer_freq_writeable()) {
			/* Program the counter frequency as per the dts node */
			generic_timer_reg_write(GENERIC_TIMER_REG_FREQ, 
							generic_timer_hz);
		}
	}
	if (generic_timer_hz == 0) {
		return VMM_EFAIL;
	}

	/* Get hypervisor timer irq number */
	rc = vmm_devtree_irq_get(node, 
				 &irq[GENERIC_HYPERVISOR_TIMER], 
				 GENERIC_HYPERVISOR_TIMER);
	if (rc) {
		return rc;
	}

	/* Get physical timer irq number */
	rc = vmm_devtree_irq_get(node, 
				 &irq[GENERIC_PHYSICAL_TIMER], 
				 GENERIC_PHYSICAL_TIMER);
	if (rc) {
		return rc;
	}

	/* Get virtual timer irq number */
	rc = vmm_devtree_irq_get(node, 
				 &irq[GENERIC_VIRTUAL_TIMER], 
				 GENERIC_VIRTUAL_TIMER);
	if (rc) {
		return rc;
	}

	/* Number of generic timer irqs */
	num_irqs = vmm_devtree_irq_count(node);
	if (!num_irqs) {
		return VMM_EFAIL;
	}

	/* Ensure hypervisor timer is stopped */
	generic_timer_stop();

	/* Create generic hypervisor timer clockchip */
	cc = vmm_zalloc(sizeof(struct vmm_clockchip));
	if (!cc) {
		return VMM_EFAIL;
	}
	cc->name = "gen-hyp-timer";
	cc->hirq = irq[GENERIC_HYPERVISOR_TIMER];
	cc->rating = 400;
	cc->cpumask = vmm_cpumask_of(vmm_smp_processor_id());
	cc->features = VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&cc->mult, &cc->shift, 
				   VMM_NSEC_PER_SEC, generic_timer_hz, 10);
	cc->min_delta_ns = vmm_clockchip_delta2ns(0xF, cc);
	cc->max_delta_ns = vmm_clockchip_delta2ns(0x7FFFFFFF, cc);
	cc->set_mode = &generic_timer_set_mode;
	cc->set_next_event = &generic_timer_set_next_event;
	cc->priv = NULL;

	/* Register hypervisor timer clockchip */
	rc = vmm_clockchip_register(cc);
	if (rc) {
		goto fail_free_cc;
	}

	if (!vmm_smp_processor_id()) {
		/* Register irq handler for hypervisor timer */
		rc = vmm_host_irq_register(irq[GENERIC_HYPERVISOR_TIMER],
					   "gen-hyp-timer", 
					   &generic_hyp_timer_handler, cc);
		if (rc) {
			goto fail_unreg_cc;
		}

		/* Mark hypervisor timer irq as per-CPU */
		if ((rc = vmm_host_irq_mark_per_cpu(cc->hirq))) {
			goto fail_unreg_htimer;
		}

		if (num_irqs > 1) {
			/* Register irq handler for physical timer */
			rc = vmm_host_irq_register(irq[GENERIC_PHYSICAL_TIMER],
						   "gen-phys-timer",
						   &generic_phys_timer_handler,
						   NULL);
			if (rc) {
				goto fail_unreg_htimer;
			}

			/* Mark physical timer irq as per-CPU */
			rc = vmm_host_irq_mark_per_cpu(
						irq[GENERIC_PHYSICAL_TIMER]);
			if (rc)	{
				goto fail_unreg_ptimer;
			}
		}

		if (num_irqs > 2) {
			/* Register irq handler for virtual timer */
			rc = vmm_host_irq_register(irq[GENERIC_VIRTUAL_TIMER],
						   "gen-virt-timer",
						   &generic_virt_timer_handler,
						   NULL);
			if (rc) {
				goto fail_unreg_ptimer;
			}

			/* Mark virtual timer irq as per-CPU */
			rc = vmm_host_irq_mark_per_cpu(
						irq[GENERIC_VIRTUAL_TIMER]);
			if (rc) {
				goto fail_unreg_vtimer;
			}
		}
	}

	if (num_irqs > 1) {
		val = generic_timer_reg_read(GENERIC_TIMER_REG_HCTL);
		val |= GENERIC_TIMER_HCTL_KERN_PCNT_EN;
		val |= GENERIC_TIMER_HCTL_KERN_PTMR_EN;
		generic_timer_reg_write(GENERIC_TIMER_REG_HCTL, val);
	}

	for (val = 0; val < num_irqs; val++) {
		gic_enable_ppi(irq[val]);
	}

	return VMM_OK;

fail_unreg_vtimer:
	if (!vmm_smp_processor_id() && num_irqs > 2) {
		vmm_host_irq_unregister(irq[GENERIC_VIRTUAL_TIMER],
					&generic_virt_timer_handler);
	}
fail_unreg_ptimer:
	if (!vmm_smp_processor_id() && num_irqs > 1) {
		vmm_host_irq_unregister(irq[GENERIC_PHYSICAL_TIMER],
					&generic_phys_timer_handler);
	}
fail_unreg_htimer:
	if (!vmm_smp_processor_id()) {
		vmm_host_irq_unregister(irq[GENERIC_HYPERVISOR_TIMER],
					&generic_hyp_timer_handler);
	}
fail_unreg_cc:
	vmm_clockchip_unregister(cc);
fail_free_cc:
	vmm_free(cc);
	return rc;
}
Example #9
int __cpuinit epit_clockchip_init(void)
{
	int rc = VMM_ENODEV;
	u32 clock, hirq, timer_num, *val;
	struct vmm_devtree_node *node;
	struct epit_clockchip *ecc;

	/* find the first epit compatible node */
	node = vmm_devtree_find_compatible(NULL, NULL, "freescale,epit-timer");
	if (!node) {
		goto fail;
	}

	/* Read clock frequency */
	rc = vmm_devtree_clock_frequency(node, &clock);
	if (rc) {
		goto fail;
	}

	/* Read timer_num attribute */
	val = vmm_devtree_attrval(node, "timer_num");
	if (!val) {
		rc = VMM_ENOTAVAIL;
		goto fail;
	}
	timer_num = *val;

	/* Read irq attribute */
	rc = vmm_devtree_irq_get(node, &hirq, 0);
	if (rc) {
		goto fail;
	}

	/* allocate our struct */
	ecc = vmm_zalloc(sizeof(struct epit_clockchip));
	if (!ecc) {
		rc = VMM_ENOMEM;
		goto fail;
	}

	/* Map timer registers */
	rc = vmm_devtree_regmap(node, &ecc->base, 0);
	if (rc) {
		goto regmap_fail;
	}

	ecc->match_mask = 1 << timer_num;
	ecc->timer_num = timer_num;

	/* Setup clockchip */
	ecc->clkchip.name = node->name;
	ecc->clkchip.hirq = hirq;
	ecc->clkchip.rating = 300;
	ecc->clkchip.cpumask = vmm_cpumask_of(0);
	ecc->clkchip.features = VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&ecc->clkchip.mult,
				   &ecc->clkchip.shift,
				   VMM_NSEC_PER_SEC, clock, 10);
	ecc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(MIN_REG_COMPARE,
							   &ecc->clkchip);
	ecc->clkchip.max_delta_ns = vmm_clockchip_delta2ns(MAX_REG_COMPARE,
							   &ecc->clkchip);
	ecc->clkchip.set_mode = epit_set_mode;
	ecc->clkchip.set_next_event = epit_set_next_event;
	ecc->clkchip.priv = ecc;

	/*
	 * Initialise to a known state (all timers off, and timing reset)
	 */
	vmm_writel(0x0, (void *)(ecc->base + EPITCR));
	/*
	 * Initialize the load register to the max value to decrement.
	 */
	vmm_writel(0xffffffff, (void *)(ecc->base + EPITLR));
	/*
	 * enable the timer, set it to the high reference clock,
	 * allow the timer to work in WAIT mode.
	 */
	vmm_writel(EPITCR_EN | EPITCR_CLKSRC_REF_HIGH | EPITCR_WAITEN,
		   (void *)(ecc->base + EPITCR));

	/* Register interrupt handler */
	rc = vmm_host_irq_register(hirq, ecc->clkchip.name,
				   &epit_timer_interrupt, ecc);
	if (rc) {
		goto irq_fail;
	}

	/* Register clockchip */
	rc = vmm_clockchip_register(&ecc->clkchip);
	if (rc) {
		goto register_fail;
	}

	return VMM_OK;

 register_fail:
	vmm_host_irq_unregister(hirq, ecc);
 irq_fail:
	vmm_devtree_regunmap(node, ecc->base, 0);
 regmap_fail:
	vmm_free(ecc);
 fail:
	return rc;
}
Example #10
int __cpuinit twd_clockchip_init(virtual_addr_t ref_counter_addr,
                                 u32 ref_counter_freq)
{
    int rc;
    u32 cpu = vmm_smp_processor_id();
    struct vmm_devtree_node *node;
    struct twd_clockchip *cc = &this_cpu(twd_cc);

    node = vmm_devtree_find_matching(NULL, twd_match);
    if (!node) {
        return VMM_ENODEV;
    }

    if (!twd_base) {
        rc = vmm_devtree_regmap(node, &twd_base, 0);
        if (rc) {
            return rc;
        }
    }

    if (!twd_ppi_irq) {
        rc = vmm_devtree_irq_get(node, &twd_ppi_irq, 0);
        if (rc) {
            return rc;
        }
    }

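    /* Calibrate the TWD frequency using the reference counter */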
    twd_caliberate_freq(twd_base, ref_counter_addr, ref_counter_freq);

    memset(cc, 0, sizeof(struct twd_clockchip));

    vmm_sprintf(cc->name, "twd/%d", cpu);

    cc->clkchip.name = cc->name;
    cc->clkchip.hirq = twd_ppi_irq;
    cc->clkchip.rating = 350;
    cc->clkchip.cpumask = vmm_cpumask_of(cpu);
    cc->clkchip.features =
        VMM_CLOCKCHIP_FEAT_PERIODIC | VMM_CLOCKCHIP_FEAT_ONESHOT;
    vmm_clocks_calc_mult_shift(&cc->clkchip.mult, &cc->clkchip.shift,
                               VMM_NSEC_PER_SEC, twd_freq_hz, 10);
    cc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(0xF, &cc->clkchip);
    cc->clkchip.max_delta_ns =
        vmm_clockchip_delta2ns(0xFFFFFFFF, &cc->clkchip);
    cc->clkchip.set_mode = &twd_clockchip_set_mode;
    cc->clkchip.set_next_event = &twd_clockchip_set_next_event;
    cc->clkchip.priv = cc;

    if (!cpu) {
        /* Register interrupt handler */
        if ((rc = vmm_host_irq_register(twd_ppi_irq, "twd",
                                        &twd_clockchip_irq_handler,
                                        cc))) {
            return rc;
        }

        /* Mark interrupt as per-cpu */
        if ((rc = vmm_host_irq_mark_per_cpu(twd_ppi_irq))) {
            return rc;
        }
    }

    /* Explicitly enable the local timer PPI in the GIC.
     * Note: the local timer uses a PPI, hence a GIC is required.
     */
    gic_enable_ppi(twd_ppi_irq);

    return vmm_clockchip_register(&cc->clkchip);
}
Example #11
static int s3c_rtc_driver_probe(struct vmm_device *pdev,
				const struct vmm_devtree_nodeid *devid)
{
	u32 alarmno, tickno;
	struct rtc_time rtc_tm;
	int ret, tmp, rc;

	/* find the IRQs */

	rc = vmm_devtree_irq_get(pdev->node, &alarmno, 0);
	if (rc) {
		return VMM_EFAIL;
	}
	s3c_rtc_alarmno = alarmno;
	rc = vmm_devtree_irq_get(pdev->node, &tickno, 1);
	if (rc) {
		return VMM_EFAIL;
	}
	s3c_rtc_tickno = tickno;

	/* get the memory region */

	rc = vmm_devtree_regmap(pdev->node, (virtual_addr_t *)&s3c_rtc_base,
				0);
	if (rc) {
		dev_err(pdev, "failed ioremap()\n");
		ret = rc;
		goto err_nomap;
	}

	rtc_clk = clk_get(pdev, "rtc");
	if (rtc_clk == NULL) {
		dev_err(pdev, "failed to find rtc clock source\n");
		ret = -ENODEV;
		goto err_clk;
	}

	clk_enable(rtc_clk);

	/* check to see if everything is setup correctly */

	s3c_rtc_enable(pdev, 1);

	//device_init_wakeup(pdev, 1);

	/* register RTC and exit */

	s3c_rtcops.dev = pdev;

	rc = vmm_rtcdev_register(&s3c_rtcops);

	if (rc) {
		dev_err(pdev, "cannot attach rtc\n");
		ret = rc;
		goto err_nortc;
	}

	s3c_rtc_cpu_type = (enum s3c_cpu_type)devid->data;

	/* Check RTC Time */

	s3c_rtc_gettime(NULL, &rtc_tm);

	if (!rtc_valid_tm(&rtc_tm)) {
		dev_warn(pdev,
			 "warning: invalid RTC value so initializing it\n");

		rtc_tm.tm_year = 100;
		rtc_tm.tm_mon = 0;
		rtc_tm.tm_mday = 1;
		rtc_tm.tm_hour = 0;
		rtc_tm.tm_min = 0;
		rtc_tm.tm_sec = 0;

		s3c_rtc_settime(NULL, &rtc_tm);
	}

	if (s3c_rtc_cpu_type != TYPE_S3C2410)
		max_user_freq = 32768;
	else
		max_user_freq = 128;

	if (s3c_rtc_cpu_type == TYPE_S3C2416
	    || s3c_rtc_cpu_type == TYPE_S3C2443) {
		tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
		tmp |= S3C2443_RTCCON_TICSEL;
		writew(tmp, s3c_rtc_base + S3C2410_RTCCON);
	}

	pdev->priv = &s3c_rtcops;

	s3c_rtc_setfreq(&s3c_rtcops, 1);

	if ((rc =
	     vmm_host_irq_register(s3c_rtc_alarmno, "s3c_rtc_alarm",
				   s3c_rtc_alarmirq, &s3c_rtcops))) {
		dev_err(pdev, "IRQ%d error %d\n", s3c_rtc_alarmno, rc);
		ret = rc;
		goto err_alarm_irq;
	}

	if ((rc =
	     vmm_host_irq_register(s3c_rtc_tickno, "s3c_rtc_tick",
				   s3c_rtc_tickirq, &s3c_rtcops))) {
		dev_err(pdev, "IRQ%d error %d\n", s3c_rtc_tickno, rc);
		ret = rc;
		goto err_tick_irq;
	}

	clk_disable(rtc_clk);

	return 0;

 err_tick_irq:
	vmm_host_irq_unregister(s3c_rtc_alarmno, &s3c_rtcops);

 err_alarm_irq:
	pdev->priv = NULL;
	vmm_rtcdev_unregister(&s3c_rtcops);

 err_nortc:
	s3c_rtc_enable(pdev, 0);
	clk_disable(rtc_clk);
	clk_put(rtc_clk);

 err_clk:
	vmm_devtree_regunmap(pdev->node, (virtual_addr_t) s3c_rtc_base, 0);

 err_nomap:
	return ret;
}
Example #12
/* Search for the EMAC board, allocate space for it, and register it
 */
static int emac_probe(struct vmm_device *pdev,
			const struct vmm_devtree_nodeid *devid)
{
	struct device_node *np = pdev->node;
	struct emac_board_info *db;
	struct net_device *ndev;
	int ret = 0;
	const char *mac_addr;
	virtual_addr_t  reg_addr;

	ndev = alloc_etherdev(sizeof(struct emac_board_info));
	if (!ndev) {
		dev_err(pdev, "%s: could not allocate device.\n", __func__);
		return -ENOMEM;
	}

	strlcpy(ndev->name, pdev->name, sizeof(ndev->name));

	SET_NETDEV_DEV(ndev, pdev);

	db = netdev_priv(ndev);
	memset(db, 0, sizeof(*db));

	db->ndev = ndev;
	db->pdev = pdev;

	spin_lock_init(&db->lock);

	if ((ret = vmm_devtree_request_regmap(np, &reg_addr, 0,
					      "Sun4i EMAC"))) {
		vmm_printf("%s: Failed to ioremap\n", __func__);
		ret = -ENOMEM;
		goto out;
	}

	db->membase = (void *) reg_addr;

	/* fill in parameters for net-dev structure */
	ndev->base_addr = (unsigned long)db->membase;

	ret = vmm_devtree_irq_get(np, &ndev->irq, 0);
	if (ret) {
		vmm_printf("%s: No irq resource\n", __func__);
		goto out;
	}

	db->clk = clk_get(pdev, NULL);
	if (IS_ERR(db->clk)) {
		ret = PTR_ERR(db->clk);
		goto out;
	}

	clk_prepare_enable(db->clk);

	db->phy_node = vmm_devtree_parse_phandle(np, "phy", 0);
	if (!db->phy_node) {
		dev_err(pdev, "%s: no associated PHY\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	/* Read MAC-address from DT */
	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);

	/* Check if the MAC address is valid, if not get a random one */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		eth_hw_addr_random(ndev);
		dev_info(pdev, "using random MAC address: ");
		print_mac_address_fmt(ndev->dev_addr);
	}

	db->emacrx_completed_flag = 1;
	emac_powerup(ndev);
	emac_reset(db);

	ether_setup(ndev);

	ndev->netdev_ops = &emac_netdev_ops;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
	ndev->ethtool_ops = &emac_ethtool_ops;

	platform_set_drvdata(pdev, ndev);

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(pdev, "%s: Registering netdev failed!\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	dev_info(pdev, "%s: at %p, IRQ %d MAC: ",
		 ndev->name, db->membase, ndev->irq);
	print_mac_address_fmt(ndev->dev_addr);

	return 0;

out:
	dev_err(pdev, "%s: not found (%d).\n", __func__, ret);
	free_netdev(ndev);

	return ret;
}
Example #13
int vmm_devtree_irq_parse_one(struct vmm_devtree_node *device, int index,
			      struct vmm_devtree_phandle_args *out_irq)
{
	struct vmm_devtree_node *p = NULL;
	struct vmm_devtree_attr *attr = NULL;
	u32 *intspec = NULL;
	u32 intsize = 0;
	u32 intlen = 0;
	int res = VMM_EINVALID;
	int i;

	if (!device || (index < 0) || !out_irq) {
		return VMM_EINVALID;
	}

	pr_debug("%s: dev=%s, index=%d\n", __func__, device->name, index);

	attr = vmm_devtree_getattr(device, "interrupts");
	if (NULL == attr) {
		return VMM_EINVALID;
	}

	intlen = attr->len / sizeof(u32);
	intspec = attr->value;
	pr_debug(" intspec=%d intlen=%d\n", vmm_be32_to_cpu(*intspec), intlen);

	/* Look for the interrupt parent. */
	p = vmm_devtree_irq_find_parent(device);
	if (NULL == p) {
		/* If no interrupt-parent is found then fall back to
		 * the original vmm_devtree_irq_get() API
		 */
		res = vmm_devtree_irq_get(device, &intsize, index);
		if (res != VMM_OK) {
			return res;
		}
		out_irq->np = NULL;
		out_irq->args_count = 1;
		out_irq->args[0] = intsize;
		return VMM_OK;
	}

	/* Get size of interrupt specifier */
	res = vmm_devtree_read_u32(p, "#interrupt-cells", &intsize);
	if (VMM_OK != res) {
		vmm_devtree_dref_node(p);
		return res;
	}

	pr_debug(" intsize=%d intlen=%d\n", intsize, intlen);

	/* Check index */
	if ((index + 1) * intsize > intlen) {
		vmm_devtree_dref_node(p);
		return VMM_EINVALID;
	}

	/* Copy intspec into irq structure */
	intspec += index * intsize;
	out_irq->np = p;
	out_irq->args_count = intsize;
	for (i = 0; i < intsize && i < VMM_MAX_PHANDLE_ARGS; i++) {
		out_irq->args[i] = vmm_be32_to_cpu(*intspec++);
	}

	return VMM_OK;
}
Example #14
static int omap_uart_driver_probe(struct vmm_device *dev,
				  const struct vmm_devtree_nodeid *devid)
{
	int rc;
	u32 reg_offset;
	struct omap_uart_port *port;
	
	port = vmm_zalloc(sizeof(struct omap_uart_port));
	if (!port) {
		rc = VMM_ENOMEM;
		goto free_nothing;
	}

	if (strlcpy(port->cd.name, dev->name, sizeof(port->cd.name)) >=
	    sizeof(port->cd.name)) {
		rc = VMM_EOVERFLOW;
		goto free_port;
	}

	port->cd.dev.parent = dev;
	port->cd.ioctl = NULL;
	port->cd.read = omap_uart_read;
	port->cd.write = omap_uart_write;
	port->cd.priv = port;

	INIT_COMPLETION(&port->read_possible);
	INIT_COMPLETION(&port->write_possible);

	rc = vmm_devtree_regmap(dev->node, &port->base, 0);
	if (rc) {
		goto free_port;
	}

	if (vmm_devtree_read_u32(dev->node, "reg_align",
				 &port->reg_align)) {
		port->reg_align = 1;
	}

	if (vmm_devtree_read_u32(dev->node, "reg_offset",
				 &reg_offset) == VMM_OK) {
		port->base += reg_offset;
	}

	rc = vmm_devtree_read_u32(dev->node, "baudrate",
				  &port->baudrate);
	if (rc) {
		goto free_reg;
	}

	rc = vmm_devtree_clock_frequency(dev->node,
					 &port->input_clock);
	if (rc) {
		goto free_reg;
	}

	omap_uart_startup_configure(port);

	rc = vmm_devtree_irq_get(dev->node, &port->irq, 0);
	if (rc) {
		goto free_reg;
	}
	if ((rc = vmm_host_irq_register(port->irq, dev->name,
					omap_uart_irq_handler, port))) {
		goto free_reg;
	}

	rc = vmm_chardev_register(&port->cd);
	if (rc) {
		goto free_irq;
	}

	dev->priv = port;

	return VMM_OK;

free_irq:
	vmm_host_irq_unregister(port->irq, port);
free_reg:
	vmm_devtree_regunmap(dev->node, port->base, 0);
free_port:
	vmm_free(port);
free_nothing:
	return rc;
}