Exemplo n.º 1
0
/* Setup the 8250 UART used as the default (early) terminal.
 *
 * Maps the UART registers from the device tree node, reads the
 * mandatory input clock frequency, applies optional "baudrate",
 * "reg-shift" and "reg-io-width" properties (with fallbacks of
 * 115200, 2 and 1 respectively), then runs low-level port init.
 *
 * Returns VMM_OK on success or a negative VMM_Exxx error code.
 */
static int __init uart8250_defterm_init(struct vmm_devtree_node *node)
{
	int err;

	/* Map the first register block of this node */
	err = vmm_devtree_regmap(node, &uart8250_port.base, 0);
	if (err)
		return err;

	/* Input clock frequency is mandatory */
	err = vmm_devtree_clock_frequency(node,
				&uart8250_port.input_clock);
	if (err)
		return err;

	/* Optional properties with sane defaults */
	if (vmm_devtree_read_u32(node, "baudrate",
				 &uart8250_port.baudrate))
		uart8250_port.baudrate = 115200;

	if (vmm_devtree_read_u32(node, "reg-shift",
				 &uart8250_port.reg_shift))
		uart8250_port.reg_shift = 2;

	if (vmm_devtree_read_u32(node, "reg-io-width",
				 &uart8250_port.reg_width))
		uart8250_port.reg_width = 1;

	uart_8250_lowlevel_init(&uart8250_port);

	return VMM_OK;
}
Exemplo n.º 2
0
/* Probe and initialize one GIC instance described by @node.
 *
 * @node:    device tree node of this GIC
 * @parent:  parent interrupt controller node (NULL for the root GIC)
 * @eoimode: whether to use split EOI/deactivate mode
 *
 * Returns VMM_OK on success or a negative VMM_Exxx error code.
 */
static int __init gic_devtree_init(struct vmm_devtree_node *node,
				   struct vmm_devtree_node *parent,
				   bool eoimode)
{
	int rc;
	u32 irq, irq_start = 0;
	physical_size_t cpu_sz;
	virtual_addr_t cpu_base;
	virtual_addr_t cpu2_base;
	virtual_addr_t dist_base;

	if (WARN_ON(!node)) {
		return VMM_ENODEV;
	}

	/* NOTE(review): dist/cpu mapping failures only WARN and do not
	 * bail out — presumably intentional (matches upstream style);
	 * confirm later register accesses cannot be reached on failure. */
	rc = vmm_devtree_request_regmap(node, &dist_base, 0, "GIC Dist");
	WARN(rc, "unable to map gic dist registers\n");

	rc = vmm_devtree_request_regmap(node, &cpu_base, 1, "GIC CPU");
	WARN(rc, "unable to map gic cpu registers\n");

	/* The second CPU interface page (reg index 4) is optional; when
	 * absent, derive its address from the size of the CPU interface
	 * region, or disable it (0x0) if the region is too small. */
	rc = vmm_devtree_request_regmap(node, &cpu2_base, 4, "GIC CPU2");
	if (rc) {
		rc = vmm_devtree_regsize(node, &cpu_sz, 1);
		if (rc) {
			return rc;
		}
		if (cpu_sz >= 0x20000) {
			cpu2_base = cpu_base + 0x10000;
		} else if (cpu_sz >= 0x2000) {
			cpu2_base = cpu_base + 0x1000;
		} else {
			cpu2_base = 0x0;
		}
	}

	/* Optional "irq_start" property (defaults to 0) */
	if (vmm_devtree_read_u32(node, "irq_start", &irq_start)) {
		irq_start = 0;
	}

	rc = gic_init_bases(node, gic_cnt, eoimode, irq_start,
			    cpu_base, cpu2_base, dist_base);
	if (rc) {
		return rc;
	}

	if (parent) {
		/* Cascaded GIC: route its output to the parent IRQ
		 * ("parent_irq" property, defaulting to 1020). */
		if (vmm_devtree_read_u32(node, "parent_irq", &irq)) {
			irq = 1020;
		}
		gic_cascade_irq(gic_cnt, irq);
	} else {
		/* Root GIC: install the active-IRQ query callback */
		vmm_host_irq_set_active_callback(gic_active_irq);
	}

	gic_cnt++;

	return VMM_OK;
}
Exemplo n.º 3
0
/* Initialize the Versatile FPGA (SIC) interrupt controller from @node.
 *
 * Reads optional "irq_start", "clear-mask", "valid-mask" and
 * "picen-mask" properties (all defaulting to 0 / absent), registers
 * the FPGA irq chip, and masks the IRQ/FIQ lines given by clear-mask.
 *
 * Must run on the boot CPU. Returns 0 (mapping failure only WARNs).
 */
static int __init fpga_init(struct vmm_devtree_node *node)
{
    int rc;
    virtual_addr_t base;
    u32 clear_mask;
    u32 valid_mask;
    u32 picen_mask;
    u32 irq_start;
    u32 parent_irq;

    BUG_ON(!vmm_smp_is_bootcpu());

    /* NOTE(review): a failed regmap only WARNs and execution continues
     * with an uninitialized base — confirm this matches upstream. */
    rc = vmm_devtree_request_regmap(node, &base, 0, "Versatile SIC");
    WARN(rc, "unable to map fpga irq registers\n");

    if (vmm_devtree_read_u32(node, "irq_start", &irq_start)) {
        irq_start = 0;
    }

    if (vmm_devtree_read_u32(node, "clear-mask", &clear_mask)) {
        clear_mask = 0;
    }

    if (vmm_devtree_read_u32(node, "valid-mask", &valid_mask)) {
        valid_mask = 0;
    }

    /* Some chips are cascaded from a parent IRQ */
    if (vmm_devtree_irq_get(node, &parent_irq, 0)) {
        parent_irq = 0xFFFFFFFF;
    }

    fpga_irq_init((void *)base, "FPGA",
                  irq_start, parent_irq,
                  valid_mask, node);

    /* Mask (disable) the IRQ and FIQ lines named by clear-mask */
    vmm_writel(clear_mask, (void *)base + IRQ_ENABLE_CLEAR);
    vmm_writel(clear_mask, (void *)base + FIQ_ENABLE_CLEAR);

    /* For VersatilePB, we have interrupts from 21 to 31 capable
     * of being routed directly to the parent interrupt controller
     * (i.e. VIC). This is controlled by setting PIC_ENABLEx.
     */
    if (!vmm_devtree_read_u32(node, "picen-mask", &picen_mask)) {
        vmm_writel(picen_mask, (void *)base + PICEN_SET);
    }

    return 0;
}
Exemplo n.º 4
0
int arch_guest_init(struct vmm_guest *guest)
{
	if (!guest->reset_count) {
		guest->arch_priv = vmm_malloc(sizeof(struct arm_guest_priv));
		if (!guest->arch_priv) {
			return VMM_ENOMEM;
		}

		arm_guest_priv(guest)->ttbl = mmu_lpae_ttbl_alloc(TTBL_STAGE2);
		if (!arm_guest_priv(guest)->ttbl) {
			vmm_free(guest->arch_priv);
			guest->arch_priv = NULL;
			return VMM_ENOMEM;
		}

		if (vmm_devtree_read_u32(guest->node,
				"psci_version",
				&arm_guest_priv(guest)->psci_version)) {
			/* By default, assume PSCI v0.1 */
			arm_guest_priv(guest)->psci_version = 1;
		}
	}

	return VMM_OK;
}
Exemplo n.º 5
0
/* Setup the Samsung UART used as the default (early) terminal.
 *
 * Maps the console registers, reads the mandatory clock frequency,
 * applies the optional "baudrate" property (default 115200) and
 * performs the low-level port initialization.
 *
 * Returns VMM_OK on success or a negative VMM_Exxx error code.
 */
static int __init samsung_defterm_init(struct vmm_devtree_node *node)
{
	int err;

	/* Map this console device */
	err = vmm_devtree_regmap(node, &samsung_defterm_base, 0);
	if (err)
		return err;

	/* Retrieve the mandatory input clock frequency */
	err = vmm_devtree_clock_frequency(node,
				&samsung_defterm_inclk);
	if (err)
		return err;

	/* Retrieve the optional baud rate (default 115200) */
	if (vmm_devtree_read_u32(node, "baudrate",
				&samsung_defterm_baud))
		samsung_defterm_baud = 115200;

	/* Initialize the console port */
	samsung_lowlevel_init(samsung_defterm_base,
			      samsung_defterm_baud,
			      samsung_defterm_inclk);

	return VMM_OK;
}
Exemplo n.º 6
0
/* Setup the ARM PL011 UART used as the default (early) terminal.
 *
 * Maps the UART registers, reads the mandatory input clock frequency,
 * applies the optional "baudrate" property (default 115200) and runs
 * the low-level initialization.
 *
 * Returns VMM_OK on success or a negative VMM_Exxx error code.
 */
static int __init pl011_defterm_init(struct vmm_devtree_node *node)
{
	int err;

	/* Map the PL011 register block */
	err = vmm_devtree_regmap(node, &pl011_defterm_base, 0);
	if (err)
		return err;

	/* Input clock frequency is mandatory */
	err = vmm_devtree_clock_frequency(node,
				&pl011_defterm_inclk);
	if (err)
		return err;

	/* Optional baud rate (default 115200) */
	if (vmm_devtree_read_u32(node, "baudrate",
				&pl011_defterm_baud))
		pl011_defterm_baud = 115200;

	pl011_lowlevel_init(pl011_defterm_base,
			    pl011_defterm_baud,
			    pl011_defterm_inclk);

	return VMM_OK;
}
Exemplo n.º 7
0
/* Probe one virtio-over-PCI-BAR emulated device for @guest.
 *
 * Allocates the device state, fills in the transport hooks, reads
 * the mandatory "virtio_type" and interrupt attributes from the
 * emulated device node, and registers the virtio device.
 *
 * Returns VMM_OK on success or a negative VMM_Exxx error code; on
 * failure the allocated state is freed.
 */
static int virtio_pci_bar_probe(struct vmm_guest *guest,
				struct vmm_emudev *edev,
				const struct vmm_devtree_nodeid *eid)
{
	int err;
	struct virtio_pci_dev *vdev;

	vdev = vmm_zalloc(sizeof(*vdev));
	if (!vdev) {
		return VMM_ENOMEM;
	}

	vdev->guest = guest;

	/* Device name is "<guest>/<node>" */
	vmm_snprintf(vdev->dev.name, VMM_VIRTIO_DEVICE_MAX_NAME_LEN,
		     "%s/%s", guest->name, edev->node->name);
	vdev->dev.edev = edev;
	vdev->dev.tra = &pci_tra;
	vdev->dev.tra_data = vdev;
	vdev->dev.guest = guest;

	vdev->config = (struct vmm_virtio_pci_config) {
		.queue_num  = 256,
	};

	/* Mandatory virtio device type */
	err = vmm_devtree_read_u32(edev->node, "virtio_type",
				   &vdev->dev.id.type);
	if (err) {
		goto fail_free;
	}

	/* Mandatory interrupt number */
	err = vmm_devtree_read_u32_atindex(edev->node,
					   VMM_DEVTREE_INTERRUPTS_ATTR_NAME,
					   &vdev->irq, 0);
	if (err) {
		goto fail_free;
	}

	err = vmm_virtio_register_device(&vdev->dev);
	if (err) {
		goto fail_free;
	}

	edev->priv = vdev;

	return VMM_OK;

fail_free:
	vmm_free(vdev);
	return err;
}

/* Device tree node IDs matched by the virtio PCI BAR emulator */
static struct vmm_devtree_nodeid virtio_pci_emuid_table[] = {
	{
		.type = "virtio",
		.compatible = "virtio,pci",
	},
	{ /* end of list */ },
};
Exemplo n.º 8
0
/* Probe one emulated ARM11 MPCore private-region device for @guest.
 *
 * Reads the mandatory "parent_irq" and two-entry "timer_irq"
 * attributes, then allocates the MP timer state and the GIC state.
 * On failure everything allocated so far is released in reverse
 * order via the goto cleanup ladder.
 *
 * Returns VMM_OK on success or a negative VMM_Exxx error code.
 */
static int arm11mpcore_emulator_probe(struct vmm_guest *guest,
				      struct vmm_emudev *edev,
				      const struct vmm_devtree_nodeid *eid)
{
	int rc = VMM_OK;
	struct arm11mpcore_priv_state *s;
	u32 parent_irq, timer_irq[2];

	s = vmm_zalloc(sizeof(struct arm11mpcore_priv_state));
	if (!s) {
		rc = VMM_ENOMEM;
		goto arm11mp_probe_done;
	}

	/* One GIC/timer context per guest VCPU */
	s->num_cpu = guest->vcpu_count;

	rc = vmm_devtree_read_u32(edev->node, "parent_irq", &parent_irq);
	if (rc) {
		goto arm11mp_probe_failed;
	}

	rc = vmm_devtree_read_u32_array(edev->node, "timer_irq",
					timer_irq, array_size(timer_irq));
	if (rc) {
		goto arm11mp_probe_failed;
	}

	/* Allocate and init MPT state */
	if (!(s->mpt = mptimer_state_alloc(guest, edev, s->num_cpu, 1000000,
				 	   timer_irq[0], timer_irq[1]))) {
		rc = VMM_ENOMEM;
		goto arm11mp_probe_failed;
	}

	/* Allocate and init GIC state */
	if (!(s->gic = gic_state_alloc(edev->node->name, guest, 
				       GIC_TYPE_ARM11MPCORE, s->num_cpu,
				       FALSE, 0, 96, parent_irq))) {
		rc = VMM_ENOMEM;
		goto arm11mp_gic_alloc_failed;
	}

	s->guest = guest;
	INIT_SPIN_LOCK(&s->lock);

	edev->priv = s;

	goto arm11mp_probe_done;

arm11mp_gic_alloc_failed:
	mptimer_state_free(s->mpt);

arm11mp_probe_failed:
	vmm_free(s);

arm11mp_probe_done:
	return rc;
}
Exemplo n.º 9
0
/* Setup framebuffer memory for the Versatile CLCD.
 *
 * @fb:        CLCD framebuffer device to setup
 * @framesize: required framebuffer size in bytes
 *
 * If the "use_dma" DT property is non-zero, the framebuffer is
 * allocated from host pages; otherwise a fixed region described by
 * the two-cell "framebuffer" property (physical address, length)
 * is io-mapped.
 *
 * Returns 0 on success or a negative VMM_Exxx error code.
 */
int versatile_clcd_setup(struct clcd_fb *fb, unsigned long framesize)
{
	int rc;
	u32 use_dma, val[2];
	void *screen_base;
	unsigned long smem_len;
	physical_addr_t smem_pa;

	if (!fb->dev->node) {
		return VMM_EINVALID;
	}

	/* Optional "use_dma" property (defaults to 0) */
	if (vmm_devtree_read_u32(fb->dev->node, "use_dma", &use_dma)) {
		use_dma = 0;
	}

	if (use_dma) {
		smem_len = framesize;

		screen_base = (void *)vmm_host_alloc_pages(
				VMM_SIZE_TO_PAGE(smem_len),
				VMM_MEMORY_READABLE | VMM_MEMORY_WRITEABLE);
		if (!screen_base) {
			vmm_printf("CLCD: unable to alloc framebuffer\n");
			return VMM_ENOMEM;
		}

		rc = vmm_host_va2pa((virtual_addr_t)screen_base, &smem_pa);
		if (rc) {
			/* Fix: free the just-allocated pages instead of
			 * leaking them when the VA->PA lookup fails. */
			vmm_host_free_pages((virtual_addr_t)screen_base,
					    VMM_SIZE_TO_PAGE(smem_len));
			return rc;
		}
	} else {
		/* "framebuffer" = <phys_addr length> */
		rc = vmm_devtree_read_u32_array(fb->dev->node,
						"framebuffer", val, 2);
		if (rc) {
			return rc;
		}

		smem_pa = val[0];
		smem_len = val[1];

		if (smem_len < framesize) {
			return VMM_ENOMEM;
		}

		screen_base = (void *)vmm_host_iomap(smem_pa, smem_len);
		if (!screen_base) {
			vmm_printf("CLCD: unable to map framebuffer\n");
			return VMM_ENOMEM;
		}
	}

	fb->fb.screen_base	= screen_base;
	fb->fb.fix.smem_start	= smem_pa;
	fb->fb.fix.smem_len	= smem_len;

	return 0;
}
Exemplo n.º 10
0
/* Probe one virtio-mmio emulated device for @guest.
 *
 * Allocates the device state, fills in the standard virtio-mmio
 * config header (magic "virt", version 1), reads the mandatory
 * "virtio_type" and interrupt attributes, and registers the virtio
 * device. All failure paths free the allocated state.
 *
 * Returns VMM_OK on success or a negative VMM_Exxx error code.
 */
static int virtio_mmio_probe(struct vmm_guest *guest,
			     struct vmm_emudev *edev,
			     const struct vmm_devtree_nodeid *eid)
{
	int rc = VMM_OK;
	struct virtio_mmio_dev *m;

	m = vmm_zalloc(sizeof(struct virtio_mmio_dev));
	if (!m) {
		rc = VMM_ENOMEM;
		goto virtio_mmio_probe_done;
	}

	m->guest = guest;

	/* Device name is "<guest>/<node>" */
	vmm_snprintf(m->dev.name, VMM_VIRTIO_DEVICE_MAX_NAME_LEN, 
		     "%s/%s", guest->name, edev->node->name); 
	m->dev.edev = edev;
	m->dev.tra = &mmio_tra;
	m->dev.tra_data = m;
	m->dev.guest = guest;

	m->config = (struct vmm_virtio_mmio_config) {
		     .magic          = {'v', 'i', 'r', 't'},
		     .version        = 1,
		     .vendor_id      = 0x52535658, /* XVSR */
		     .queue_num_max  = 256,
	};

	/* Mandatory virtio device type */
	rc = vmm_devtree_read_u32(edev->node, "virtio_type",
				  &m->config.device_id);
	if (rc) {
		goto virtio_mmio_probe_freestate_fail;
	}

	m->dev.id.type = m->config.device_id;

	/* Mandatory interrupt number */
	rc = vmm_devtree_read_u32_atindex(edev->node,
					  VMM_DEVTREE_INTERRUPTS_ATTR_NAME,
					  &m->irq, 0);
	if (rc) {
		goto virtio_mmio_probe_freestate_fail;
	}

	if ((rc = vmm_virtio_register_device(&m->dev))) {
		goto virtio_mmio_probe_freestate_fail;
	}

	edev->priv = m;

	goto virtio_mmio_probe_done;

virtio_mmio_probe_freestate_fail:
	vmm_free(m);
virtio_mmio_probe_done:
	return rc;
}
Exemplo n.º 11
0
/* Look up property @name starting at @node and walking up through
 * its ancestors; the first match is stored in *val. If no ancestor
 * has the property, *val is left untouched.
 */
static void vexpress_sysreg_find_prop(struct vmm_devtree_node *node,
					const char *name, u32 *val)
{
	struct vmm_devtree_node *np;

	for (np = node; np; np = np->parent) {
		if (vmm_devtree_read_u32(np, name, val) == VMM_OK) {
			return;
		}
	}
}
Exemplo n.º 12
0
/**
 * of_fixed_clk_setup() - Setup function for simple fixed rate clock
 *
 * Reads the mandatory "clock-frequency" property (silently bails out
 * if absent), optionally overrides the clock name from
 * "clock-output-names", registers a fixed-rate clock and adds it as
 * a clock provider for @node.
 */
void of_fixed_clk_setup(struct vmm_devtree_node *node)
{
	u32 rate;
	struct clk *clk;
	const char *clk_name = node->name;

	/* A fixed clock without a frequency is meaningless */
	if (vmm_devtree_read_u32(node, "clock-frequency", &rate)) {
		return;
	}

	/* Optional clock output name override */
	vmm_devtree_read_string(node, "clock-output-names", &clk_name);

	clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
	if (clk) {
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
	}
}
Exemplo n.º 13
0
/* Probe one 8250 UART device.
 *
 * Maps the register block (either as host I/O ports when the DT
 * address type is "io", or as memory-mapped registers), reads the
 * optional "reg-shift"/"reg-io-width"/"baudrate" properties, does
 * low-level init, hooks the interrupt, creates the serial port and
 * finally unmasks the Rx interrupts.
 *
 * All acquired resources are released in reverse order through the
 * goto cleanup ladder on failure.
 *
 * Returns VMM_OK on success or a negative VMM_Exxx error code.
 */
static int uart_8250_driver_probe(struct vmm_device *dev,
				  const struct vmm_devtree_nodeid *devid)
{
	int rc;
	struct uart_8250_port *port;
	physical_addr_t ioport;
	const char *aval;

	port = vmm_zalloc(sizeof(struct uart_8250_port));
	if(!port) {
		rc = VMM_ENOMEM;
		goto free_nothing;
	}

	/* Decide between I/O port and MMIO access from the DT
	 * address-type attribute (absent == MMIO). */
	if (vmm_devtree_read_string(dev->node,
				    VMM_DEVTREE_ADDRESS_TYPE_ATTR_NAME,
				    &aval)) {
		aval = NULL;
	}
	if (aval && !strcmp(aval, VMM_DEVTREE_ADDRESS_TYPE_VAL_IO)) {
		port->use_ioport = TRUE;
	} else {
		port->use_ioport = FALSE;
	}

	if (port->use_ioport) {
		rc = vmm_devtree_regaddr(dev->node, &ioport, 0);
		if (rc) {
			goto free_port;
		}
		port->base = ioport;
	} else {
		rc = vmm_devtree_request_regmap(dev->node, &port->base, 0,
						"UART 8250");
		if (rc) {
			goto free_port;
		}
	}

	/* Optional properties with defaults */
	if (vmm_devtree_read_u32(dev->node, "reg-shift",
				 &port->reg_shift)) {
		port->reg_shift = 0;
	}

	if (vmm_devtree_read_u32(dev->node, "reg-io-width",
				 &port->reg_width)) {
		port->reg_width = 1;
	}

	if (vmm_devtree_read_u32(dev->node, "baudrate",
				 &port->baudrate)) {
		port->baudrate = 115200;
	}

	/* Input clock frequency is mandatory */
	rc = vmm_devtree_clock_frequency(dev->node, &port->input_clock);
	if (rc) {
		goto free_reg;
	}

	/* Call low-level init function
	 * Note: low-level init will make sure that
	 * interrupts are disabled in IER register.
	 */
	uart_8250_lowlevel_init(port);

	/* Setup interrupt handler */
	port->irq = vmm_devtree_irq_parse_map(dev->node, 0);
	if (!port->irq) {
		rc = VMM_ENODEV;
		goto free_reg;
	}
	if ((rc = vmm_host_irq_register(port->irq, dev->name,
					uart_8250_irq_handler, port))) {
		goto free_reg;
	}

	/* Create Serial Port */
	port->p = serial_create(dev, 256, uart_8250_tx, port);
	if (VMM_IS_ERR_OR_NULL(port->p)) {
		rc = VMM_PTR_ERR(port->p);
		goto free_irq;
	}

	/* Save port pointer */
	dev->priv = port;

	/* Unmask Rx interrupt */
	port->ier |= (UART_IER_RLSI | UART_IER_RDI);
	uart_8250_out(port, UART_IER_OFFSET, port->ier);

	return VMM_OK;

free_irq:
	vmm_host_irq_unregister(port->irq, port);
free_reg:
	/* The register mapping only exists in the MMIO case */
	if (!port->use_ioport) {
		vmm_devtree_regunmap_release(dev->node, port->base, 0);
	}
free_port:
	vmm_free(port);
free_nothing:
	return rc;
}
Exemplo n.º 14
0
/* Probe one i.MX UART serial device.
 *
 * Maps the registers, reads optional "baudrate" (default 115200) and
 * the mandatory clock frequency, prepares/enables the optional ipg
 * and uart clocks (restoring the uart clock's previous rate on
 * failure), hooks the interrupt, does low-level init, creates the
 * serial port and finally enables the UART with Rx unmasked.
 *
 * Returns VMM_OK on success or a negative VMM_Exxx error code.
 */
static int imx_driver_probe(struct vmm_device *dev,
			    const struct vmm_devtree_nodeid *devid)
{
	int rc = VMM_EFAIL;
	struct clk *clk_ipg = NULL;
	struct clk *clk_uart = NULL;
	struct imx_port *port = NULL;
	unsigned long old_rate = 0;

	port = vmm_zalloc(sizeof(struct imx_port));
	if (!port) {
		rc = VMM_ENOMEM;
		goto free_nothing;
	}

	rc = vmm_devtree_request_regmap(dev->of_node, &port->base, 0,
					"iMX UART");
	if (rc) {
		goto free_port;
	}

	if (vmm_devtree_read_u32(dev->of_node, "baudrate", &port->baudrate)) {
		port->baudrate = 115200;
	}

	rc = vmm_devtree_clock_frequency(dev->of_node, &port->input_clock);
	if (rc) {
		goto free_reg;
	}

	/* Setup clocks */
	clk_ipg = of_clk_get(dev->of_node, 0);
	clk_uart = of_clk_get(dev->of_node, 1);
	if (!VMM_IS_ERR_OR_NULL(clk_ipg)) {
		rc = clk_prepare_enable(clk_ipg);
		if (rc) {
			goto free_reg;
		}
	}
	if (!VMM_IS_ERR_OR_NULL(clk_uart)) {
		rc = clk_prepare_enable(clk_uart);
		if (rc) {
			goto clk_disable_unprepare_ipg;
		}
		/* Remember the previous rate so the error path can
		 * restore it after a failed clk_set_rate(). */
		old_rate = clk_get_rate(clk_uart);
		if (clk_set_rate(clk_uart, port->input_clock)) {
			vmm_printf("Could not set %s clock rate to %u Hz, "
				   "actual rate: %u Hz\n", __clk_get_name(clk_uart),
				   port->input_clock, clk_get_rate(clk_uart));
			rc = VMM_ERANGE;
			goto clk_disable_unprepare_uart;
		}
	}

	/* Register interrupt handler */
	port->irq = vmm_devtree_irq_parse_map(dev->of_node, 0);
	if (!port->irq) {
		rc = VMM_ENODEV;
		goto clk_old_rate;
	}
	if ((rc = vmm_host_irq_register(port->irq, dev->name,
					imx_irq_handler, port))) {
		goto clk_old_rate;
	}

	/* Call low-level init function */
	imx_lowlevel_init(port->base, port->baudrate, port->input_clock);

	/* Create Serial Port */
	port->p = serial_create(dev, 256, imx_tx, port);
	if (VMM_IS_ERR_OR_NULL(port->p)) {
		rc = VMM_PTR_ERR(port->p);
		goto free_irq;
	}

	/* Save port pointer */
	dev->priv = port;

	/* Unmask Rx, Mask Tx, and Enable UART */
	port->mask = vmm_readl((void *)port->base + UCR1);
	port->mask |= (UCR1_RRDYEN | UCR1_UARTEN);
	port->mask &= ~(UCR1_TRDYEN);
	vmm_writel(port->mask, (void *)port->base + UCR1);

	/* rc is VMM_OK here (last successful call) */
	return rc;

free_irq:
	vmm_host_irq_unregister(port->irq, port);
clk_old_rate:
	if (!VMM_IS_ERR_OR_NULL(clk_uart)) {
		if (old_rate) {
			clk_set_rate(clk_uart, old_rate);
		}
	}
clk_disable_unprepare_uart:
	if (!VMM_IS_ERR_OR_NULL(clk_uart)) {
		clk_disable_unprepare(clk_uart);
	}
clk_disable_unprepare_ipg:
	if (!VMM_IS_ERR_OR_NULL(clk_ipg)) {
		clk_disable_unprepare(clk_ipg);
	}
free_reg:
	vmm_devtree_regunmap_release(dev->of_node, port->base, 0);
free_port:
	vmm_free(port);
free_nothing:
	return rc;
}
Exemplo n.º 15
0
/* Per-CPU initialization of the ARM TWD local timer clockchip.
 *
 * Maps the TWD registers and parses the PPI only once (cached in the
 * twd_base / twd_ppi_irq globals). The timer frequency is taken from
 * the TWD clock if one exists, otherwise it is calibrated against a
 * reference counter described by reg index 1 and the
 * "ref-counter-freq" property. The boot CPU additionally registers
 * the (per-cpu) interrupt handler.
 *
 * Returns VMM_OK on success or a negative VMM_Exxx error code.
 */
static int __cpuinit twd_clockchip_init(struct vmm_devtree_node *node)
{
	int rc;
	u32 ref_cnt_freq;
	virtual_addr_t ref_cnt_addr;
	u32 cpu = vmm_smp_processor_id();
	struct twd_clockchip *cc = &this_cpu(twd_cc);

	if (!twd_base) {
		rc = vmm_devtree_regmap(node, &twd_base, 0);
		if (rc) {
			goto fail;
		}
	}

	if (!twd_ppi_irq) {
		rc = vmm_devtree_irq_get(node, &twd_ppi_irq, 0);
		if (rc) {
			goto fail_regunmap;
		}
	}

	if (!twd_freq_hz) {
		/* First try to find TWD clock */
		if (!twd_clk) {
			twd_clk = of_clk_get(node, 0);
		}
		if (!twd_clk) {
			twd_clk = clk_get_sys("smp_twd", NULL);
		}

		if (twd_clk) {
			/* Use TWD clock to find frequency */
			rc = clk_prepare_enable(twd_clk);
			if (rc) {
				clk_put(twd_clk);
				goto fail_regunmap;
			}
			twd_freq_hz = clk_get_rate(twd_clk);
		} else {
			/* No TWD clock found hence caliberate */
			rc = vmm_devtree_regmap(node, &ref_cnt_addr, 1);
			if (rc) {
				/* Fix: the reference counter was never
				 * mapped here, so don't unmap it. */
				goto fail_regunmap;
			}
			rc = vmm_devtree_read_u32(node, "ref-counter-freq",
						  &ref_cnt_freq);
			if (rc) {
				/* Fix: propagate the read error; rc was
				 * previously left at 0 (success) here. */
				vmm_devtree_regunmap(node, ref_cnt_addr, 1);
				goto fail_regunmap;
			}
			twd_caliberate_freq(twd_base, 
					ref_cnt_addr, ref_cnt_freq);
			vmm_devtree_regunmap(node, ref_cnt_addr, 1);
		}
	}

	memset(cc, 0, sizeof(struct twd_clockchip));

	vmm_sprintf(cc->name, "twd/%d", cpu);

	cc->clkchip.name = cc->name;
	cc->clkchip.hirq = twd_ppi_irq;
	cc->clkchip.rating = 350;
	cc->clkchip.cpumask = vmm_cpumask_of(cpu);
	cc->clkchip.features = 
		VMM_CLOCKCHIP_FEAT_PERIODIC | VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&cc->clkchip.mult, &cc->clkchip.shift, 
				   VMM_NSEC_PER_SEC, twd_freq_hz, 10);
	cc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(0xF, &cc->clkchip);
	cc->clkchip.max_delta_ns = 
			vmm_clockchip_delta2ns(0xFFFFFFFF, &cc->clkchip);
	cc->clkchip.set_mode = &twd_clockchip_set_mode;
	cc->clkchip.set_next_event = &twd_clockchip_set_next_event;
	cc->clkchip.priv = cc;

	if (vmm_smp_is_bootcpu()) {
		/* Register interrupt handler */
		if ((rc = vmm_host_irq_register(twd_ppi_irq, "twd",
						&twd_clockchip_irq_handler, 
						cc))) {
			goto fail_regunmap;
		}

		/* Mark interrupt as per-cpu */
		if ((rc = vmm_host_irq_mark_per_cpu(twd_ppi_irq))) {
			goto fail_unreg_irq;
		}
	}

	/* Explicitly enable local timer PPI in GIC 
	 * Note: Local timer requires PPI support hence requires GIC
	 */
	gic_enable_ppi(twd_ppi_irq);

	rc = vmm_clockchip_register(&cc->clkchip);
	if (rc) {
		goto fail_unreg_irq;
	}

	return VMM_OK;

fail_unreg_irq:
	if (vmm_smp_is_bootcpu()) {
		vmm_host_irq_unregister(twd_ppi_irq, cc);
	}
fail_regunmap:
	vmm_devtree_regunmap(node, twd_base, 0);
fail:
	return rc;
}
Exemplo n.º 16
0
/* Probe one OMAP UART device.
 *
 * Sets up the character-device callbacks, maps the registers
 * (optionally adjusted by "reg_align"/"reg_offset" properties),
 * reads the mandatory "baudrate" and clock frequency, configures
 * the hardware, hooks the interrupt and registers the chardev.
 *
 * Returns VMM_OK on success or a negative VMM_Exxx error code.
 */
static int omap_uart_driver_probe(struct vmm_device *dev,
				  const struct vmm_devtree_nodeid *devid)
{
	int rc;
	u32 reg_offset;
	struct omap_uart_port *port;
	
	port = vmm_zalloc(sizeof(struct omap_uart_port));
	if(!port) {
		rc = VMM_ENOMEM;
		goto free_nothing;
	}

	/* Chardev name mirrors the device name; reject truncation */
	if (strlcpy(port->cd.name, dev->name, sizeof(port->cd.name)) >=
	    sizeof(port->cd.name)) {
		rc = VMM_EOVERFLOW;
		goto free_port;
	}

	port->cd.dev.parent = dev;
	port->cd.ioctl = NULL;
	port->cd.read = omap_uart_read;
	port->cd.write = omap_uart_write;
	port->cd.priv = port;

	INIT_COMPLETION(&port->read_possible);
	INIT_COMPLETION(&port->write_possible);

	rc = vmm_devtree_regmap(dev->node, &port->base, 0);
	if(rc) {
		goto free_port;
	}

	/* Optional register alignment (defaults to 1 = byte access) */
	if (vmm_devtree_read_u32(dev->node, "reg_align",
				 &port->reg_align)) {
		port->reg_align = 1;
	}

	/* Optional offset added to the mapped base */
	if (vmm_devtree_read_u32(dev->node, "reg_offset",
				 &reg_offset) == VMM_OK) {
		port->base += reg_offset;
	}

	/* "baudrate" is mandatory for this driver */
	rc = vmm_devtree_read_u32(dev->node, "baudrate",
				  &port->baudrate);
	if (rc) {
		goto free_reg;
	}

	rc = vmm_devtree_clock_frequency(dev->node,
					 &port->input_clock);
	if (rc) {
		goto free_reg;
	}

	omap_uart_startup_configure(port);

	rc = vmm_devtree_irq_get(dev->node, &port->irq, 0);
	if (rc) {
		goto free_reg;
	}
	if ((rc = vmm_host_irq_register(port->irq, dev->name,
					omap_uart_irq_handler, port))) {
		goto free_reg;
	}

	rc = vmm_chardev_register(&port->cd);
	if (rc) {
		goto free_irq;
	}

	dev->priv = port;

	return VMM_OK;

free_irq:
	vmm_host_irq_unregister(port->irq, port);
free_reg:
	vmm_devtree_regunmap(dev->node, port->base, 0);
free_port:
	vmm_free(port);
free_nothing:
	return rc;
}
Exemplo n.º 17
0
/* Probe one i.MX UART as a character device.
 *
 * Sets up chardev callbacks and completions, maps the registers,
 * enables Rx (and optionally Tx) interrupt bits in UCR1, reads
 * optional "baudrate" (default 115200) and the mandatory clock
 * frequency, hooks the interrupt, performs low-level init and
 * registers the chardev.
 *
 * Returns VMM_OK on success or a negative VMM_Exxx error code.
 */
static int imx_driver_probe(struct vmm_device *dev,
			    const struct vmm_devtree_nodeid *devid)
{
	int rc = VMM_EFAIL;
	struct imx_port *port = NULL;

	port = vmm_zalloc(sizeof(struct imx_port));
	if (!port) {
		rc = VMM_ENOMEM;
		goto free_nothing;
	}

	/* Chardev name mirrors the device name; reject truncation */
	if (strlcpy(port->cd.name, dev->name, sizeof(port->cd.name)) >=
	    sizeof(port->cd.name)) {
		rc = VMM_EOVERFLOW;
		goto free_port;
	}

	port->cd.dev.parent = dev;
	port->cd.ioctl = NULL;
	port->cd.read = imx_read;
	port->cd.write = imx_write;
	port->cd.priv = port;

	INIT_COMPLETION(&port->read_possible);
#if defined(UART_IMX_USE_TXINTR)
	INIT_COMPLETION(&port->write_possible);
#endif

	rc = vmm_devtree_regmap(dev->node, &port->base, 0);
	if (rc) {
		goto free_port;
	}

	/* Enable UART with Rx (and optionally Tx) interrupts */
	port->mask = UCR1_RRDYEN | UCR1_UARTEN;

#if defined(UART_IMX_USE_TXINTR)
	port->mask |= UCR1_TRDYEN;
#endif

	vmm_writel(port->mask, (void *)port->base + UCR1);

	if (vmm_devtree_read_u32(dev->node, "baudrate", &port->baudrate)) {
		port->baudrate = 115200;
	}

	rc = vmm_devtree_clock_frequency(dev->node, &port->input_clock);
	if (rc) {
		goto free_reg;
	}

	rc = vmm_devtree_irq_get(dev->node, &port->irq, 0);
	if (rc) {
		goto free_reg;
	}

	if ((rc = vmm_host_irq_register(port->irq, dev->name,
					imx_irq_handler, port))) {
		goto free_reg;
	}

	/* Call low-level init function */
	imx_lowlevel_init(port->base, port->baudrate, port->input_clock);

	/* Re-read UCR1 since low-level init may have changed it */
	port->mask = vmm_readl((void *)port->base + UCR1);

	rc = vmm_chardev_register(&port->cd);
	if (rc) {
		goto free_irq;
	}

	dev->priv = port;

	return rc;

 free_irq:
	vmm_host_irq_unregister(port->irq, port);
 free_reg:
	vmm_devtree_regunmap(dev->node, port->base, 0);
 free_port:
	vmm_free(port);
 free_nothing:
	return rc;
}
Exemplo n.º 18
0
/* Initialize one i.MX EPIT timer as a oneshot clockchip.
 *
 * Reads the mandatory clock frequency, "timer_num" and interrupt
 * attributes, maps the timer registers, programs the hardware to a
 * known state (timer enabled, high reference clock, WAIT mode), then
 * registers the interrupt handler and the clockchip. All resources
 * are released in reverse order on failure.
 *
 * Returns VMM_OK on success or a negative VMM_Exxx error code.
 */
static int __cpuinit epit_clockchip_init(struct vmm_devtree_node *node)
{
	int rc;
	u32 clock, hirq, timer_num;
	struct epit_clockchip *ecc;

	/* Read clock frequency */
	rc = vmm_devtree_clock_frequency(node, &clock);
	if (rc) {
		goto fail;
	}

	/* Read timer_num attribute */
	rc = vmm_devtree_read_u32(node, "timer_num", &timer_num);
	if (rc) {
		goto fail;
	}

	/* Read irq attribute */
	rc = vmm_devtree_irq_get(node, &hirq, 0);
	if (rc) {
		goto fail;
	}

	/* allocate our struct */
	ecc = vmm_zalloc(sizeof(struct epit_clockchip));
	if (!ecc) {
		rc = VMM_ENOMEM;
		goto fail;
	}

	/* Map timer registers */
	rc = vmm_devtree_regmap(node, &ecc->base, 0);
	if (rc) {
		goto regmap_fail;
	}

	ecc->match_mask = 1 << timer_num;
	ecc->timer_num = timer_num;

	/* Setup clockchip */
	ecc->clkchip.name = node->name;
	ecc->clkchip.hirq = hirq;
	ecc->clkchip.rating = 300;
	ecc->clkchip.cpumask = vmm_cpumask_of(0);
	ecc->clkchip.features = VMM_CLOCKCHIP_FEAT_ONESHOT;
	vmm_clocks_calc_mult_shift(&ecc->clkchip.mult,
				   &ecc->clkchip.shift,
				   VMM_NSEC_PER_SEC, clock, 10);
	ecc->clkchip.min_delta_ns = vmm_clockchip_delta2ns(MIN_REG_COMPARE,
							   &ecc->clkchip);
	ecc->clkchip.max_delta_ns = vmm_clockchip_delta2ns(MAX_REG_COMPARE,
							   &ecc->clkchip);
	ecc->clkchip.set_mode = epit_set_mode;
	ecc->clkchip.set_next_event = epit_set_next_event;
	ecc->clkchip.priv = ecc;

	/*
	 * Initialise to a known state (all timers off, and timing reset)
	 */
	vmm_writel(0x0, (void *)(ecc->base + EPITCR));
	/*
	 * Initialize the load register to the max value to decrement.
	 */
	vmm_writel(0xffffffff, (void *)(ecc->base + EPITLR));
	/*
	 * enable the timer, set it to the high reference clock,
	 * allow the timer to work in WAIT mode.
	 */
	vmm_writel(EPITCR_EN | EPITCR_CLKSRC_REF_HIGH | EPITCR_WAITEN,
		   (void *)(ecc->base + EPITCR));

	/* Register interrupt handler */
	rc = vmm_host_irq_register(hirq, ecc->clkchip.name,
				   &epit_timer_interrupt, ecc);
	if (rc) {
		goto irq_fail;
	}

	/* Register clockchip */
	rc = vmm_clockchip_register(&ecc->clkchip);
	if (rc) {
		goto register_fail;
	}

	return VMM_OK;

 register_fail:
	vmm_host_irq_unregister(hirq, ecc);
 irq_fail:
	vmm_devtree_regunmap(node, ecc->base, 0);
 regmap_fail:
	vmm_free(ecc);
 fail:
	return rc;
}
Exemplo n.º 19
0
/* Initialize the architecture-specific state of a VCPU.
 *
 * Orphan VCPUs only get registers and a hypervisor-mode pstate.
 * Normal VCPUs additionally get: a CPUID derived from their DT
 * "compatible" string, a feature set, hypervisor configuration
 * (HCR/CPTR/HSTR), sysreg/VFP contexts and, when the generic timer
 * feature is present, a generic timer context. Private context is
 * allocated only on first init (reset_count == 0) and freed on the
 * failure paths for that first init only.
 *
 * Returns VMM_OK on success or a negative VMM_Exxx error code.
 */
int arch_vcpu_init(struct vmm_vcpu *vcpu)
{
	int rc;
	u32 cpuid = 0;
	const char *attr;
	irq_flags_t flags;
	u32 phys_timer_irq, virt_timer_irq;

	/* For both Orphan & Normal VCPUs */
	memset(arm_regs(vcpu), 0, sizeof(arch_regs_t));
	arm_regs(vcpu)->pc = vcpu->start_pc;
	arm_regs(vcpu)->sp = vcpu->stack_va + vcpu->stack_sz - 8;
	if (!vcpu->is_normal) {
		/* Orphan VCPUs run at EL2h with async aborts masked */
		arm_regs(vcpu)->pstate = PSR_MODE64_EL2h;
		arm_regs(vcpu)->pstate |= PSR_ASYNC_ABORT_DISABLED;
		return VMM_OK;
	}

	/* Following initialization for normal VCPUs only */
	rc = vmm_devtree_read_string(vcpu->node, 
			VMM_DEVTREE_COMPATIBLE_ATTR_NAME, &attr);
	if (rc) {
		goto fail;
	}
	/* Map the "compatible" string to a CPUID; 32-bit CPU models
	 * additionally select AArch32 mode via PSR_MODE32. */
	if (strcmp(attr, "armv7a,cortex-a8") == 0) {
		cpuid = ARM_CPUID_CORTEXA8;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a9") == 0) {
		cpuid = ARM_CPUID_CORTEXA9;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a15") == 0) {
		cpuid = ARM_CPUID_CORTEXA15;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv7a,cortex-a7") == 0) {
		cpuid = ARM_CPUID_CORTEXA7;
		arm_regs(vcpu)->pstate = PSR_MODE32;
	} else if (strcmp(attr, "armv8,generic") == 0) {
		cpuid = ARM_CPUID_ARMV8;
	} else {
		rc = VMM_EINVALID;
		goto fail;
	}
	if (arm_regs(vcpu)->pstate == PSR_MODE32) {
		/* Check if the host supports A32 mode @ EL1 */
		if (!cpu_supports_el1_a32()) {
			vmm_printf("Host does not support AArch32 mode\n");
			rc = VMM_ENOTAVAIL;
			goto fail;
		}
		arm_regs(vcpu)->pstate |= PSR_ZERO_MASK;
		arm_regs(vcpu)->pstate |= PSR_MODE32_SUPERVISOR;
	} else {
		arm_regs(vcpu)->pstate |= PSR_MODE64_DEBUG_DISABLED;
		arm_regs(vcpu)->pstate |= PSR_MODE64_EL1h;
	}
	arm_regs(vcpu)->pstate |= PSR_ASYNC_ABORT_DISABLED;
	arm_regs(vcpu)->pstate |= PSR_IRQ_DISABLED;
	arm_regs(vcpu)->pstate |= PSR_FIQ_DISABLED;

	/* First time initialization of private context */
	if (!vcpu->reset_count) {
		/* Alloc private context */
		vcpu->arch_priv = vmm_zalloc(sizeof(struct arm_priv));
		if (!vcpu->arch_priv) {
			rc = VMM_ENOMEM;
			goto fail;
		}
		/* Setup CPUID value expected by VCPU in MIDR register
		 * as-per HW specifications.
		 */
		arm_priv(vcpu)->cpuid = cpuid;
		/* Initialize VCPU features */
		arm_priv(vcpu)->features = 0;
		switch (cpuid) {
		case ARM_CPUID_CORTEXA8:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA9:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA7:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_CORTEXA15:
			arm_set_feature(vcpu, ARM_FEATURE_V7);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_VFP_FP16);
			arm_set_feature(vcpu, ARM_FEATURE_NEON);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2EE);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_V7MP);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			arm_set_feature(vcpu, ARM_FEATURE_DUMMY_C15_REGS);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_TRUSTZONE);
			break;
		case ARM_CPUID_ARMV8:
			arm_set_feature(vcpu, ARM_FEATURE_V8);
			arm_set_feature(vcpu, ARM_FEATURE_VFP4);
			arm_set_feature(vcpu, ARM_FEATURE_ARM_DIV);
			arm_set_feature(vcpu, ARM_FEATURE_LPAE);
			arm_set_feature(vcpu, ARM_FEATURE_GENERIC_TIMER);
			break;
		default:
			break;
		};
		/* Some features automatically imply others: */
		if (arm_feature(vcpu, ARM_FEATURE_V7)) {
			arm_set_feature(vcpu, ARM_FEATURE_VAPA);
			arm_set_feature(vcpu, ARM_FEATURE_THUMB2);
			arm_set_feature(vcpu, ARM_FEATURE_MPIDR);
			if (!arm_feature(vcpu, ARM_FEATURE_M)) {
				arm_set_feature(vcpu, ARM_FEATURE_V6K);
			} else {
				arm_set_feature(vcpu, ARM_FEATURE_V6);
			}
		}
		if (arm_feature(vcpu, ARM_FEATURE_V6K)) {
			arm_set_feature(vcpu, ARM_FEATURE_V6);
			arm_set_feature(vcpu, ARM_FEATURE_MVFR);
		}
		if (arm_feature(vcpu, ARM_FEATURE_V6)) {
			arm_set_feature(vcpu, ARM_FEATURE_V5);
			if (!arm_feature(vcpu, ARM_FEATURE_M)) {
				arm_set_feature(vcpu, ARM_FEATURE_AUXCR);
			}
		}
		if (arm_feature(vcpu, ARM_FEATURE_V5)) {
			arm_set_feature(vcpu, ARM_FEATURE_V4T);
		}
		if (arm_feature(vcpu, ARM_FEATURE_M)) {
			arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV);
		}
		if (arm_feature(vcpu, ARM_FEATURE_ARM_DIV)) {
			arm_set_feature(vcpu, ARM_FEATURE_THUMB_DIV);
		}
		if (arm_feature(vcpu, ARM_FEATURE_VFP4)) {
			arm_set_feature(vcpu, ARM_FEATURE_VFP3);
		}
		if (arm_feature(vcpu, ARM_FEATURE_VFP3)) {
			arm_set_feature(vcpu, ARM_FEATURE_VFP);
		}
		if (arm_feature(vcpu, ARM_FEATURE_LPAE)) {
			arm_set_feature(vcpu, ARM_FEATURE_PXN);
		}
		/* Initialize Hypervisor Configuration */
		INIT_SPIN_LOCK(&arm_priv(vcpu)->hcr_lock);
		arm_priv(vcpu)->hcr =  (HCR_TSW_MASK |
					HCR_TACR_MASK |
					HCR_TIDCP_MASK |
					HCR_TSC_MASK |
					HCR_TWE_MASK |
					HCR_TWI_MASK |
					HCR_AMO_MASK |
					HCR_IMO_MASK |
					HCR_FMO_MASK |
					HCR_SWIO_MASK |
					HCR_VM_MASK);
		if (!(arm_regs(vcpu)->pstate & PSR_MODE32)) {
			/* 64-bit guest EL1 */
			arm_priv(vcpu)->hcr |= HCR_RW_MASK;
		}
		/* Initialize Coprocessor Trap Register */
		arm_priv(vcpu)->cptr = CPTR_TTA_MASK;
		arm_priv(vcpu)->cptr |= CPTR_TFP_MASK;
		/* Initialize Hypervisor System Trap Register */
		arm_priv(vcpu)->hstr = 0;
		/* Cleanup VGIC context first time */
		arm_vgic_cleanup(vcpu);
	}

	/* Clear virtual exception bits in HCR */
	vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);
	arm_priv(vcpu)->hcr &= ~(HCR_VSE_MASK | 
				 HCR_VI_MASK | 
				 HCR_VF_MASK);
	vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);

	/* Set last host CPU to invalid value */
	arm_priv(vcpu)->last_hcpu = 0xFFFFFFFF;

	/* Initialize sysregs context */
	rc = cpu_vcpu_sysregs_init(vcpu, cpuid);
	if (rc) {
		goto fail_sysregs_init;
	}

	/* Initialize VFP context */
	rc = cpu_vcpu_vfp_init(vcpu);
	if (rc) {
		goto fail_vfp_init;
	}

	/* Initialize generic timer context */
	if (arm_feature(vcpu, ARM_FEATURE_GENERIC_TIMER)) {
		/* Timer IRQ numbers are optional DT properties (0 if absent) */
		if (vmm_devtree_read_u32(vcpu->node, 
					 "gentimer_phys_irq",
					 &phys_timer_irq)) {
			phys_timer_irq = 0;
		}
		if (vmm_devtree_read_u32(vcpu->node, 
					 "gentimer_virt_irq",
					 &virt_timer_irq)) {
			virt_timer_irq = 0;
		}
		rc = generic_timer_vcpu_context_init(vcpu,
						&arm_gentimer_context(vcpu),
						phys_timer_irq,
						virt_timer_irq);
		if (rc) {
			goto fail_gentimer_init;
		}
	}

	return VMM_OK;

	/* Contexts and private data are only torn down when they were
	 * created by this very call (first init, reset_count == 0). */
fail_gentimer_init:
	if (!vcpu->reset_count) {
		cpu_vcpu_vfp_deinit(vcpu);
	}
fail_vfp_init:
	if (!vcpu->reset_count) {
		cpu_vcpu_sysregs_deinit(vcpu);
	}
fail_sysregs_init:
	if (!vcpu->reset_count) {
		vmm_free(vcpu->arch_priv);
		vcpu->arch_priv = NULL;
	}
fail:
	return rc;
}
Exemplo n.º 20
0
static int samsung_driver_probe(struct vmm_device *dev,
                                const struct vmm_devtree_nodeid *devid)
{
    u32 ucon;
    int rc = VMM_EFAIL;
    struct samsung_port *port = NULL;

    port = vmm_zalloc(sizeof(struct samsung_port));
    if (!port) {
        rc = VMM_ENOMEM;
        goto free_nothing;
    }

    rc = vmm_devtree_request_regmap(dev->of_node, &port->base, 0,
                                    "Samsung UART");
    if (rc) {
        goto free_port;
    }

    /* Make sure all interrupts except Rx are masked. */
    port->mask = S3C64XX_UINTM_RXD_MSK;
    port->mask = ~port->mask;
    vmm_out_le16((void *)(port->base + S3C64XX_UINTM), port->mask);

    if (vmm_devtree_read_u32(dev->of_node, "baudrate",
                             &port->baudrate)) {
        port->baudrate = 115200;
    }

    rc = vmm_devtree_clock_frequency(dev->of_node, &port->input_clock);
    if (rc) {
        goto free_reg;
    }

    /* Setup interrupt handler */
    port->irq = vmm_devtree_irq_parse_map(dev->of_node, 0);
    if (!port->irq) {
        rc = VMM_ENODEV;
        goto free_reg;
    }
    if ((rc = vmm_host_irq_register(port->irq, dev->name,
                                    samsung_irq_handler, port))) {
        goto free_reg;
    }

    /* Call low-level init function */
    samsung_lowlevel_init(port->base, port->baudrate, port->input_clock);

    /* Create Serial Port */
    port->p = serial_create(dev, 256, samsung_tx, port);
    if (VMM_IS_ERR_OR_NULL(port->p)) {
        rc = VMM_PTR_ERR(port->p);
        goto free_irq;
    }

    /* Save port pointer */
    dev->priv = port;

    /* Unmask RX interrupt */
    ucon = vmm_in_le32((void *)(port->base + S3C2410_UCON));
    ucon |= S3C2410_UCON_RXIRQMODE;
    vmm_out_le32((void *)(port->base + S3C2410_UCON), ucon);

    return VMM_OK;

free_irq:
    vmm_host_irq_unregister(port->irq, port);
free_reg:
    vmm_devtree_regunmap_release(dev->of_node, port->base, 0);
free_port:
    vmm_free(port);
free_nothing:
    return rc;
}
Exemplo n.º 21
0
/* Parse one interrupt specifier (given by index) from a device node's
 * "interrupts" attribute into a phandle-args structure.
 *
 * On success out_irq->np holds a referenced interrupt-parent node
 * (NULL when the legacy vmm_devtree_irq_get() path was taken) and
 * out_irq->args[0..args_count-1] holds the CPU-endian specifier cells.
 * Returns VMM_OK or a negative VMM error code.
 */
int vmm_devtree_irq_parse_one(struct vmm_devtree_node *device, int index,
			      struct vmm_devtree_phandle_args *out_irq)
{
	struct vmm_devtree_node *p = NULL;
	struct vmm_devtree_attr *attr = NULL;
	u32 *intspec = NULL;
	u32 intsize = 0;
	u32 intlen = 0;
	int res = VMM_EINVALID;
	int i;

	if (!device || (index < 0) || !out_irq) {
		return VMM_EINVALID;
	}

	pr_debug("%s: dev=%s, index=%d\n", __func__, device->name, index);

	/* The raw big-endian interrupt specifiers live in "interrupts" */
	attr = vmm_devtree_getattr(device, "interrupts");
	if (NULL == attr) {
		return VMM_EINVALID;
	}

	intlen = attr->len / sizeof(u32);
	intspec = attr->value;
	pr_debug(" intspec=%d intlen=%d\n", vmm_be32_to_cpu(*intspec), intlen);

	/* Look for the interrupt parent. */
	p = vmm_devtree_irq_find_parent(device);
	if (NULL == p) {
		/* If no interrupt-parent found then try
		 * the original vmm_devtree_irq_get() API
		 */
		res = vmm_devtree_irq_get(device, &intsize, index);
		if (res != VMM_OK) {
			return res;
		}
		out_irq->np = NULL;
		out_irq->args_count = 1;
		out_irq->args[0] = intsize;
		return VMM_OK;
	}

	/* Get size of interrupt specifier */
	res = vmm_devtree_read_u32(p, "#interrupt-cells", &intsize);
	if (VMM_OK != res) {
		vmm_devtree_dref_node(p);
		return res;
	}

	pr_debug(" intsize=%d intlen=%d\n", intsize, intlen);

	/* FIX: reject specifiers larger than out_irq->args can hold.
	 * Previously args_count was set to intsize while the copy loop
	 * was capped at VMM_MAX_PHANDLE_ARGS, so consumers iterating
	 * args_count entries could read past the end of args[]. */
	if (intsize > VMM_MAX_PHANDLE_ARGS) {
		vmm_devtree_dref_node(p);
		return VMM_EINVALID;
	}

	/* Check index */
	if ((index + 1) * intsize > intlen) {
		vmm_devtree_dref_node(p);
		return VMM_EINVALID;
	}

	/* Copy intspec into irq structure
	 * (reference on p is handed over to the caller via out_irq->np)
	 */
	intspec += index * intsize;
	out_irq->np = p;
	out_irq->args_count = intsize;
	for (i = 0; i < intsize; i++) {
		out_irq->args[i] = vmm_be32_to_cpu(*intspec++);
	}

	return VMM_OK;
}
Exemplo n.º 22
0
static int gpex_emulator_probe(struct vmm_guest *guest,
			       struct vmm_emudev *edev,
			       const struct vmm_devtree_nodeid *eid)
{
	int rc = VMM_OK, i;
	char name[64];
	struct gpex_state *s;
	struct pci_class *class;

	s = vmm_zalloc(sizeof(struct gpex_state));
	if (!s) {
		GPEX_LOG(LVL_ERR, "Failed to allocate gpex state.\n");
		rc = VMM_EFAIL;
		goto _failed;
	}

	s->node = edev->node;
	s->guest = guest;
	s->controller = vmm_zalloc(sizeof(struct pci_host_controller));
	if (!s->controller) {
		GPEX_LOG(LVL_ERR, "Failed to allocate pci host contoller"
					"for gpex.\n");
		goto _failed;
	}
	INIT_MUTEX(&s->lock);
	INIT_LIST_HEAD(&s->controller->head);
	INIT_LIST_HEAD(&s->controller->attached_buses);

	/* initialize class */
	class = (struct pci_class *)s->controller;
	INIT_SPIN_LOCK(&class->lock);
	class->conf_header.vendor_id = PCI_VENDOR_ID_REDHAT;
	class->conf_header.device_id = PCI_DEVICE_ID_REDHAT_PCIE_HOST;
	class->config_read = gpex_config_read;
	class->config_write = gpex_config_write;

	rc = vmm_devtree_read_u32(edev->node, "nr_buses",
				  &s->controller->nr_buses);
	if (rc) {
		GPEX_LOG(LVL_ERR, "Failed to get fifo size in guest DTS.\n");
		goto _failed;
	}

	GPEX_LOG(LVL_VERBOSE, "%s: %d busses on this controller.\n",
		   __func__, s->controller->nr_buses);

	for (i = 0; i < s->controller->nr_buses; i++) {
		if ((rc = pci_emu_attach_new_pci_bus(s->controller, i))
		    != VMM_OK) {
			GPEX_LOG(LVL_ERR, "Failed to attach PCI bus %d\n",
				   i+1);
			goto _failed;
		}
	}

	strlcpy(name, guest->name, sizeof(name));
	strlcat(name, "/", sizeof(name));
	if (strlcat(name, edev->node->name, sizeof(name)) >= sizeof(name)) {
		rc = VMM_EOVERFLOW;
		goto _failed;
	}

	edev->priv = s;

	s->guest_aspace_client.notifier_call = &gpex_guest_aspace_notification;
	s->guest_aspace_client.priority = 0;

	vmm_guest_aspace_register_client(&s->guest_aspace_client);

	GPEX_LOG(LVL_VERBOSE, "Success.\n");

	goto _done;

_failed:
	if (s && s->controller) vmm_free(s->controller);
	if (s) vmm_free(s);

_done:
	return rc;
}
Exemplo n.º 23
0
/* Recursively parse a flattened device tree (FDT) blob into the
 * in-memory devtree rooted at 'node'.
 *
 * *data points at the next FDT token; it is advanced as tokens are
 * consumed and, on return, sits just past this node's FDT_END_NODE.
 */
static void libfdt_parse_devtree_recursive(struct fdt_fileinfo *fdt,
					   struct vmm_devtree_node *node,
					   char **data)
{
	void *val;
	const char *name;
	u32 type, len, alen, addr_cells, size_cells;
	struct vmm_devtree_node *child;

	if (!fdt || !node) {
		return;
	}

	/* Cell counts come from the parent node; fall back to the
	 * native physical address/size widths when absent. */
	if (vmm_devtree_read_u32(node->parent,
				 "#address-cells", &addr_cells)) {
		addr_cells = sizeof(physical_addr_t) / sizeof(fdt_cell_t);
	}
	if (vmm_devtree_read_u32(node->parent,
				 "#size-cells", &size_cells)) {
		size_cells = sizeof(physical_size_t) / sizeof(fdt_cell_t);
	}

	while (LIBFDT_DATA32(*data) != FDT_END_NODE) {
		switch (LIBFDT_DATA32(*data)) {
		case FDT_PROP:
			*data += sizeof(fdt_cell_t);
			len = LIBFDT_DATA32(*data);
			*data += sizeof(fdt_cell_t);
			name = &fdt->str[LIBFDT_DATA32(*data)];
			*data += sizeof(fdt_cell_t);
			type = vmm_devtree_estimate_attrtype(name);
			alen = libfdt_property_len(name, addr_cells, 
						   size_cells, len);
			val = vmm_zalloc(alen);
			/* FIX: vmm_zalloc() result was used unchecked;
			 * skip the attribute on allocation failure but
			 * still advance past the payload below so the
			 * token stream stays in sync. */
			if (val) {
				libfdt_property_read(name, val, *data,
						     addr_cells, size_cells,
						     len);
				vmm_devtree_setattr(node, name, val, type,
						    alen);
				vmm_free(val);
			}
			*data += len;
			/* Properties are padded to cell alignment */
			while ((virtual_addr_t) (*data) % sizeof(fdt_cell_t) != 0)
				(*data)++;
			break;
		case FDT_NOP:
			*data += sizeof(fdt_cell_t);
			break;
		case FDT_BEGIN_NODE:
			*data += sizeof(fdt_cell_t);
			/* NOTE(review): if vmm_devtree_addnode() returns
			 * NULL the recursion bails out immediately and the
			 * child's tokens are not consumed, desynchronizing
			 * the parse — confirm whether addnode can fail. */
			child = vmm_devtree_addnode(node, *data);
			*data += strlen(*data) + 1;
			while ((virtual_addr_t) (*data) % sizeof(fdt_cell_t) != 0) {
				(*data)++;
			}
			libfdt_parse_devtree_recursive(fdt, child, data);
			break;
		default:
			/* Unknown token: abort parsing this subtree */
			return;
			break;
		};
	}

	/* Consume the FDT_END_NODE token */
	*data += sizeof(fdt_cell_t);

	return;
}
Exemplo n.º 24
0
/* Probe the i.MX SPI controller.
 *
 * Allocates an SPI master, claims chipselect GPIOs, maps registers,
 * registers the interrupt, enables clocks and starts the bitbang
 * framework. Returns VMM_OK (0) or a negative error code.
 */
static int spi_imx_probe(struct vmm_device *dev,
			 const struct vmm_devtree_nodeid *devid)
{
	virtual_addr_t vaddr = 0;
	struct spi_master *master;
	struct spi_imx_data *spi_imx;
	int i;
	int ret = VMM_OK;
	u32 num_cs;

	/* Quietly skip devices marked disabled in the device tree */
	if (!vmm_devtree_is_available(dev->of_node)) {
		dev_info(dev, "device is disabled\n");
		return ret;
	}

	/* Number of chipselects is mandatory */
	ret = vmm_devtree_read_u32(dev->of_node, "fsl,spi-num-chipselects",
				   &num_cs);
	if (ret < 0) {
		return ret;
	}

	/* Master plus one chipselect slot per CS in the devdata */
	master = spi_alloc_master(dev, sizeof(struct spi_imx_data) +
				  sizeof(int) * num_cs);
	if (!master) {
		dev_err(dev, "cannot allocate master\n");
		return -ENOMEM;
	}

	vmm_devdrv_set_data(dev, master);

	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
	master->num_chipselect = num_cs;

	spi_imx = spi_master_get_devdata(master);
	spi_imx->bitbang.master = master;

	/* Claim the (optional) chipselect GPIOs; invalid entries are
	 * recorded but not requested. */
	for (i = 0; i < master->num_chipselect; i++) {
		int cs_gpio = of_get_named_gpio(dev->of_node, "cs-gpios", i);

		spi_imx->chipselect[i] = cs_gpio;
		if (!gpio_is_valid(cs_gpio))
			continue;

		ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
		if (ret) {
			dev_err(dev, "can't get cs gpios\n");
			goto out_gpio_free;
		}
	}

	spi_imx->bitbang.chipselect = spi_imx_chipselect;
	spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
	spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
	spi_imx->bitbang.master->setup = spi_imx_setup;
	spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
	spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
	spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
	spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;

	init_completion(&spi_imx->xfer_done);

	spi_imx->devtype_data = devid->data;

	ret = vmm_devtree_request_regmap(dev->of_node, &vaddr, 0, "i.MX SPI");
	if (VMM_OK != ret) {
		/* FIX: the old code overwrote ret with
		 * PTR_ERR(spi_imx->base), but spi_imx->base is not
		 * assigned until after this check — ret already holds
		 * the only meaningful error code. */
		goto out_gpio_free;
	}
	spi_imx->base = (void __iomem *)vaddr;
	master->bus_num = vmm_devtree_alias_get_id(dev->of_node, "spi");
	if (0 > master->bus_num) {
		ret = master->bus_num;
		goto out_gpio_free;
	}

	spi_imx->irq = vmm_devtree_irq_parse_map(dev->of_node, 0);
	if (!spi_imx->irq) {
		ret = VMM_ENODEV;
		goto out_gpio_free;
	}

	ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx);
	if (VMM_OK != ret) {
		dev_err(dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
		goto out_gpio_free;
	}

	/* NOTE(review): failures below leave the irq registered (and,
	 * further down, the clocks acquired) — confirm whether a
	 * free_irq/clk_put unwind is wanted here. */
	spi_imx->clk_ipg = clk_get(dev, "ipg");
	if (IS_ERR(spi_imx->clk_ipg)) {
		ret = PTR_ERR(spi_imx->clk_ipg);
		goto out_gpio_free;
	}

	spi_imx->clk_per = clk_get(dev, "per");
	if (IS_ERR(spi_imx->clk_per)) {
		ret = PTR_ERR(spi_imx->clk_per);
		goto out_gpio_free;
	}

	ret = clk_prepare_enable(spi_imx->clk_per);
	if (ret)
		goto out_gpio_free;

	ret = clk_prepare_enable(spi_imx->clk_ipg);
	if (ret)
		goto out_put_per;

	spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);

	/* Reset the controller and mask all controller interrupts */
	spi_imx->devtype_data->reset(spi_imx);

	spi_imx->devtype_data->intctrl(spi_imx, 0);

	master->dev.of_node = dev->of_node;
	ret = spi_bitbang_start(&spi_imx->bitbang);
	if (ret) {
		dev_err(dev, "bitbang start failed with %d\n", ret);
		goto out_clk_put;
	}

	/* FIXME: This should not disable the clock, as they are still set as
	   used by the UART */
#if 0
	clk_disable(spi_imx->clk_ipg);
	clk_disable(spi_imx->clk_per);
#endif

	return ret;

out_clk_put:
	/* FIXME: This should not disable the clock, as they are still set as
	   used by the UART */
#if 0
 	clk_disable_unprepare(spi_imx->clk_ipg);
#endif
out_put_per:
	/* FIXME: This should not disable the clock, as they are still set as
	   used by the UART */
#if 0
	clk_disable_unprepare(spi_imx->clk_per);
#endif
out_gpio_free:
	/* FIX: only free chipselect GPIOs that were actually valid
	 * (and hence requested); invalid entries were skipped above. */
	while (i-- > 0) {
		if (gpio_is_valid(spi_imx->chipselect[i])) {
			gpio_free(spi_imx->chipselect[i]);
		}
	}

	spi_master_put(master);

	dev_err(dev, "probing failed\n");

	return ret;
}
Exemplo n.º 25
0
static int omap_uart_driver_probe(struct vmm_device *dev,
				  const struct vmm_devtree_nodeid *devid)
{
	int rc;
	struct omap_uart_port *up;

	/* Allocate zeroed per-port driver state */
	up = vmm_zalloc(sizeof(struct omap_uart_port));
	if (!up) {
		rc = VMM_ENOMEM;
		goto err_out;
	}

	/* Map the UART register block described by the device tree node */
	rc = vmm_devtree_request_regmap(dev->of_node, &up->base, 0,
					"omap-uart");
	if (rc) {
		goto err_free_port;
	}

	/* "reg-shift" is optional; default to 0 */
	if (vmm_devtree_read_u32(dev->of_node, "reg-shift",
				 &up->reg_shift)) {
		up->reg_shift = 0;
	}

	/* "baudrate" is optional; default to 115200 */
	if (vmm_devtree_read_u32(dev->of_node, "baudrate",
				 &up->baudrate)) {
		up->baudrate = 115200;
	}

	/* The input clock frequency is mandatory */
	rc = vmm_devtree_clock_frequency(dev->of_node,
					 &up->input_clock);
	if (rc) {
		goto err_unmap_reg;
	}

	/* Apply initial hardware configuration */
	omap_uart_startup_configure(up);

	/* Parse and register the UART interrupt */
	up->irq = vmm_devtree_irq_parse_map(dev->of_node, 0);
	if (!up->irq) {
		rc = VMM_ENODEV;
		goto err_unmap_reg;
	}
	rc = vmm_host_irq_register(up->irq, dev->name,
				   omap_uart_irq_handler, up);
	if (rc) {
		goto err_unmap_reg;
	}

	/* Create Serial Port */
	up->p = serial_create(dev, 256, omap_uart_tx, up);
	if (VMM_IS_ERR_OR_NULL(up->p)) {
		rc = VMM_PTR_ERR(up->p);
		goto err_unreg_irq;
	}

	/* Save port pointer */
	dev->priv = up;

	/* Unmask Rx interrupt */
	up->ier |= (UART_IER_RDI | UART_IER_RLSI);
	omap_serial_out(up, UART_IER, up->ier);

	return VMM_OK;

err_unreg_irq:
	vmm_host_irq_unregister(up->irq, up);
err_unmap_reg:
	vmm_devtree_regunmap_release(dev->of_node, up->base, 0);
err_free_port:
	vmm_free(up);
err_out:
	return rc;
}