Example #1
/*
 * Enable the SCU
 */
void scu_enable(void *scu_base)
{
	u32 scu_ctrl;

#ifdef CONFIG_ARM_ERRATA_764369
	/*
	 * This code is mostly for Tegra 2 and 3 processors.
	 * It is not enabled or tested on Xvisor for now.
	 * We keep it as we might have to enable it someday.
	 */
	/* Cortex-A9 only */
	if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {

		scu_ctrl = vmm_readl(scu_base + 0x30);
		if (!(scu_ctrl & 1)) {
			vmm_writel(scu_ctrl | 0x1, scu_base + 0x30);
		}
	}
#endif

	scu_ctrl = vmm_readl(scu_base + SCU_CTRL);
	/* already enabled? */
	if (scu_ctrl & 1) {
		return;
	}

	scu_ctrl |= 1;
	vmm_writel(scu_ctrl, scu_base + SCU_CTRL);

	/*
	 * Ensure that the data accessed by CPU0 before the SCU was
	 * initialised is visible to the other CPUs.
	 */
	vmm_flush_cache_all();
}
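Note: SCU_CTRL here is the SCU Control register at offset 0x00 of the Snoop Control Unit, whose bit 0 is the global SCU enable. The hard-coded offset 0x30 in the errata block is, assuming the usual Cortex-A9 MPCore register layout, the SCU Diagnostic Control register; setting its bit 0 before enabling the SCU is the documented software workaround for ARM erratum 764369.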
Example #2
static int __init scu_cpu_prepare(unsigned int cpu)
{
	int rc;
	physical_addr_t _start_secondary_pa;

	/* Get physical address of the secondary startup code */
	rc = vmm_host_va2pa((virtual_addr_t)&_start_secondary_nopen,
			    &_start_secondary_pa);
	if (rc) {
		return rc;
	}

	/* Enable snooping through SCU */
	if (scu_base) {
		scu_enable((void *)scu_base);
	}

	/* Write to clear address */
	if (clear_addr[cpu]) {
		vmm_writel(~0x0, (void *)clear_addr[cpu]);
	}

	/* Write to release address */
	if (release_addr[cpu]) {
		vmm_writel((u32)_start_secondary_pa,
					(void *)release_addr[cpu]);
	}

	return VMM_OK;
}
Example #3
static void __cpuinit twd_caliberate_freq(virtual_addr_t base, 
					  virtual_addr_t ref_counter_addr,
					  u32 ref_counter_freq)
{
	u32 i, count, ref_count;
	u64 tmp;

	/* enable, no interrupt or reload */
	vmm_writel(0x1, (void *)(base + TWD_TIMER_CONTROL));

	/* read reference counter */
	ref_count = vmm_readl((void *)ref_counter_addr);

	/* maximum value */
	vmm_writel(0xFFFFFFFFU, (void *)(base + TWD_TIMER_COUNTER));

	/* wait for some arbitrary amount of time */
	for (i = 0; i < 1000000; i++);

	/* read counter */
	count = vmm_readl((void *)(base + TWD_TIMER_COUNTER));
	count = 0xFFFFFFFFU - count;

	/* take reference counter difference */
	ref_count = vmm_readl((void *)ref_counter_addr) - ref_count;

	/* disable */
	vmm_writel(0x0, (void *)(base + TWD_TIMER_CONTROL));

	/* determine frequency */
	tmp = (u64)count * (u64)ref_counter_freq;
	twd_freq_hz = udiv64(tmp, ref_count);
}
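The final computation cross-multiplies the two counts taken over the same busy-wait window; as a comment-style derivation (restating the code above, nothing new assumed):

	/*
	 * Both counters run for the same elapsed time t, so
	 *   t = count / twd_freq_hz = ref_count / ref_counter_freq
	 * hence
	 *   twd_freq_hz = (count * ref_counter_freq) / ref_count
	 * which is the udiv64() above; the u64 product avoids 32-bit overflow.
	 */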
Example #4
File: mmci.c Project: HeidCloud/xvisor
static int mmci_command(struct mmc_host *mmc, struct mmc_cmd *cmd)
{
	int result;
	u32 sdi_cmd, sdi_pwr;
	struct mmci_host *host = mmc_priv(mmc);

	sdi_cmd = ((cmd->cmdidx & SDI_CMD_CMDINDEX_MASK) | SDI_CMD_CPSMEN);

	if (cmd->resp_type) {
		sdi_cmd |= SDI_CMD_WAITRESP;
		if (cmd->resp_type & MMC_RSP_136) {
			sdi_cmd |= SDI_CMD_LONGRESP;
		}
	}

	vmm_writel((u32)cmd->cmdarg, &host->base->argument);
	vmm_udelay(COMMAND_REG_DELAY);
	vmm_writel(sdi_cmd, &host->base->command);
	result = mmci_wait_for_command_end(mmc, cmd);

	/* After CMD2, set RCA to a non-zero value. */
	if ((result == 0) && (cmd->cmdidx == MMC_CMD_ALL_SEND_CID)) {
		mmc->card->rca = 10;
	}

	/* After CMD3, open drain is switched off and push-pull is used. */
	if ((result == 0) && (cmd->cmdidx == MMC_CMD_SET_RELATIVE_ADDR)) {
		sdi_pwr = vmm_readl(&host->base->power) & ~SDI_PWR_OPD;
		vmm_writel(sdi_pwr, &host->base->power);
	}

	return result;
}
Example #5
static void twd_clockchip_set_mode(enum vmm_clockchip_mode mode,
				   struct vmm_clockchip *cc)
{
	u32 ctrl;

	switch (mode) {
	case VMM_CLOCKCHIP_MODE_PERIODIC:
		/* set up the reload value for a periodic tick */
		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
			| TWD_TIMER_CONTROL_PERIODIC;
		vmm_writel(twd_freq_hz / 100, /* Assuming HZ = 100 */
			   (void *)(twd_base + TWD_TIMER_LOAD));
		break;
	case VMM_CLOCKCHIP_MODE_ONESHOT:
		/* period set, and timer enabled in 'next_event' hook */
		ctrl = TWD_TIMER_CONTROL_IT_ENABLE | TWD_TIMER_CONTROL_ONESHOT;
		break;
	case VMM_CLOCKCHIP_MODE_UNUSED:
	case VMM_CLOCKCHIP_MODE_SHUTDOWN:
	default:
		ctrl = 0;
		break;
	}

	vmm_writel(ctrl, (void *)(twd_base + TWD_TIMER_CONTROL));
}
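In periodic mode the reload value twd_freq_hz / 100 yields a 100 Hz tick, matching the "Assuming HZ = 100" comment; with a purely illustrative 250 MHz TWD clock, the load would be 2,500,000, i.e. a 10 ms period.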
Example #6
File: mmci.c Project: 32bitmicro/xvisor
static int mmci_driver_remove(struct vmm_device *dev)
{
	struct mmc_host *mmc = dev->priv;
	struct mmci_host *host = mmc_priv(mmc);

	if (mmc && host) {
		mmc_remove_host(mmc);

		vmm_writel(0, &host->base->mask0);
		vmm_writel(0, &host->base->mask1);
		vmm_writel(0, &host->base->command);
		vmm_writel(0, &host->base->datactrl);

		if (!host->singleirq) {
			vmm_host_irq_unregister(host->irq1, mmc);
		}
		vmm_host_irq_unregister(host->irq0, mmc);
		vmm_devtree_regunmap_release(dev->node,
					(virtual_addr_t)host->base, 0);
		mmc_free_host(mmc);
		dev->priv = NULL;
	}

	return VMM_OK;
}
Example #7
void imx_lowlevel_init(virtual_addr_t base, u32 baudrate, u32 input_clock)
{
	unsigned int temp = vmm_readl((void *)(base + UCR1));
	unsigned int divider;

	/* First, disable everything */
	temp &= ~UCR1_UARTEN;
	vmm_writel(temp, (void *)base + UCR1);

	/* disable all UCR2 related interrupts */
	temp = vmm_readl((void *)(base + UCR2));
	temp &= ~(UCR2_ATEN | UCR2_ESCI | UCR2_RTSEN);
	/* Set to 8N1 */
	temp = (temp & ~(UCR2_PREN | UCR2_STPB)) | UCR2_WS;
	/* Ignore RTS */
	temp |= UCR2_IRTS;
	vmm_writel(temp, (void *)(base + UCR2));

	/* disable all UCR3 related interrupts */
	temp = vmm_readl((void *)(base + UCR3));
	vmm_writel(temp &
		   ~(UCR3_RXDSEN | UCR3_DTREN | UCR3_FRAERREN | UCR3_TIMEOUTEN |
		     UCR3_AIRINTEN | UCR3_AWAKEN | UCR3_DTRDEN),
		   (void *)(base + UCR3));

	/* disable all UCR4 related interrupts */
	temp = vmm_readl((void *)(base + UCR4));
	vmm_writel(temp &
		   ~(UCR4_DREN | UCR4_TCEN | UCR4_ENIRI | UCR4_WKEN | UCR4_BKEN
		     | UCR4_OREN), (void *)(base + UCR4));

	/* trigger an interrupt when there is 1 byte in the RXFIFO */
	temp = vmm_readl((void *)(base + UFCR));
	vmm_writel((temp & 0xFFC0) | 1, (void *)(base + UFCR));

	/* Divide input clock by 2 */
	temp = vmm_readl((void *)(base + UFCR)) & ~UFCR_RFDIV;
	vmm_writel(temp | UFCR_RFDIV_REG(2), (void *)(base + UFCR));
	input_clock /= 2;

	divider = udiv32(baudrate, 100) - 1;
	vmm_writel(divider, (void *)(base + UBIR));
	/* UBMR = Ref Freq / (16 * baudrate) * (UBIR + 1) - 1 */
	/* As UBIR = baudrate / 100 - 1, UBMR = Ref Freq / (16 * 100) - 1 */
	temp = udiv32(input_clock, 16 * 100) - 1;
	vmm_writel(temp, (void *)(base + UBMR));

	/* enable the UART */
	temp = UCR1_UARTEN;
	vmm_writel(temp, (void *)(base + UCR1));

	/* Enable FIFOs */
	temp = vmm_readl((void *)(base + UCR2));
	vmm_writel(temp | UCR2_SRST | UCR2_RXEN | UCR2_TXEN,
		   (void *)(base + UCR2));
}
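The UBIR/UBMR pair implements the baud equation quoted in the comments above. A worked example with purely illustrative numbers (not taken from the source):

	/*
	 * Illustrative only: input_clock = 24 MHz, so after the divide-by-2
	 * the reference clock is 12 MHz. For baudrate = 115200:
	 *   UBIR = 115200 / 100 - 1          = 1151
	 *   UBMR = 12000000 / (16 * 100) - 1 = 7499
	 * Check: ref * (UBIR + 1) / (16 * (UBMR + 1))
	 *      = 12000000 * 1152 / (16 * 7500) = 115200 baud
	 */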
Example #8
int arch_board_reset(void)
{
#if 0 /* FIXME: */
	vmm_writel(0x0, 
		   (void *)(ca15x4_sys_base + VEXPRESS_SYS_RESETCTL_OFFSET));
	vmm_writel(VEXPRESS_SYS_CTRL_RESET_PLLRESET, 
		   (void *)(ca15x4_sys_base + VEXPRESS_SYS_RESETCTL_OFFSET));
#endif
	return VMM_OK;
}
Example #9
static int twd_clockchip_set_next_event(unsigned long next,
					  struct vmm_clockchip *cc)
{
	u32 ctrl = vmm_readl((void *)(twd_base + TWD_TIMER_CONTROL));

	ctrl |= TWD_TIMER_CONTROL_ENABLE;

	vmm_writel(next, (void *)(twd_base + TWD_TIMER_COUNTER));
	vmm_writel(ctrl, (void *)(twd_base + TWD_TIMER_CONTROL));

	return 0;
}
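The TWD private timer is a down-counter: writing "next" to TWD_TIMER_COUNTER while the enable bit is set starts the countdown, and the per-CPU interrupt is raised when the counter reaches zero.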
Example #10
int arch_board_reset(void)
{
#if 0 /* QEMU checks bit 8 which is wrong */
	vmm_writel(0x100, 
		   (void *)(REALVIEW_SYS_BASE + REALVIEW_SYS_RESETCTL_OFFSET));
#else
	vmm_writel(0x0, 
		   (void *)(pba8_sys_base + REALVIEW_SYS_RESETCTL_OFFSET));
	vmm_writel(REALVIEW_SYS_CTRL_RESET_PLLRESET, 
		   (void *)(pba8_sys_base + REALVIEW_SYS_RESETCTL_OFFSET));
#endif
	return VMM_OK;
}
Example #11
static int __init fpga_init(struct vmm_devtree_node *node)
{
    int rc;
    virtual_addr_t base;
    u32 clear_mask;
    u32 valid_mask;
    u32 picen_mask;
    u32 irq_start;
    u32 parent_irq;

    BUG_ON(!vmm_smp_is_bootcpu());

    rc = vmm_devtree_request_regmap(node, &base, 0, "Versatile SIC");
    WARN(rc, "unable to map fpga irq registers\n");

    if (vmm_devtree_read_u32(node, "irq_start", &irq_start)) {
        irq_start = 0;
    }

    if (vmm_devtree_read_u32(node, "clear-mask", &clear_mask)) {
        clear_mask = 0;
    }

    if (vmm_devtree_read_u32(node, "valid-mask", &valid_mask)) {
        valid_mask = 0;
    }

    /* Some chips are cascaded from a parent IRQ */
    if (vmm_devtree_irq_get(node, &parent_irq, 0)) {
        parent_irq = 0xFFFFFFFF;
    }

    fpga_irq_init((void *)base, "FPGA",
                  irq_start, parent_irq,
                  valid_mask, node);

    vmm_writel(clear_mask, (void *)base + IRQ_ENABLE_CLEAR);
    vmm_writel(clear_mask, (void *)base + FIQ_ENABLE_CLEAR);

    /* For VersatilePB, we have interrupts from 21 to 31 capable
     * of being routed directly to the parent interrupt controller
     * (i.e. VIC). This is controlled by setting PIC_ENABLEx.
     */
    if (!vmm_devtree_read_u32(node, "picen-mask", &picen_mask)) {
        vmm_writel(picen_mask, (void *)base + PICEN_SET);
    }

    return 0;
}
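The init routine treats all device tree properties as optional: a missing irq_start, clear-mask or valid-mask defaults to 0, a missing parent interrupt becomes 0xFFFFFFFF (no cascade), the clear-mask is written to both IRQ_ENABLE_CLEAR and FIQ_ENABLE_CLEAR to disable those sources at boot, and PIC_ENABLE is only touched when a picen-mask property is actually present.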
Example #12
File: mmci.c Project: HeidCloud/xvisor
static int mmci_data_transfer(struct mmc_host *mmc,
			      struct mmc_cmd *cmd,
			      struct mmc_data *data)
{
	int error = VMM_ETIMEDOUT;
	struct mmci_host *host = mmc_priv(mmc);
	u32 blksz = 0;
	u32 data_ctrl = 0;
	u32 data_len = (u32)(data->blocks * data->blocksize);

	if (!host->version2) {
		blksz = (ffs(data->blocksize) - 1);
		data_ctrl |= ((blksz << 4) & SDI_DCTRL_DBLKSIZE_MASK);
	} else {
		blksz = data->blocksize;
		data_ctrl |= (blksz << SDI_DCTRL_DBLOCKSIZE_V2_SHIFT);
	}
	data_ctrl |= SDI_DCTRL_DTEN | SDI_DCTRL_BUSYMODE;

	vmm_writel(SDI_DTIMER_DEFAULT, &host->base->datatimer);
	vmm_writel(data_len, &host->base->datalength);
	vmm_udelay(DATA_REG_DELAY);

	if (data->flags & MMC_DATA_READ) {
		data_ctrl |= SDI_DCTRL_DTDIR_IN;
		vmm_writel(data_ctrl, &host->base->datactrl);

		error = mmci_command(mmc, cmd);
		if (error) {
			return error;
		}

		error = mmci_read_bytes(mmc, (u32 *)data->dest, 
					     (u32)data->blocks,
					     (u32)data->blocksize);
	} else if (data->flags & MMC_DATA_WRITE) {
		error = mmci_command(mmc, cmd);
		if (error) {
			return error;
		}

		vmm_writel(data_ctrl, &host->base->datactrl);
		error = mmci_write_bytes(mmc, (u32 *)data->src, 
					      (u32)data->blocks,
					      (u32)data->blocksize);
	}

	return error;
}
Example #13
static void fpga_irq_unmask(struct vmm_host_irq *irq)
{
    struct fpga_irq_data *f = vmm_host_irq_get_chip_data(irq);
    u32 mask = 1 << fpga_irq(irq);

    vmm_writel(mask, f->base + IRQ_ENABLE_SET);
}
Example #14
int __init arch_smp_prepare_cpus(unsigned int max_cpus)
{
	int i, rc;
	physical_addr_t _start_secondary_pa;

	/* Get physical address of the secondary startup code */
	rc = vmm_host_va2pa((virtual_addr_t)&_start_secondary, 
			    &_start_secondary_pa);
	if (rc) {
		return rc;
	}

	/* Update the cpu_present bitmap */
	for (i = 0; i < max_cpus; i++) {
		vmm_set_cpu_present(i, TRUE);
	}

	if (scu_base) {
		/* Enable snooping through SCU */
		scu_enable((void *)scu_base);
	}

	if (pmu_base) {
		/* Write the entry address for the secondary cpus */
		vmm_writel((u32)_start_secondary_pa, (void *)pmu_base + 0x814);
	}

	return VMM_OK;
}
Example #15
static vmm_irq_return_t imx_irq_handler(int irq, void *dev_id)
{
	struct imx_port *port = dev_id;
	unsigned int sts;

	sts = vmm_readl((void *)port->base + USR1);

	if (sts & USR1_RRDY) {
		imx_rxint(port);
	}
#if defined(UART_IMX_USE_TXINTR)
	if ((sts & USR1_TRDY) && (port->mask & UCR1_TXMPTYEN)) {
		imx_txint(port);
	}
#endif

	if (sts & USR1_RTSD) {
		imx_rtsint(port);
	}

	sts &=
	    USR1_PARITYERR | USR1_RTSD | USR1_ESCF | USR1_FRAMERR | USR1_TIMEOUT
	    | USR1_AIRINT | USR1_AWAKE;
	if (sts) {
		vmm_writel(sts, (void *)port->base + USR1);
	}

	return VMM_IRQ_HANDLED;
}
Example #16
static int versatile_reset(void)
{
	vmm_writel(0x101, (void *)(versatile_sys_base +
			   VERSATILE_SYS_RESETCTL_OFFSET));

	return VMM_OK;
}
Example #17
int __init arch_clockchip_init(void)
{
	int rc;
	u32 val;
	virtual_addr_t sctl_base;

	/* Map control registers */
	sctl_base = vmm_host_iomap(V2M_SYSCTL, 0x1000);

	/* Select 1MHz TIMCLK as the reference clock for SP804 timers */
	val = vmm_readl((void *)sctl_base) | SCCTRL_TIMEREN0SEL_TIMCLK;
	vmm_writel(val, (void *)sctl_base);

	/* Unmap control register */
	rc = vmm_host_iounmap(sctl_base, 0x1000);
	if (rc) {
		return rc;
	}

	/* Map timer0 registers */
	ca9x4_timer0_base = vmm_host_iomap(V2M_TIMER0, 0x1000);

	/* Initialize timer0 as clockchip */
	rc = sp804_clockchip_init(ca9x4_timer0_base, IRQ_V2M_TIMER0, 
				  "sp804_timer0", 300, 1000000, 0);
	if (rc) {
		return rc;
	}

	return VMM_OK;
}
Example #18
File: mmci.c Project: HeidCloud/xvisor
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct mmci_host *host = mmc_priv(mmc);
	u32 sdi_clkcr;

	sdi_clkcr = vmm_readl(&host->base->clock);

	/* Ramp up the clock rate */
	if (ios->clock) {
		u32 clkdiv = 0;
		u32 tmp_clock;

		if (ios->clock >= mmc->f_max) {
			clkdiv = 0;
			ios->clock = mmc->f_max;
		} else {
			clkdiv = udiv32(host->clock_in, ios->clock) - 2;
		}

		tmp_clock = udiv32(host->clock_in, (clkdiv + 2));
		while (tmp_clock > ios->clock) {
			clkdiv++;
			tmp_clock = udiv32(host->clock_in, (clkdiv + 2));
		}

		if (clkdiv > SDI_CLKCR_CLKDIV_MASK)
			clkdiv = SDI_CLKCR_CLKDIV_MASK;

		tmp_clock = udiv32(host->clock_in, (clkdiv + 2));
		ios->clock = tmp_clock;
		sdi_clkcr &= ~(SDI_CLKCR_CLKDIV_MASK);
		sdi_clkcr |= clkdiv;
	}

	/* Set the bus width */
	if (ios->bus_width) {
		u32 buswidth = 0;

		switch (ios->bus_width) {
		case 1:
			buswidth |= SDI_CLKCR_WIDBUS_1;
			break;
		case 4:
			buswidth |= SDI_CLKCR_WIDBUS_4;
			break;
		case 8:
			buswidth |= SDI_CLKCR_WIDBUS_8;
			break;
		default:
			vmm_printf("%s: Invalid bus width: %d\n", 
				   __func__, ios->bus_width);
			break;
		}
		sdi_clkcr &= ~(SDI_CLKCR_WIDBUS_MASK);
		sdi_clkcr |= buswidth;
	}

	vmm_writel(sdi_clkcr, &host->base->clock);
	vmm_udelay(CLK_CHANGE_DELAY);
}
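The divider search relies on the relation the driver uses throughout, bus clock = clock_in / (clkdiv + 2), and bumps clkdiv until the result no longer exceeds the requested rate. With purely illustrative numbers:

	/*
	 * Illustrative only: clock_in = 48 MHz, requested ios->clock = 400 kHz
	 *   clkdiv = 48000000 / 400000 - 2 = 118
	 *   bus    = 48000000 / (118 + 2)  = 400 kHz
	 */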
Example #19
File: mmci.c Project: HeidCloud/xvisor
static int mmci_read_bytes(struct mmc_host *mmc, 
			   u32 *dest, u32 blkcount, u32 blksize)
{
	u32 *tempbuff = dest;
	u64 xfercount = (u64)blkcount * blksize;
	struct mmci_host *host = mmc_priv(mmc);
	u32 status, status_err;

	debug("%s: read_bytes: blkcount=%u blksize=%u\n", 
	      __func__, blkcount, blksize);

	status = vmm_readl(&host->base->status);
	status_err = status & (SDI_STA_DCRCFAIL | SDI_STA_DTIMEOUT |
			       SDI_STA_RXOVERR);
	while ((!status_err) && (xfercount >= sizeof(u32))) {
		if (status & SDI_STA_RXDAVL) {
			*(tempbuff) = vmm_readl(&host->base->fifo);
			tempbuff++;
			xfercount -= sizeof(u32);
		}
		status = vmm_readl(&host->base->status);
		status_err = status & (SDI_STA_DCRCFAIL | SDI_STA_DTIMEOUT |
				       SDI_STA_RXOVERR);
	}

	status_err = status &
		(SDI_STA_DCRCFAIL | SDI_STA_DTIMEOUT | SDI_STA_DBCKEND |
		 SDI_STA_RXOVERR);
	while (!status_err) {
		status = vmm_readl(&host->base->status);
		status_err = status &
			(SDI_STA_DCRCFAIL | SDI_STA_DTIMEOUT | SDI_STA_DBCKEND |
			 SDI_STA_RXOVERR);
	}

	if (status & SDI_STA_DTIMEOUT) {
		vmm_printf("%s: Read data timed out, "
			   "xfercount: %llu, status: 0x%08X\n",
			   __func__, xfercount, status);
		return VMM_ETIMEDOUT;
	} else if (status & SDI_STA_DCRCFAIL) {
		vmm_printf("%s: Read data bytes CRC error: 0x%x\n", 
			   __func__, status);
		return VMM_EILSEQ;
	} else if (status & SDI_STA_RXOVERR) {
		vmm_printf("%s: Read data RX overflow error\n", __func__);
		return VMM_EIO;
	}

	vmm_writel(SDI_ICR_MASK, &host->base->status_clear);

	if (xfercount) {
		vmm_printf("%s: Read data error, xfercount: %llu\n", 
			   __func__, xfercount);
		return VMM_EIO;
	}

	return VMM_OK;
}
Example #20
static int twd_clockchip_expire(struct vmm_clockchip *cc)
{
	struct twd_clockchip *tcc = cc->priv;
	u32 i, ctrl = vmm_readl((void *)(tcc->base + TWD_TIMER_CONTROL));

	ctrl &= ~TWD_TIMER_CONTROL_ENABLE;
	vmm_writel(ctrl, (void *)(tcc->base + TWD_TIMER_CONTROL));
	vmm_writel(1, (void *)(tcc->base + TWD_TIMER_COUNTER));
	ctrl |= TWD_TIMER_CONTROL_ENABLE;
	vmm_writel(ctrl, (void *)(tcc->base + TWD_TIMER_CONTROL));

	while (!vmm_readl((void *)(tcc->base + TWD_TIMER_INTSTAT))) {
		for (i = 0; i < 100; i++);
	}

	return 0;
}
Example #21
static inline void epit_irq_enable(struct epit_clockchip *ecc)
{
	u32 val;

	val = vmm_readl((void *)(ecc->base + EPITCR));
	val |= EPITCR_OCIEN;
	vmm_writel(val, (void *)(ecc->base + EPITCR));
}
Example #22
void imx_lowlevel_putc(virtual_addr_t base, u8 ch)
{
	/* Wait until there is space in the FIFO */
	while (!imx_lowlevel_can_putc(base)) ;

	/* Send the character */
	vmm_writel(ch, (void *)(base + URTX0));
}
Example #23
File: mmci.c Project: HeidCloud/xvisor
static int mmci_init_card(struct mmc_host *mmc, struct mmc_card *card)
{
	struct mmci_host *host = mmc_priv(mmc);

	/* MMCI uses open drain drivers in the enumeration phase */
	vmm_writel(host->pwr_init, &host->base->power);

	return VMM_OK;
}
Example #24
/**
 * This is a temporary solution until we have a clock management
 * API
 */
void clk_disable(struct clk *clk)
{
	u32 perir_reg = vmm_readl((void *)clk);

	if (perir_reg & (1 << 15)) {
		perir_reg &= ~(1 << 15);

		vmm_writel(perir_reg, (void *)clk);
	}
}
Example #25
static void clk_enable(struct clk *clk)
{
	u32 perir_reg = vmm_readl((void *)clk);

	if (!(perir_reg & (1 << 15))) {
		perir_reg |= (1 << 15);

		vmm_writel(perir_reg, (void *)clk);
	}
}
Example #26
static int bcm2835_clockchip_set_next_event(unsigned long next, 
					    struct vmm_clockchip *cc)
{
	struct bcm2835_clockchip *bcc = cc->priv;

	/* Configure compare register */
	vmm_writel(vmm_readl(bcc->system_clock) + next, bcc->compare);

	return VMM_OK;
}
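The BCM2835 system timer is a free-running counter with match registers, so loading the compare register with "current counter value + next" makes the match interrupt fire "next" ticks from now.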
Example #27
static int aw_timer_force_reset(void)
{
	u32 mode;

	if (!aw_base) {
		return VMM_EFAIL;
	}

	/* Clear & disable watchdog */
	vmm_writel(0, (void *)(aw_base + AW_WDT_REG_MODE));

	/* Force reset by configuring watchdog with minimum interval */
	mode = WDT_MODE_RESET | WDT_MODE_ENABLE;
	vmm_writel(mode, (void *)(aw_base + AW_WDT_REG_MODE));

	/* FIXME: Wait for watchdog to expire ??? */

	return VMM_OK;
}
Example #28
void bcm2835_pm_reset(void)
{
	u32 pm_rstc, pm_wdog;
	u32 timeout = 10;

	/* Setup watchdog for reset */
	pm_rstc = vmm_readl(PM_RSTC);

	/* watchdog timer = timer clock / 16; 
	 * need password (31:16) + value (11:0) 
	 */
	pm_wdog  = PM_PASSWORD;
	pm_wdog |= (timeout & PM_WDOG_TIME_SET);
	/* combine the password, the masked RSTC value read above,
	 * and the full reset request
	 */
	pm_rstc  = PM_PASSWORD | (pm_rstc & PM_RSTC_WRCFG_CLR) |
		   PM_RSTC_WRCFG_FULL_RESET;

	vmm_writel(pm_wdog, PM_WDOG);
	vmm_writel(pm_rstc, PM_RSTC);
}
Example #29
static int epit_set_next_event(unsigned long cycles, struct vmm_clockchip *evt)
{
	struct epit_clockchip *ecc = evt->priv;
	unsigned long tcmp;

	tcmp = vmm_readl((void *)(ecc->base + EPITCNR));

	vmm_writel(tcmp - cycles, (void *)(ecc->base + EPITCMPR));

	return VMM_OK;
}
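EPIT counts down, so programming the compare register with "current count - cycles" makes the output-compare interrupt fire once the counter has decremented by "cycles" ticks.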
Example #30
void __init arch_defterm_early_putc(u8 ch)
{
	/* Wait until there is space in the TX FIFO */
	while (vmm_readl(early_base + IMX21_UTS) & UTS_TXFULL) ;

	/* Send the character */
	vmm_writel(ch, early_base + URTX0);

	/* Wait until FIFO is empty */
	while (!(vmm_readl(early_base + IMX21_UTS) & UTS_TXEMPTY)) ;
}
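The trailing wait for UTS_TXEMPTY makes this early-console putc fully synchronous: the character has drained from the TX FIFO before the function returns, which is the behaviour you want from a very early debug path.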