/*
 * Identify the router's SoC (AR9341 on a TL-WR841N v8, QCA9533 on a v9)
 * via the reset-block revision-ID register, then snapshot the GPIO
 * register block and walk the GPIO output-mux / input-enable registers.
 *
 * NOTE(review): this function is truncated in the visible source — the
 * final for-loop's else branch continues beyond this chunk.
 */
int main(int argc, char **argv) {
	uint32_t gpio, gpio_output_function, gpio_output_function_reg;
	uint32_t index, gpio_input_function_reg, gpio_input_pin;
	uint32_t cpu_id, cpu_major, cpu_rev;
	uint32_t gpio_data[32];		/* raw dump of the GPIO block, indexed by word offset */
	struct mmio io;
	char gpio_direction;
	char gpio_state;
	char *pin_header;
	char *gpio_input[GPIO_MAX];

	bzero(gpio_input, sizeof(gpio_input));

	/* Map just the 4-byte revision-ID register to identify the SoC. */
	if (mmio_map(&io, AR71XX_RESET_BASE + AR71XX_RESET_REG_REV_ID, 0x4))
		die_errno("mmio_map() failed for AR71XX_RESET_BASE");
	cpu_id=mmio_readl(&io, 0);
	mmio_unmap(&io);

	cpu_major = cpu_id & REV_ID_MAJOR_MASK;
	switch (cpu_major) {
	case REV_ID_MAJOR_AR9341:
		cpu_rev = cpu_id & AR934X_REV_ID_REVISION_MASK;
		printf(" seems to be a WR841N v8 with AR9341 Revision %d\n",cpu_rev);
		break;
	case REV_ID_MAJOR_QCA9533:
		cpu_rev = cpu_id & QCA953X_REV_ID_REVISION_MASK;
		printf(" seems to be a WR841N v9 with QCA9533 Revision %d\n",cpu_rev);
		break;
	default:
		/* BUG(review): the format string has a %8X conversion but no
		 * matching argument — undefined behavior; cpu_id should be
		 * passed here. */
		printf("Unknown CPU ID : 0x%8X\n");
		return -1;
	}

	/* Map the whole GPIO register block (0x74 bytes) and snapshot it. */
	if (mmio_map(&io, AR71XX_GPIO_BASE, 0x74))
		die_errno("mmio_map() failed AR71XX_GPIO_BASE");
	for (index=0; index<3; index++) {
		gpio_data[index]=mmio_readl(&io, index<<2);
	}
	/* GPIO_SET and GPIO_CLEAR are blocked on qca9533 - we don't need them anyway */
	for (index=5; index<29; index++) {
		gpio_data[index]=mmio_readl(&io, index<<2);
	}
	mmio_unmap(&io);

	for (index=0; index<24; index++) {
		if (index<20) {
			gpio_input_function_reg = gpio_data[(GPIO_IN_ENABLE0 + index)>>2];
			/* there is a gap between GPIO_IN_ENABLE5 and GPIO_IN_ENABLE9 */
		} else {
/*
 * Non-blocking read of one character from the Zynq UART RX FIFO.
 * Returns 1 with the character stored in *pc, or 0 if the FIFO is empty.
 */
static int zynq_uart_getc(struct serial_port *port, char *pc)
{
	struct ns16550_defaults *uart;
	uint32_t status;

	uart = (struct ns16550_defaults *)port->uart;

	/* Nothing to deliver while the status register flags RX-empty. */
	status = mmio_readl(uart->io_base + XUARTPS_SR_OFFSET);
	if ((status & XUARTPS_SR_RXEMPTY) == XUARTPS_SR_RXEMPTY)
		return 0;

	*pc = mmio_readl(uart->io_base + XUARTPS_FIFO_OFFSET);
	return 1;
}
/*
 * Bring-up hook for the second CPU core — currently a no-op: the whole
 * body is compiled out with #if 0.
 *
 * NOTE(review): the disabled code mixes Zynq SCU setup with
 * Tegra-specific reset/clock registers (TEGRA_*), so it looks like a
 * copied template that was never ported to Zynq — confirm before
 * enabling any of it.
 */
static void zynq_cpu1_init(void)
{
#if 0
	unsigned long r;
	unsigned long orig_reset;
	unsigned long loop;
	unsigned long ctrl;

	/* Initialize Snoop Control Unit */
	ctrl = mmio_readl(ZYNQ_SCU_PHYS_BASE + SCU_CONTROL_0);
	ctrl |= 1;
	mmio_writel(ctrl, ZYNQ_SCU_PHYS_BASE + SCU_CONTROL_0);

	/* Set boot entry (Tegra registers — see NOTE above) */
	mmio_writel(virt_to_phys(secondary_startup), IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + EVP_CPU_RESET_VECTOR_0);
	dsb();
	isb();

	/* Halt CPU */
	mmio_writel(0, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + FLOW_CTRL_HALT_CPUx_EVENTS(1));
	dsb();
	isb();

	/* CPU Clock Stop */
	r = mmio_readl(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + CLK_RST_CONTROLLER_CLK_CPU_CMPLX_0);
	r &= ~CPU_CLK_STOP(1);
	mmio_writel(r, IO_ADDRESS(TEGRA_CLK_RESET_BASE) + CLK_RST_CONTROLLER_CLK_CPU_CMPLX_0);
	dsb();
	isb();

	/* Restart Slave CPU */
	mmio_writel(CPU_RESET(1), IO_ADDRESS(TEGRA_CLK_RESET_BASE) + CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR_0);
	dsb();
	isb();
#endif
}
//---------------------------------------------------------------- // already spin locked static void zynq_uart_putc(struct serial_port *port, char c) { struct ns16550_defaults *private_data; private_data = (struct ns16550_defaults *)port->uart; while (( mmio_readl(private_data->io_base + XUARTPS_SR_OFFSET) & XUARTPS_SR_TXFULL ) == XUARTPS_SR_TXFULL ) { // wait for room } mmio_writel(c, private_data->io_base + XUARTPS_FIFO_OFFSET); }
/*
 * Route @irq to the first CPU in @mask_val by rewriting its one-byte
 * field in the GIC distributor's ICDIPTR (interrupt targets) register.
 * ICDIPTR packs four interrupts per 32-bit word, one byte each.
 */
void gic_set_cpu(unsigned int irq, cpumask_t mask_val)
{
	unsigned int target_cpu = first_cpu(mask_val);
	unsigned int bit_offset = (irq % 4) * 8;	/* byte lane within the word */
	void *target_reg = gic_dist_base(irq) + ICDIPTR + (gic_irq(irq) & ~3);
	u32 itargets;

	spin_lock(&irq_controller_lock);

	irq_desc[irq].cpu = target_cpu;

	/* Read-modify-write: clear this IRQ's byte, then set the CPU bit. */
	itargets = mmio_readl(target_reg);
	itargets &= ~(0xff << bit_offset);
	itargets |= 1 << (target_cpu + bit_offset);
	mmio_writel(itargets, target_reg);

	spin_unlock(&irq_controller_lock);
}
/*
 * Read a 32-bit little-endian value from MMIO address @addr and return
 * it converted to host byte order.
 */
uint32_t mmio_le_readl(void *addr)
{
	uint32_t raw = mmio_readl(addr);

	return le_to_cpu32(raw);
}
/*
 * Chip-access backend: read one 32-bit word from the memory-mapped
 * flash at @addr.  The @flash context is unused by this backend.
 */
static uint32_t internal_chip_readl(const struct flashctx *flash, const chipaddr addr)
{
	void *mapped = (void *) addr;

	return mmio_readl(mapped);
}
/*
 * gic_dist_init() - one-time programming of the GIC distributor.
 * @gic_nr:    index into gic_data[]; must be < MAX_GIC_NR.
 * @base:      virtual base address of the distributor register block.
 * @irq_start: first Linux IRQ number served by this GIC.
 *
 * Disables the distributor, probes how many interrupt lines it
 * supports, configures all global (SPI) interrupts, masks them,
 * registers each one with the IRQ subsystem, and finally re-enables
 * the distributor.
 */
void gic_dist_init(unsigned int gic_nr, void *base, unsigned int irq_start)
{
	unsigned int max_irq, i;
	/* Boot path runs on CPU0 only, so the target mask is hardcoded
	 * rather than derived from the current CPU. */
	// u32 cpumask = 1 << smp_processor_id();
	u32 cpumask = 1 << 0;

	if (gic_nr >= MAX_GIC_NR) {
		BUG();
	}

	/* Replicate the one-byte CPU mask into all four byte lanes, ready
	 * for the four-interrupts-per-word ICDIPTR writes below. */
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	gic_data[gic_nr].dist_base = base;
	gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;

	/* Disable the distributor while it is being reprogrammed. */
	mmio_writel(0, base + ICDDCR);

	/*
	 * Find out how many interrupts are supported.
	 * ICDICTR[4:0] encodes (lines / 32) - 1.
	 */
	max_irq = mmio_readl(base + ICDICTR) & 0x1f;
	max_irq = (max_irq + 1) * 32;

	/*
	 * The GIC only supports up to 1020 interrupt sources.
	 * Limit this to either the architected maximum, or the
	 * platform maximum.
	 */
	if (max_irq > max(1020, NR_IRQS)) {
		max_irq = max(1020, NR_IRQS);
	}

	/*
	 * Set all global interrupts to be level triggered, active low.
	 * ICDICFR packs 16 interrupts per word (2 bits each); IRQs 0-31
	 * are banked per-CPU and therefore skipped here.
	 */
	for (i = 32; i < max_irq; i += 16) {
		mmio_writel(0, base + ICDICFR + i * 4 / 16);
	}

	/*
	 * Set all global interrupts to this CPU only.
	 * ICDIPTR packs 4 interrupts per word (one byte each).
	 */
	for (i = 32; i < max_irq; i += 4) {
		mmio_writel(cpumask, base + ICDIPTR + i * 4 / 4);
	}

	/*
	 * Set priority on all interrupts: a uniform 0xa0 per byte lane,
	 * 4 interrupts per ICDIPR word.
	 */
	for (i = 0; i < max_irq; i += 4) {
		mmio_writel(0xa0a0a0a0, base + ICDIPR + i * 4 / 4);
	}

	/*
	 * Disable all interrupts.  ICDICER covers 32 interrupts per word;
	 * writing all-ones masks every line.
	 */
	for (i = 0; i < max_irq; i += 32) {
		mmio_writel(0xffffffff, base + ICDICER + i * 4 / 32);
	}

	/*
	 * Setup the Linux IRQ subsystem: attach the chip, then pick the
	 * edge or level flow handler from each interrupt's 2-bit ICDICFR
	 * configuration field.
	 */
	for (i = irq_start; i < max_irq; i++) {
		uint32_t int_config_field;
		set_irq_chip(i, &gic_chip);
		set_irq_chip_data(i, &gic_data[gic_nr]);
		/* Isolate bit 1 of the field: 1 = edge, 0 = level. */
		int_config_field = mmio_readl(base + ICDICFR + i * 4 / 16);
		int_config_field >>= (((i % 16) * 2) + 1);
		int_config_field &= 0x1;
		if ( int_config_field ) {
			set_irq_handler(i, edge_irq_handler);
		} else {
			set_irq_handler(i, level_irq_handler);
		}
		set_irq_flags(i, IRQF_VALID);
	}

	/* All set up — re-enable the distributor. */
	mmio_writel(1, base + ICDDCR);
}