/*
 * Acknowledge an interrupt: mask its line in the distributor, then
 * signal end-of-interrupt to the CPU interface.
 */
void gic_ack_irq(unsigned int irq)
{
    u32 clear_bit = 1 << (irq % 32);

    spin_lock(&irq_controller_lock);
    /* Mask the source via the Interrupt Clear-Enable register bank. */
    mmio_writel(clear_bit,
                gic_dist_base(irq) + ICDICER + (gic_irq(irq) / 32) * 4);
    /* Tell the CPU interface we are done with this interrupt. */
    mmio_writel(gic_irq(irq), gic_cpu_base(irq) + ICCEOIR);
    spin_unlock(&irq_controller_lock);
}
/*
 * Per-CPU GIC interface setup: record the CPU interface base, open the
 * priority mask up to 0xf0, and enable interrupt delivery to this CPU.
 */
void gic_cpu_init(unsigned int gic_nr, void *base)
{
    if (gic_nr >= MAX_GIC_NR) {
        BUG();
    }

    gic_data[gic_nr].cpu_base = base;

    /* Let interrupts with priority value below 0xf0 through. */
    mmio_writel(0xf0, base + ICCPMR);
    /* Enable the CPU interface. */
    mmio_writel(1, base + ICCICR);
}
/* Send software-generated interrupt @irq to the CPUs in @cpumask. */
void gic_raise_softirq(cpumask_t cpumask, unsigned int irq)
{
    unsigned long target_map = *cpus_addr(cpumask);

    /* SGIs are always delivered through GIC0. */
    mmio_writel((target_map << 16) | irq, gic_data[0].dist_base + ICDSGIR);
}
/*
 * Initialise Zynq UART0 and register it with the serial core.
 * Sequence per the Cadence UART programming model: disable TX/RX,
 * reset both FIFO paths, then re-enable.  The baud rate is left at
 * whatever the FPGA/bootloader configured.
 */
void zynq_uart_init(void)
{
    uint32_t uart_cr; /* address of the control register (CR) */

    // TODO use dts instead of a hard-coded physical base address
    zynq_uart_params.io_base = ZYNQ_UART0_BASE_PHY_ADDR;
    /* NOTE(review): an MMIO address is kept in a uint32_t here; this
     * would truncate on a 64-bit build — confirm mmio_writel()'s
     * address parameter type for this port. */
    uart_cr = zynq_uart_params.io_base + XUARTPS_CR_OFFSET;

    // disable TX and RX
    mmio_writel(XUARTPS_CR_TX_DIS | XUARTPS_CR_RX_DIS, uart_cr);
    // reset TX and RX paths
    mmio_writel(XUARTPS_CR_TXRST | XUARTPS_CR_RXRST, uart_cr);
    // enable TX and RX
    mmio_writel(XUARTPS_CR_TX_EN | XUARTPS_CR_RX_EN, uart_cr);
    // keep default baud rate, since FPGA set it up

#ifdef TEST
    // why "AT"? see the history of the Hayes AT command set
    mmio_writel('A', zynq_uart_params.io_base + XUARTPS_FIFO_OFFSET);
    mmio_writel('T', zynq_uart_params.io_base + XUARTPS_FIFO_OFFSET);
#endif

#ifdef TEST
    /* Loopback smoke test: echo 10 received chars as "<c>-<c+1>\r\n". */
    {
        struct serial_port port;
        int i;
        port.uart = &zynq_uart_params;
        for( i = 0 ; i < 10 ; ++i )
        {
            char c;
            /* busy-wait until a character arrives */
            while ( zynq_uart_getc( &port , &c) == 0 )
                ;
            zynq_uart_putc( &port, c );
            zynq_uart_putc( &port, '-' );
            zynq_uart_putc( &port, c + 1 );
            zynq_uart_putc( &port, '\r' );
            zynq_uart_putc( &port, '\n' );
        }
        zynq_uart_putc( &port, '\r' );
        zynq_uart_putc( &port, '\n' );
    }
#endif
    serial_register_uart(0, &zynq_uart_driver, &zynq_uart_params);
}
/*
 * Bring up the secondary CPU (CPU1).
 *
 * Not implemented yet.  An earlier draft carried an "#if 0" body that
 * was copy-pasted from a Tegra port (EVP reset vector, flow controller
 * and CLK_RST registers that do not exist on Zynq) and would not even
 * compile here if enabled; that dead code has been removed so it cannot
 * be switched on by accident.  TODO: implement the Zynq sequence
 * (publish the secondary entry point, then wake CPU1 with SEV).
 */
static void zynq_cpu1_init(void)
{
    /* Intentionally empty: secondary-CPU bring-up not implemented. */
}
//---------------------------------------------------------------- // already spin locked static void zynq_uart_putc(struct serial_port *port, char c) { struct ns16550_defaults *private_data; private_data = (struct ns16550_defaults *)port->uart; while (( mmio_readl(private_data->io_base + XUARTPS_SR_OFFSET) & XUARTPS_SR_TXFULL ) == XUARTPS_SR_TXFULL ) { // wait for room } mmio_writel(c, private_data->io_base + XUARTPS_FIFO_OFFSET); }
/*
 * Route hardware interrupt @irq to the first CPU in @mask_val.
 *
 * ICDIPTR holds one target byte per interrupt, four per 32-bit word.
 * The word address is computed from the GIC-local interrupt number,
 * while the byte-lane shift uses the Linux irq number; presumably
 * gic_irq() subtracts the 32-aligned irq_offset set in gic_dist_init,
 * which makes irq % 4 == gic_irq(irq) % 4 — confirm against gic_irq().
 */
void gic_set_cpu(unsigned int irq, cpumask_t mask_val)
{
    /* Word-aligned target register containing this interrupt's byte. */
    void *reg = gic_dist_base(irq) + ICDIPTR + (gic_irq(irq) & ~3);
    unsigned int shift = (irq % 4) * 8; /* byte lane within the word */
    unsigned int cpu = first_cpu(mask_val);
    u32 val;

    spin_lock(&irq_controller_lock);
    irq_desc[irq].cpu = cpu;
    /* Read-modify-write: clear this interrupt's target byte, then set
     * the single bit selecting the chosen CPU. */
    val = mmio_readl(reg) & ~(0xff << shift);
    val |= 1 << (cpu + shift);
    mmio_writel(val, reg);
    spin_unlock(&irq_controller_lock);
}
int undo_mmio_write(void *p) { struct undo_mmio_write_data *data = p; msg_pdbg("Restoring MMIO space at %p\n", data->addr); switch (data->type) { case mmio_write_type_b: mmio_writeb(data->bdata, data->addr); break; case mmio_write_type_w: mmio_writew(data->wdata, data->addr); break; case mmio_write_type_l: mmio_writel(data->ldata, data->addr); break; } /* p was allocated in register_undo_mmio_write. */ free(p); return 0; }
/* Write a 32-bit value to MMIO in little-endian byte order. */
void mmio_le_writel(uint32_t val, void *addr)
{
    uint32_t le_val = cpu_to_le32(val);

    mmio_writel(le_val, addr);
}
/* chip_writel hook for the internal programmer: plain 32-bit MMIO store. */
static void internal_chip_writel(const struct flashctx *flash, uint32_t val,
				 chipaddr addr)
{
	void *mmio_addr = (void *) addr;

	mmio_writel(val, mmio_addr);
}
/* MMIO 32-bit write that registers an automatic restore-on-shutdown. */
void rmmio_writel(uint32_t val, void *addr)
{
	/* Capture the current value first so shutdown can undo the write. */
	register_undo_mmio_writel(addr);
	mmio_writel(val, addr);
}
/*
 * One-time distributor initialisation for GIC @gic_nr at MMIO @base.
 *
 * Records the base and a 32-aligned irq_offset derived from @irq_start,
 * then (with the distributor disabled): programs trigger mode, routes
 * all SPIs to CPU0, sets a uniform priority, masks everything, installs
 * per-irq handlers chosen from the trigger configuration read back from
 * ICDICFR, and finally re-enables the distributor.
 */
void gic_dist_init(unsigned int gic_nr, void *base, unsigned int irq_start)
{
    unsigned int max_irq, i;
    // u32 cpumask = 1 << smp_processor_id();
    u32 cpumask = 1 << 0; /* route everything to CPU0 for now */

    if (gic_nr >= MAX_GIC_NR) {
        BUG();
    }

    /* Replicate the target mask into all four byte lanes of ICDIPTR. */
    cpumask |= cpumask << 8;
    cpumask |= cpumask << 16;

    gic_data[gic_nr].dist_base = base;
    /* Round down to a 32-interrupt boundary; gic_set_cpu relies on
     * this offset being a multiple of 32. */
    gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;

    /* Disable the distributor while we reprogram it. */
    mmio_writel(0, base + ICDDCR);

    /*
     * Find out how many interrupts are supported.
     * ICDICTR[4:0] encodes (lines/32) - 1.
     */
    max_irq = mmio_readl(base + ICDICTR) & 0x1f;
    max_irq = (max_irq + 1) * 32;

    /*
     * The GIC only supports up to 1020 interrupt sources.
     * Limit this to either the architected maximum, or the
     * platform maximum.
     */
    if (max_irq > max(1020, NR_IRQS)) {
        max_irq = max(1020, NR_IRQS);
    }

    /*
     * Set all global interrupts to be level triggered, active low.
     * (ICDICFR packs 16 interrupts per register: 2 config bits each.)
     */
    for (i = 32; i < max_irq; i += 16) {
        mmio_writel(0, base + ICDICFR + i * 4 / 16);
    }

    /*
     * Set all global interrupts to this CPU only.
     * (ICDIPTR packs 4 interrupts per register: 1 target byte each.)
     */
    for (i = 32; i < max_irq; i += 4) {
        mmio_writel(cpumask, base + ICDIPTR + i * 4 / 4);
    }

    /*
     * Set priority on all interrupts.
     * (ICDIPR packs 4 interrupts per register: 1 priority byte each.)
     */
    for (i = 0; i < max_irq; i += 4) {
        mmio_writel(0xa0a0a0a0, base + ICDIPR + i * 4 / 4);
    }

    /*
     * Disable all interrupts.
     * (ICDICER packs 32 interrupts per register: 1 clear bit each.)
     */
    for (i = 0; i < max_irq; i += 32) {
        mmio_writel(0xffffffff, base + ICDICER + i * 4 / 32);
    }

    /*
     * Setup the Linux IRQ subsystem: pick the edge or level flow
     * handler based on bit 1 of each interrupt's 2-bit config field.
     */
    for (i = irq_start; i < max_irq; i++) {
        uint32_t int_config_field;
        set_irq_chip(i, &gic_chip);
        set_irq_chip_data(i, &gic_data[gic_nr]);
        /* Extract this irq's config: 2 bits per irq, 16 per word;
         * the upper bit distinguishes edge (1) from level (0). */
        int_config_field = mmio_readl(base + ICDICFR + i * 4 / 16);
        int_config_field >>= (((i % 16) * 2) + 1);
        int_config_field &= 0x1;
        if ( int_config_field ) {
            set_irq_handler(i, edge_irq_handler);
        } else {
            set_irq_handler(i, level_irq_handler);
        }
        set_irq_flags(i, IRQF_VALID);
    }

    /* Re-enable the distributor. */
    mmio_writel(1, base + ICDDCR);
}