/**
 * This is really acting as a filter driver, so only the zynq HAL can know if the SDHCI
 * controller is enabled.
 *
 * If it is, we ask the generic SDHCI driver to do its stuff, otherwise we return an error.
 *
 * @param pDevice  Integrated device descriptor; its ENUM resource selects controller 0 or 1.
 * @param pError   Optional out-parameter receiving the error code on failure.
 * @return Handle from the generic "mmc,sdhci" driver on success, NULL on failure.
 **/
static BT_HANDLE zynq_sdhci_probe(const BT_INTEGRATED_DEVICE *pDevice, BT_ERROR *pError) {

	BT_ERROR Error = BT_ERR_GENERIC;	/* Fix: was read uninitialised on the !pResource path. */
	BT_BOOL bEnabled = BT_FALSE;

	volatile ZYNQ_SLCR_REGS *pRegs = bt_ioremap((void *)ZYNQ_SLCR, BT_SIZE_4K);

	const BT_RESOURCE *pResource = BT_GetIntegratedResource(pDevice, BT_RESOURCE_ENUM, 0);
	if(!pResource) {
		bt_iounmap(pRegs);	/* Fix: mapping was leaked on this early-out. */
		goto err_out;
	}

	switch(pResource->ulStart) {
	case 0:
		bEnabled = (pRegs->SDIO_CLK_CTRL & ZYNQ_SLCR_CLK_CTRL_CLKACT_0);
		if(bEnabled) {
			// Attempt to reset the device!
			zynq_slcr_unlock(pRegs);
			pRegs->SDIO_RST_CTRL |= 0x11;	// Assert both reset bits for SDIO 0 ...
			pRegs->SDIO_RST_CTRL &= ~0x11;	// ... then release them.
			zynq_slcr_lock(pRegs);
		}
		break;

	case 1:
		/* Fix: previously tested ZYNQ_SLCR_CLK_CTRL_CLKACT_0 here (copy/paste),
		 * so controller 1 reported controller 0's clock state.
		 * NOTE(review): confirm bit name against the SLCR SDIO_CLK_CTRL definition. */
		bEnabled = (pRegs->SDIO_CLK_CTRL & ZYNQ_SLCR_CLK_CTRL_CLKACT_1);
		if(bEnabled) {
			zynq_slcr_unlock(pRegs);
			pRegs->SDIO_RST_CTRL |= 0x22;	// Reset pulse for SDIO 1's reset bits.
			pRegs->SDIO_RST_CTRL &= ~0x22;
			zynq_slcr_lock(pRegs);
		}
		break;

	default:
		break;
	}

	bt_iounmap(pRegs);

	if(!bEnabled) {
		Error = BT_ERR_GENERIC;
		goto err_out;
	}

	/* Delegate the real probing to the generic SDHCI driver. */
	BT_INTEGRATED_DRIVER *pDriver = BT_GetIntegratedDriverByName("mmc,sdhci");
	if(!pDriver) {
		Error = BT_ERR_GENERIC;
		goto err_out;
	}

	return pDriver->pfnProbe(pDevice, pError);

err_out:
	if(pError) {
		*pError = Error;
	}

	return NULL;
}
/**
 * zynq_early_slcr_init - Early slcr init function
 *
 * Return: 0 on success, negative errno otherwise.
 *
 * Called very early during boot from platform code to unlock SLCR.
 */
int __init zynq_early_slcr_init(void)
{
	struct device_node *node;

	/* Locate the SLCR node in the device tree; boot cannot proceed without it. */
	node = of_find_compatible_node(NULL, NULL, "xlnx,zynq-slcr");
	if (!node) {
		pr_err("%s: no slcr node found\n", __func__);
		BUG();
	}

	/* Map the SLCR register block; again, fatal if it fails this early. */
	zynq_slcr_base = of_iomap(node, 0);
	if (!zynq_slcr_base) {
		pr_err("%s: Unable to map I/O memory\n", __func__);
		BUG();
	}

	/* Stash the mapping on the node for later consumers. */
	node->data = (__force void *)zynq_slcr_base;

	/* unlock the SLCR so that registers can be changed */
	zynq_slcr_unlock();

	pr_info("%s mapped to %p\n", node->name, zynq_slcr_base);

	of_node_put(node);

	return 0;
}
/*
 * Early CPU init (U-Boot): unlock the SLCR, and — outside of SPL — unlock the
 * devcfg PCAP interface and optionally remap DDR/OCM, then set up early
 * clocks and re-lock the SLCR.  Always returns 0.
 */
int arch_cpu_init(void)
{
	zynq_slcr_unlock();
#ifndef CONFIG_SPL_BUILD
	/* Device config APB, unlock the PCAP */
	writel(0x757BDF0D, &devcfg_base->unlock);	/* devcfg unlock key */
	writel(0xFFFFFFFF, &devcfg_base->rom_shadow);

#if (CONFIG_SYS_SDRAM_BASE == 0)
	/* remap DDR to zero, FILTERSTART */
	writel(0, &scu_base->filter_start);

	/* OCM_CFG, Mask out the ROM, map ram into upper addresses */
	writel(0x1F, &slcr_base->ocm_cfg);
	/* FPGA_RST_CTRL, clear resets on AXI fabric ports */
	writel(0x0, &slcr_base->fpga_rst_ctrl);
	/* Set urgent bits with register */
	writel(0x0, &slcr_base->ddr_urgent_sel);
	/* Urgent write, ports S2/S3 */
	writel(0xC, &slcr_base->ddr_urgent);
#endif
#endif
	zynq_clk_early_init();
	zynq_slcr_lock();

	return 0;
}
/*
 * Setup clk for network.
 *
 * @param gem_id    GEM controller index (0 or 1); anything else is rejected.
 * @param clk_rate  Target clock rate passed to zynq_clk_set_rate().
 *
 * The SLCR is unlocked for the duration and re-locked on every exit path.
 */
void zynq_slcr_gem_clk_setup(u32 gem_id, unsigned long clk_rate)
{
	int ret;

	zynq_slcr_unlock();

	if (gem_id > 1) {
		/* Fix: gem_id is unsigned (u32), so use %u rather than %d. */
		printf("Non existing GEM id %u\n", gem_id);
		goto out;
	}

	ret = zynq_clk_set_rate(gem0_clk + gem_id, clk_rate);
	if (ret)
		goto out;

	if (gem_id) {
		/* Configure GEM_RCLK_CTRL */
		writel(1, &slcr_base->gem1_rclk_ctrl);
	} else {
		/* Configure GEM_RCLK_CTRL */
		writel(1, &slcr_base->gem0_rclk_ctrl);
	}
	/* Let the new clock settle before callers touch the MAC. */
	udelay(100000);

out:
	zynq_slcr_lock();
}
/**
 * Probe the Zynq devcfg (PCAP) interface and prepare it for PL bitstream loading.
 *
 * Singleton: only one handle may exist at a time (guarded by g_bInUse).
 *
 * @param pDevice  Integrated device descriptor; its MEM resource locates the DEVCFG block.
 * @param pError   Optional out-parameter receiving the error code.
 * @return New handle on success, NULL on failure.
 */
static BT_HANDLE devcfg_probe(const BT_INTEGRATED_DEVICE *pDevice, BT_ERROR *pError) {

	BT_ERROR Error = BT_ERR_GENERIC;	/* Fix: was read uninitialised on some error paths. */

	if(g_bInUse) {
		goto err_set_out;
	}

	g_bInUse = BT_TRUE;

	BT_HANDLE hDevcfg = BT_CreateHandle(&oHandleInterface, sizeof(struct _BT_OPAQUE_HANDLE), pError);
	if(!hDevcfg) {
		/* Fix: previously left g_bInUse set, permanently blocking any re-probe. */
		goto err_unuse_out;
	}

	const BT_RESOURCE *pResource = BT_GetIntegratedResource(pDevice, BT_RESOURCE_MEM, 0);
	if(!pResource) {
		goto err_free_out;
	}

	hDevcfg->pRegs = (volatile ZYNQ_DEVCFG_REGS *) bt_ioremap((void *) pResource->ulStart, sizeof(ZYNQ_DEVCFG_REGS));
	if(!hDevcfg->pRegs) {
		goto err_free_out;	/* Robustness: mapping was previously unchecked. */
	}

	hDevcfg->pRegs->UNLOCK	= 0x757BDF0D;			// Unlock the DEVCFG interface.
	hDevcfg->pRegs->INT_STS	= 0xFFFFFFFF;			// Clear all interrupt status signals.

	hDevcfg->pRegs->CTRL |= CTRL_PCFG_PROG_B;
	hDevcfg->pRegs->CTRL |= CTRL_PCAP_MODE;			// Enable PCAP transfer mode.
	hDevcfg->pRegs->CTRL |= CTRL_PCAP_PR;			// Select PCAP for reconfiguration, (disables ICAP).
	hDevcfg->pRegs->CTRL &= ~CTRL_QUARTER_PCAP_RATE;	// Set full bandwidth PCAP loading rate.

	hDevcfg->pRegs->MCTRL &= ~MCTRL_PCAP_LPBK;		// Ensure internal PCAP loopback is disabled.

	hDevcfg->pSLCR = (volatile ZYNQ_SLCR_REGS *) bt_ioremap((void *) ZYNQ_SLCR_BASE, sizeof(ZYNQ_SLCR_REGS));
	if(!hDevcfg->pSLCR) {
		bt_iounmap(hDevcfg->pRegs);
		goto err_free_out;
	}

	zynq_slcr_unlock(hDevcfg->pSLCR);
	zynq_slcr_preload_fpga(hDevcfg->pSLCR);
	zynq_slcr_lock(hDevcfg->pSLCR);

	devcfg_reset_pl(hDevcfg);

	if(pError) {
		*pError = BT_ERR_NONE;
	}

	return hDevcfg;

err_free_out:
	BT_DestroyHandle(hDevcfg);

err_unuse_out:
	g_bInUse = BT_FALSE;

err_set_out:
	if(pError) {
		*pError = Error;
	}

	return NULL;
}
/*
 * Program the ARM, DDR and IO PLLs: for each PLL configure cp/res/lock count,
 * force bypass + reset, release reset, wait for lock, then drop bypass.
 * Returns 0 on success, -1 if any PLL fails to lock.
 *
 * NOTE(review): on the -1 early returns the SLCR is left unlocked — confirm
 * whether that is intended (callers may rely on it for recovery).
 */
int zynq_pll_init(void) {
	zynq_slcr_unlock();

	/* ARM PLL & Clock config
	 * 375 cycles needed for pll
	 * 26 divisor on pll
	 * enable all ARM clocks
	 * 2 divisor on ARM clocks
	 * ARM clock source is ARM PLL
	 */
	SLCR_REG(ARM_PLL_CFG) = PLL_CFG_LOCK_CNT(375) | PLL_CFG_PLL_CP(2) | PLL_CFG_PLL_RES(12);
	SLCR_REG(ARM_PLL_CTRL) = PLL_FDIV(26) | PLL_BYPASS_FORCE | PLL_RESET;
	SLCR_REG(ARM_PLL_CTRL) &= ~PLL_RESET;

	if (pll_poll(PLL_STATUS_ARM_PLL_LOCK) == -1) {
		return -1;
	}

	SLCR_REG(ARM_PLL_CTRL) &= ~PLL_BYPASS_FORCE;
	SLCR_REG(ARM_CLK_CTRL) = ARM_CLK_CTRL_DIVISOR(2) | ARM_CLK_CTRL_CPU_6OR4XCLKACT |
		ARM_CLK_CTRL_CPU_3OR2XCLKACT | ARM_CLK_CTRL_CPU_2XCLKACT |
		ARM_CLK_CTRL_CPU_1XCLKACT | ARM_CLK_CTRL_PERI_CLKACT;

	/* DDR PLL & Clock config
	 * 475 cycles needed
	 * enable all DDR clocks
	 * 2 divisor for 3XCLK, 3 divisor for 2XCLK
	 *
	 * NOTE(review): the original comment claimed a "21 divisor on PLL" but the
	 * code programs PLL_FDIV(26), same as the ARM PLL — confirm the intended FDIV.
	 */
	SLCR_REG(DDR_PLL_CFG) = PLL_CFG_LOCK_CNT(475) | PLL_CFG_PLL_CP(2) | PLL_CFG_PLL_RES(12);
	SLCR_REG(DDR_PLL_CTRL) = PLL_FDIV(26) | PLL_BYPASS_FORCE | PLL_RESET;
	SLCR_REG(DDR_PLL_CTRL) &= ~PLL_RESET;

	if (pll_poll(PLL_STATUS_DDR_PLL_LOCK) == -1) {
		return -1;
	}

	SLCR_REG(DDR_PLL_CTRL) &= ~PLL_BYPASS_FORCE;
	SLCR_REG(DDR_CLK_CTRL) = DDR_CLK_CTRL_DDR_3XCLKACT | DDR_CLK_CTRL_DDR_2XCLKACT |
		DDR_CLK_CTRL_DDR_3XCLK_DIV(2) | DDR_CLK_CTRL_DDR_2XCLK_DIV(3);

	/* IO PLL config
	 * 500 cycles needed for pll
	 * 20 divisor
	 */
	SLCR_REG(IO_PLL_CFG) = PLL_CFG_LOCK_CNT(500) | PLL_CFG_PLL_CP(2) | PLL_CFG_PLL_RES(12);
	SLCR_REG(IO_PLL_CTRL) = PLL_FDIV(20) | PLL_BYPASS_FORCE | PLL_RESET;
	SLCR_REG(IO_PLL_CTRL) &= ~PLL_RESET;

	if (pll_poll(PLL_STATUS_IO_PLL_LOCK) == -1) {
		return -1;
	}

	SLCR_REG(IO_PLL_CTRL) &= ~PLL_BYPASS_FORCE;

	zynq_slcr_lock();
	return 0;
}
/* Halt a CPU core: assert both its STOP and RESET bits in the A9 reset
 * control register, with the SLCR unlocked only for the duration. */
void zynq_cpu_stop(cyg_uint32 cpu)
{
	cyg_uint32 ctrl;

	zynq_slcr_unlock();

	ctrl = zynq_slcr_read(XSLCR_A9_CPU_RST_CTRL_OFFSET);
	ctrl |= (XSLCR_A9_CPU_STOP | XSLCR_A9_CPU_RST) << cpu;
	zynq_slcr_write(XSLCR_A9_CPU_RST_CTRL_OFFSET, ctrl);

	zynq_slcr_lock();
}
/*
 * Re-enable the PS-PL interface after device configuration: turn on the
 * level shifters, then release the FPGA/AXI resets.
 */
void zynq_slcr_devcfg_enable(void)
{
	zynq_slcr_unlock();

	/* Set Level Shifters DT618760 */
	writel(0xF, &slcr_base->lvl_shftr_en);

	/* Enable AXI interface by de-asserting FPGA resets */
	writel(0x0, &slcr_base->fpga_rst_ctrl);

	zynq_slcr_lock();
}
/*
 * Re-enable the PS-PL interface: turn on the level shifters, then write 0 to
 * FPGA_RST_CTRL, which releases (de-asserts) the FPGA resets.
 */
void zynq_slcr_devcfg_enable(void)
{
	zynq_slcr_unlock();

	/* Set Level Shifters DT618760 */
	writel(0xF, &slcr_base->lvl_shftr_en);

	/* NOTE(review): original comment said "Disable AXI interface", but writing
	 * 0x0 here de-asserts the FPGA resets — the same value an enable routine
	 * writes with an "Enable AXI interface" comment. Confirm intent; comment
	 * corrected to match the code. */
	writel(0x0, &slcr_base->fpga_rst_ctrl);

	zynq_slcr_lock();
}
/* Release a CPU core: first drop its RESET bit, then its clock-STOP bit,
 * as two separate register writes. */
static void zynq_cpu_start(cyg_uint32 cpu)
{
	cyg_uint32 ctrl;

	zynq_slcr_unlock();

	ctrl = zynq_slcr_read(XSLCR_A9_CPU_RST_CTRL_OFFSET);

	/* Take the core out of reset first... */
	ctrl &= ~(XSLCR_A9_CPU_RST << cpu);
	zynq_slcr_write(XSLCR_A9_CPU_RST_CTRL_OFFSET, ctrl);

	/* ...then un-gate its clock with a second write. */
	ctrl &= ~(XSLCR_A9_CPU_STOP << cpu);
	zynq_slcr_write(XSLCR_A9_CPU_RST_CTRL_OFFSET, ctrl);

	zynq_slcr_lock();
}
/*
 * lk platform early init: clock/MIO (and optionally DDR) bring-up, mandatory
 * SLCR pokes, then UART, GIC, timer and physical-memory arena setup.
 */
void platform_early_init(void)
{
	/* Board-level pin, PLL and peripheral clock configuration first. */
	zynq_mio_init();
	zynq_pll_init();
	zynq_clk_init();
#if ZYNQ_SDRAM_INIT
	zynq_ddr_init();
#endif

	zynq_slcr_unlock();

	/* Enable all level shifters */
	SLCR_REG(LVL_SHFTR_EN) = 0xF;
	/* FPGA SW reset (not documented, but mandatory) */
	SLCR_REG(FPGA_RST_CTRL) = 0x0;

	/* zynq manual says this is mandatory for cache init */
	*REG32(SLCR_BASE + 0xa1c) = 0x020202;

	zynq_slcr_lock();

	/* early initialize the uart so we can printf */
	uart_init_early();

	/* initialize the interrupt controller */
	arm_gic_init();

	/* initialize the timer block */
	arm_cortex_a9_timer_init(CPUPRIV_BASE, zynq_get_arm_timer_freq());

	/* add the main memory arena */
#if !ZYNQ_CODE_IN_SDRAM && SDRAM_SIZE != 0
	/* In the case of running from SRAM, and we are using SDRAM,
	 * there is a discontinuity between the end of SRAM (256K) and the start of SDRAM (1MB),
	 * so intentionally bump the boot-time allocator to start in the base of SDRAM.
	 */
	extern uintptr_t boot_alloc_start;
	extern uintptr_t boot_alloc_end;

	boot_alloc_start = KERNEL_BASE + MB;
	boot_alloc_end = KERNEL_BASE + MB;
#endif

#if SDRAM_SIZE != 0
	pmm_add_arena(&sdram_arena);
#endif
	pmm_add_arena(&sram_arena);
}
/**
 * Tear down a devcfg handle: wait for PL configuration DONE, run the
 * post-load SLCR sequence, unmap the register windows and release the
 * singleton in-use flag.
 *
 * @param h  Handle previously returned by devcfg_probe().
 * @return Always BT_ERR_NONE.
 */
static BT_ERROR devcfg_cleanup(BT_HANDLE h) {

	/* Busy-wait (yielding) until the PL signals configuration DONE. */
	while(!(h->pRegs->INT_STS & INT_STS_PCFG_DONE)) {
		BT_ThreadYield();
	}

	zynq_slcr_unlock(h->pSLCR);
	zynq_slcr_postload_fpga(h->pSLCR);
	zynq_slcr_lock(h->pSLCR);

	bt_iounmap(h->pRegs);
	bt_iounmap(h->pSLCR);

	/* Fix: release the singleton flag only after all resources are returned.
	 * It was previously cleared first, letting a concurrent probe start while
	 * this teardown was still using the hardware and mappings. */
	g_bInUse = BT_FALSE;

	return BT_ERR_NONE;
}
/*
 * Configure the DDR I/O buffers (when SDRAM init is enabled) and program the
 * MIO pin mux from the board's zynq_mio_cfg table.  Always returns 0.
 */
int zynq_mio_init(void)
{
	zynq_slcr_unlock();

	/* This DDRIOB configuration applies to both zybo and uzed, but it's possible
	 * it may not work for all boards in the future. Just something to keep in mind
	 * with different memory configurations. */
#if ZYNQ_SDRAM_INIT
	SLCR_REG(GPIOB_CTRL) = GPIOB_CTRL_VREF_EN;

	SLCR_REG(DDRIOB_ADDR0) = DDRIOB_OUTPUT_EN(0x3);
	SLCR_REG(DDRIOB_ADDR1) = DDRIOB_OUTPUT_EN(0x3);
	SLCR_REG(DDRIOB_DATA0) = DDRIOB_INP_TYPE(1) | DDRIOB_TERM_EN | DDRIOB_DCI_TYPE(0x3) | DDRIOB_OUTPUT_EN(0x3);
	SLCR_REG(DDRIOB_DATA1) = DDRIOB_INP_TYPE(1) | DDRIOB_TERM_EN | DDRIOB_DCI_TYPE(0x3) | DDRIOB_OUTPUT_EN(0x3);
	SLCR_REG(DDRIOB_DIFF0) = DDRIOB_INP_TYPE(2) | DDRIOB_TERM_EN | DDRIOB_DCI_TYPE(0x3) | DDRIOB_OUTPUT_EN(0x3);
	SLCR_REG(DDRIOB_DIFF1) = DDRIOB_INP_TYPE(2) | DDRIOB_TERM_EN | DDRIOB_DCI_TYPE(0x3) | DDRIOB_OUTPUT_EN(0x3);
	SLCR_REG(DDRIOB_CLOCK) = DDRIOB_OUTPUT_EN(0x3);

	/* These register fields are not documented in the TRM. These
	 * values represent the defaults generated via the Zynq tools */
	SLCR_REG(DDRIOB_DRIVE_SLEW_ADDR) = 0x0018C61CU;
	SLCR_REG(DDRIOB_DRIVE_SLEW_DATA) = 0x00F9861CU;
	SLCR_REG(DDRIOB_DRIVE_SLEW_DIFF) = 0x00F9861CU;
	SLCR_REG(DDRIOB_DRIVE_SLEW_CLOCK) = 0x00F9861CU;
	SLCR_REG(DDRIOB_DDR_CTRL) = 0x00000E60U;

	/* Staged DCI writes — presumably enable, then reset/start the DCI
	 * calibration state machine; values come from the Zynq tools. Confirm
	 * against the TRM before changing the order. */
	SLCR_REG(DDRIOB_DCI_CTRL) = 0x00000001U;
	SLCR_REG(DDRIOB_DCI_CTRL) |= 0x00000020U;
	SLCR_REG(DDRIOB_DCI_CTRL) |= 0x00000823U;
#endif

	/* Program only pins with a non-zero table entry; zero entries keep
	 * their hardware reset defaults. */
	for (size_t pin = 0; pin < countof(zynq_mio_cfg); pin++) {
		if (zynq_mio_cfg[pin] != 0) {
			SLCR_REG(MIO_PIN_00 + (pin * sizeof(uint32_t))) = zynq_mio_cfg[pin];
		}
	}

	SLCR_REG(SD0_WP_CD_SEL) = SDIO0_WP_SEL(0x37) | SDIO0_CD_SEL(0x2F);

	zynq_slcr_lock();

	return 0;
}
static int cmd_zynq(int argc, const cmd_args *argv) { if (argc < 2) { notenoughargs: printf("not enough arguments\n"); usage: printf("usage: %s <command>\n", argv[0].str); printf("\tslcr lock\n"); printf("\tslcr unlock\n"); printf("\tslcr lockstatus\n"); printf("\tmio\n"); printf("\tclocks\n"); return -1; } if (!strcmp(argv[1].str, "slcr")) { if (argc < 3) goto notenoughargs; bool print_lock_status = false; if (!strcmp(argv[2].str, "lock")) { zynq_slcr_lock(); print_lock_status = true; } else if (!strcmp(argv[2].str, "unlock")) { zynq_slcr_unlock(); print_lock_status = true; } else if (print_lock_status || !strcmp(argv[2].str, "lockstatus")) { printf("%s\n", (SLCR->SLCR_LOCKSTA & 0x1) ? "locked" : "unlocked"); } else { goto usage; } } else if (!strcmp(argv[1].str, "mio")) { printf("zynq mio:\n"); for (size_t i = 0; i < ZYNQ_MIO_CNT; i++) { printf("\t%02u: 0x%08x", i, *REG32((uintptr_t)&SLCR->MIO_PIN_00 + (i * 4))); if (i % 4 == 3 || i == 53) { putchar('\n'); } } } else if (!strcmp(argv[1].str, "clocks")) { zynq_dump_clocks(); } else { goto usage; } return 0; }
/*
 * Program every peripheral clock control register from the static
 * zynq_clk_cfg table (board-specific values), with the SLCR unlocked
 * only for the duration.
 */
void zynq_clk_init(void)
{
	zynq_slcr_unlock();

	SLCR_REG(DCI_CLK_CTRL) = zynq_clk_cfg.dci_clk;
	SLCR_REG(GEM0_CLK_CTRL) = zynq_clk_cfg.gem0_clk;
	SLCR_REG(GEM0_RCLK_CTRL) = zynq_clk_cfg.gem0_rclk;
	SLCR_REG(LQSPI_CLK_CTRL) = zynq_clk_cfg.lqspi_clk;
	SLCR_REG(SDIO_CLK_CTRL) = zynq_clk_cfg.sdio_clk;
	SLCR_REG(UART_CLK_CTRL) = zynq_clk_cfg.uart_clk;
	SLCR_REG(PCAP_CLK_CTRL) = zynq_clk_cfg.pcap_clk;
	SLCR_REG(FPGA0_CLK_CTRL) = zynq_clk_cfg.fpga0_clk;
	SLCR_REG(FPGA1_CLK_CTRL) = zynq_clk_cfg.fpga1_clk;
	SLCR_REG(FPGA2_CLK_CTRL) = zynq_clk_cfg.fpga2_clk;
	SLCR_REG(FPGA3_CLK_CTRL) = zynq_clk_cfg.fpga3_clk;
	SLCR_REG(APER_CLK_CTRL) = zynq_clk_cfg.aper_clk;
	SLCR_REG(CLK_621_TRUE) = zynq_clk_cfg.clk_621_true;

	zynq_slcr_lock();
}
/* Reset the entire system */
void zynq_slcr_cpu_reset(void)
{
	/*
	 * Unlock the SLCR then reset the system.
	 * Note that this seems to require raw i/o
	 * functions or there's a lockup?
	 */
	zynq_slcr_unlock();

	/*
	 * Clear 0x0F000000 bits of reboot status register to workaround
	 * the FSBL not loading the bitstream after soft-reboot
	 * This is a temporary solution until we know more.
	 */
	clrbits_le32(&slcr_base->reboot_status, 0xF000000);

	/* Trigger the PS software reset; this call does not return.
	 * (The SLCR is intentionally never re-locked here.) */
	writel(1, &slcr_base->pss_rst_ctrl);
}
/* Isolate the PS from the PL ahead of device configuration: assert the FPGA
 * resets, drop all level-shifter enables, then set the PS-only value. */
void zynq_slcr_devcfg_disable(void)
{
	u32 shftr;

	zynq_slcr_unlock();

	/* Disable AXI interface by asserting FPGA resets */
	writel(0xF, &slcr_base->fpga_rst_ctrl);

	/* Disable Level shifters before setting PS-PL */
	shftr = readl(&slcr_base->lvl_shftr_en) & ~0xF;
	writel(shftr, &slcr_base->lvl_shftr_en);

	/* Set Level Shifters DT618760 */
	writel(0xA, &slcr_base->lvl_shftr_en);

	zynq_slcr_lock();
}
status_t zynq_set_clock(enum zynq_periph periph, bool enable, enum zynq_clock_source source, uint32_t divisor, uint32_t divisor2) { DEBUG_ASSERT(periph < _PERIPH_MAX); DEBUG_ASSERT(!enable || (divisor > 0 && divisor <= 0x3f)); DEBUG_ASSERT(source < 4); // get the clock control register base addr_t clk_reg = periph_clk_ctrl_reg(periph); DEBUG_ASSERT(clk_reg != 0); int enable_bitpos = periph_clk_ctrl_enable_bitpos(periph); zynq_slcr_unlock(); // if we're enabling if (enable) { uint32_t ctrl = *REG32(clk_reg); // set the divisor, divisor2 (if applicable), source, and enable ctrl = (ctrl & ~(0x3f << 20)) | (divisor2 << 20); ctrl = (ctrl & ~(0x3f << 8)) | (divisor << 8); ctrl = (ctrl & ~(0x3 << 4)) | (source << 4); if (enable_bitpos >= 0) ctrl |= (1 << enable_bitpos); *REG32(clk_reg) = ctrl; } else { if (enable_bitpos >= 0) { // disabling uint32_t ctrl = *REG32(clk_reg); ctrl &= ~(1 << enable_bitpos); *REG32(clk_reg) = ctrl; } } zynq_slcr_lock(); return NO_ERROR; }
/* For each PLL we need to configure the cp / res / lock_cnt and then place the PLL in bypass
 * before doing a reset to switch to the new values. Then bypass is removed to switch back to using
 * the PLL once its locked.
 *
 * Returns 0 on success, -1 if a PLL fails to lock.
 * NOTE(review): the -1 early returns leave the SLCR unlocked — confirm intended.
 */
int zynq_pll_init(void) {
	const zynq_pll_cfg_tree_t *cfg = &zynq_pll_cfg;

	zynq_slcr_unlock();

	/* ARM PLL: configure, bypass+reset, release reset, wait for lock, drop bypass. */
	SLCR_REG(ARM_PLL_CFG) = PLL_CFG_LOCK_CNT(cfg->arm.lock_cnt) | PLL_CFG_PLL_CP(cfg->arm.cp) |
		PLL_CFG_PLL_RES(cfg->arm.res);
	SLCR_REG(ARM_PLL_CTRL) = PLL_FDIV(cfg->arm.fdiv) | PLL_BYPASS_FORCE | PLL_RESET;
	SLCR_REG(ARM_PLL_CTRL) &= ~PLL_RESET;

	if (reg_poll((uintptr_t)&SLCR->PLL_STATUS, PLL_STATUS_ARM_PLL_LOCK) == -1) {
		return -1;
	}

	SLCR_REG(ARM_PLL_CTRL) &= ~PLL_BYPASS_FORCE;
	SLCR_REG(ARM_CLK_CTRL) = zynq_clk_cfg.arm_clk;

#if ZYNQ_SDRAM_INIT
	/* DDR PLL: same sequence, only when SDRAM bring-up is compiled in. */
	SLCR_REG(DDR_PLL_CFG) = PLL_CFG_LOCK_CNT(cfg->ddr.lock_cnt) | PLL_CFG_PLL_CP(cfg->ddr.cp) |
		PLL_CFG_PLL_RES(cfg->ddr.res);
	SLCR_REG(DDR_PLL_CTRL) = PLL_FDIV(cfg->ddr.fdiv) | PLL_BYPASS_FORCE | PLL_RESET;
	SLCR_REG(DDR_PLL_CTRL) &= ~PLL_RESET;

	if (reg_poll((uintptr_t)&SLCR->PLL_STATUS, PLL_STATUS_DDR_PLL_LOCK) == -1) {
		return -1;
	}

	SLCR_REG(DDR_PLL_CTRL) &= ~PLL_BYPASS_FORCE;
	SLCR_REG(DDR_CLK_CTRL) = zynq_clk_cfg.ddr_clk;
#endif

	/* IO PLL: same sequence. */
	SLCR_REG(IO_PLL_CFG) = PLL_CFG_LOCK_CNT(cfg->io.lock_cnt) | PLL_CFG_PLL_CP(cfg->io.cp) |
		PLL_CFG_PLL_RES(cfg->io.res);
	SLCR_REG(IO_PLL_CTRL) = PLL_FDIV(cfg->io.fdiv) | PLL_BYPASS_FORCE | PLL_RESET;
	SLCR_REG(IO_PLL_CTRL) &= ~PLL_RESET;

	if (reg_poll((uintptr_t)&SLCR->PLL_STATUS, PLL_STATUS_IO_PLL_LOCK) == -1) {
		return -1;
	}

	SLCR_REG(IO_PLL_CTRL) &= ~PLL_BYPASS_FORCE;

	zynq_slcr_lock();
	return 0;
}
/**
 * zynq_slcr_system_reset - Reset the entire system.
 */
void zynq_slcr_system_reset(void)
{
	u32 reboot;

	/*
	 * Unlock the SLCR then reset the system.
	 * Note that this seems to require raw i/o
	 * functions or there's a lockup?
	 */
	zynq_slcr_unlock();

	/*
	 * Clear 0x0F000000 bits of reboot status register to workaround
	 * the FSBL not loading the bitstream after soft-reboot
	 * This is a temporary solution until we know more.
	 */
	zynq_slcr_read(&reboot, SLCR_REBOOT_STATUS_OFFSET);
	zynq_slcr_write(reboot & 0xF0FFFFFF, SLCR_REBOOT_STATUS_OFFSET);

	/* Trigger the PS reset; execution does not continue past this point. */
	zynq_slcr_write(1, SLCR_PS_RST_CTRL_OFFSET);
}
/*
 * Bring up the DDR controller: replay the board's register address/value
 * table, wait for DCI calibration, release the controller from reset and
 * wait for self-refresh, then switch the timer prescale.
 */
void zynq_ddr_init(void)
{
	zynq_slcr_unlock();

	/* Write address / value pairs from target table */
	for (size_t i = 0; i < zynq_ddr_cfg_cnt; i += 2) {
		*REG32(zynq_ddr_cfg[i]) = zynq_ddr_cfg[i+1];
	}

	/* Wait for DCI done */
	reg_poll((uintptr_t)&SLCR->DDRIOB_DCI_STATUS, 0x2000);

	/* Bring ddr out of reset and wait until self refresh */
	*REG32(DDRC_CTRL) |= DDRC_CTRL_OUT_OF_RESET;
	reg_poll(DDRC_MODE_STATUS, DDRC_STS_SELF_REFRESH);

	/* Switch timer to 64k */
	/* NOTE(review): 0xF8007000 is outside the SLCR block; confirm which unit's
	 * bit 29 is being cleared here before renaming the magic constant. */
	*REG32(0XF8007000) = *REG32(0xF8007000) & ~0x20000000U;

	zynq_slcr_lock();
}
/*
 * Bring up the DDR controller (magic-number variant): replay the register
 * table, wait for DCI calibration, release the controller and wait for
 * self-refresh, then switch the timer prescale.
 */
void zynq_ddr_init(void)
{
	zynq_slcr_unlock();

	/* Write address / value pairs from target table */
	for (size_t i = 0; i < zynq_ddr_cfg_cnt; i += 2) {
		*REG32(zynq_ddr_cfg[i]) = zynq_ddr_cfg[i+1];
	}

	/* Wait for DCI done */
	reg_poll((uintptr_t)&SLCR->DDRIOB_DCI_STATUS, 0x2000);

	/* Bring ddr out of reset and wait until self refresh */
	/* NOTE(review): 0xF8006000/0xf8006054 are presumably the DDRC control and
	 * mode/status registers (a sibling variant uses DDRC_CTRL /
	 * DDRC_MODE_STATUS names for the same sequence) — prefer named constants. */
	*REG32(0XF8006000) = 0x00000081U;
	reg_poll(0xf8006054, 0x00000007);

	/* Switch timer to 64k */
	*REG32(0XF8007000) = *REG32(0xF8007000) & ~0x20000000U;

	zynq_slcr_lock();
}
/* zynq specific halt */
void platform_halt(platform_halt_action suggested_action, platform_halt_reason reason)
{
	if (suggested_action == HALT_ACTION_REBOOT) {
		printf("REBOOT\n");
		enter_critical_section();
		/* Keep hammering the reset register until it takes effect. */
		for (;;) {
			zynq_slcr_unlock();
			SLCR->PSS_RST_CTRL = 1;
		}
	} else {
		/* HALT_ACTION_SHUTDOWN, HALT_ACTION_HALT, and anything unrecognised
		 * all fall back to spinning with interrupts off. */
		printf("HALT: spinning forever... (reason = %d)\n", reason);
		enter_critical_section();
		for (;;)
			arch_idle();
	}
}
/**
 * zynq_early_slcr_init - Early slcr init function
 *
 * Return: 0 on success, negative errno otherwise.
 *
 * Called very early during boot from platform code to unlock SLCR.
 */
int __init zynq_early_slcr_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "xlnx,zynq-slcr");
	if (!np) {
		pr_err("%s: no slcr node found\n", __func__);
		BUG();
	}

	zynq_slcr_base = of_iomap(np, 0);
	if (!zynq_slcr_base) {
		pr_err("%s: Unable to map I/O memory\n", __func__);
		BUG();
	}

	/* Expose the mapping to later consumers via the device node. */
	np->data = (__force void *)zynq_slcr_base;

	/* Also obtain the syscon regmap for the same block; unlike the BUG()
	 * paths above, this failure is reported to the caller. */
	zynq_slcr_regmap = syscon_regmap_lookup_by_compatible("xlnx,zynq-slcr");
	if (IS_ERR(zynq_slcr_regmap)) {
		pr_err("%s: failed to find zynq-slcr\n", __func__);
		return -ENODEV;
	}

	/* unlock the SLCR so that registers can be changed */
	zynq_slcr_unlock();

	/* See AR#54190 design advisory */
	regmap_update_bits(zynq_slcr_regmap, SLCR_L2C_RAM, 0x70707, 0x20202);

	register_restart_handler(&zynq_slcr_restart_nb);

	pr_info("%s mapped to %p\n", np->name, zynq_slcr_base);

	of_node_put(np);

	return 0;
}
/*
 * Program the peripheral clock tree with fixed board values: divisors and
 * source selects per peripheral, the 6:2:1 ratio (CLK_621), and the AMBA
 * peripheral clock gates.  Always returns 0.
 */
int zynq_clk_init(void) {
	zynq_slcr_unlock();

	SLCR_REG(DCI_CLK_CTRL) = CLK_CTRL_CLKACT1 | CLK_CTRL_DIVISOR1(52) | CLK_CTRL_DIVISOR2(2);
	SLCR_REG(GEM0_RCLK_CTRL) = CLK_CTRL_CLKACT1;
	SLCR_REG(GEM0_CLK_CTRL) = CLK_CTRL_CLKACT1 | CLK_CTRL_DIVISOR1(8) | CLK_CTRL_DIVISOR2(1);
	SLCR_REG(LQSPI_CLK_CTRL) = CLK_CTRL_CLKACT1 | CLK_CTRL_DIVISOR1(5);
	SLCR_REG(SDIO_CLK_CTRL) = CLK_CTRL_CLKACT1 | CLK_CTRL_DIVISOR1(20);
	SLCR_REG(UART_CLK_CTRL) = CLK_CTRL_CLKACT2 | CLK_CTRL_DIVISOR1(20);
	SLCR_REG(PCAP_CLK_CTRL) = CLK_CTRL_CLKACT1 | CLK_CTRL_DIVISOR1(5);

	/* PL fabric clocks; FPGA0/FPGA3 keep the default source select. */
	SLCR_REG(FPGA0_CLK_CTRL) = CLK_CTRL_DIVISOR1(10) | CLK_CTRL_DIVISOR2(1);
	SLCR_REG(FPGA1_CLK_CTRL) = CLK_CTRL_SRCSEL(3) | CLK_CTRL_DIVISOR1(6) | CLK_CTRL_DIVISOR2(1);
	SLCR_REG(FPGA2_CLK_CTRL) = CLK_CTRL_SRCSEL(2) | CLK_CTRL_DIVISOR1(53) | CLK_CTRL_DIVISOR2(2);
	SLCR_REG(FPGA3_CLK_CTRL) = CLK_CTRL_DIVISOR2(1);

	SLCR_REG(CLK_621_TRUE) = CLK_621_ENABLE;

	/* Gate on only the AMBA clocks for peripherals this board uses. */
	SLCR_REG(APER_CLK_CTRL) = DMA_CPU_CLK_EN | USB0_CPU_CLK_EN | USB1_CPU_CLK_EN |
		GEM0_CPU_CLK_EN | SDI0_CPU_CLK_EN | I2C0_CPU_CLK_EN | I2C1_CPU_CLK_EN |
		UART1_CPU_CLK_EN | GPIO_CPU_CLK_EN | LQSPI_CPU_CLK_EN | SMC_CPU_CLK_EN;

	zynq_slcr_lock();
	return 0;
}
/*
 * Setup clk for network.
 *
 * @param gem_id  GEM controller index (0 or 1); anything else is rejected.
 * @param rclk    Value written to the controller's GEM_RCLK_CTRL register.
 * @param clk     Value written to the controller's GEM_CLK_CTRL register.
 *
 * The SLCR is unlocked for the duration and re-locked on every exit path.
 */
void zynq_slcr_gem_clk_setup(u32 gem_id, u32 rclk, u32 clk)
{
	zynq_slcr_unlock();

	if (gem_id > 1) {
		/* Fix: gem_id is unsigned (u32), so use %u rather than %d. */
		printf("Non existing GEM id %u\n", gem_id);
		goto out;
	}

	if (gem_id) {
		/* Set divisors for appropriate frequency in GEM_CLK_CTRL */
		writel(clk, &slcr_base->gem1_clk_ctrl);
		/* Configure GEM_RCLK_CTRL */
		writel(rclk, &slcr_base->gem1_rclk_ctrl);
	} else {
		/* Set divisors for appropriate frequency in GEM_CLK_CTRL */
		writel(clk, &slcr_base->gem0_clk_ctrl);
		/* Configure GEM_RCLK_CTRL */
		writel(rclk, &slcr_base->gem0_rclk_ctrl);
	}
	/* Let the new clock settle before callers touch the MAC. */
	udelay(100000);

out:
	zynq_slcr_lock();
}
/*
 * Configure the DDR I/O buffers and the full MIO pin mux for this board
 * with inline per-pin values.  Always returns 0.
 *
 * Pins not listed (e.g. 00, 07, 09-15, 46, 50, 51) keep their hardware
 * reset defaults.
 */
int zynq_mio_init(void)
{
	zynq_slcr_unlock();

	SLCR_REG(GPIOB_CTRL) = GPIOB_CTRL_VREF_EN;

	/* DDR I/O buffer configuration. */
	SLCR_REG(DDRIOB_ADDR0) = DDRIOB_OUTPUT_EN(0x3);
	SLCR_REG(DDRIOB_ADDR1) = DDRIOB_OUTPUT_EN(0x3);
	SLCR_REG(DDRIOB_DATA0) = DDRIOB_INP_TYPE(1) | DDRIOB_TERM_EN | DDRIOB_DCI_TYPE(0x3) | DDRIOB_OUTPUT_EN(0x3);
	SLCR_REG(DDRIOB_DATA1) = DDRIOB_INP_TYPE(1) | DDRIOB_TERM_EN | DDRIOB_DCI_TYPE(0x3) | DDRIOB_OUTPUT_EN(0x3);
	SLCR_REG(DDRIOB_DIFF0) = DDRIOB_INP_TYPE(2) | DDRIOB_TERM_EN | DDRIOB_DCI_TYPE(0x3) | DDRIOB_OUTPUT_EN(0x3);
	SLCR_REG(DDRIOB_DIFF1) = DDRIOB_INP_TYPE(2) | DDRIOB_TERM_EN | DDRIOB_DCI_TYPE(0x3) | DDRIOB_OUTPUT_EN(0x3);
	SLCR_REG(DDRIOB_CLOCK) = DDRIOB_OUTPUT_EN(0x3);

	/* These register fields are not documented in the TRM. These
	 * values represent the defaults generated via the Zynq tools */
	SLCR_REG(DDRIOB_DRIVE_SLEW_ADDR) = 0x0018C61CU;
	SLCR_REG(DDRIOB_DRIVE_SLEW_DATA) = 0x00F9861CU;
	SLCR_REG(DDRIOB_DRIVE_SLEW_DIFF) = 0x00F9861CU;
	SLCR_REG(DDRIOB_DRIVE_SLEW_CLOCK) = 0x00F9861CU;
	SLCR_REG(DDRIOB_DDR_CTRL) = 0x00000E60U;

	/* Staged DCI writes — presumably enable, then reset/start calibration;
	 * values from the Zynq tools. Confirm against the TRM before reordering. */
	SLCR_REG(DDRIOB_DCI_CTRL) = 0x00000001U;
	SLCR_REG(DDRIOB_DCI_CTRL) |= 0x00000020U;
	SLCR_REG(DDRIOB_DCI_CTRL) |= 0x00000823U;

	/* mio pin config */

	/* Pins 01-08: L0 mux, fast, 3.3V LVCMOS. */
	SLCR_REG(MIO_PIN_01) = MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS33;
	SLCR_REG(MIO_PIN_02) = MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS33;
	SLCR_REG(MIO_PIN_03) = MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS33;
	SLCR_REG(MIO_PIN_04) = MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS33;
	SLCR_REG(MIO_PIN_05) = MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS33;
	SLCR_REG(MIO_PIN_06) = MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS33;
	SLCR_REG(MIO_PIN_08) = MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS33;

	/* Pins 16-21: L0 mux, HSTL outputs with receivers disabled. */
	SLCR_REG(MIO_PIN_16) = MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_HSTL | MIO_DISABLE_RCVR;
	SLCR_REG(MIO_PIN_17) = MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_HSTL | MIO_DISABLE_RCVR;
	SLCR_REG(MIO_PIN_18) = MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_HSTL | MIO_DISABLE_RCVR;
	SLCR_REG(MIO_PIN_19) = MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_HSTL | MIO_DISABLE_RCVR;
	SLCR_REG(MIO_PIN_20) = MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_HSTL | MIO_DISABLE_RCVR;
	SLCR_REG(MIO_PIN_21) = MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_HSTL | MIO_DISABLE_RCVR;

	/* Pins 22-27: L0 mux, HSTL inputs (tri-stated outputs). */
	SLCR_REG(MIO_PIN_22) = MIO_TRI_ENABLE | MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_HSTL;
	SLCR_REG(MIO_PIN_23) = MIO_TRI_ENABLE | MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_HSTL;
	SLCR_REG(MIO_PIN_24) = MIO_TRI_ENABLE | MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_HSTL;
	SLCR_REG(MIO_PIN_25) = MIO_TRI_ENABLE | MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_HSTL;
	SLCR_REG(MIO_PIN_26) = MIO_TRI_ENABLE | MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_HSTL;
	SLCR_REG(MIO_PIN_27) = MIO_TRI_ENABLE | MIO_L0_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_HSTL;

	/* Pins 28-39: L1 mux, 1.8V LVCMOS; 29/31/36 are inputs. */
	SLCR_REG(MIO_PIN_28) = MIO_L1_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_29) = MIO_L1_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18 | MIO_TRI_ENABLE;
	SLCR_REG(MIO_PIN_30) = MIO_L1_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_31) = MIO_L1_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18 | MIO_TRI_ENABLE;
	SLCR_REG(MIO_PIN_32) = MIO_L1_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_33) = MIO_L1_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_34) = MIO_L1_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_35) = MIO_L1_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_36) = MIO_L1_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18 | MIO_TRI_ENABLE;
	SLCR_REG(MIO_PIN_37) = MIO_L1_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_38) = MIO_L1_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_39) = MIO_L1_SEL | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;

	/* Pins 40-45: L3 mux function 4. */
	SLCR_REG(MIO_PIN_40) = MIO_L3_SEL(0x4) | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_41) = MIO_L3_SEL(0x4) | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_42) = MIO_L3_SEL(0x4) | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_43) = MIO_L3_SEL(0x4) | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_44) = MIO_L3_SEL(0x4) | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_45) = MIO_L3_SEL(0x4) | MIO_SPEED_FAST | MIO_IO_TYPE_LVCMOS18;

	/* Pins 47-49: 48/49 select L3 function 7 — presumably a UART; verify
	 * against the board schematic. */
	SLCR_REG(MIO_PIN_47) = MIO_TRI_ENABLE | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_48) = MIO_L3_SEL(0x7) | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_49) = MIO_TRI_ENABLE | MIO_L3_SEL(0x7) | MIO_IO_TYPE_LVCMOS18;

	/* Pins 52-53: L3 mux function 4. */
	SLCR_REG(MIO_PIN_52) = MIO_L3_SEL(0x4) | MIO_IO_TYPE_LVCMOS18;
	SLCR_REG(MIO_PIN_53) = MIO_L3_SEL(0x4) | MIO_IO_TYPE_LVCMOS18;

	/* Route SD0 write-protect and card-detect to the chosen MIO pins. */
	SLCR_REG(SD0_WP_CD_SEL) = SDIO0_WP_SEL(0x37) | SDIO0_CD_SEL(0x2F);

	zynq_slcr_lock();

	return 0;
}
/*
 * lk platform early init (SMP variant): bring up clocks/MIO/DDR, poke the
 * mandatory SLCR bits, init UART/GIC/GPIO/timer, trap the second CPU in our
 * reset stub, remap OCM, then register the physical memory arenas.
 */
void platform_early_init(void)
{
#if 0
	ps7_init();
#else
	/* Unlock the registers and leave them that way */
	zynq_slcr_unlock();
	zynq_mio_init();
	zynq_pll_init();
	zynq_clk_init();
#if ZYNQ_SDRAM_INIT
	zynq_ddr_init();
#endif
#endif

	/* Enable all level shifters */
	SLCR_REG(LVL_SHFTR_EN) = 0xF;
	/* FPGA SW reset (not documented, but mandatory) */
	SLCR_REG(FPGA_RST_CTRL) = 0x0;

	/* zynq manual says this is mandatory for cache init */
	*REG32(SLCR_BASE + 0xa1c) = 0x020202;

	/* early initialize the uart so we can printf */
	uart_init_early();

	/* initialize the interrupt controller */
	arm_gic_init();

	zynq_gpio_init();

	/* initialize the timer block */
	arm_cortex_a9_timer_init(CPUPRIV_BASE, zynq_get_arm_timer_freq());

	/* bump the 2nd cpu into our code space and remap the top SRAM block */
	if (KERNEL_LOAD_OFFSET != 0) {
		/* construct a trampoline to get the 2nd cpu up to the trap routine */

		/* figure out the offset of the trampoline routine in physical space from address 0 */
		extern void platform_reset(void);
		addr_t tramp = (addr_t)&platform_reset;
		tramp -= KERNEL_BASE;
		tramp += MEMBASE;

		/* stuff in a ldr pc, [nextaddrress], and a target address */
		uint32_t *ptr = (uint32_t *)KERNEL_BASE;

		ptr[0] = 0xe51ff004; // ldr pc, [pc, #-4]
		ptr[1] = tramp;
		arch_clean_invalidate_cache_range((addr_t)ptr, 8);
	}

	/* reset the 2nd cpu, letting it go through its reset vector (at 0x0 physical) */
	SLCR_REG(A9_CPU_RST_CTRL) |= (1<<1); // reset cpu 1
	spin(10);
	SLCR_REG(A9_CPU_RST_CTRL) &= ~(1<<1); // unreset cpu 1

	/* wait for the 2nd cpu to reset, go through the usual reset vector, and get trapped by our code */
	/* see platform/zynq/reset.S */
	extern volatile int __cpu_trapped;
	uint count = 100000;
	while (--count) {
		/* invalidate so we observe the other CPU's store from memory */
		arch_clean_invalidate_cache_range((addr_t)&__cpu_trapped, sizeof(__cpu_trapped));
		if (__cpu_trapped != 0)
			break;
	}

	if (count == 0) {
		panic("ZYNQ: failed to trap 2nd cpu\n");
	}

	/* bounce the 4th sram region down to lower address */
	SLCR_REG(OCM_CFG) &= ~0xf; /* all banks at low address */

	/* add the main memory arena */
#if !ZYNQ_CODE_IN_SDRAM && SDRAM_SIZE != 0
	/* In the case of running from SRAM, and we are using SDRAM,
	 * there is a discontinuity between the end of SRAM (256K) and the start of SDRAM (1MB),
	 * so intentionally bump the boot-time allocator to start in the base of SDRAM.
	 */
	extern uintptr_t boot_alloc_start;
	extern uintptr_t boot_alloc_end;

	boot_alloc_start = KERNEL_BASE + MB;
	boot_alloc_end = KERNEL_BASE + MB;
#endif

#if SDRAM_SIZE != 0
	pmm_add_arena(&sdram_arena);
#endif
	pmm_add_arena(&sram_arena);
}