static void __init clk_misc_init(void)
{
	u32 val;

	/* Gate off cpu clock in WFI for power saving */
	__mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);

	/* Clear BYPASS for SAIF */
	__mxs_clrl(1 << BP_CLKSEQ_BYPASS_SAIF, CLKSEQ);

	/* SAIF has to use frac div for functional operation */
	val = readl_relaxed(SAIF);
	val |= 1 << BP_SAIF_DIV_FRAC_EN;
	writel_relaxed(val, SAIF);

	/*
	 * Source ssp clock from ref_io rather than ref_xtal,
	 * as ref_xtal only provides 24 MHz as maximum.
	 */
	__mxs_clrl(1 << BP_CLKSEQ_BYPASS_SSP, CLKSEQ);

	/*
	 * 480 MHz seems too high to be ssp clock source directly,
	 * so set frac to get a 288 MHz ref_io.
	 */
	__mxs_clrl(0x3f << BP_FRAC_IOFRAC, FRAC);
	__mxs_setl(30 << BP_FRAC_IOFRAC, FRAC);
}
const u32 *mxs_get_ocotp(void)
{
	void __iomem *ocotp_base = MXS_IO_ADDRESS(MXS_OCOTP_BASE_ADDR);
	int timeout = 0x400;
	size_t i;
	static int once = 0;

	if (once)
		return ocotp_words;

	mutex_lock(&ocotp_mutex);

	/*
	 * clk_enable(hbus_clk) for ocotp can be skipped
	 * as it must be on when system is running.
	 */

	/* try to clear ERROR bit */
	__mxs_clrl(BM_OCOTP_CTRL_ERROR, ocotp_base);

	/* check both BUSY and ERROR cleared */
	while ((__raw_readl(ocotp_base) &
		(BM_OCOTP_CTRL_BUSY | BM_OCOTP_CTRL_ERROR)) && --timeout)
		cpu_relax();

	if (unlikely(!timeout))
		goto error_unlock;

	/* open OCOTP banks for read */
	__mxs_setl(BM_OCOTP_CTRL_RD_BANK_OPEN, ocotp_base);

	/* approximately wait 32 hclk cycles */
	udelay(1);

	/* poll BUSY bit becoming cleared */
	timeout = 0x400;
	while ((__raw_readl(ocotp_base) & BM_OCOTP_CTRL_BUSY) && --timeout)
		cpu_relax();

	if (unlikely(!timeout))
		goto error_unlock;

	for (i = 0; i < OCOTP_WORD_COUNT; i++)
		ocotp_words[i] = __raw_readl(ocotp_base + OCOTP_WORD_OFFSET +
						i * 0x10);

	/* close banks for power saving */
	__mxs_clrl(BM_OCOTP_CTRL_RD_BANK_OPEN, ocotp_base);

	once = 1;

	mutex_unlock(&ocotp_mutex);

	return ocotp_words;

error_unlock:
	mutex_unlock(&ocotp_mutex);
	pr_err("%s: timeout in reading OCOTP\n", __func__);
	return NULL;
}
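/*
 * Illustrative usage sketch (not from the code above): a board file can pull
 * individual fuse words out of the cached array returned by mxs_get_ocotp().
 * The function name, the word index, and the byte layout below are
 * assumptions made up for illustration; consult the board's fuse map for the
 * real layout.
 */
static int __init example_get_mac_from_ocotp(u8 mac[6])
{
	const u32 *ocotp = mxs_get_ocotp();
	u32 val;

	/* mxs_get_ocotp() returns NULL on a read timeout */
	if (!ocotp)
		return -ETIMEDOUT;

	/*
	 * Illustrative layout: assume the low three MAC octets sit in fuse
	 * word 0 and hard-code a placeholder OUI for the upper three.
	 */
	val = ocotp[0];
	mac[0] = 0x00;
	mac[1] = 0x01;
	mac[2] = 0x02;
	mac[3] = (val >> 16) & 0xff;
	mac[4] = (val >> 8) & 0xff;
	mac[5] = val & 0xff;

	return 0;
}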
int mxs_iomux_setup_pad(iomux_cfg_t pad)
{
	u32 reg, ofs, bp, bm;
	void __iomem *iomux_base = MXS_IO_ADDRESS(MXS_PINCTRL_BASE_ADDR);

	/* mux function select */
	ofs = 0x100;
	ofs += PAD_BANK(pad) * 0x20 + PAD_PIN(pad) / 16 * 0x10;
	bp = PAD_PIN(pad) % 16 * 2;
	bm = 0x3 << bp;
	reg = __raw_readl(iomux_base + ofs);
	reg &= ~bm;
	reg |= PAD_MUXSEL(pad) << bp;
	__raw_writel(reg, iomux_base + ofs);

	/* drive control registers */
	ofs = cpu_is_mx23() ? 0x200 : 0x300;
	ofs += PAD_BANK(pad) * 0x40 + PAD_PIN(pad) / 8 * 0x10;

	/* drive strength (mA) */
	if (PAD_MA_VALID(pad)) {
		bp = PAD_PIN(pad) % 8 * 4;
		bm = 0x3 << bp;
		reg = __raw_readl(iomux_base + ofs);
		reg &= ~bm;
		reg |= PAD_MA(pad) << bp;
		__raw_writel(reg, iomux_base + ofs);
	}

	/* voltage select */
	if (PAD_VOL_VALID(pad)) {
		bp = PAD_PIN(pad) % 8 * 4 + 2;
		if (PAD_VOL(pad))
			__mxs_setl(1 << bp, iomux_base + ofs);
		else
			__mxs_clrl(1 << bp, iomux_base + ofs);
	}

	/* pull-up enable */
	if (PAD_PULL_VALID(pad)) {
		ofs = cpu_is_mx23() ? 0x400 : 0x600;
		ofs += PAD_BANK(pad) * 0x10;
		bp = PAD_PIN(pad);
		if (PAD_PULL(pad))
			__mxs_setl(1 << bp, iomux_base + ofs);
		else
			__mxs_clrl(1 << bp, iomux_base + ofs);
	}

	return 0;
}
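/*
 * A minimal sketch of how a table of pad configurations might be applied in
 * one go, assuming an iomux_cfg_t array supplied by the board file.  The
 * helper name and the error handling are illustrative; only the call to
 * mxs_iomux_setup_pad() above is taken from the code itself.
 */
static int example_setup_pad_table(const iomux_cfg_t *pad_list, unsigned count)
{
	unsigned i;
	int ret;

	/* apply each pad entry in order, stopping on the first failure */
	for (i = 0; i < count; i++) {
		ret = mxs_iomux_setup_pad(pad_list[i]);
		if (ret)
			return ret;
	}

	return 0;
}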
static int mxs_gpio_set_irq_type(u32 irq, u32 type)
{
	u32 gpio = irq_to_gpio(irq);
	u32 pin_mask = 1 << (gpio & 31);
	struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32];
	void __iomem *pin_addr;
	int edge;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		edge = GPIO_INT_RISE_EDGE;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		edge = GPIO_INT_FALL_EDGE;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		edge = GPIO_INT_LOW_LEV;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		edge = GPIO_INT_HIGH_LEV;
		break;
	default:
		return -EINVAL;
	}

	/* set level or edge */
	pin_addr = port->base + PINCTRL_IRQLEV(port->id);
	if (edge & GPIO_INT_LEV_MASK)
		__mxs_setl(pin_mask, pin_addr);
	else
		__mxs_clrl(pin_mask, pin_addr);

	/* set polarity */
	pin_addr = port->base + PINCTRL_IRQPOL(port->id);
	if (edge & GPIO_INT_POL_MASK)
		__mxs_setl(pin_mask, pin_addr);
	else
		__mxs_clrl(pin_mask, pin_addr);

	clear_gpio_irqstatus(port, gpio & 0x1f);

	return 0;
}
static void set_gpio_irqenable(struct mxs_gpio_port *port, u32 index,
				int enable)
{
	if (enable) {
		__mxs_setl(1 << index, port->base + PINCTRL_IRQEN(port->id));
		__mxs_setl(1 << index, port->base + PINCTRL_PIN2IRQ(port->id));
	} else {
		__mxs_clrl(1 << index, port->base + PINCTRL_IRQEN(port->id));
	}
}
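/*
 * Hedged sketch of how set_gpio_irqenable() could back the irq_chip
 * mask/unmask callbacks, assuming the irq_to_gpio() mapping and the
 * mxs_gpio_ports[] array used elsewhere in this GPIO code.  The callback
 * names are illustrative.
 */
static void example_gpio_mask_irq(u32 irq)
{
	u32 gpio = irq_to_gpio(irq);

	/* disable the interrupt for this pin only */
	set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 0);
}

static void example_gpio_unmask_irq(u32 irq)
{
	u32 gpio = irq_to_gpio(irq);

	/* re-enable the interrupt and route the pin to the IRQ logic */
	set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 1);
}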
/*
 * HW_SAIF_CLKMUX_SEL:
 * DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1
 *		clock pins selected for SAIF1 input clocks.
 * CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and
 *		SAIF0 clock inputs selected for SAIF1 input clocks.
 * EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input
 *		clocks.
 * EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input
 *		clocks.
 */
int mxs_saif_clkmux_select(unsigned int clkmux)
{
	if (clkmux > 0x3)
		return -EINVAL;

	__mxs_clrl(0x3 << BP_SAIF_CLKMUX, DIGCTRL);
	__mxs_setl(clkmux << BP_SAIF_CLKMUX, DIGCTRL);

	return 0;
}
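/*
 * Illustrative call site, assuming a SAIF setup where SAIF1 runs as a slave
 * off SAIF0's clock pin: selecting EXTMSTR0 (0x2) makes both blocks take
 * their input clocks from the SAIF0 pins.  The function name and the probe
 * context are assumptions for the sake of the example.
 */
static int example_saif_probe_clkmux(void)
{
	int ret;

	/* 0x2 == EXTMSTR0, see the HW_SAIF_CLKMUX_SEL description above */
	ret = mxs_saif_clkmux_select(0x2);
	if (ret)
		pr_err("failed to select SAIF clock mux: %d\n", ret);

	return ret;
}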
static void __init clk_misc_init(void)
{
	u32 val;

	/* Gate off cpu clock in WFI for power saving */
	__mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);

	/* 0 is a bad default value for a divider */
	__mxs_setl(1 << BP_ENET_DIV_TIME, ENET);

	/* Clear BYPASS for SAIF */
	__mxs_clrl(0x3 << BP_CLKSEQ_BYPASS_SAIF0, CLKSEQ);

	/* SAIF has to use frac div for functional operation */
	val = readl_relaxed(SAIF0);
	val |= 1 << BP_SAIF_DIV_FRAC_EN;
	writel_relaxed(val, SAIF0);

	val = readl_relaxed(SAIF1);
	val |= 1 << BP_SAIF_DIV_FRAC_EN;
	writel_relaxed(val, SAIF1);

	/* Extra fec clock setting */
	val = readl_relaxed(ENET);
	val &= ~(1 << BP_ENET_SLEEP);
	writel_relaxed(val, ENET);

	/*
	 * Source ssp clock from ref_io rather than ref_xtal,
	 * as ref_xtal only provides 24 MHz as maximum.
	 */
	__mxs_clrl(0xf << BP_CLKSEQ_BYPASS_SSP0, CLKSEQ);

	/*
	 * 480 MHz seems too high to be ssp clock source directly,
	 * so set frac0 to get a 288 MHz ref_io0.
	 */
	val = readl_relaxed(FRAC0);
	val &= ~(0x3f << BP_FRAC0_IO0FRAC);
	val |= 30 << BP_FRAC0_IO0FRAC;
	writel_relaxed(val, FRAC0);
}
static void mxs_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct mxs_gpio_port *port =
		container_of(chip, struct mxs_gpio_port, chip);
	void __iomem *pin_addr = port->base + PINCTRL_DOUT(port->id);

	if (value)
		__mxs_setl(1 << offset, pin_addr);
	else
		__mxs_clrl(1 << offset, pin_addr);
}
static void mxs_set_gpio_direction(struct gpio_chip *chip, unsigned offset,
				int dir)
{
	struct mxs_gpio_port *port =
		container_of(chip, struct mxs_gpio_port, chip);
	void __iomem *pin_addr = port->base + PINCTRL_DOE(port->id);

	if (dir)
		__mxs_setl(1 << offset, pin_addr);
	else
		__mxs_clrl(1 << offset, pin_addr);
}
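/*
 * A minimal sketch of the gpio_chip direction callbacks built on top of
 * mxs_gpio_set() and mxs_set_gpio_direction().  The names mirror the ones
 * assigned in mxs_gpio_init() below, but the bodies here illustrate the
 * expected call order rather than being a verbatim copy.
 */
static int mxs_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	mxs_set_gpio_direction(chip, offset, 0);
	return 0;
}

static int mxs_gpio_direction_output(struct gpio_chip *chip,
					unsigned offset, int value)
{
	/* drive the output latch first so the pin does not glitch */
	mxs_gpio_set(chip, offset, value);
	mxs_set_gpio_direction(chip, offset, 1);
	return 0;
}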
static int clear_poll_bit(void __iomem *addr, u32 mask)
{
	int timeout = 0x400;

	__mxs_clrl(mask, addr);
	udelay(1);

	while ((readl(addr) & mask) && --timeout)
		;

	return !timeout;
}
int __init mxs_gpio_init(struct mxs_gpio_port *port, int cnt)
{
	int i, j;

	/* save for local usage */
	mxs_gpio_ports = port;
	gpio_table_size = cnt;

	pr_info("MXS GPIO hardware\n");

	for (i = 0; i < cnt; i++) {
		/* disable the interrupt and clear the status */
		__raw_writel(0, port[i].base + PINCTRL_PIN2IRQ(i));
		__raw_writel(0, port[i].base + PINCTRL_IRQEN(i));

		/* clear address has to be used to clear IRQSTAT bits */
		__mxs_clrl(~0U, port[i].base + PINCTRL_IRQSTAT(i));

		for (j = port[i].virtual_irq_start;
			j < port[i].virtual_irq_start + 32; j++) {
			set_irq_chip(j, &gpio_irq_chip);
			set_irq_handler(j, handle_level_irq);
			set_irq_flags(j, IRQF_VALID);
		}

		/* setup one handler for each entry */
		set_irq_chained_handler(port[i].irq, mxs_gpio_irq_handler);
		set_irq_data(port[i].irq, &port[i]);

		/* register gpio chip */
		port[i].chip.direction_input = mxs_gpio_direction_input;
		port[i].chip.direction_output = mxs_gpio_direction_output;
		port[i].chip.get = mxs_gpio_get;
		port[i].chip.set = mxs_gpio_set;
		port[i].chip.to_irq = mxs_gpio_to_irq;
		port[i].chip.base = i * 32;
		port[i].chip.ngpio = 32;

		/* it's a serious configuration bug when it fails */
		BUG_ON(gpiochip_add(&port[i].chip) < 0);
	}

	return 0;
}
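/*
 * Hedged sketch of how a SoC setup file might describe its banks and hand
 * them to mxs_gpio_init().  The IRQ numbers, the virtual IRQ base, and the
 * number of banks are placeholders, not values from a real chip header; only
 * MXS_IO_ADDRESS()/MXS_PINCTRL_BASE_ADDR reuse names from the code above.
 */
#define EXAMPLE_GPIO_IRQ_START	160	/* placeholder virtual IRQ base */

static struct mxs_gpio_port example_gpio_ports[] = {
	{ .id = 0, .irq = 127 },	/* placeholder bank 0 IRQ */
	{ .id = 1, .irq = 126 },	/* placeholder bank 1 IRQ */
};

static int __init example_register_gpios(void)
{
	int i;

	/* all banks live in the shared pinctrl block, so one base for all */
	for (i = 0; i < ARRAY_SIZE(example_gpio_ports); i++) {
		example_gpio_ports[i].base =
			MXS_IO_ADDRESS(MXS_PINCTRL_BASE_ADDR);
		example_gpio_ports[i].virtual_irq_start =
			EXAMPLE_GPIO_IRQ_START + i * 32;
	}

	return mxs_gpio_init(example_gpio_ports,
				ARRAY_SIZE(example_gpio_ports));
}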
/*
 * Clear the bit and poll it cleared.  This is usually called with
 * a reset address and mask being either SFTRST (bit 31) or CLKGATE
 * (bit 30).
 */
static int clear_poll_bit(void __iomem *addr, u32 mask)
{
	int timeout = 0x400;

	/* clear the bit */
	__mxs_clrl(mask, addr);

	/*
	 * SFTRST needs 3 GPMI clocks to settle, the reference manual
	 * recommends waiting 1us.
	 */
	udelay(1);

	/* poll the bit becoming clear */
	while ((__raw_readl(addr) & mask) && --timeout)
		/* nothing */;

	return !timeout;
}
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
	int ret;
	int timeout = 0x400;

	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	__mxs_clrl(MODULE_CLKGATE, reset_addr);

	if (!just_enable) {
		__mxs_setl(MODULE_SFTRST, reset_addr);
		udelay(1);

		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
			;
		if (unlikely(!timeout))
			goto error;
	}

	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
	if (unlikely(ret))
		goto error;

	return 0;

error:
	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
	return -ETIMEDOUT;
}
/*
 * The current mxs_reset_block() will do two things:
 * [1] enable the module.
 * [2] reset the module.
 *
 * In most of the cases, it's ok.
 * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
 * If you try to soft reset the BCH block, it becomes unusable until
 * the next hard reset.  This case occurs in the NAND boot mode.  When the
 * board boots by NAND, the ROM of the chip will initialize the BCH blocks
 * itself.  So if the driver tries to reset the BCH again, the BCH will not
 * work anymore.  You will see a DMA timeout in this case.  The bug has been
 * fixed in the following chips, such as MX28.
 *
 * To avoid this bug, just add a new parameter `just_enable` for
 * the mxs_reset_block(), and rewrite it here.
 */
static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
	int ret;
	int timeout = 0x400;

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear CLKGATE */
	__mxs_clrl(MODULE_CLKGATE, reset_addr);

	if (!just_enable) {
		/* set SFTRST to reset the block */
		__mxs_setl(MODULE_SFTRST, reset_addr);
		udelay(1);

		/* poll CLKGATE becoming set */
		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
			/* nothing */;
		if (unlikely(!timeout))
			goto error;
	}

	/* clear and poll SFTRST */
	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
	if (unlikely(ret))
		goto error;

	/* clear and poll CLKGATE */
	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
	if (unlikely(ret))
		goto error;

	return 0;

error:
	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
	return -ETIMEDOUT;
}
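/*
 * Illustrative caller, assuming the GPMI driver resets both the GPMI and the
 * BCH register blocks at initialization time.  Passing `just_enable = true`
 * for the BCH block on MX23 skips the SFTRST step and only ungates the clock,
 * which is the workaround the comment above describes.  The function name,
 * the register pointers, and the MX23 flag are placeholders.
 */
static int example_gpmi_hw_init(void __iomem *gpmi_regs,
				void __iomem *bch_regs, bool is_mx23)
{
	int ret;

	/* the GPMI block itself can always take a full soft reset */
	ret = gpmi_reset_block(gpmi_regs, false);
	if (ret)
		return ret;

	/* on MX23, only enable the BCH block instead of soft resetting it */
	ret = gpmi_reset_block(bch_regs, is_mx23);
	if (ret)
		return ret;

	return 0;
}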
static void timrot_irq_acknowledge(void)
{
	__mxs_clrl(BM_TIMROT_TIMCTRLn_IRQ,
			mxs_timrot_base + HW_TIMROT_TIMCTRLn(0));
}
static inline void timrot_irq_disable(void)
{
	__mxs_clrl(BM_TIMROT_TIMCTRLn_IRQ_EN,
			mxs_timrot_base + HW_TIMROT_TIMCTRLn(0));
}
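/*
 * A minimal sketch of the clockevent interrupt path that consumes
 * timrot_irq_acknowledge(): the pending IRQ bit in TIMCTRL0 is cleared via
 * the register's CLR address before the event handler runs.  The handler
 * name and the clock_event_device plumbing are assumptions for illustration.
 */
static irqreturn_t example_mxs_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	/* writing the CLR address drops the IRQ bit without read-modify-write */
	timrot_irq_acknowledge();
	evt->event_handler(evt);

	return IRQ_HANDLED;
}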
static void clear_gpio_irqstatus(struct mxs_gpio_port *port, u32 index)
{
	__mxs_clrl(1 << index, port->base + PINCTRL_IRQSTAT(port->id));
}
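/*
 * Hedged sketch of how clear_gpio_irqstatus() and the mask/unmask helpers
 * above could be wired into the gpio_irq_chip referenced by mxs_gpio_init().
 * The ack callback name and the struct initializer are illustrative; only the
 * helpers themselves come from the code in this section.
 */
static void example_gpio_ack_irq(u32 irq)
{
	u32 gpio = irq_to_gpio(irq);

	/* clear the latched status bit for this pin only */
	clear_gpio_irqstatus(&mxs_gpio_ports[gpio / 32], gpio & 0x1f);
}

static struct irq_chip gpio_irq_chip = {
	.name = "mxs gpio",
	.ack = example_gpio_ack_irq,
	.mask = example_gpio_mask_irq,
	.unmask = example_gpio_unmask_irq,
	.set_type = mxs_gpio_set_irq_type,
};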