/*
 * Initializes the NFC hardware.
 */
int mxs_nand_init(struct mxs_nand_info *info)
{
	struct mxs_gpmi_regs *gpmi_regs =
		(struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	struct mxs_bch_regs *bch_regs =
		(struct mxs_bch_regs *)MXS_BCH_BASE;
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
				MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller. */
	mxs_dma_init();
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
		j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI block. */
	mxs_reset_block(&gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set IRQ polarity, disable write protection and
	 * select BCH ECC.
	 */
	clrsetbits_le32(&gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}
void __init avic_init_irq(void __iomem *base, int nr_irqs)
{
	int i;

	g_icoll_base = base;

	mxs_reset_block(base + HW_ICOLL_CTRL, 0);

	for (i = 0; i < nr_irqs; i++) {
		__raw_writel(0, g_icoll_base + HW_ICOLL_INTERRUPTn(i));
		set_irq_chip(i, &icoll_chip);
		set_irq_handler(i, handle_level_irq);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

	__raw_writel(BF_ICOLL_LEVELACK_IRQLEVELACK
			(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL0),
			g_icoll_base + HW_ICOLL_LEVELACK);
	__raw_writel(BF_ICOLL_LEVELACK_IRQLEVELACK
			(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL1),
			g_icoll_base + HW_ICOLL_LEVELACK);
	__raw_writel(BF_ICOLL_LEVELACK_IRQLEVELACK
			(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL2),
			g_icoll_base + HW_ICOLL_LEVELACK);
	__raw_writel(BF_ICOLL_LEVELACK_IRQLEVELACK
			(BV_ICOLL_LEVELACK_IRQLEVELACK__LEVEL3),
			g_icoll_base + HW_ICOLL_LEVELACK);

	__raw_writel(0, g_icoll_base + HW_ICOLL_VECTOR);

	/* Barrier */
	(void)__raw_readl(g_icoll_base + HW_ICOLL_STAT);
}
static int mxsmmc_init(struct mmc *mmc)
{
	struct mxsmmc_priv *priv = (struct mxsmmc_priv *)mmc->priv;
	struct mxs_ssp_regs *ssp_regs = priv->regs;

	/* Reset SSP */
	mxs_reset_block(&ssp_regs->hw_ssp_ctrl0_reg);

	/* Reconfigure the SSP block for MMC operation */
	writel(SSP_CTRL1_SSP_MODE_SD_MMC |
		SSP_CTRL1_WORD_LENGTH_EIGHT_BITS |
		SSP_CTRL1_DMA_ENABLE |
		SSP_CTRL1_POLARITY |
		SSP_CTRL1_RECV_TIMEOUT_IRQ_EN |
		SSP_CTRL1_DATA_CRC_IRQ_EN |
		SSP_CTRL1_DATA_TIMEOUT_IRQ_EN |
		SSP_CTRL1_RESP_TIMEOUT_IRQ_EN |
		SSP_CTRL1_RESP_ERR_IRQ_EN,
		&ssp_regs->hw_ssp_ctrl1_set);

	/* Set initial bit clock 400 KHz */
	mxs_set_ssp_busclock(priv->id, 400);

	/* Send initial 74 clock cycles (185 us @ 400 KHz) */
	writel(SSP_CMD0_CONT_CLKING_EN, &ssp_regs->hw_ssp_cmd0_set);
	udelay(200);
	writel(SSP_CMD0_CONT_CLKING_EN, &ssp_regs->hw_ssp_cmd0_clr);

	return 0;
}
static void mx23_mem_init(void)
{
	/*
	 * Reset/ungate the EMI block. This is essential, otherwise the system
	 * suffers from memory instability. This thing is mx23 specific and is
	 * no longer present on mx28.
	 */
	mxs_reset_block((struct mxs_register_32 *)MXS_EMI_BASE);

	mx23_mem_setup_vddmem();

	/*
	 * Configure the DRAM registers
	 */

	/* Clear START and SREFRESH bit from DRAM_CTL8 */
	clrbits_le32(MXS_DRAM_BASE + 0x20, (1 << 16) | (1 << 8));

	initialize_dram_values();

	/* Set START bit in DRAM_CTL8 */
	setbits_le32(MXS_DRAM_BASE + 0x20, 1 << 16);

	clrbits_le32(MXS_DRAM_BASE + 0x40, 1 << 17);
	early_delay(20000);

	/* Adjust EMI port priority. */
	clrsetbits_le32(0x80020000, 0x1f << 16, 0x2);
	early_delay(20000);

	setbits_le32(MXS_DRAM_BASE + 0x40, 1 << 19);
	setbits_le32(MXS_DRAM_BASE + 0x40, 1 << 11);
}
static int stmp3xxx_rtc_resume(struct platform_device *dev)
{
	struct stmp3xxx_rtc_data *rtc_data = platform_get_drvdata(dev);

	mxs_reset_block(rtc_data->io);
	writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN |
			STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN |
			STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE,
			rtc_data->io + STMP3XXX_RTC_PERSISTENT0_CLR);
	return 0;
}
void __init icoll_init_irq(void)
{
	int i;

	mxs_reset_block(icoll_base + HW_ICOLL_CTRL);

	for (i = 0; i < MXS_INTERNAL_IRQS; i++) {
		irq_set_chip_and_handler(i, &mxs_icoll_chip, handle_level_irq);
		set_irq_flags(i, IRQF_VALID);
	}
}
void __init mxs_timer_init(int irq)
{
	struct clk *timer_clk;

	timer_clk = clk_get_sys("timrot", NULL);
	if (IS_ERR(timer_clk)) {
		pr_err("%s: failed to get clk\n", __func__);
		return;
	}

	clk_prepare_enable(timer_clk);

	/*
	 * Initialize timers to a known state
	 */
	mxs_reset_block(mxs_timrot_base + HW_TIMROT_ROTCTRL);

	/* get timrot version */
	timrot_major_version = __raw_readl(mxs_timrot_base +
			(cpu_is_mx23() ? MX23_TIMROT_VERSION_OFFSET :
					MX28_TIMROT_VERSION_OFFSET));
	timrot_major_version >>= BP_TIMROT_MAJOR_VERSION;

	/* one for clock_event */
	__raw_writel((timrot_is_v1() ?
			BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL :
			BV_TIMROTv2_TIMCTRLn_SELECT__32KHZ_XTAL) |
			BM_TIMROT_TIMCTRLn_UPDATE |
			BM_TIMROT_TIMCTRLn_IRQ_EN,
			mxs_timrot_base + HW_TIMROT_TIMCTRLn(0));

	/* another for clocksource */
	__raw_writel((timrot_is_v1() ?
			BV_TIMROTv1_TIMCTRLn_SELECT__32KHZ_XTAL :
			BV_TIMROTv2_TIMCTRLn_SELECT__32KHZ_XTAL) |
			BM_TIMROT_TIMCTRLn_RELOAD,
			mxs_timrot_base + HW_TIMROT_TIMCTRLn(1));

	/* set clocksource timer fixed count to the maximum */
	if (timrot_is_v1())
		__raw_writel(0xffff,
			mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1));
	else
		__raw_writel(0xffffffff,
			mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));

	/* init and register the timer to the framework */
	mxs_clocksource_init(timer_clk);
	mxs_clockevent_init(timer_clk);

	/* Make irqs happen */
	setup_irq(irq, &mxs_timer_irq);
}
/*
 * Nominally, the purpose of this function is to look for or create the bad
 * block table. In fact, since we call this function at the very end of
 * the initialization process started by nand_scan(), and we don't have a
 * more formal mechanism, we "hook" this function to continue the init
 * process.
 *
 * At this point, the physical NAND Flash chips have been identified and
 * counted, so we know the physical geometry. This enables us to make some
 * important configuration decisions.
 *
 * The return value of this function propagates directly back to this driver's
 * call to nand_scan(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */
static int mxs_nand_scan_bbt(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd->priv;
	struct mxs_nand_info *nand_info = nand->priv;
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	uint32_t tmp;

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
	tmp = (mxs_nand_ecc_chunk_cnt(mtd->writesize) - 1)
		<< BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1)
		<< BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= MXS_NAND_CHUNK_DATA_CHUNK_SIZE
		>> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1)
		<< BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= MXS_NAND_CHUNK_DATA_CHUNK_SIZE
		>> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	/* Hook some operations at the MTD level. */
	if (mtd->_read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->_read_oob;
		mtd->_read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->_write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->_write_oob;
		mtd->_write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->_block_markbad;
		mtd->_block_markbad = mxs_nand_hook_block_markbad;
	}

	/* We use the reference implementation for bad block management. */
	return nand_default_bbt(mtd);
}
void rtc_reset(void)
{
	struct mxs_rtc_regs *rtc_regs = (struct mxs_rtc_regs *)MXS_RTC_BASE;
	int ret;

	/* Set time to 1970-01-01 */
	mxs_rtc_set_time(0);

	/* Reset the RTC block */
	ret = mxs_reset_block(&rtc_regs->hw_rtc_ctrl_reg);
	if (ret)
		printf("MXS RTC: Block reset timeout\n");
}
void __init icoll_init_irq(void)
{
	int i;

	/*
	 * Interrupt Collector reset, which initializes the priority
	 * for each irq to level 0.
	 */
	mxs_reset_block(icoll_base + HW_ICOLL_CTRL);

	for (i = 0; i < MXS_INTERNAL_IRQS; i++) {
		set_irq_chip(i, &mxs_icoll_chip);
		set_irq_handler(i, handle_level_irq);
		set_irq_flags(i, IRQF_VALID);
	}
}
static int mxs_nand_gpmi_init(void)
{
	int ret;

	/* Reset the GPMI block. */
	ret = mxs_reset_block(&gpmi_regs->hw_gpmi_ctrl0_reg);
	if (ret)
		return ret;

	/*
	 * Choose NAND mode, set IRQ polarity, disable write protection and
	 * select BCH ECC.
	 */
	clrsetbits_le32(&gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	writel(0x500 << 16, &gpmi_regs->hw_gpmi_timing1);

	return 0;
}
int timer_init(void)
{
	struct mxs_timrot_regs *timrot_regs =
		(struct mxs_timrot_regs *)MXS_TIMROT_BASE;

	/* Reset Timers and Rotary Encoder module */
	mxs_reset_block(&timrot_regs->hw_timrot_rotctrl_reg);

	/* Set fixed_count to 0 */
	writel(0, &timrot_regs->hw_timrot_fixed_count0);

	/* Set UPDATE bit and 1 kHz frequency */
	writel(TIMROT_TIMCTRLn_UPDATE | TIMROT_TIMCTRLn_RELOAD |
		TIMROT_TIMCTRLn_SELECT_1KHZ_XTAL,
		&timrot_regs->hw_timrot_timctrl0);

#ifndef DEBUG_TIMER_WRAP
	/* Set fixed_count to maximum value */
	writel(TIMER_LOAD_VAL, &timrot_regs->hw_timrot_fixed_count0);
#else
	/* Set fixed_count so that the counter will wrap after 20 seconds */
	writel(20 * MXS_INCREMENTER_HZ, &timrot_regs->hw_timrot_fixed_count0);
	gd->arch.lastinc = TIMER_LOAD_VAL - 20 * MXS_INCREMENTER_HZ;
#endif
#ifdef DEBUG_TIMER_WRAP
	/* Make the usec counter roll over 30 seconds after startup */
	writel(-30000000, MXS_HW_DIGCTL_MICROSECONDS);
#endif
	writel(TIMROT_TIMCTRLn_UPDATE, &timrot_regs->hw_timrot_timctrl0_clr);
#ifdef DEBUG_TIMER_WRAP
	/* Set fixed_count to maximum value for subsequent loads */
	writel(TIMER_LOAD_VAL, &timrot_regs->hw_timrot_fixed_count0);
#endif
	gd->arch.timer_rate_hz = MXS_INCREMENTER_HZ;
	gd->arch.tbl = TIMER_START;
	gd->arch.tbu = 0;

	return 0;
}
static int init_bl(struct mxs_platform_bl_data *data)
{
	int ret = 0;

	pwm_clk = clk_get(NULL, "pwm");
	if (IS_ERR(pwm_clk)) {
		ret = PTR_ERR(pwm_clk);
		return ret;
	}
	clk_enable(pwm_clk);
	mxs_reset_block(REGS_PWM_BASE, 1);

	__raw_writel(BF_PWM_ACTIVEn_INACTIVE(400) |
		     BF_PWM_ACTIVEn_ACTIVE(0),
		     REGS_PWM_BASE + HW_PWM_ACTIVEn(4));
	__raw_writel(BF_PWM_PERIODn_CDIV(6) |	/* divide by 64 */
		     BF_PWM_PERIODn_INACTIVE_STATE(2) |	/* low */
		     BF_PWM_PERIODn_ACTIVE_STATE(3) |	/* high */
		     BF_PWM_PERIODn_PERIOD(599),
		     REGS_PWM_BASE + HW_PWM_PERIODn(4));
	__raw_writel(BM_PWM_CTRL_PWM4_ENABLE, REGS_PWM_BASE + HW_PWM_CTRL_SET);
	__raw_writel(BF_PINCTRL_DOUT3_DOUT(1 << 30),
		     REGS_PINCTRL_BASE + HW_PINCTRL_DOUT3_SET);

	return 0;
}
static int mxsmmc_init(struct mmc *mmc)
{
	struct mxsmmc_priv *priv = (struct mxsmmc_priv *)mmc->priv;
	struct mxs_ssp_regs *ssp_regs = priv->regs;

	/* Reset SSP */
	mxs_reset_block(&ssp_regs->hw_ssp_ctrl0_reg);

	/* 8 bits word length in MMC mode */
	clrsetbits_le32(&ssp_regs->hw_ssp_ctrl1,
		SSP_CTRL1_SSP_MODE_MASK | SSP_CTRL1_WORD_LENGTH_MASK |
		SSP_CTRL1_DMA_ENABLE,
		SSP_CTRL1_SSP_MODE_SD_MMC | SSP_CTRL1_WORD_LENGTH_EIGHT_BITS);

	/* Set initial bit clock 400 KHz */
	mx28_set_ssp_busclock(priv->id, 400);

	/* Send initial 74 clock cycles (185 us @ 400 KHz) */
	writel(SSP_CMD0_CONT_CLKING_EN, &ssp_regs->hw_ssp_cmd0_set);
	udelay(200);
	writel(SSP_CMD0_CONT_CLKING_EN, &ssp_regs->hw_ssp_cmd0_clr);

	return 0;
}
void video_hw_init(void *lcdbase)
{
	int ret;
	unsigned int div = 0, best = 0, pix_clk;
	u32 frac1;
	const unsigned long lcd_clk = 480000000;
	u32 lcd_ctrl = LCD_CTRL_DEFAULT | LCDIF_CTRL_RUN;
	u32 lcd_ctrl1 = LCD_CTRL1_DEFAULT, lcd_ctrl2 = LCD_CTRL2_DEFAULT;
	u32 lcd_vdctrl0 = LCD_VDCTRL0_DEFAULT;
	u32 lcd_vdctrl1 = LCD_VDCTRL1_DEFAULT;
	u32 lcd_vdctrl2 = LCD_VDCTRL2_DEFAULT;
	u32 lcd_vdctrl3 = LCD_VDCTRL3_DEFAULT;
	u32 lcd_vdctrl4 = LCD_VDCTRL4_DEFAULT;
	struct mxs_clkctrl_regs *clk_regs = (void *)MXS_CLKCTRL_BASE;
	char buf1[16], buf2[16];

	/* pixel format in memory */
	switch (color_depth) {
	case 8:
		lcd_ctrl |= LCDIF_CTRL_WORD_LENGTH_8BIT;
		lcd_ctrl1 |= LCDIF_CTRL1_BYTE_PACKING_FORMAT(1);
		break;

	case 16:
		lcd_ctrl |= LCDIF_CTRL_WORD_LENGTH_16BIT;
		lcd_ctrl1 |= LCDIF_CTRL1_BYTE_PACKING_FORMAT(3);
		break;

	case 18:
		lcd_ctrl |= LCDIF_CTRL_WORD_LENGTH_18BIT;
		lcd_ctrl1 |= LCDIF_CTRL1_BYTE_PACKING_FORMAT(7);
		break;

	case 24:
		lcd_ctrl |= LCDIF_CTRL_WORD_LENGTH_24BIT;
		lcd_ctrl1 |= LCDIF_CTRL1_BYTE_PACKING_FORMAT(7);
		break;

	default:
		printf("Invalid bpp: %d\n", color_depth);
		return;
	}

	/* pixel format on the LCD data pins */
	switch (pix_fmt) {
	case PIX_FMT_RGB332:
		lcd_ctrl |= LCDIF_CTRL_LCD_DATABUS_WIDTH_8BIT;
		break;

	case PIX_FMT_RGB565:
		lcd_ctrl |= LCDIF_CTRL_LCD_DATABUS_WIDTH_16BIT;
		break;

	case PIX_FMT_BGR666:
		lcd_ctrl |= 1 << LCDIF_CTRL_INPUT_DATA_SWIZZLE_OFFSET;
		/* fallthru */
	case PIX_FMT_RGB666:
		lcd_ctrl |= LCDIF_CTRL_LCD_DATABUS_WIDTH_18BIT;
		break;

	case PIX_FMT_BGR24:
		lcd_ctrl |= 1 << LCDIF_CTRL_INPUT_DATA_SWIZZLE_OFFSET;
		/* fallthru */
	case PIX_FMT_RGB24:
		lcd_ctrl |= LCDIF_CTRL_LCD_DATABUS_WIDTH_24BIT;
		break;

	default:
		printf("Invalid pixel format: %c%c%c%c\n", fourcc_str(pix_fmt));
		return;
	}

	pix_clk = PICOS2KHZ(mxsfb_var.pixclock);
	debug("designated pix_clk: %sMHz\n", strmhz(buf1, pix_clk * 1000));

	/* Search for the fractional divider/divisor pair closest to pix_clk */
	for (frac1 = 18; frac1 < 36; frac1++) {
		static unsigned int err = ~0;
		unsigned long clk = lcd_clk / 1000 * 18 / frac1;
		unsigned int d = (clk + pix_clk - 1) / pix_clk;
		unsigned int diff = abs(clk / d - pix_clk);

		debug("frac1=%u div=%u lcd_clk=%-8sMHz pix_clk=%-8sMHz diff=%u err=%u\n",
			frac1, d, strmhz(buf1, clk * 1000),
			strmhz(buf2, clk * 1000 / d), diff, err);

		if (clk < pix_clk)
			break;
		if (d > 255)
			continue;
		if (diff < err) {
			best = frac1;
			div = d;
			err = diff;
			if (err == 0)
				break;
		}
	}
	if (div == 0) {
		printf("Requested pixel clock %sMHz out of range\n",
			strmhz(buf1, pix_clk * 1000));
		return;
	}
	debug("div=%lu(%u*%u/18) for pixel clock %sMHz with base clock %sMHz\n",
		lcd_clk / pix_clk / 1000, best, div,
		strmhz(buf1, lcd_clk / div * 18 / best), strmhz(buf2, lcd_clk));

	frac1 = (readl(&clk_regs->hw_clkctrl_frac1_reg) & ~0xff) | best;
	writel(frac1, &clk_regs->hw_clkctrl_frac1_reg);
	writel(1 << 14, &clk_regs->hw_clkctrl_clkseq_clr);

	/* enable LCD clk and fractional divider */
	writel(div, &clk_regs->hw_clkctrl_lcdif_reg);
	while (readl(&clk_regs->hw_clkctrl_lcdif_reg) & (1 << 29))
		;

	ret = mxs_reset_block(&lcd_regs->hw_lcdif_ctrl_reg);
	if (ret) {
		printf("Failed to reset LCD controller: LCDIF_CTRL: %08x CLKCTRL_LCDIF: %08x\n",
			readl(&lcd_regs->hw_lcdif_ctrl_reg),
			readl(&clk_regs->hw_clkctrl_lcdif_reg));
		return;
	}

	if (mxsfb_var.sync & FB_SYNC_HOR_HIGH_ACT)
		lcd_vdctrl0 |= LCDIF_VDCTRL0_HSYNC_POL;
	if (mxsfb_var.sync & FB_SYNC_VERT_HIGH_ACT)
		lcd_vdctrl0 |= LCDIF_VDCTRL0_VSYNC_POL;
	if (mxsfb_var.sync & FB_SYNC_DATA_ENABLE_HIGH_ACT)
		lcd_vdctrl0 |= LCDIF_VDCTRL0_ENABLE_POL;
	if (mxsfb_var.sync & FB_SYNC_DOTCLK_FALLING_ACT)
		lcd_vdctrl0 |= LCDIF_VDCTRL0_DOTCLK_POL;

	lcd_vdctrl0 |= LCDIF_VDCTRL0_VSYNC_PULSE_WIDTH(mxsfb_var.vsync_len);
	lcd_vdctrl1 |= LCDIF_VDCTRL1_VSYNC_PERIOD(mxsfb_var.vsync_len +
						mxsfb_var.upper_margin +
						mxsfb_var.lower_margin +
						mxsfb_var.yres);

	lcd_vdctrl2 |= LCDIF_VDCTRL2_HSYNC_PULSE_WIDTH(mxsfb_var.hsync_len);
	lcd_vdctrl2 |= LCDIF_VDCTRL2_HSYNC_PERIOD(mxsfb_var.hsync_len +
						mxsfb_var.left_margin +
						mxsfb_var.right_margin +
						mxsfb_var.xres);

	lcd_vdctrl3 |= LCDIF_VDCTRL3_HORIZONTAL_WAIT_CNT(mxsfb_var.left_margin +
						mxsfb_var.hsync_len);
	lcd_vdctrl3 |= LCDIF_VDCTRL3_VERTICAL_WAIT_CNT(mxsfb_var.upper_margin +
						mxsfb_var.vsync_len);

	lcd_vdctrl4 |= LCDIF_VDCTRL4_DOTCLK_H_VALID_DATA_CNT(mxsfb_var.xres);

	writel((u32)lcdbase, &lcd_regs->hw_lcdif_next_buf_reg);
	writel(LCDIF_TRANSFER_COUNT_H_COUNT(mxsfb_var.xres) |
		LCDIF_TRANSFER_COUNT_V_COUNT(mxsfb_var.yres),
		&lcd_regs->hw_lcdif_transfer_count_reg);

	writel(lcd_vdctrl0, &lcd_regs->hw_lcdif_vdctrl0_reg);
	writel(lcd_vdctrl1, &lcd_regs->hw_lcdif_vdctrl1_reg);
	writel(lcd_vdctrl2, &lcd_regs->hw_lcdif_vdctrl2_reg);
	writel(lcd_vdctrl3, &lcd_regs->hw_lcdif_vdctrl3_reg);
	writel(lcd_vdctrl4, &lcd_regs->hw_lcdif_vdctrl4_reg);

	writel(lcd_ctrl1, &lcd_regs->hw_lcdif_ctrl1_reg);
	writel(lcd_ctrl2, &lcd_regs->hw_lcdif_ctrl2_reg);
	writel(lcd_ctrl, &lcd_regs->hw_lcdif_ctrl_reg);

	debug("mxsfb framebuffer driver initialized\n");
}
static int __devinit mxs_pwm_led_probe(struct platform_device *pdev)
{
	struct mxs_pwm_leds_plat_data *plat_data;
	struct resource *res;
	struct led_classdev *led;
	unsigned int pwmn;
	int leds_in_use = 0, rc = 0;
	int i;

	plat_data = (struct mxs_pwm_leds_plat_data *)pdev->dev.platform_data;
	if (plat_data == NULL)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -ENODEV;
	leds.base = (unsigned int)IO_ADDRESS(res->start);
	mxs_reset_block((void __iomem *)leds.base, 1);

	leds.led_num = plat_data->num;
	if (leds.led_num <= 0 || leds.led_num > CONFIG_MXS_PWM_CHANNELS)
		return -EFAULT;
	leds.leds = plat_data->leds;
	if (leds.leds == NULL)
		return -EFAULT;

	leds.pwm_clk = clk_get(&pdev->dev, "pwm");
	if (IS_ERR(leds.pwm_clk)) {
		rc = PTR_ERR(leds.pwm_clk);
		return rc;
	}
	clk_enable(leds.pwm_clk);

	for (i = 0; i < leds.led_num; i++) {
		pwmn = leds.leds[i].pwm;
		if (pwmn >= CONFIG_MXS_PWM_CHANNELS) {
			dev_err(&pdev->dev,
				"[led-pwm%d]:PWM %d doesn't exist\n",
				i, pwmn);
			continue;
		}
		led = &(leds.leds[i].dev);
		led->name = leds.leds[i].name;
		/* Foxconn, KingsChen, MKD, 20130924 { */
		led->brightness = MAX_LEVEL;
		led->max_brightness = MAX_LEVEL;
		MKD_DEFAULT_LEVEL = MAX_LEVEL;
		/* led->brightness = LED_HALF; */
		/* Foxconn, KingsChen, MKD, 20130924 } */
		led->flags = 0;
		led->brightness_set = mxs_pwm_led_brightness_set;
		led->default_trigger = 0;
		rc = led_classdev_register(&pdev->dev, led);
		if (rc < 0) {
			dev_err(&pdev->dev,
				"Unable to register LED device %d (err=%d)\n",
				i, rc);
			continue;
		}
		leds_in_use++;

		/* Set default brightness */
		/* Foxconn, KingsChen, MKD, 20130924 { */
		mxs_pwm_led_brightness_set(led, MAX_LEVEL);
		/* mxs_pwm_led_brightness_set(led, LED_HALF); */
		/* Foxconn, KingsChen, MKD, 20130924 } */
	}

	/* Foxconn, KingsChen, MKD, 20131119 { */
	led_probe_done = 1;
	/* Foxconn, KingsChen, MKD, 20131119 } */

	if (leds_in_use == 0) {
		dev_info(&pdev->dev, "No PWM LEDs available\n");
		clk_disable(leds.pwm_clk);
		clk_put(leds.pwm_clk);
		return -ENODEV;
	}

	return 0;
}
/*
 * Nominally, the purpose of this function is to look for or create the bad
 * block table. In fact, since we call this function at the very end of
 * the initialization process started by nand_scan(), and we don't have a
 * more formal mechanism, we "hook" this function to continue the init
 * process.
 *
 * At this point, the physical NAND Flash chips have been identified and
 * counted, so we know the physical geometry. This enables us to make some
 * important configuration decisions.
 *
 * The return value of this function propagates directly back to this driver's
 * call to nand_scan(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */
static int mxs_nand_scan_bbt(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd->priv;
	struct mxs_nand_info *nand_info = nand->priv;
	uint32_t tmp;

	/* Configure BCH and set NFC geometry */
	if (readl(&bch_regs->hw_bch_ctrl_reg) &
			(BCH_CTRL_SFTRST | BCH_CTRL_CLKGATE))
		/*
		 * When booting from NAND the BCH engine will already
		 * be operational and obviously does not like being reset here.
		 * There will be occasional read errors upon boot when this
		 * reset is done.
		 */
		mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);
	readl(&bch_regs->hw_bch_ctrl_reg);

	debug("mtd->writesize=%d\n", mtd->writesize);
	debug("mtd->oobsize=%d\n", mtd->oobsize);
	debug("ecc_strength=%d\n",
		mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize));

	/* Configure layout 0 */
	tmp = (mxs_nand_ecc_chunk_cnt(mtd) - 1)
		<< BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1)
		<< BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= MXS_NAND_CHUNK_DATA_CHUNK_SIZE;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1)
		<< BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= MXS_NAND_CHUNK_DATA_CHUNK_SIZE;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	/* Hook some operations at the MTD level. */
	if (mtd->read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->read_oob;
		mtd->read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->write_oob;
		mtd->write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->block_markbad;
		mtd->block_markbad = mxs_nand_hook_block_markbad;
	}

	/* We use the reference implementation for bad block management. */
	return nand_default_bbt(mtd);
}
static int stmp3xxx_rtc_probe(struct platform_device *pdev)
{
	struct stmp3xxx_rtc_data *rtc_data;
	struct resource *r;
	int err;

	rtc_data = kzalloc(sizeof *rtc_data, GFP_KERNEL);
	if (!rtc_data)
		return -ENOMEM;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "failed to get resource\n");
		err = -ENXIO;
		goto out_free;
	}

	rtc_data->io = ioremap(r->start, resource_size(r));
	if (!rtc_data->io) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto out_free;
	}

	rtc_data->irq_alarm = platform_get_irq(pdev, 0);

	if (!(readl(STMP3XXX_RTC_STAT + rtc_data->io) &
			STMP3XXX_RTC_STAT_RTC_PRESENT)) {
		dev_err(&pdev->dev, "no device onboard\n");
		err = -ENODEV;
		goto out_remap;
	}

	platform_set_drvdata(pdev, rtc_data);

	mxs_reset_block(rtc_data->io);
	writel(STMP3XXX_RTC_PERSISTENT0_ALARM_EN |
			STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE_EN |
			STMP3XXX_RTC_PERSISTENT0_ALARM_WAKE,
			rtc_data->io + STMP3XXX_RTC_PERSISTENT0_CLR);

	writel(STMP3XXX_RTC_CTRL_ONEMSEC_IRQ_EN |
			STMP3XXX_RTC_CTRL_ALARM_IRQ_EN,
			rtc_data->io + STMP3XXX_RTC_CTRL_CLR);

	rtc_data->rtc = rtc_device_register(pdev->name, &pdev->dev,
				&stmp3xxx_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc_data->rtc)) {
		err = PTR_ERR(rtc_data->rtc);
		goto out_remap;
	}

	err = request_irq(rtc_data->irq_alarm, stmp3xxx_rtc_interrupt, 0,
			"RTC alarm", &pdev->dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot claim IRQ%d\n",
			rtc_data->irq_alarm);
		goto out_irq_alarm;
	}

	return 0;

out_irq_alarm:
	rtc_device_unregister(rtc_data->rtc);
out_remap:
	platform_set_drvdata(pdev, NULL);
	iounmap(rtc_data->io);
out_free:
	kfree(rtc_data);
	return err;
}
/*
 * Nominally, the purpose of this function is to look for or create the bad
 * block table. In fact, since we call this function at the very end of
 * the initialization process started by nand_scan(), and we don't have a
 * more formal mechanism, we "hook" this function to continue the init
 * process.
 *
 * At this point, the physical NAND Flash chips have been identified and
 * counted, so we know the physical geometry. This enables us to make some
 * important configuration decisions.
 *
 * The return value of this function propagates directly back to this driver's
 * call to nand_scan(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */
static int mxs_nand_scan_bbt(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	uint32_t tmp;

	if (mtd->oobsize > MXS_NAND_CHUNK_DATA_CHUNK_SIZE) {
		galois_field = 14;
		chunk_data_size = MXS_NAND_CHUNK_DATA_CHUNK_SIZE * 2;
	}

	if (mtd->oobsize > chunk_data_size) {
		printf("NAND chips whose OOB size is larger than %d bytes are not supported!\n",
			chunk_data_size);
		return -EINVAL;
	}

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
	tmp = (mxs_nand_ecc_chunk_cnt(mtd->writesize) - 1)
		<< BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1)
		<< BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= chunk_data_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (14 == galois_field ? 1 : 0)
		<< BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (mxs_nand_get_ecc_strength(mtd->writesize, mtd->oobsize) >> 1)
		<< BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= chunk_data_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (14 == galois_field ? 1 : 0)
		<< BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	/* Hook some operations at the MTD level. */
	if (mtd->_read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->_read_oob;
		mtd->_read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->_write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->_write_oob;
		mtd->_write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->_block_markbad;
		mtd->_block_markbad = mxs_nand_hook_block_markbad;
	}

	/* We use the reference implementation for bad block management. */
	return nand_default_bbt(mtd);
}