/**
 * omap_plat_register_i2c_bus - register I2C bus with device descriptors
 * @bus_id: bus id counting from number 1
 * @clkrate: clock rate of the bus in kHz
 * @info: pointer into I2C device descriptor table or NULL
 * @len: number of descriptors in the table
 *
 * Returns 0 on success or an error code.
 */
int __init omap_plat_register_i2c_bus(int bus_id, u32 clkrate,
				      struct i2c_board_info const *info,
				      unsigned len)
{
	unsigned idx = bus_id - 1;
	int nr_ports = 0;

	/* Number of available ports depends on the OMAP generation. */
	if (cpu_class_is_omap1())
		nr_ports = omap1_i2c_nr_ports();
	else if (cpu_class_is_omap2())
		nr_ports = omap2_i2c_nr_ports();

	BUG_ON(bus_id < 1 || bus_id > nr_ports);

	if (info) {
		int err = i2c_register_board_info(bus_id, info, len);

		if (err)
			return err;
	}

	/* A rate already set (e.g. from the command line) wins over @clkrate. */
	if (!omap_i2c_pdata[idx].rate)
		omap_i2c_pdata[idx].rate = clkrate;
	omap_i2c_pdata[idx].rate &= ~OMAP_I2C_CMDLINE_SETUP;

	if (cpu_class_is_omap1())
		return omap1_i2c_add_bus(bus_id);
	if (cpu_class_is_omap2())
		return omap2_i2c_add_bus(bus_id);

	return 0;
}
/* * Clears any DMA state so the DMA engine is ready to restart with new buffers * through omap_start_dma(). Any buffers in flight are discarded. */ void omap_clear_dma(int lch) { unsigned long flags; flags = splhigh(); if (cpu_class_is_omap1()) { u32 l; l = dma_read(CCR(lch)); l &= ~OMAP_DMA_CCR_EN; dma_write(l, CCR(lch)); /* Clear pending interrupts */ l = dma_read(CSR(lch)); } if (cpu_class_is_omap2()) { int i; void __iomem *lch_base = omap_dma_base + OMAP_DMA4_CH_BASE(lch); for (i = 0; i < 0x44; i += 4) __raw_writel(0, lch_base + i); } splx(flags); }
/*
 * Register the 32 KiHz sync counter as the system clocksource and wire it
 * into sched_clock().  Only 16xx and OMAP2+ class devices have the counter;
 * on anything else this is a silent no-op returning 0.
 */
int __init omap_init_clocksource_32k(void)
{
	static char err[] __initdata = KERN_ERR
			"%s: can't register clocksource!\n";

	if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
		struct clk *sync_32k_ick;

		/* Pick the read accessor matching the SoC's counter mapping. */
		if (cpu_is_omap16xx())
			clocksource_32k.read = omap16xx_32k_read;
		else if (cpu_is_omap2420())
			clocksource_32k.read = omap2420_32k_read;
		else if (cpu_is_omap2430())
			clocksource_32k.read = omap2430_32k_read;
		else if (cpu_is_omap34xx())
			clocksource_32k.read = omap34xx_32k_read;
		else if (cpu_is_omap44xx())
			clocksource_32k.read = omap44xx_32k_read;
		else
			return -ENODEV;

		/* Interface clock for the sync timer block, if the SoC has one. */
		sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
		if (!IS_ERR(sync_32k_ick))
			clk_enable(sync_32k_ick);

		/* Remember boot-time counter value so time starts at zero. */
		offset_32k = clocksource_32k.read(&clocksource_32k);

		if (clocksource_register_hz(&clocksource_32k, 32768))
			printk(err, clocksource_32k.name);

		init_fixed_sched_clock(&cd, omap_update_sched_clock, 32,
				       32768, SC_MULT, SC_SHIFT);
	}

	return 0;
}
/*
 * Program the destination side of a DMA channel: port (OMAP1 only),
 * addressing mode, start address and the element/frame index increments.
 * Note that dest_port is only for OMAP1.
 */
void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
			      unsigned long dest_start,
			      int dst_ei, int dst_fi)
{
	u32 l;

	if (cpu_class_is_omap1()) {
		/* Destination port lives in CSDP bits [13:9] on OMAP1. */
		l = dma_read(CSDP(lch));
		l &= ~(0x1f << 9);
		l |= dest_port << 9;
		dma_write(l, CSDP(lch));
	}

	/* Destination addressing mode: CCR bits [15:14]. */
	l = dma_read(CCR(lch));
	l &= ~(0x03 << 14);
	l |= dest_amode << 14;
	dma_write(l, CCR(lch));

	if (cpu_class_is_omap1()) {
		/* OMAP1 splits the 32-bit address across two 16-bit registers. */
		dma_write(dest_start >> 16, CDSA_U(lch));
		dma_write(dest_start, CDSA_L(lch));
	}
	if (cpu_class_is_omap2())
		dma_write(dest_start, CDSA(lch));

	/* Element and frame index increments for double-indexed transfers. */
	dma_write(dst_ei, CDEI(lch));
	dma_write(dst_fi, CDFI(lch));
}
/*
 * Program the source side of a DMA channel: port (OMAP1 only), addressing
 * mode, start address and the element/frame index increments.
 * Note that src_port is only for omap1.
 */
void omap_set_dma_src_params(int lch, int src_port, int src_amode,
			     unsigned long src_start,
			     int src_ei, int src_fi)
{
	u32 l;

	if (cpu_class_is_omap1()) {
		u16 w;

		/* Source port lives in CSDP bits [6:2] on OMAP1. */
		w = dma_read(CSDP(lch));
		w &= ~(0x1f << 2);
		w |= src_port << 2;
		dma_write(w, CSDP(lch));
	}

	/* Source addressing mode: CCR bits [13:12]. */
	l = dma_read(CCR(lch));
	l &= ~(0x03 << 12);
	l |= src_amode << 12;
	dma_write(l, CCR(lch));

	if (cpu_class_is_omap1()) {
		/* OMAP1 splits the 32-bit address across two 16-bit registers. */
		dma_write(src_start >> 16, CSSA_U(lch));
		dma_write((u16)src_start, CSSA_L(lch));
	}
	if (cpu_class_is_omap2())
		dma_write(src_start, CSSA(lch));

	/* Element and frame index increments for double-indexed transfers. */
	dma_write(src_ei, CSEI(lch));
	dma_write(src_fi, CSFI(lch));
}
/*
 * Register the 32 KiHz sync counter as the system clocksource.  Only 16xx
 * and OMAP2+ class devices have the counter; elsewhere this is a no-op.
 */
static int __init omap_init_clocksource_32k(void)
{
	static char err[] __initdata = KERN_ERR
			"%s: can't register clocksource!\n";

	if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
		struct clk *sync_32k_ick;

		/* Pick the read accessor matching the SoC's counter mapping. */
		if (cpu_is_omap16xx())
			clocksource_32k.read = omap16xx_32k_read;
		else if (cpu_is_omap2420())
			clocksource_32k.read = omap2420_32k_read;
		else if (cpu_is_omap2430())
			clocksource_32k.read = omap2430_32k_read;
		else if (cpu_is_omap34xx())
			clocksource_32k.read = omap34xx_32k_read;
		else if (cpu_is_omap44xx())
			clocksource_32k.read = omap44xx_32k_read;
		else
			return -ENODEV;

		/*
		 * clk_get() returns an ERR_PTR-encoded pointer on failure,
		 * never NULL, so it must be tested with IS_ERR() — a plain
		 * truth test would pass an error pointer to clk_enable().
		 */
		sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
		if (!IS_ERR(sync_32k_ick))
			clk_enable(sync_32k_ick);

		clocksource_32k.mult = clocksource_hz2mult(32768,
					    clocksource_32k.shift);

		if (clocksource_register(&clocksource_32k))
			printk(err, clocksource_32k.name);
	}

	return 0;
}
static int omap_i2c_get_clocks(struct omap_i2c_dev *dev) { if (cpu_is_omap16xx() || cpu_class_is_omap2()) { dev->iclk = clk_get(dev->dev, "i2c_ick"); if (IS_ERR(dev->iclk)) { dev->iclk = NULL; return -ENODEV; } } /* For I2C operations on 2430 we need 96Mhz clock */ if (cpu_is_omap2430()) { dev->fclk = clk_get(dev->dev, "i2chs_fck"); if (IS_ERR(dev->fclk)) { if (dev->iclk != NULL) { clk_put(dev->iclk); dev->iclk = NULL; } dev->fclk = NULL; return -ENODEV; } } else { dev->fclk = clk_get(dev->dev, "i2c_fck"); if (IS_ERR(dev->fclk)) { if (dev->iclk != NULL) { clk_put(dev->iclk); dev->iclk = NULL; } dev->fclk = NULL; return -ENODEV; } } return 0; }
/* Disable every watchdog timer on OMAP2+ class devices; no-op elsewhere. */
static void __init omap_disable_wdt(void)
{
	if (!cpu_class_is_omap2())
		return;

	omap_hwmod_for_each_by_class("wd_timer", omap2_disable_wdt, NULL);
}
/**
 * _read_32ksynct - read the OMAP 32K sync timer
 *
 * Returns the current value of the 32KiHz synchronization counter.
 * XXX this should be generalized to simply read the system clocksource.
 * XXX this should be moved to a separate synctimer32k.c file
 */
static u32 _read_32ksynct(void)
{
	/* The sync counter only exists on OMAP2+ class devices. */
	BUG_ON(!cpu_class_is_omap2());

	return __raw_readl(OMAP2_IO_ADDRESS(OMAP_32KSYNCT_BASE + 0x010));
}
/*
 * Configure constant-fill or transparent-copy mode (or disable both) for a
 * DMA channel, programming the fill/key @color.  Not available in 1510 mode.
 */
void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode,
			     u32 color)
{
	BUG_ON(omap_dma_in_1510_mode());

	if (cpu_class_is_omap1()) {
		u16 w;

		/* Mode select is CCR2 bits [1:0] on OMAP1. */
		w = dma_read(CCR2(lch));
		w &= ~0x03;

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			w |= 0x01;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			w |= 0x02;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(w, CCR2(lch));

		w = dma_read(LCH_CTRL(lch));
		w &= ~0x0f;
		/* Default is channel type 2D */
		if (mode) {
			/* 32-bit color split across two 16-bit registers. */
			dma_write((u16)color, COLOR_L(lch));
			dma_write((u16)(color >> 16), COLOR_U(lch));
			w |= 1;		/* Channel type G */
		}
		dma_write(w, LCH_CTRL(lch));
	}

	if (cpu_class_is_omap2()) {
		u32 val;

		/* Mode select is CCR bits 16 (fill) and 17 (transparent). */
		val = dma_read(CCR(lch));
		val &= ~((1 << 17) | (1 << 16));

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			val |= 1 << 16;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			val |= 1 << 17;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(val, CCR(lch));

		/* COLOR register holds 24 bits on OMAP2+. */
		color &= 0xffffff;
		dma_write(color, COLOR(lch));
	}
}
/*
 * Set the destination element/frame index increments for an OMAP1 channel.
 * No-op on OMAP2+ class devices, which program indexes elsewhere.
 */
void omap_set_dma_dest_index(int lch, int eidx, int fidx)
{
	if (cpu_class_is_omap2())
		return;

	dma_write(eidx, CDEI(lch));
	dma_write(fidx, CDFI(lch));
}
/*
 * Configure a DMA channel's transfer shape: element data type, element and
 * frame counts, synchronization mode and (OMAP2+) hardware trigger line.
 */
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;

	/* Element size: CSDP bits [1:0]. */
	l = dma_read(CSDP(lch));
	l &= ~0x03;
	l |= data_type;
	dma_write(l, CSDP(lch));

	if (cpu_class_is_omap1()) {
		u16 ccr;

		/* Frame sync: CCR bit 5. */
		ccr = dma_read(CCR(lch));
		ccr &= ~(1 << 5);
		if (sync_mode == OMAP_DMA_SYNC_FRAME)
			ccr |= 1 << 5;
		dma_write(ccr, CCR(lch));

		/* Block sync: CCR2 bit 2. */
		ccr = dma_read(CCR2(lch));
		ccr &= ~(1 << 2);
		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
			ccr |= 1 << 2;
		dma_write(ccr, CCR2(lch));
	}

	if (cpu_class_is_omap2() && dma_trigger) {
		u32 val;

		val = dma_read(CCR(lch));

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~((3 << 19) | 0x1f);
		/* Upper trigger bits go to bits [20:19] (via the <<14 shift
		 * of the bits above 0x1f), lower 5 bits to [4:0]. */
		val |= (dma_trigger & ~0x1f) << 14;
		val |= dma_trigger & 0x1f;

		if (sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		else
			val &= ~(1 << 5);

		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;
		else
			val &= ~(1 << 18);

		if (src_or_dst_synch)
			val |= 1 << 24;		/* source synch */
		else
			val &= ~(1 << 24);	/* dest synch */

		dma_write(val, CCR(lch));
	}

	dma_write(elem_count, CEN(lch));
	dma_write(frame_count, CFN(lch));
}
/* Register the watchdog device(s) appropriate for the running SoC class. */
static void omap_init_wdt(void)
{
	if (cpu_class_is_omap2()) {
		omap_hwmod_for_each_by_class("wd_timer", omap2_init_wdt, NULL);
		return;
	}

	if (cpu_is_omap16xx())
		(void) platform_device_register(&omap_wdt_device);
}
/*
 * The amount of SRAM depends on the core type.
 * Note that we cannot try to test for SRAM here because writes
 * to secure SRAM will hang the system. Also the SRAM is not
 * yet mapped at this point.
 */
static void __init omap_detect_sram(void)
{
	/* Reserve the bootloader's region at the start of SRAM by default. */
	omap_sram_skip = SRAM_BOOTLOADER_SZ;
	if (cpu_class_is_omap2()) {
		if (is_sram_locked()) {
			/* Secure part locked: only the public SRAM window
			 * is usable. */
			if (cpu_is_omap34xx()) {
				omap_sram_start = OMAP3_SRAM_PUB_PA;
				if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) ||
				    (omap_type() == OMAP2_DEVICE_TYPE_SEC)) {
					omap_sram_size = 0x7000; /* 28K */
					/* EMU/SEC parts keep an extra 16K
					 * out of reach. */
					omap_sram_skip += SZ_16K;
				} else {
					omap_sram_size = 0x8000; /* 32K */
				}
			} else if (cpu_is_omap44xx()) {
				omap_sram_start = OMAP4_SRAM_PUB_PA;
				omap_sram_size = 0xa000; /* 40K */
			} else {
				omap_sram_start = OMAP2_SRAM_PUB_PA;
				omap_sram_size = 0x800; /* 2K */
			}
		} else {
			/* General-purpose device: full SRAM is available. */
			if (cpu_is_am33xx()) {
				omap_sram_start = AM33XX_SRAM_PA;
				omap_sram_size = 0x10000; /* 64K */
			} else if (cpu_is_omap34xx()) {
				omap_sram_start = OMAP3_SRAM_PA;
				omap_sram_size = 0x10000; /* 64K */
			} else if (cpu_is_omap44xx()) {
				omap_sram_start = OMAP4_SRAM_PA;
				omap_sram_size = 0xe000; /* 56K */
			} else {
				omap_sram_start = OMAP2_SRAM_PA;
				if (cpu_is_omap242x())
					omap_sram_size = 0xa0000; /* 640K */
				else if (cpu_is_omap243x())
					omap_sram_size = 0x10000; /* 64K */
			}
		}
	} else {
		/* OMAP1 class. */
		omap_sram_start = OMAP1_SRAM_PA;

		if (cpu_is_omap7xx())
			omap_sram_size = 0x32000;	/* 200K */
		else if (cpu_is_omap15xx())
			omap_sram_size = 0x30000;	/* 192K */
		else if (cpu_is_omap1610() || cpu_is_omap1611() ||
				cpu_is_omap1621() || cpu_is_omap1710())
			omap_sram_size = 0x4000;	/* 16K */
		else {
			/* Unknown OMAP1: fall back to the smallest size. */
			pr_err("Could not detect SRAM size\n");
			omap_sram_size = 0x4000;
		}
	}
}
/*
 * Select the destination write mode (CSDP bits [17:16]) for a channel.
 * Only OMAP2+ class DMA has configurable write modes; no-op elsewhere.
 */
void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
{
	u32 csdp;

	if (!cpu_class_is_omap2())
		return;

	csdp = dma_read(CSDP(lch));
	csdp &= ~(0x3 << 16);
	csdp |= mode << 16;
	dma_write(csdp, CSDP(lch));
}
/*
 * Set the source-side burst size (CSDP bits [8:7]).  The register encoding
 * for a given burst length differs between OMAP1 and OMAP2+ classes, and
 * OMAP1 hardware does not support 8- or 16-element bursts.
 */
void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = dma_read(CSDP(lch));
	l &= ~(0x03 << 7);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		if (cpu_class_is_omap2())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (cpu_class_is_omap2()) {
			burst = 0x2;
			break;
		}
		/* not supported by current hardware on OMAP1
		 * w |= (0x03 << 7);
		 * fall through
		 */
	case OMAP_DMA_DATA_BURST_16:
		if (cpu_class_is_omap2()) {
			burst = 0x3;
			break;
		}
		/* OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		/* Unsupported burst for this SoC class. */
		BUG();
	}

	l |= (burst << 7);
	dma_write(l, CSDP(lch));
}
/*
 * Map the 32 KiHz sync counter, register it as an mmio clocksource and
 * wire it into sched_clock().  Only 16xx and OMAP2+ devices have the
 * counter; elsewhere this is a no-op returning 0.
 */
int __init omap_init_clocksource_32k(void)
{
	static char err[] __initdata = KERN_ERR
			"%s: can't register clocksource!\n";

	if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
		u32 pbase;
		unsigned long size = SZ_4K;
		void __iomem *base;
		struct clk *sync_32k_ick;

		/* Counter physical base and window size vary per SoC. */
		if (cpu_is_omap16xx()) {
			pbase = OMAP16XX_TIMER_32K_SYNCHRONIZED;
			size = SZ_1K;
		} else if (cpu_is_omap2420())
			pbase = OMAP2420_32KSYNCT_BASE + 0x10;
		else if (cpu_is_omap2430())
			pbase = OMAP2430_32KSYNCT_BASE + 0x10;
		else if (cpu_is_omap34xx())
			pbase = OMAP3430_32KSYNCT_BASE + 0x10;
		else if (cpu_is_omap44xx())
			pbase = OMAP4430_32KSYNCT_BASE + 0x10;
		else if (cpu_is_omap54xx())
			pbase = OMAP54XX_32KSYNCT_BASE + 0x30;
		else
			return -ENODEV;

		/* For this to work we must have a static mapping in io.c for this area */
		base = ioremap(pbase, size);
		if (!base)
			return -ENODEV;

		sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
		if (!IS_ERR(sync_32k_ick))
			clk_enable(sync_32k_ick);

		timer_32k_base = base;

		/*
		 * 120000 rough estimate from the calculations in
		 * __clocksource_updatefreq_scale.
		 */
		clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
				32768, NSEC_PER_SEC, 120000);

		if (clocksource_mmio_init(base, "32k_counter", 32768, 250, 32,
					  clocksource_mmio_readl_up))
			printk(err, "32k_counter");

		setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
	}

	return 0;
}
void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode) { unsigned int burst = 0; u32 l; l = dma_read(CSDP(lch)); l &= ~(0x03 << 14); switch (burst_mode) { case OMAP_DMA_DATA_BURST_DIS: break; case OMAP_DMA_DATA_BURST_4: if (cpu_class_is_omap2()) burst = 0x1; else burst = 0x2; break; case OMAP_DMA_DATA_BURST_8: if (cpu_class_is_omap2()) burst = 0x2; else burst = 0x3; break; case OMAP_DMA_DATA_BURST_16: if (cpu_class_is_omap2()) { burst = 0x3; break; } /* OMAP1 don't support burst 16 * fall through */ default: IOLog("Invalid DMA burst mode"); BUG(); return; } l |= (burst << 14); dma_write(l, CSDP(lch)); }
/*
 * Clear any stale channel status, then unmask the channel's configured
 * interrupt sources.
 */
static inline void omap_enable_channel_irq(int lch)
{
	u32 csr;

	/* Clear CSR: OMAP1 clears on read, OMAP2+ clears by writing ones. */
	if (cpu_class_is_omap1())
		csr = dma_read(CSR(lch));
	else if (cpu_class_is_omap2())
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch));

	/* Enable the interrupts this channel was configured with. */
	dma_write(dma_chan[lch].enabled_irqs, CICR(lch));
}
/*
 * Enable the line-0 IRQ for one logical channel on OMAP2+ DMA.
 * The read-modify-write of IRQENABLE_L0 is serialized by dma_chan_lock.
 */
static inline void omap2_enable_irq_lch(int lch)
{
	unsigned long flags;
	u32 enable;

	if (!cpu_class_is_omap2())
		return;

	spin_lock_irqsave(&dma_chan_lock, flags);
	enable = dma_read(IRQENABLE_L0) | (1 << lch);
	dma_write(enable, IRQENABLE_L0);
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
/*
 * Detect and map on-chip SRAM, then run the SoC-specific SRAM setup.
 * Always returns 0.
 */
int __init omap_sram_init(void)
{
	omap_detect_sram();
	omap_map_sram();

	if (cpu_class_is_omap2()) {
		if (cpu_is_omap242x())
			omap242x_sram_init();
		else if (cpu_is_omap2430())
			omap243x_sram_init();
		else if (cpu_is_omap34xx())
			omap34xx_sram_init();
	} else {
		omap1_sram_init();
	}

	return 0;
}
void omap_free_dma(int lch) { unsigned long flags; if (dma_chan[lch].dev_id == -1) { IOLog("omap_dma: trying to free unallocated DMA channel %d\n", lch); return; } if (cpu_class_is_omap1()) { /* Disable all DMA interrupts for the channel. */ dma_write(0, CICR(lch)); /* Make sure the DMA transfer is stopped. */ dma_write(0, CCR(lch)); } if (cpu_class_is_omap2()) { u32 val; spin_lock_irqsave(&dma_chan_lock, flags); /* Disable interrupts */ val = dma_read(IRQENABLE_L0); val &= ~(1 << lch); dma_write(val, IRQENABLE_L0); spin_unlock_irqrestore(&dma_chan_lock, flags); /* Clear the CSR register and IRQ status register */ dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR(lch)); dma_write(1 << lch, IRQSTATUS_L0); /* Disable all DMA interrupts for the channel. */ dma_write(0, CICR(lch)); /* Make sure the DMA transfer is stopped. */ dma_write(0, CCR(lch)); omap_clear_dma(lch); } spin_lock_irqsave(&dma_chan_lock, flags); dma_chan[lch].dev_id = -1; dma_chan[lch].next_lch = -1; dma_chan[lch].callback = NULL; spin_unlock_irqrestore(&dma_chan_lock, flags); }
/*
 * Register busses defined in command line but that are not registered with
 * omap_register_i2c_bus from board initialization code.
 */
static int __init omap_register_i2c_bus_cmdline(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(omap_i2c_pdata); i++) {
		int err;

		/* Skip buses not flagged by the command-line parser. */
		if (!(omap_i2c_pdata[i].rate & OMAP_I2C_CMDLINE_SETUP))
			continue;

		omap_i2c_pdata[i].rate &= ~OMAP_I2C_CMDLINE_SETUP;

		if (cpu_class_is_omap1())
			err = omap1_i2c_add_bus(i + 1);
		else if (cpu_class_is_omap2())
			err = omap2_i2c_add_bus(i + 1);
		else
			err = -EINVAL;

		/* Stop at the first bus that fails to register. */
		if (err)
			return err;
	}

	return 0;
}
/*
 * Chain this channel to its successor by programming CLNK_CTRL with the
 * next channel number and the ENABLE_LNK bit.
 */
static inline void enable_lnk(int lch)
{
	u32 l;

	l = dma_read(CLNK_CTRL(lch));

	/* OMAP1 uses bit 14 as STOP_LNK; make sure it is cleared. */
	if (cpu_class_is_omap1())
		l &= ~(1 << 14);

	/* Set the ENABLE_LNK bits */
	if (dma_chan[lch].next_lch != -1)
		l = dma_chan[lch].next_lch | (1 << 15);

#ifndef CONFIG_ARCH_OMAP1
	/* OMAP2+ chained-mode linkage overrides the plain next_lch link. */
	if (cpu_class_is_omap2())
		if (dma_chan[lch].next_linked_ch != -1)
			l = dma_chan[lch].next_linked_ch | (1 << 15);
#endif

	dma_write(l, CLNK_CTRL(lch));
}
/** * @brief omap_dma_set_global_params : Set global priority settings for dma * * @param arb_rate * @param max_fifo_depth * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM * DMA_THREAD_RESERVE_ONET * DMA_THREAD_RESERVE_TWOT * DMA_THREAD_RESERVE_THREET */ void omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams) { u32 reg; if (!cpu_class_is_omap2()) { IOLog("FIXME: no %s on 15xx/16xx", __func__); return; } if (max_fifo_depth == 0) max_fifo_depth = 1; if (arb_rate == 0) arb_rate = 1; reg = 0xff & max_fifo_depth; reg |= (0x3 & tparams) << 12; reg |= (arb_rate & 0xff) << 16; dma_write(reg, GCR); }
/*
 * Set DMA priority.  On OMAP1 the priority is a 4-bit field in the traffic
 * controller register of the given destination port; on OMAP2+ it is a
 * single high/low priority bit (CCR bit 6) per logical channel.
 */
void omap_set_dma_priority(int lch, int dst_port, int priority)
{
	unsigned long reg;
	u32 l;

	if (cpu_class_is_omap1()) {
		switch (dst_port) {
		case OMAP_DMA_PORT_OCP_T1:	/* FFFECC00 */
			reg = OMAP_TC_OCPT1_PRIOR;
			break;
		case OMAP_DMA_PORT_OCP_T2:	/* FFFECCD0 */
			reg = OMAP_TC_OCPT2_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFF:	/* FFFECC08 */
			reg = OMAP_TC_EMIFF_PRIOR;
			break;
		case OMAP_DMA_PORT_EMIFS:	/* FFFECC04 */
			reg = OMAP_TC_EMIFS_PRIOR;
			break;
		default:
			BUG();
			return;
		}
		/* Priority field is bits [11:8] of the TC register. */
		l = omap_readl(reg);
		l &= ~(0xf << 8);
		l |= (priority & 0xf) << 8;
		omap_writel(l, reg);
	}

	if (cpu_class_is_omap2()) {
		u32 ccr;

		ccr = dma_read(CCR(lch));
		if (priority)
			ccr |= (1 << 6);	/* high-priority queue */
		else
			ccr &= ~(1 << 6);
		dma_write(ccr, CCR(lch));
	}
}
/*
 * Build and register the omap_wdt platform device from the wd_timer2
 * hwmod on OMAP2+ class devices.  Returns 0 (also when not applicable),
 * -EINVAL if the hwmod lookup fails.
 */
static int __init omap_init_wdt(void)
{
	int id = -1;
	struct platform_device *pdev;
	struct omap_hwmod *oh;
	char *oh_name = "wd_timer2";
	char *dev_name = "omap_wdt";

	if (!cpu_class_is_omap2())
		return 0;

	oh = omap_hwmod_lookup(oh_name);
	if (!oh) {
		/* NOTE(review): id is -1 here, so this prints "wd_timer-1";
		 * oh_name would likely be the intended argument — confirm. */
		pr_err("Could not look up wd_timer%d hwmod\n", id);
		return -EINVAL;
	}

	/* id == -1 lets the platform core assign the device id. */
	pdev = omap_device_build(dev_name, id, oh, NULL, 0, NULL, 0, 0);
	WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
				dev_name, oh->name);
	return 0;
}
/*
 * Break the channel out of its link chain and mark it inactive.  OMAP1
 * stops linking via the STOP_LNK bit; OMAP2+ clears ENABLE_LNK instead.
 */
static inline void disable_lnk(int lch)
{
	u32 ctrl = dma_read(CLNK_CTRL(lch));

	/* Disable interrupts */
	if (cpu_class_is_omap1()) {
		dma_write(0, CICR(lch));
		/* Set the STOP_LNK bit */
		ctrl |= 1 << 14;
	}

	if (cpu_class_is_omap2()) {
		omap_disable_channel_irq(lch);
		/* Clear the ENABLE_LNK bit */
		ctrl &= ~(1 << 15);
	}

	dma_write(ctrl, CLNK_CTRL(lch));
	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
/*
 * Acquire the I2C interface clock (16xx/OMAP2+ only) and functional clock.
 * On failure both dev->iclk and dev->fclk are left NULL and -ENODEV is
 * returned, so callers can tear down unconditionally.
 */
static int __init omap_i2c_get_clocks(struct omap_i2c_dev *dev)
{
	if (cpu_is_omap16xx() || cpu_class_is_omap2()) {
		dev->iclk = clk_get(dev->dev, "i2c_ick");
		if (IS_ERR(dev->iclk)) {
			dev->iclk = NULL;
			return -ENODEV;
		}
	}

	dev->fclk = clk_get(dev->dev, "i2c_fck");
	if (IS_ERR(dev->fclk)) {
		dev->fclk = NULL;
		/* Drop the interface clock taken above, if any. */
		if (dev->iclk != NULL) {
			clk_put(dev->iclk);
			dev->iclk = NULL;
		}
		return -ENODEV;
	}

	return 0;
}
/*
 * Build and register the omap_wdt omap_device from the wd_timer2 hwmod on
 * OMAP2+ class devices.  Returns 0 (also when not applicable), -EINVAL if
 * the hwmod lookup fails.
 */
static int __init omap_init_wdt(void)
{
	int id = -1;
	struct omap_device *od;
	struct omap_hwmod *oh;
	char *oh_name = "wd_timer2";
	char *dev_name = "omap_wdt";

	if (!cpu_class_is_omap2())
		return 0;

	oh = omap_hwmod_lookup(oh_name);
	if (!oh) {
		/* NOTE(review): id is -1 here, so this prints "wd_timer-1";
		 * oh_name would likely be the intended argument — confirm. */
		pr_err("Could not look up wd_timer%d hwmod\n", id);
		return -EINVAL;
	}

	/* id == -1 lets the platform core assign the device id. */
	od = omap_device_build(dev_name, id, oh, NULL, 0,
				omap_wdt_latency,
				ARRAY_SIZE(omap_wdt_latency), 0);
	WARN(IS_ERR(od), "Cant build omap_device for %s:%s.\n",
				dev_name, oh->name);
	return 0;
}