void cpm2_set_pin(int port, int pin, int flags) { struct cpm2_ioports __iomem *iop = (struct cpm2_ioports __iomem *)&cpm2_immr->im_ioport; pin = 1 << (31 - pin); if (flags & CPM_PIN_OUTPUT) setbits32(&iop[port].dir, pin); else clrbits32(&iop[port].dir, pin); if (!(flags & CPM_PIN_GPIO)) setbits32(&iop[port].par, pin); else clrbits32(&iop[port].par, pin); if (flags & CPM_PIN_SECONDARY) setbits32(&iop[port].sor, pin); else clrbits32(&iop[port].sor, pin); if (flags & CPM_PIN_OPENDRAIN) setbits32(&iop[port].odr, pin); else clrbits32(&iop[port].odr, pin); }
/*
 * setup_smc2_ioports - mux SMC2 onto its serial pins and enable RS-232.
 * @pdata: platform data (unused here).
 *
 * Clears BCSR1_RS232EN_2 in the board control/status register to enable
 * the second RS-232 transceiver (the enable is active-low, as the clear
 * shows), then routes SMC2 either onto port B or — when
 * CONFIG_SERIAL_CPM_ALT_SMC2 selects the alternate pinout — onto port A.
 */
static void setup_smc2_ioports(struct fs_uart_platform_info *pdata)
{
	immap_t *immap = (immap_t *)IMAP_ADDR;
	unsigned *bcsr_io;
	unsigned int iobits = 0x00000c00;	/* SMC2 pin mask */

	bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
	if (bcsr_io == NULL) {
		printk(KERN_CRIT "Could not remap BCSR1\n");
		return;
	}

	/* Active-low transceiver enable. */
	clrbits32(bcsr_io, BCSR1_RS232EN_2);
	iounmap(bcsr_io);

#ifndef CONFIG_SERIAL_CPM_ALT_SMC2
	/* Default routing: SMC2 on port B.
	 * NOTE(review): pbpar/pbdir are accessed 32-bit wide but pbodr
	 * 16-bit wide — matches the historical 8xx register layout, but
	 * verify against the immap definition. */
	setbits32(&immap->im_cpm.cp_pbpar, iobits);
	clrbits32(&immap->im_cpm.cp_pbdir, iobits);
	clrbits16(&immap->im_cpm.cp_pbodr, iobits);
#else
	/* Alternate routing: SMC2 on port A (16-bit registers). */
	setbits16(&immap->im_ioport.iop_papar, iobits);
	clrbits16(&immap->im_ioport.iop_padir, iobits);
	clrbits16(&immap->im_ioport.iop_paodr, iobits);
#endif
}
/**
 * fsl_ssi_trigger: start and stop the DMA transfer.
 *
 * This function is called by ALSA to start, stop, pause, and resume the DMA
 * transfer of data.
 *
 * The DMA channel is in external master start and pause mode, which
 * means the SSI completely controls the flow of data.
 */
static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
			   struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct fsl_ssi_private *ssi_private =
		snd_soc_dai_get_drvdata(rtd->cpu_dai);
	struct ccsr_ssi __iomem *ssi = ssi_private->ssi;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		/* Force SSIEN off so a START always begins from a disabled
		 * SSI, then fall through to (re)enable it below. */
		clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN);
		/* fall through */
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		/* Enable the SSI plus only this substream's direction. */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			setbits32(&ssi->scr,
				  CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE);
		else
			setbits32(&ssi->scr,
				  CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_RE);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		/* Stop only this direction; SSIEN is left untouched so the
		 * other direction can keep running. */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			clrbits32(&ssi->scr, CCSR_SSI_SCR_TE);
		else
			clrbits32(&ssi->scr, CCSR_SSI_SCR_RE);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * mpc866ads_fixup_scc_irda_pdata - board fixup for the SCC IRDA device.
 * @pdev: platform device being fixed up (only "fsl-cpm-scc:irda" matches)
 * @idx:  device index (unused here)
 *
 * Enables the IRDA transceiver through BCSR1 (the enable bit is cleared,
 * i.e. active-low), routes the SCC onto port A pins 0x000c, and selects
 * the SCC clock source in the serial interface clock route register.
 */
static void mpc866ads_fixup_scc_irda_pdata(struct platform_device *pdev,
					   int idx)
{
	immap_t *immap = (immap_t *)IMAP_ADDR;
	unsigned *bcsr_io;

	/* This is for IRDA devices only */
	if (!pdev || !pdev->name || (!strstr(pdev->name, "fsl-cpm-scc:irda")))
		return;

	bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
	if (bcsr_io == NULL) {
		printk(KERN_CRIT "Could not remap BCSR1\n");
		return;
	}

	/* Enable the IRDA. */
	clrbits32(bcsr_io, BCSR1_IRDAEN);
	iounmap(bcsr_io);

	/* Configure port A pins. */
	setbits16(&immap->im_ioport.iop_papar, 0x000c);
	clrbits16(&immap->im_ioport.iop_padir, 0x000c);

	/* Configure Serial Interface clock routing.
	 * First, clear all SCC bits to zero, then set the ones we want.
	 */
	clrbits32(&immap->im_cpm.cp_sicr, 0x0000ff00);
	setbits32(&immap->im_cpm.cp_sicr, 0x00001200);
}
/*
 * smc1_lineif - line interface setup for the SMC1 console UART.
 * @pinfo: CPM uart port being configured.
 *
 * Selects BRG1 for SMC1.  On the MPC885ADS the SMC1 signals are routed
 * onto port E first; on both supported ADS boards the RS-232 transceiver
 * is then enabled by clearing the (active-low) BCSR1 enable bit.
 *
 * Fix: the BCSR access used an open-coded out_be32(in_be32() & ~bit)
 * read-modify-write; replaced with the clrbits32() helper used for every
 * other register access in this file (identical behavior).
 */
void smc1_lineif(struct uart_cpm_port *pinfo)
{
	/* XXX SMC1: insert port configuration here */
	unsigned *bcsr_io;
	cpm8xx_t *cp;

	pinfo->brg = 1;

#if defined(CONFIG_MPC885ADS) || defined(CONFIG_MPC86XADS)
#if defined(CONFIG_MPC885ADS)
	/* Route SMC1 onto port E: pins 0xc0 as peripheral function and
	 * inputs, with the secondary option cleared for 0x40 and set for
	 * 0x80. */
	cp = (cpm8xx_t *)immr_map(im_cpm);
	setbits32(&cp->cp_pepar, 0x000000c0);
	clrbits32(&cp->cp_pedir, 0x000000c0);
	clrbits32(&cp->cp_peso, 0x00000040);
	setbits32(&cp->cp_peso, 0x00000080);
	immr_unmap(cp);
#endif
	bcsr_io = ioremap(BCSR1, sizeof(unsigned long));
	if (bcsr_io == NULL) {
		printk(KERN_CRIT "Could not remap BCSR\n");
		return;
	}

	/* Enable the RS-232 transceiver (active-low enable bit). */
	clrbits32(bcsr_io, BCSR1_RS232EN_1);
	iounmap(bcsr_io);
#endif
}
static void setup_scc3_ioports(void) { immap_t *immap = (immap_t *) IMAP_ADDR; unsigned *bcsr_io; bcsr_io = ioremap(BCSR_ADDR, BCSR_SIZE); if (bcsr_io == NULL) { printk(KERN_CRIT "Could not remap BCSR\n"); return; } /* Enable the PHY. */ setbits32(bcsr_io+4, BCSR4_ETH10_RST); /* Configure port A pins for Txd and Rxd. */ setbits16(&immap->im_ioport.iop_papar, PA_ENET_RXD | PA_ENET_TXD); clrbits16(&immap->im_ioport.iop_padir, PA_ENET_RXD | PA_ENET_TXD); /* Configure port C pins to enable CLSN and RENA. */ clrbits16(&immap->im_ioport.iop_pcpar, PC_ENET_CLSN | PC_ENET_RENA); clrbits16(&immap->im_ioport.iop_pcdir, PC_ENET_CLSN | PC_ENET_RENA); setbits16(&immap->im_ioport.iop_pcso, PC_ENET_CLSN | PC_ENET_RENA); /* Configure port E for TCLK and RCLK. */ setbits32(&immap->im_cpm.cp_pepar, PE_ENET_TCLK | PE_ENET_RCLK); clrbits32(&immap->im_cpm.cp_pepar, PE_ENET_TENA); clrbits32(&immap->im_cpm.cp_pedir, PE_ENET_TCLK | PE_ENET_RCLK | PE_ENET_TENA); clrbits32(&immap->im_cpm.cp_peso, PE_ENET_TCLK | PE_ENET_RCLK); setbits32(&immap->im_cpm.cp_peso, PE_ENET_TENA); /* Configure Serial Interface clock routing. * First, clear all SCC bits to zero, then set the ones we want. */ clrbits32(&immap->im_cpm.cp_sicr, SICR_ENET_MASK); setbits32(&immap->im_cpm.cp_sicr, SICR_ENET_CLKRT); /* Disable Rx and Tx. SMC1 sshould be stopped if SCC3 eternet are used. */ immap->im_cpm.cp_smc[0].smc_smcmr &= ~(SMCMR_REN | SMCMR_TEN); /* On the MPC885ADS SCC ethernet PHY is initialized in the full duplex mode * by H/W setting after reset. SCC ethernet controller support only half duplex. * This discrepancy of modes causes a lot of carrier lost errors. */ /* In the original SCC enet driver the following code is placed at the end of the initialization */ setbits32(&immap->im_cpm.cp_pepar, PE_ENET_TENA); clrbits32(&immap->im_cpm.cp_pedir, PE_ENET_TENA); setbits32(&immap->im_cpm.cp_peso, PE_ENET_TENA); setbits32(bcsr_io+1, BCSR1_ETHEN); iounmap(bcsr_io); }
/*
 * m82xx_board_setup - board-level setup run at init time.
 *
 * Enables the second RS-232 port through the BCSR (the enable bit is
 * cleared, i.e. active-low) and quiesces any SCC UART the firmware may
 * have left running: interrupts masked in sccm, receiver/transmitter
 * disabled in gsmrl.
 *
 * NOTE(review): neither ioremap() result is checked for NULL before use.
 */
void __init m82xx_board_setup(void)
{
	cpm2_map_t *immap = ioremap(CPM_MAP_ADDR, sizeof(cpm2_map_t));
	u32 *bcsr = ioremap(BCSR_ADDR + 4, sizeof(u32));

	/* Enable the 2nd UART port */
	clrbits32(bcsr, BCSR1_RS232_EN2);

#ifdef CONFIG_SERIAL_CPM_SCC1
	/* Mask SCC1 interrupts and disable its receiver/transmitter. */
	clrbits32((u32 *)&immap->im_scc[0].scc_sccm,
		  UART_SCCM_TX | UART_SCCM_RX);
	clrbits32((u32 *)&immap->im_scc[0].scc_gsmrl,
		  SCC_GSMRL_ENR | SCC_GSMRL_ENT);
#endif

#ifdef CONFIG_SERIAL_CPM_SCC2
	clrbits32((u32 *)&immap->im_scc[1].scc_sccm,
		  UART_SCCM_TX | UART_SCCM_RX);
	clrbits32((u32 *)&immap->im_scc[1].scc_gsmrl,
		  SCC_GSMRL_ENR | SCC_GSMRL_ENT);
#endif

#ifdef CONFIG_SERIAL_CPM_SCC3
	clrbits32((u32 *)&immap->im_scc[2].scc_sccm,
		  UART_SCCM_TX | UART_SCCM_RX);
	clrbits32((u32 *)&immap->im_scc[2].scc_gsmrl,
		  SCC_GSMRL_ENR | SCC_GSMRL_ENT);
#endif

#ifdef CONFIG_SERIAL_CPM_SCC4
	clrbits32((u32 *)&immap->im_scc[3].scc_sccm,
		  UART_SCCM_TX | UART_SCCM_RX);
	clrbits32((u32 *)&immap->im_scc[3].scc_gsmrl,
		  SCC_GSMRL_ENR | SCC_GSMRL_ENT);
#endif

	iounmap(bcsr);
	iounmap(immap);
}
static void init_fcc2_ioports(struct fs_platform_info *fpi) { cpm2_map_t *immap = ioremap(get_immrbase(), sizeof(cpm2_map_t)); struct device_node *np; struct resource r; u32 *bcsr; struct io_port *io; u32 tempval; np = of_find_node_by_type(NULL, "memory"); if (!np) { printk(KERN_INFO "No memory node in device tree\n"); return; } if (of_address_to_resource(np, 1, &r)) { printk(KERN_INFO "No memory reg property [1] in devicetree\n"); return; } of_node_put(np); io = &immap->im_ioport; bcsr = ioremap(r.start + 12, sizeof(u32)); /* Enable the PHY */ clrbits32(bcsr, BCSR3_FETHIEN2); setbits32(bcsr, BCSR3_FETH2_RST); /* FCC2 are port B/C. */ /* Configure port A and C pins for FCC2 Ethernet. */ tempval = in_be32(&io->iop_pdirb); tempval &= ~PB2_DIRB0; tempval |= PB2_DIRB1; out_be32(&io->iop_pdirb, tempval); tempval = in_be32(&io->iop_psorb); tempval &= ~PB2_PSORB0; tempval |= PB2_PSORB1; out_be32(&io->iop_psorb, tempval); setbits32(&io->iop_pparb, PB2_DIRB0 | PB2_DIRB1); tempval = PC_CLK(fpi->clk_tx - 8) | PC_CLK(fpi->clk_rx - 8); /* Alter clocks */ clrbits32(&io->iop_psorc, tempval); clrbits32(&io->iop_pdirc, tempval); setbits32(&io->iop_pparc, tempval); cpm2_clk_setup(CPM_CLK_FCC2, fpi->clk_rx, CPM_CLK_RX); cpm2_clk_setup(CPM_CLK_FCC2, fpi->clk_tx, CPM_CLK_TX); iounmap(bcsr); iounmap(immap); }
static void setup_fec2_ioports(void) { immap_t *immap = (immap_t *) IMAP_ADDR; /* configure FEC2 pins */ setbits32(&immap->im_cpm.cp_pepar, 0x0003fffc); setbits32(&immap->im_cpm.cp_pedir, 0x0003fffc); setbits32(&immap->im_cpm.cp_peso, 0x00037800); clrbits32(&immap->im_cpm.cp_peso, 0x000087fc); clrbits32(&immap->im_cpm.cp_cptr, 0x00000080); }
/*
 * init_fec2_ioports - configure port E pins for the FEC2 Ethernet.
 * @ptr: platform info (unused here).
 *
 * Same pin setup as setup_fec2_ioports(): pins 0x0003fffc as dedicated
 * outputs with the special-option register cleared/set per pin group
 * (the masks are disjoint, so the clr/set order is immaterial), and bit
 * 0x80 cleared in CPTR.  The io_port mapping is taken and released
 * without being touched.
 */
static void init_fec2_ioports(struct fs_platform_info *ptr)
{
	cpm8xx_t *cp = (cpm8xx_t *)immr_map(im_cpm);
	iop8xx_t *io_port = (iop8xx_t *)immr_map(im_ioport);

	/* configure FEC2 pins */
	setbits32(&cp->cp_pepar, 0x0003fffc);
	setbits32(&cp->cp_pedir, 0x0003fffc);
	clrbits32(&cp->cp_peso, 0x000087fc);
	setbits32(&cp->cp_peso, 0x00037800);
	clrbits32(&cp->cp_cptr, 0x00000080);

	immr_unmap(io_port);
	immr_unmap(cp);
}
static void init_scc4_uart_ioports(struct fs_uart_platform_info*) { cpm2_map_t* immap = ioremap(CPM_MAP_ADDR, sizeof(cpm2_map_t)); setbits32(&immap->im_ioport.iop_ppard,0x00000600); clrbits32(&immap->im_ioport.iop_psord,0x00000600); clrbits32(&immap->im_ioport.iop_pdird,0x00000200); setbits32(&immap->im_ioport.iop_pdird,0x00000400); /* Wire BRG4 to SCC4 */ clrbits32(&immap->im_cpmux.cmx_scr,0x000000ff); setbits32(&immap->im_cpmux.cmx_scr,0x0000001b); iounmap(immap); }
static void setup_scc1_ioports(struct fs_platform_info* pdata) { immap_t *immap = (immap_t *) IMAP_ADDR; unsigned *bcsr_io; bcsr_io = ioremap(BCSR1, sizeof(unsigned long)); if (bcsr_io == NULL) { printk(KERN_CRIT "Could not remap BCSR1\n"); return; } /* Enable the PHY. */ clrbits32(bcsr_io,BCSR1_ETHEN); /* Configure port A pins for Txd and Rxd. */ /* Disable receive and transmit in case EPPC-Bug started it. */ setbits16(&immap->im_ioport.iop_papar, PA_ENET_RXD | PA_ENET_TXD); clrbits16(&immap->im_ioport.iop_padir, PA_ENET_RXD | PA_ENET_TXD); clrbits16(&immap->im_ioport.iop_paodr, PA_ENET_TXD); /* Configure port C pins to enable CLSN and RENA. */ clrbits16(&immap->im_ioport.iop_pcpar, PC_ENET_CLSN | PC_ENET_RENA); clrbits16(&immap->im_ioport.iop_pcdir, PC_ENET_CLSN | PC_ENET_RENA); setbits16(&immap->im_ioport.iop_pcso, PC_ENET_CLSN | PC_ENET_RENA); /* Configure port A for TCLK and RCLK. */ setbits16(&immap->im_ioport.iop_papar, PA_ENET_TCLK | PA_ENET_RCLK); clrbits16(&immap->im_ioport.iop_padir, PA_ENET_TCLK | PA_ENET_RCLK); clrbits32(&immap->im_cpm.cp_pbpar, PB_ENET_TENA); clrbits32(&immap->im_cpm.cp_pbdir, PB_ENET_TENA); /* Configure Serial Interface clock routing. * First, clear all SCC bits to zero, then set the ones we want. */ clrbits32(&immap->im_cpm.cp_sicr, SICR_ENET_MASK); setbits32(&immap->im_cpm.cp_sicr, SICR_ENET_CLKRT); /* In the original SCC enet driver the following code is placed at the end of the initialization */ setbits32(&immap->im_cpm.cp_pbpar, PB_ENET_TENA); setbits32(&immap->im_cpm.cp_pbdir, PB_ENET_TENA); }
/* Mask a Flipper interrupt line by clearing its bit in the IMR. */
static void flipper_pic_mask(unsigned int virq)
{
	void __iomem *io_base = get_irq_chip_data(virq);

	clrbits32(io_base + FLIPPER_IMR, 1 << virq_to_hw(virq));
}
static void cpm_mask_irq(unsigned int irq) { int cpm_vec = irq - CPM_IRQ_OFFSET; clrbits32(&((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr, (1 << cpm_vec)); }
/*
 * Initialize port. This is called from early_console stuff
 * so we have to be careful here !
 *
 * Returns 0 on success, or the error from cpm_uart_allocbuf().
 */
static int cpm_uart_request_port(struct uart_port *port)
{
	struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
	int ret;

	pr_debug("CPM uart[%d]:request port\n", port->line);

	/* The console port is already set up; leave it alone. */
	if (pinfo->flags & FLAG_CONSOLE)
		return 0;

	/* Mask interrupts and disable rx/tx before reinitializing. */
	if (IS_SMC(pinfo)) {
		clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX | SMCM_TX);
		clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN | SMCMR_TEN);
	} else {
		clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_TX | UART_SCCM_RX);
		clrbits32(&pinfo->sccp->scc_gsmrl,
			  SCC_GSMRL_ENR | SCC_GSMRL_ENT);
	}

	ret = cpm_uart_allocbuf(pinfo, 0);
	if (ret)
		return ret;

	/* Set up buffer descriptors, then program the SMC or SCC itself. */
	cpm_uart_initbd(pinfo);

	if (IS_SMC(pinfo))
		cpm_uart_init_smc(pinfo);
	else
		cpm_uart_init_scc(pinfo);

	return 0;
}
/*
 * cpm_uart_startup - open the port: quiesce rx, install the interrupt
 * handler, then enable rx/tx and the rx interrupt.
 *
 * Returns 0 on success or the error from request_irq().
 */
static int cpm_uart_startup(struct uart_port *port)
{
	int retval;
	struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;

	pr_debug("CPM uart[%d]:startup\n", port->line);

	/* If the port is not the console, make sure rx is disabled. */
	if (!(pinfo->flags & FLAG_CONSOLE)) {
		/* Disable UART rx */
		if (IS_SMC(pinfo)) {
			clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN);
			clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX);
		} else {
			clrbits32(&pinfo->sccp->scc_gsmrl, SCC_GSMRL_ENR);
			clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX);
		}
		cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
	}

	/* Install interrupt handler. */
	retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port);
	if (retval)
		return retval;

	/* Startup rx-int */
	if (IS_SMC(pinfo)) {
		setbits8(&pinfo->smcp->smc_smcm, SMCM_RX);
		setbits16(&pinfo->smcp->smc_smcmr, (SMCMR_REN | SMCMR_TEN));
	} else {
		setbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX);
		setbits32(&pinfo->sccp->scc_gsmrl,
			  (SCC_GSMRL_ENR | SCC_GSMRL_ENT));
	}

	return 0;
}
static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) { struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; unsigned long flags; switch (flow_type) { case IRQ_TYPE_EDGE_FALLING: spin_lock_irqsave(&mpc8xxx_gc->lock, flags); setbits32(mm->regs + GPIO_ICR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; case IRQ_TYPE_EDGE_BOTH: spin_lock_irqsave(&mpc8xxx_gc->lock, flags); clrbits32(mm->regs + GPIO_ICR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); break; default: return -EINVAL; } return 0; }
/*
 * fsl_ssi_shutdown - close a substream; disable the SSI when the last
 * active substream goes away.
 *
 * Decrements the per-direction use count, unlinks the substream from the
 * first/second stream slots, and once neither playback nor capture is
 * active, clears SSIEN and releases the interrupt.
 */
static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
			     struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct fsl_ssi_private *ssi_private = rtd->dai->cpu_dai->private_data;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		ssi_private->playback--;

	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		ssi_private->capture--;

	/* Promote the second stream if the first one is closing. */
	if (ssi_private->first_stream == substream)
		ssi_private->first_stream = ssi_private->second_stream;

	ssi_private->second_stream = NULL;

	/* Last user gone: disable the SSI and free its interrupt. */
	if (!ssi_private->playback && !ssi_private->capture) {
		struct ccsr_ssi __iomem *ssi = ssi_private->ssi;

		clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN);
		free_irq(ssi_private->irq, ssi_private);
	}
}
/**
 * pmc_enable_wake - enable OF device as wakeup event source
 * @ofdev: OF device affected
 * @state: PM state from which device will issue wakeup events
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int pmc_enable_wake(struct of_device *ofdev, suspend_state_t state,
		    bool enable)
{
	int ret = 0;
	struct device_node *clk_np;
	u32 *pmcdr_mask;

	/* Only devices flagged wakeup-capable may be enabled as sources. */
	if (enable && !device_may_wakeup(&ofdev->dev))
		return -EINVAL;

	/* Find the clock node this device's "clk-handle" points at. */
	clk_np = of_parse_phandle(ofdev->dev.of_node, "clk-handle", 0);
	if (!clk_np)
		return -EINVAL;

	pmcdr_mask = (u32 *)of_get_property(clk_np, "fsl,pmcdr-mask", NULL);
	if (!pmcdr_mask) {
		ret = -EINVAL;
		goto out;
	}

	/* clear to enable clock in low power mode */
	if (enable)
		clrbits32(&pmc_regs->pmcdr, *pmcdr_mask);
	else
		setbits32(&pmc_regs->pmcdr, *pmcdr_mask);

out:
	of_node_put(clk_np);
	return ret;
}
/* Mask a Flipper interrupt line by clearing its bit in the IMR. */
static void flipper_pic_mask(struct irq_data *d)
{
	void __iomem *io_base = irq_data_get_irq_chip_data(d);

	clrbits32(io_base + FLIPPER_IMR, 1 << irqd_to_hwirq(d));
}
/*
 * esdhc_set_clock - program the eSDHC clock dividers for @clock Hz.
 *
 * Gates the IPG/HCK/peripheral clocks and clears the divider field, then
 * (for nonzero @clock) finds a power-of-two prescaler pre_div and a
 * linear divisor div such that max_clk / (pre_div * div) <= clock, and
 * writes both into SYSTEM_CONTROL (pre_div encoded as value >> 1).
 *
 * NOTE(review): the newer variant of this function elsewhere in this
 * tree also decrements div before writing it (divisor-minus-one field
 * encoding); this version writes div as-is — confirm which encoding this
 * controller revision expects.
 */
static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int div;
	int pre_div = 2;

	/* Gate all clocks and clear the divider field first. */
	clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
		  ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);

	if (clock == 0)
		goto out;

	/* Grow the prescaler until div (max 16) can reach the target. */
	if (host->max_clk / 16 > clock) {
		for (; pre_div < 256; pre_div *= 2) {
			if (host->max_clk / pre_div < clock * 16)
				break;
		}
	}

	for (div = 1; div <= 16; div++) {
		if (host->max_clk / (div * pre_div) <= clock)
			break;
	}

	/* Prescaler field holds pre_div / 2. */
	pre_div >>= 1;

	setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
		  ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
		  div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);

	/* Let the clock stabilize. */
	mdelay(100);
out:
	host->clock = clock;
}
/*
 * kgdb_params_early_init - early setup for a KGDB console on a CPM UART.
 *
 * Reads the CPU clock, BRG frequency and IMMR base from the flattened
 * device tree, maps IMMR (and RAM, on Book-E) for early access, enables
 * the RS-232 drivers in the MPC8272ADS BCSR, then resets the CPM2 and
 * configures its I/O ports.  Does nothing if a CPM uart already exists.
 */
void kgdb_params_early_init(void)
{
	if (cpm_uart_nr)
		return;

	get_from_flat_dt("cpu", "clock-frequency", &ppc_proc_freq);
	get_from_flat_dt("cpm", "brg-frequency", &brgfreq);
	get_from_flat_dt("soc", "reg", &immrbase);

#ifdef CONFIG_FSL_BOOKE
	/* MMU configuration for early access to IMMR and memory */
	settlbcam(num_tlbcam_entries - 1, immrbase, immrbase, 0x100000,
		  _PAGE_IO, 0);
	settlbcam(0, KERNELBASE, 0, lmb_end_of_DRAM(), _PAGE_KERNEL, 0);
#else
	/* Set up BAT for early access to IMMR */
	mb();
	mtspr(SPRN_DBAT1L, (immrbase & 0xffff0000) | 0x2a);
	mtspr(SPRN_DBAT1U, (immrbase & 0xffff0000) | BL_256M << 2 | 2);
	mb();
	setbat(1, immrbase, immrbase, 0x10000000, _PAGE_IO);
#endif

#ifdef CONFIG_MPC8272_ADS
	/* Enable serial ports in BCSR.
	 * NOTE(review): 0xf4500000 is a hard-coded BCSR address — verify
	 * against the board's memory map. */
	clrbits32((u32 *)0xf4500000, BCSR1_RS232_EN1 | BCSR1_RS232_EN2);
#endif

	cpm2_reset();
	init_ioports();
}
/**
 * pmc_enable_lossless - enable lossless ethernet in low power mode
 * @enable: True to enable event generation; false to disable
 */
void pmc_enable_lossless(int enable)
{
	/* Set the bit only when requested AND the hardware supports it;
	 * in every other case make sure it is cleared. */
	if (!(enable && has_lossless)) {
		clrbits32(&pmc_regs->pmcsr, PMCSR_LOSSLESS);
		return;
	}
	setbits32(&pmc_regs->pmcsr, PMCSR_LOSSLESS);
}
/*
 * esdhc_set_clock - program the eSDHC prescaler and divisor for @clock Hz.
 *
 * Gates the IPG/HCK/peripheral clocks and clears the divider field, then
 * (for nonzero @clock) picks the smallest power-of-two prescaler and
 * linear divisor with max_clk / pre_div / div <= clock.  The hardware
 * fields are encoded as pre_div >> 1 and div - 1 before being written.
 */
static void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
{
	int pre_div = 2;
	int div = 1;

	clrbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
		  ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK);

	if (clock == 0)
		goto out;

	/* Grow the prescaler until div (max 16) can reach the target. */
	while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
		pre_div *= 2;

	while (host->max_clk / pre_div / div > clock && div < 16)
		div++;

	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n",
		clock, host->max_clk / pre_div / div);

	/* Convert to the register encodings. */
	pre_div >>= 1;
	div--;

	setbits32(host->ioaddr + ESDHC_SYSTEM_CONTROL, ESDHC_CLOCK_IPGEN |
		  ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN |
		  div << ESDHC_DIVIDER_SHIFT | pre_div << ESDHC_PREDIV_SHIFT);

	/* Let the clock stabilize. */
	mdelay(100);
out:
	host->clock = clock;
}
static void init_scc1_uart_ioports(struct fs_uart_platform_info*) { cpm2_map_t* immap = ioremap(CPM_MAP_ADDR, sizeof(cpm2_map_t)); /* SCC1 is only on port D */ setbits32(&immap->im_ioport.iop_ppard,0x00000003); clrbits32(&immap->im_ioport.iop_psord,0x00000001); setbits32(&immap->im_ioport.iop_psord,0x00000002); clrbits32(&immap->im_ioport.iop_pdird,0x00000001); setbits32(&immap->im_ioport.iop_pdird,0x00000002); /* Wire BRG1 to SCC1 */ clrbits32(&immap->im_cpmux.cmx_scr,0x00ffffff); iounmap(immap); }
/*
 * init_ioports - configure all CPM2 pins and clock routing for this board.
 *
 * Applies every entry of the km82xx_pins table via cpm2_set_pin(), wires
 * the SMC/SCC/FCC clock sources, then fixes up two USB-related data bits
 * on the I/O port.
 */
static void __init init_ioports(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(km82xx_pins); i++) {
		const struct cpm_pin *pin = &km82xx_pins[i];

		cpm2_set_pin(pin->port, pin->pin, pin->flags);
	}

	cpm2_smc_clk_setup(CPM_CLK_SMC2, CPM_BRG8);
	cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_TX);
	cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_RTX);
	cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK7, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK8, CPM_CLK_TX);
	cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK9, CPM_CLK_TX);
	cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX);
	cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX);

	/* Force USB FULL SPEED bit to '1' */
	setbits32(&cpm2_immr->im_ioport.iop_pdata, 1 << (31 - 10));
	/* clear USB_SLAVE */
	clrbits32(&cpm2_immr->im_ioport.iop_pdata, 1 << (31 - 11));
}
/**
 * fsl_ssi_shutdown: shutdown the SSI
 *
 * Shutdown the SSI if there are no other substreams open.
 */
static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
			     struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct fsl_ssi_private *ssi_private =
		snd_soc_dai_get_drvdata(rtd->cpu_dai);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		ssi_private->playback--;

	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
		ssi_private->capture--;

	/* Promote the second stream if the first one is closing. */
	if (ssi_private->first_stream == substream)
		ssi_private->first_stream = ssi_private->second_stream;

	ssi_private->second_stream = NULL;

	/*
	 * If this is the last active substream, disable the SSI and release
	 * the IRQ.
	 */
	if (!ssi_private->playback && !ssi_private->capture) {
		struct ccsr_ssi __iomem *ssi = ssi_private->ssi;

		clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN);
		free_irq(ssi_private->irq, ssi_private);
	}
}
/*
 * pcmcia_hw_setup - power the PCMCIA slot on or off via BCSR1.
 * @slot:   slot number (unused; the board has a single enable bit)
 * @enable: nonzero to enable the slot
 *
 * The BCSR1_PCCEN bit is cleared to enable (active-low enable).
 */
static void pcmcia_hw_setup(int slot, int enable)
{
	if (!enable) {
		setbits32(&bcsr[1], BCSR1_PCCEN);
		return;
	}
	clrbits32(&bcsr[1], BCSR1_PCCEN);
}
/*
 * mpc86xads_setup_arch - board init: reset the CPM, configure I/O ports,
 * and enable both RS-232 transceivers plus the Ethernet PHY by clearing
 * their (active-low) enable bits in the BCSR found via the device tree.
 */
static void __init mpc86xads_setup_arch(void)
{
	struct device_node *np;
	u32 __iomem *bcsr_io;

	cpm_reset();
	init_ioports();

	np = of_find_compatible_node(NULL, NULL, "fsl,mpc866ads-bcsr");
	if (!np) {
		printk(KERN_CRIT "Could not find fsl,mpc866ads-bcsr node\n");
		return;
	}

	bcsr_io = of_iomap(np, 0);
	of_node_put(np);

	if (bcsr_io == NULL) {
		printk(KERN_CRIT "Could not remap BCSR\n");
		return;
	}

	clrbits32(bcsr_io, BCSR1_RS232EN_1 | BCSR1_RS232EN_2 | BCSR1_ETHEN);
	iounmap(bcsr_io);
}
/* Top-level security violation interrupt */ static irqreturn_t caam_secvio_interrupt(int irq, void *snvsdev) { struct device *dev = snvsdev; struct caam_drv_private_secvio *svpriv = dev_get_drvdata(dev); u32 irqstate; /* Check the HP secvio status register */ irqstate = rd_reg32(&svpriv->svregs->hp.secvio_status) | HP_SECVIOST_SECVIOMASK; if (!irqstate) return IRQ_NONE; /* Mask out one or more causes for deferred service */ clrbits32(&svpriv->svregs->hp.secvio_int_ctl, irqstate); /* Now ACK causes */ setbits32(&svpriv->svregs->hp.secvio_status, irqstate); /* And run deferred service */ preempt_disable(); tasklet_schedule(&svpriv->irqtask[smp_processor_id()]); preempt_enable(); return IRQ_HANDLED; }