/**
 * Allocate I/O buffer with specified alignment and offset
 *
 * @v len	Required length of buffer
 * @v align	Physical alignment
 * @v offset	Offset from physical alignment
 * @ret iobuf	I/O buffer, or NULL if none available
 *
 * @c align will be rounded up to the nearest power of two.
 */
struct io_buffer * alloc_iob_raw ( size_t len, size_t align, size_t offset ) {
	struct io_buffer *iobuf;
	void *data;

	/* Align buffer length to ensure that struct io_buffer is aligned */
	len = ( len + __alignof__ ( *iobuf ) - 1 ) &
	      ~( __alignof__ ( *iobuf ) - 1 );

	/* Round up alignment to the nearest power of two.
	 * NOTE(review): align==0 would make fls() operate on SIZE_MAX;
	 * presumably callers never pass 0 — confirm. */
	align = ( 1 << fls ( align - 1 ) );

	/* Allocate buffer plus descriptor as a single unit, unless
	 * doing so will push the total size over the alignment
	 * boundary.
	 */
	if ( ( len + sizeof ( *iobuf ) ) <= align ) {
		/* Allocate memory for buffer plus descriptor */
		data = malloc_dma_offset ( len + sizeof ( *iobuf ), align,
					   offset );
		if ( ! data )
			return NULL;
		/* Descriptor lives immediately after the data area */
		iobuf = ( data + len );
	} else {
		/* Allocate memory for buffer */
		data = malloc_dma_offset ( len, align, offset );
		if ( ! data )
			return NULL;
		/* Allocate memory for descriptor separately; on failure
		 * the DMA buffer must be released again */
		iobuf = malloc ( sizeof ( *iobuf ) );
		if ( ! iobuf ) {
			free_dma ( data, len );
			return NULL;
		}
	}

	/* Populate descriptor: empty buffer, head == data == tail */
	iobuf->head = iobuf->data = iobuf->tail = data;
	iobuf->end = ( data + len );

	return iobuf;
}
/*
 * Interrupt filter for the APB miscellaneous-interrupt controller.
 * Reads the SoC-specific MISC interrupt status and dispatches each
 * pending source, highest bit first.
 */
static int apb_filter(void *arg)
{
	struct apb_softc *sc = arg;
	struct thread *td;
	uint32_t i, intr;

	td = curthread;
	/* Workaround: do not inflate intr nesting level */
	td->td_intr_nesting_level--;

	/* Status register location depends on the SoC generation */
	if (ar531x_soc >= AR531X_SOC_AR5315)
		intr = ATH_READ_REG(AR5315_SYSREG_BASE +
		    AR5315_SYSREG_MISC_INTSTAT);
	else
		intr = ATH_READ_REG(AR5312_SYSREG_BASE +
		    AR5312_SYSREG_MISC_INTSTAT);

	/* fls() is 1-based, so i-1 is the highest pending bit index */
	while ((i = fls(intr)) != 0) {
		i--;
		intr &= ~(1u << i);

		if (i == 1 && ar531x_soc < AR531X_SOC_AR5315) {
			/* Reading these registers clears the AHB error
			 * state on AR5312-class parts */
			ATH_READ_REG(AR5312_SYSREG_BASE +
			    AR5312_SYSREG_AHBPERR);
			ATH_READ_REG(AR5312_SYSREG_BASE +
			    AR5312_SYSREG_AHBDMAE);
		}

		if (intr_isrc_dispatch(PIC_INTR_ISRC(sc, i),
		    curthread->td_intr_frame) != 0) {
			/* No handler claimed it: mask the line off */
			device_printf(sc->apb_dev,
			    "Stray interrupt %u detected\n", i);
			apb_mask_irq((void*)i);
			continue;
		}
	}

	/* NOTE(review): the loop above only exits when i == 0, so this
	 * assertion can never fire */
	KASSERT(i == 0, ("all interrupts handled"));

	td->td_intr_nesting_level++;

	return (FILTER_HANDLED);
}
/*
 * Program the keypad controller's scan geometry and timing registers
 * from platform data. Returns 0 on success or a negative write error.
 */
static int pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
{
	int bits, rc, cycles;
	u8 scan_val = 0, ctrl_val = 0;
	/* Rows above the minimum map non-linearly onto the 3-bit
	 * row-count field of KEYP_CTRL */
	static const u8 row_bits[] = {
		0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7,
	};

	/* Find column bits */
	if (kp->pdata->num_cols < KEYP_CTRL_SCAN_COLS_MIN)
		bits = 0;
	else
		bits = kp->pdata->num_cols - KEYP_CTRL_SCAN_COLS_MIN;
	ctrl_val = (bits & KEYP_CTRL_SCAN_COLS_BITS) <<
		KEYP_CTRL_SCAN_COLS_SHIFT;

	/* Find row bits */
	if (kp->pdata->num_rows < KEYP_CTRL_SCAN_ROWS_MIN)
		bits = 0;
	else
		bits = row_bits[kp->pdata->num_rows -
			KEYP_CTRL_SCAN_ROWS_MIN];

	ctrl_val |= (bits << KEYP_CTRL_SCAN_ROWS_SHIFT);

	rc = pmic8xxx_kp_write_u8(kp, ctrl_val, KEYP_CTRL);
	if (rc < 0)
		return rc;

	/* Debounce field encodes (ms / 5) - 1; assumes debounce_ms is
	 * a multiple of 5 and >= 5 — TODO confirm platform data range */
	bits = (kp->pdata->debounce_ms / 5) - 1;
	scan_val |= (bits << KEYP_SCAN_DBOUNCE_SHIFT);

	/* Scan pause field is log2 of the delay in ms (fls is 1-based) */
	bits = fls(kp->pdata->scan_delay_ms) - 1;
	scan_val |= (bits << KEYP_SCAN_PAUSE_SHIFT);

	/* Row hold time is a multiple of 32KHz cycles. */
	cycles = (kp->pdata->row_hold_ns * KEYP_CLOCK_FREQ) / NSEC_PER_SEC;

	scan_val |= (cycles << KEYP_SCAN_ROW_HOLD_SHIFT);

	rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);

	return rc;
}
/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	/* vdd is the bit index of the selected voltage range */
	host->ios.vdd = bit;
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	/* Identification mode wants <= 400 kHz; warn if the host
	 * cannot clock that slowly */
	if (host->f_min > 400000) {
		pr_warning("%s: Minimum clock frequency too high for "
				"identification mode\n", mmc_hostname(host));
		host->ios.clock = host->f_min;
	} else
		host->ios.clock = 400000;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
}
/*
 * Probe for an IOMMU behind the platform bus and, when present, attach
 * the device to a new domain and initialise the address-space allocator.
 * (Function continues beyond this excerpt: the error/free_domain/
 * detach_device labels are defined further down.)
 */
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

	/* Device variant does not use the IOMMU at all */
	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (iommu_present(&platform_bus_type)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		/* NOTE(review): iommu_domain_alloc() returns NULL on
		 * failure, not an ERR_PTR, so IS_ERR() never triggers
		 * here — this looks like it should be a NULL check;
		 * confirm against the kernel version in use. */
		if (IS_ERR(tdev->iommu.domain))
			goto error;

		/*
		 * A IOMMU is only usable if it supports page sizes smaller
		 * or equal to the system's PAGE_SIZE, with a preference if
		 * both are equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			/* Pick the largest supported page smaller than
			 * PAGE_SIZE (fls is 1-based, hence the -1 below) */
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		/* Address allocator spans the IOMMU aperture in pages */
		ret = nvkm_mm_init(&tdev->iommu.mm, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}
/*
 * SDMA interrupt handler: acknowledge every pending channel interrupt,
 * then service the corresponding channels from highest to lowest.
 */
static irqreturn_t sdma_int_handler(int irq, void *dev_id)
{
	struct sdma_engine *sdma = dev_id;
	u32 pending = __raw_readl(sdma->regs + SDMA_H_INTR);

	/* Write-back acks all pending bits in one shot */
	__raw_writel(pending, sdma->regs + SDMA_H_INTR);

	for (; pending; ) {
		int ch = fls(pending) - 1;

		mxc_sdma_handle_channel(&sdma->channel[ch]);
		pending &= ~(1 << ch);
	}

	return IRQ_HANDLED;
}
/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	int bit;

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	/* vdd is the bit index of the selected voltage range */
	host->ios.vdd = bit;
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage. Atheros WiFi cards need a much
	 * longer settle; clock-gating hosts skip the delay entirely.
	 */
	if (host->caps & MMC_CAP_ATHEROS_WIFI)
		mmc_delay(400);
	else if (!(host->caps & MMC_CAP_CLOCK_GATING))
		mmc_delay(10);

	host->ios.clock = host->f_min;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	if (!(host->caps & MMC_CAP_CLOCK_GATING))
		mmc_delay(10);
}
/* s5pv310_irq_demux_eint
 *
 * Demultiplex the group0 external interrupts (EINTs 16 to 31). Meant to
 * be inlined into the specific s5p_irq_demux_eintX_Y handlers.
 *
 * Each EINT pend/mask register covers eight lines; only the unmasked,
 * pending ones in this bank are dispatched. Returns the number of
 * interrupts handled.
 */
static inline u32 s5pv310_irq_demux_eint(unsigned int irq, unsigned int start)
{
	u32 pend = __raw_readl(S5P_EINT_PEND(s5pv310_irq_split(start)));
	u32 msk = __raw_readl(S5P_EINT_MASK(s5pv310_irq_split(start)));
	u32 handled = 0;
	unsigned int bit;

	/* Keep only the unmasked lines of this 8-bit bank */
	pend = (pend & ~msk) & 0xff;

	for (; pend; handled++) {
		bit = fls(pend) - 1;
		generic_handle_irq(start + bit);
		pend &= ~(1 << bit);
	}

	return handled;
}
/*
 * Read the single register byte that covers the given GPIO offset.
 * Returns 0 on success and stores the byte in *val, or a negative
 * I2C error code.
 */
static int pca953x_read_single(struct udevice *dev, int reg, u8 *val,
			       int offset)
{
	struct pca953x_info *info = dev_get_platdata(dev);
	/* Multi-bank chips space register groups by a power-of-two stride */
	int bank_shift = fls((info->gpio_count - 1) / BANK_SZ);
	int bank = offset / BANK_SZ;
	u8 byte;
	int ret;

	ret = dm_i2c_read(dev, (reg << bank_shift) + bank, &byte, 1);
	if (ret) {
		dev_err(dev, "%s error\n", __func__);
		return ret;
	}

	*val = byte;

	return 0;
}
/*
 * MDSS top-level interrupt handler: forward each pending HW source to
 * the virtual IRQ mapped in our irqdomain, highest bit first.
 */
static irqreturn_t mdss_irq(int irq, void *arg)
{
	struct msm_mdss *mdss = arg;
	u32 pending = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);

	VERB("intr=%08x", pending);

	for (; pending; ) {
		irq_hw_number_t bit = fls(pending) - 1;

		generic_handle_irq(irq_find_mapping(
				mdss->irqcontroller.domain, bit));
		pending &= ~(1 << bit);
	}

	return IRQ_HANDLED;
}
/*
 * Cascaded GPIO interrupt handler: scan every GPIO bank, dispatch each
 * enabled+pending line from lowest bit to highest, then ack the parent.
 */
static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	int i, j, m;
	unsigned v;

	for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
		struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i];
		/* Only lines whose interrupt is enabled matter */
		v = readl(msm_chip->regs.int_status);
		v &= msm_chip->int_enable[0];
		while (v) {
			/* m = v & -v isolates the lowest set bit;
			 * fls(m) - 1 converts it to a bit index */
			m = v & -v;
			j = fls(m) - 1;
/*			printk("msm_gpio_irq_handler %08x %08x bit %d gpio %d irq %d\n",
				v, m, j, msm_chip->chip.start + j, FIRST_GPIO_IRQ + msm_chip->chip.start + j); */
			v &= ~m;
			generic_handle_irq(FIRST_GPIO_IRQ +
				msm_chip->chip.start + j);
		}
	}
	/* Ack the cascade interrupt at the parent controller */
	desc->chip->ack(irq);
}
/*
 * One-time controller initialisation: allocate a DMA bounce buffer when
 * required, apply power, work around broken card-detect, and set up the
 * interrupt masks. Returns 0 on success, -1 on allocation failure.
 */
static int sdhci_init(struct mmc *mmc)
{
	struct sdhci_host *host = mmc->priv;

	/* 32-bit-DMA-only controllers get a shared bounce buffer */
	if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && !aligned_buffer) {
		aligned_buffer = memalign(8, 512*1024);
		if (!aligned_buffer) {
			printf("%s: Aligned buffer alloc failed!!!\n",
			       __func__);
			return -1;
		}
	}

	/* Power up at the highest voltage bit advertised by the board */
	sdhci_set_power(host, fls(mmc->cfg->voltages) - 1);

	if (host->quirks & SDHCI_QUIRK_NO_CD) {
#if defined(CONFIG_PIC32_SDHCI)
		/* PIC32 SDHCI CD errata:
		 * - set CD_TEST and clear CD_TEST_INS bit
		 */
		sdhci_writeb(host, SDHCI_CTRL_CD_TEST, SDHCI_HOST_CONTROL);
#else
		unsigned int status;

		/* Force card-detect via the test bits */
		sdhci_writeb(host, SDHCI_CTRL_CD_TEST_INS | SDHCI_CTRL_CD_TEST,
			SDHCI_HOST_CONTROL);

		status = sdhci_readl(host, SDHCI_PRESENT_STATE);
		/* NOTE(review): unbounded busy-wait — spins forever if
		 * the controller never reports a present, stable card */
		while ((!(status & SDHCI_CARD_PRESENT)) ||
		    (!(status & SDHCI_CARD_STATE_STABLE)) ||
		    (!(status & SDHCI_CARD_DETECT_PIN_LEVEL)))
			status = sdhci_readl(host, SDHCI_PRESENT_STATE);
#endif
	}

	/* Enable only interrupts served by the SD controller */
	sdhci_writel(host, SDHCI_INT_DATA_MASK | SDHCI_INT_CMD_MASK,
		     SDHCI_INT_ENABLE);
	/* Mask all sdhci interrupt sources */
	sdhci_writel(host, 0x0, SDHCI_SIGNAL_ENABLE);

	return 0;
}
/*
 * MIPS CPU interrupt filter: dispatch every pending, unmasked CPU
 * interrupt line (IP0..IP7), highest first, masking off stray lines
 * that have no registered source.
 */
int mips_pic_intr(void *arg)
{
	struct mips_pic_softc *sc = arg;
	register_t cause, status;
	struct intr_irqsrc *isrc;
	int i, intr;

	cause = mips_rd_cause();
	status = mips_rd_status();

	/* Pending interrupt bits live at bits 8..15 of cause */
	intr = (cause & MIPS_INT_MASK) >> 8;

	/*
	 * Do not handle masked interrupts. They were masked by
	 * pre_ithread function (mips_mask_XXX_intr) and will be
	 * unmasked once ithread is through with handler
	 */
	intr &= (status & MIPS_INT_MASK) >> 8;
	while ((i = fls(intr)) != 0) {
		i--; /* Get a 0-offset interrupt. */
		intr &= ~(1 << i);

		isrc = sc->pic_irqs[i];
		if (isrc == NULL) {
			/* No registered source: silence the line */
			device_printf(sc->pic_dev,
			    "Stray interrupt %u detected\n", i);
			pic_irq_mask(sc, i);
			continue;
		}

		intr_irq_dispatch(isrc, curthread->td_intr_frame);
	}

	/* NOTE(review): the loop only exits when i == 0, so this
	 * assertion can never fire */
	KASSERT(i == 0, ("all interrupts handled"));

#ifdef HWPMC_HOOKS
	/* Give hwpmc a chance to sample the interrupted context */
	if (pmc_hook && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN)) {
		struct trapframe *tf = PCPU_GET(curthread)->td_intr_frame;

		pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf);
	}
#endif

	return (FILTER_HANDLED);
}
/*
 * Pick the best page-program protocol both the controller and the flash
 * support, and record its instruction/protocol on the flash descriptor.
 * Returns 0 on success, -1 when no shared capability exists.
 */
static int spi_flash_select_pp(struct spi_flash *flash,
			       const struct spi_flash_parameters *params,
			       u32 shared_hwcaps)
{
	const struct spi_flash_pp_command *pp;
	int best_match, cmd;

	/* Highest shared capability bit wins */
	best_match = fls(shared_hwcaps & SFLASH_HWCAPS_PP_MASK) - 1;
	if (best_match < 0)
		return -1;

	cmd = spi_flash_hwcaps2cmd((0x1UL << best_match));
	if (cmd < 0)
		return -1;

	pp = &params->page_programs[cmd];
	flash->write_inst = pp->inst;
	flash->write_proto = pp->proto;
	return 0;
}
/* s5p_irq_demux_eint
 *
 * Demultiplex the group0 external interrupts (EINTs 16 to 31). Meant to
 * be inlined into the specific s5p_irq_demux_eintX_Y handlers.
 *
 * Each EINT pend/mask register covers eight lines; only the unmasked,
 * pending ones in this bank are dispatched. Returns the number of
 * interrupts handled.
 */
static inline u32 s5p_irq_demux_eint(unsigned int start)
{
	u32 pend = __raw_readl(S5P_EINT_PEND(EINT_REG_NR(start)));
	u32 msk = __raw_readl(S5P_EINT_MASK(EINT_REG_NR(start)));
	u32 handled = 0;
	unsigned int bit;

	/* Keep only the unmasked lines of this 8-bit bank */
	pend = (pend & ~msk) & 0xff;

	for (; pend; handled++) {
		bit = fls(pend) - 1;
		generic_handle_irq(start + bit);
		pend &= ~(1 << bit);
	}

	return handled;
}
/*
 * Program the MMCIF card clock: stop it, set either the PCLK bypass or
 * a power-of-two divider for the requested rate, then re-enable it.
 * clk == 0 leaves the clock stopped.
 */
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
	bool sup_pclk = p ? p->sup_pclk : false;

	/* Stop the clock and clear the divider before reprogramming */
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

	if (!clk)
		return;
	if (sup_pclk && clk == host->clk)
		/* Requested rate equals the peripheral clock: bypass */
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
	else
		/* Divider field (bits 16+) holds log2(divisor) - 1;
		 * fls() rounds the needed divisor up to a power of two */
		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
			((fls(DIV_ROUND_UP(host->clk,
					   clk) - 1) - 1) << 16));

	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}
/*
 * Suspend the SDHCI host. An always-on SDIO card is kept powered with
 * its clock slowed to ~100 kHz instead of a full host suspend.
 */
static int tegra_sdhci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct tegra_sdhci_host *host = platform_get_drvdata(pdev);
	int ret = 0;

	if (host->card_always_on && is_card_sdio(host->sdhci->mmc->card)) {
		int div = 0;
		u16 clk;
		unsigned int clock = 100000;

		if (device_may_wakeup(&pdev->dev)) {
			enable_irq_wake(host->sdhci->irq);
		}

		/* save interrupt status before suspending */
		host->sdhci_ints = sdhci_readl(host->sdhci, SDHCI_INT_ENABLE);

		/* reduce host controller clk and card clk to 100 KHz */
		tegra_sdhci_set_clock(host->sdhci, clock);
		sdhci_writew(host->sdhci, 0, SDHCI_CLOCK_CONTROL);

		if (host->sdhci->max_clk > clock) {
			/* SDHCI divider is a power of two, max 128 */
			div = 1 << (fls(host->sdhci->max_clk / clock) - 2);
			if (div > 128)
				div = 128;
		}
		clk = div << SDHCI_DIVIDER_SHIFT;
		clk |= SDHCI_CLOCK_INT_EN | SDHCI_CLOCK_CARD_EN;
		sdhci_writew(host->sdhci, clk, SDHCI_CLOCK_CONTROL);

		return ret;
	}

	ret = sdhci_suspend_host(host->sdhci, state);
	if (ret)
		pr_err("%s: failed, error = %d\n", __func__, ret);

	tegra_sdhci_enable_clock(host, 0);
	return ret;
}
/*
 * Decode the raw MAX8997 RTC register dump into a struct rtc_time.
 * Handles both 24-hour and 12-hour (AM/PM flag) register layouts.
 */
static void max8997_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
				   int rtc_24hr_mode)
{
	u8 hour = data[RTC_HOUR];

	tm->tm_sec = data[RTC_SEC] & 0x7f;
	tm->tm_min = data[RTC_MIN] & 0x7f;

	if (rtc_24hr_mode) {
		tm->tm_hour = hour & 0x1f;
	} else {
		/* 12-hour mode: low nibble is the hour, PM flag adds 12 */
		tm->tm_hour = hour & 0x0f;
		if (hour & HOUR_PM_MASK)
			tm->tm_hour += 12;
	}

	/* Weekday register is a one-hot bit mask; fls gives a 0-based day */
	tm->tm_wday = fls(data[RTC_WEEKDAY] & 0x7f) - 1;
	tm->tm_mday = data[RTC_DATE] & 0x1f;
	tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
	tm->tm_year = (data[RTC_YEAR] & 0x7f) + 100;

	/* Fields the hardware cannot provide */
	tm->tm_yday = 0;
	tm->tm_isdst = 0;
}
/*
 * Fill in the generic superblock fields for a 9p mount: block size is
 * derived from the session's maximum payload, and the operations table
 * depends on the protocol dialect.
 */
static void
v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
		int flags, void *data)
{
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	/* Largest power-of-two block not exceeding maxdata; assumes
	 * maxdata > 1 — TODO confirm the session's minimum */
	sb->s_blocksize_bits = fls(v9ses->maxdata - 1);
	sb->s_blocksize = 1 << sb->s_blocksize_bits;
	sb->s_magic = V9FS_MAGIC;
	if (v9fs_proto_dotl(v9ses)) {
		sb->s_op = &v9fs_super_ops_dotl;
		sb->s_xattr = v9fs_xattr_handlers;
	} else
		sb->s_op = &v9fs_super_ops;
	sb->s_bdi = &v9ses->bdi;

	/* 9p has no local write-back cache here: force synchronous ops */
	sb->s_flags = flags | MS_ACTIVE | MS_SYNCHRONOUS | MS_DIRSYNC |
	    MS_NOATIME;

	save_mount_options(sb, data);
}
static void mmc_power_off(struct mmc_host *host) { host->ios.clock = 0; host->ios.vdd = 0; /* * Reset ocr mask to be the highest possible voltage supported for * this mmc host. This value will be used at next power up. */ host->ocr = 1 << (fls(host->ocr_avail) - 1); if (!mmc_host_is_spi(host)) { host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; host->ios.chip_select = MMC_CS_DONTCARE; } host->ios.power_mode = MMC_POWER_OFF; host->ios.bus_width = MMC_BUS_WIDTH_1; host->ios.timing = MMC_TIMING_LEGACY; mmc_set_ios(host); }
/*
 * PM notifier that measures how long the system stayed suspended and
 * records it in a log2 histogram.
 */
static int suspend_time_pm_event(struct notifier_block *notifier,
		unsigned long pm_event, void *unused)
{
	struct timespec after;

	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
		/* Timestamp taken just before entering suspend */
		getnstimeofday(&suspend_time_before);
		break;
	case PM_POST_SUSPEND:
		getnstimeofday(&after);

		after = timespec_sub(after, suspend_time_before);

		/* Bin index is log2 of whole seconds suspended; assumes
		 * time_in_suspend_bins covers every possible fls()
		 * result — TODO confirm the array size */
		time_in_suspend_bins[fls(after.tv_sec)]++;

		pr_info("Suspended for %lu.%03lu seconds\n", after.tv_sec,
			after.tv_nsec / NSEC_PER_MSEC);

		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}
/*
 * Threaded interrupt handler. Multi-port devices expose a global IRQ
 * register (active-low per-port bits); keep servicing the highest
 * pending port until none remain. Single-port devices are serviced
 * directly.
 */
static irqreturn_t max310x_ist(int irq, void *dev_id)
{
	struct max310x_port *s = (struct max310x_port *)dev_id;

	if (s->uart.nr <= 1) {
		max310x_port_irq(s, 0);
		return IRQ_HANDLED;
	}

	for (;;) {
		unsigned int val = ~0;

		/* A cleared bit in the global IRQ register means pending */
		WARN_ON_ONCE(regmap_read(s->regmap,
					 MAX310X_GLOBALIRQ_REG, &val));
		val = ((1 << s->uart.nr) - 1) & ~val;
		if (!val)
			break;
		max310x_port_irq(s, fls(val) - 1);
	}

	return IRQ_HANDLED;
}
/*
 * Basic controller initialisation: allocate the DMA bounce buffer when
 * the controller is limited to 32-bit addresses, enable all interrupt
 * sources, and apply power. Returns 0 on success, -1 on allocation
 * failure.
 */
int sdhci_init(struct mmc *mmc)
{
	struct sdhci_host *host = (struct sdhci_host *)mmc->priv;

	if ((host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR) && !aligned_buffer) {
		aligned_buffer = memalign(8, 512*1024);
		if (!aligned_buffer) {
			printf("Aligned buffer alloc failed!!!");
			return -1;
		}
	}

	/* Enable all interrupt status and signal sources */
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_ALL_MASK, SDHCI_SIGNAL_ENABLE);

	/* Power up using the highest voltage bit the board supports */
	sdhci_set_power(host, fls(mmc->voltages) - 1);

	return 0;
}
/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
static void mmc_power_up(struct mmc_host *host)
{
	/* Select the highest voltage the host advertises */
	int bit = fls(host->ocr_avail) - 1;

	host->ios.vdd = bit;
	host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/* Allow the supply to reach the minimum voltage */
	mmc_delay(1);

	host->ios.clock = host->f_min;
	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/* At least 74 clock cycles / 1 ms for a stable voltage */
	mmc_delay(2);
}
/*
 * Work Item to notify the core about card insertion/removal
 */
static void mmc_omap_detect(struct work_struct *work)
{
	u16 vdd = 0;
	struct mmc_omap_host *host = container_of(work, struct mmc_omap_host,
		mmc_carddetect_work);

	if (host->carddetect) {
		if (!(OMAP_HSMMC_READ(host->base, HCTL) & SDVSDET)) {
			/*
			 * Set the VDD back to 3V when the card is removed
			 * before the set_ios fn turns off the power.
			 */
			vdd = fls(host->mmc->ocr_avail) - 1;
			/* NOTE(review): ios.vdd is updated only when the
			 * switch returns non-zero — verify whether that
			 * return convention (failure vs. success) makes
			 * this condition intentional */
			if (omap_mmc_switch_opcond(host, vdd) != 0)
				host->mmc->ios.vdd = vdd;
		}
		/* Debounce: give insertion events 200 ms to settle */
		mmc_detect_change(host->mmc, (HZ * 200) / 1000);
	} else
		mmc_detect_change(host->mmc, (HZ * 50) / 1000);
}
/*!
 * Allocates the DRAM memory for the frame buffer. This buffer is remapped
 * into a non-cached, non-buffered, memory region to allow palette and pixel
 * writes to occur without flushing the cache. Once this area is remapped,
 * all virtual memory access to the video memory should occur at the new region.
 *
 * @param fbi framebuffer information pointer
 *
 * @return Error code indicating success or failure
 */
static int mxcfb_map_video_memory(struct fb_info *fbi)
{
	u32 msb;
	u32 offset;
	struct mxcfb_info *mxcfbi = fbi->par;

	/* 4 bytes per pixel over the whole virtual resolution */
	fbi->fix.smem_len = fbi->var.xres_virtual * fbi->var.yres_virtual * 4;

	/* Set size to power of 2. */
	msb = fls(fbi->fix.smem_len);
	/* NOTE(review): for any non-zero value x,
	 * x & ((1UL << fls(x)) - 1) equals x, so this "already aligned"
	 * branch appears unreachable — verify the intended condition */
	if (!(fbi->fix.smem_len & ((1UL << msb) - 1)))
		msb--;	/* Already aligned to power 2 */
	if (msb < 11)
		msb = 11;

	/* Allocate double the rounded size so an aligned half-sized
	 * window can be carved out below */
	mxcfbi->alloc_size = (1UL << msb) * 2;

	mxcfbi->alloc_start_vaddr = dma_alloc_coherent(fbi->device,
						       mxcfbi->alloc_size,
						       &mxcfbi->
						       alloc_start_paddr,
						       GFP_KERNEL | GFP_DMA);

	if (mxcfbi->alloc_start_vaddr == 0) {
		dev_err(fbi->device, "Unable to allocate framebuffer memory\n");
		return -ENOMEM;
	}

	dev_dbg(fbi->device, "allocated fb memory @ paddr=0x%08X, size=%d.\n",
		(uint32_t) mxcfbi->alloc_start_paddr, mxcfbi->alloc_size);

	/* NOTE(review): (x & ~x) is always 0, so offset is always 0 and
	 * no alignment adjustment actually happens — this looks like a
	 * bug in the intended "align start to alloc_size/2" computation;
	 * confirm against the original driver */
	offset = ((mxcfbi->alloc_size / 2) - 1) &
		 ~((mxcfbi->alloc_size / 2) - 1);
	fbi->fix.smem_start = mxcfbi->alloc_start_paddr + offset;

	dev_dbg(fbi->device, "aligned fb start @ paddr=0x%08lX, size=%u.\n",
		fbi->fix.smem_start, fbi->fix.smem_len);

	fbi->screen_base = mxcfbi->alloc_start_vaddr + offset;

	/* Clear the screen */
	memset(fbi->screen_base, 0, fbi->fix.smem_len);

	return 0;
}
/*
 * Top-level Octeon interrupt dispatcher. IP2/IP3 cascade the CIU
 * (Central Interrupt Unit) banks 0 and 1; everything else is a plain
 * CPU interrupt line. Loops until no enabled cause bits remain.
 */
asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

	while (1) {
		/* Only cause bits that are both pending and enabled */
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			/* IP2 cascades CIU bank 0 */
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) +
				       OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			/* IP3 cascades CIU bank 1 */
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) +
				       OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			/* Plain CPU lines: cause bits start at bit 8,
			 * hence -9 after the 1-based fls() */
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}
/*
 * Pick the best read protocol both the controller and the flash
 * support, and record its parameters on the flash descriptor.
 * Returns 0 on success, -1 when no shared capability exists.
 */
static int spi_flash_select_read(struct spi_flash *flash,
				 const struct spi_flash_parameters *params,
				 u32 shared_hwcaps)
{
	const struct spi_flash_read_command *read;
	int best_match, cmd;

	/* Highest shared capability bit wins */
	best_match = fls(shared_hwcaps & SFLASH_HWCAPS_READ_MASK) - 1;
	if (best_match < 0)
		return -1;

	cmd = spi_flash_hwcaps2cmd((0x1UL << best_match));
	if (cmd < 0)
		return -1;

	read = &params->reads[cmd];
	flash->num_mode_cycles = read->num_mode_cycles;
	flash->num_wait_states = read->num_wait_states;
	flash->read_inst = read->inst;
	flash->read_proto = read->proto;
	return 0;
}
asmlinkage void plat_irq_dispatch(void) { unsigned long pending; int irq; pending = read_c0_status() & read_c0_cause() & ST0_IM; if (!pending) { spurious_interrupt(); return; } pending >>= CAUSEB_IP; while (pending) { irq = fls(pending) - 1; if (irq < ARRAY_SIZE(irq_wb_chan) && irq_wb_chan[irq] != -1) ath79_ddr_wb_flush(irq_wb_chan[irq]); do_IRQ(MIPS_CPU_IRQ_BASE + irq); pending &= ~BIT(irq); } }
/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host)
//Robert, 20100618, KB62_CR339 : Sync to latest AB60 SD drivers
{
	int bit;

	/* If ocr is set, we use it */
	if (host->ocr)
		bit = ffs(host->ocr) - 1;
	else
		bit = fls(host->ocr_avail) - 1;

	/* vdd is the bit index of the selected voltage range */
	host->ios.vdd = bit;
	if (mmc_host_is_spi(host)) {
		host->ios.chip_select = MMC_CS_HIGH;
		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	} else {
		host->ios.chip_select = MMC_CS_DONTCARE;
		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
	}
	host->ios.power_mode = MMC_POWER_UP;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	mmc_set_ios(host);

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	host->ios.clock = host->f_min;
	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
}