static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long parent_rate)
{
        struct clk_fractional_divider *fd = to_clk_fd(hw);
        unsigned long flags = 0;
        unsigned long m, n;
        u32 val;

        rational_best_approximation(rate, parent_rate,
                        GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
                        &m, &n);

        if (fd->flags & CLK_FRAC_DIVIDER_ZERO_BASED) {
                m--;
                n--;
        }

        if (fd->lock)
                spin_lock_irqsave(fd->lock, flags);
        else
                __acquire(fd->lock);

        val = clk_fd_readl(fd);
        val &= ~(fd->mmask | fd->nmask);
        val |= (m << fd->mshift) | (n << fd->nshift);
        clk_fd_writel(fd, val);

        if (fd->lock)
                spin_unlock_irqrestore(fd->lock, flags);
        else
                __release(fd->lock);

        return 0;
}
static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
                             unsigned long parent_rate)
{
        struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
        u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
        struct _ccu_nkmp _nkmp;
        unsigned long flags;
        u32 reg;

        if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate = rate * nkmp->fixed_post_div;

        _nkmp.min_n = nkmp->n.min ?: 1;
        _nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
        _nkmp.min_k = nkmp->k.min ?: 1;
        _nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width;
        _nkmp.min_m = 1;
        _nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
        _nkmp.min_p = 1;
        _nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);

        ccu_nkmp_find_best(parent_rate, rate, &_nkmp);

        /*
         * If width is 0, the GENMASK() macro may not generate the expected
         * mask (0): shifting by a count equal to or greater than the width
         * of the left operand is undefined behaviour in the C standard.
         * This is easily avoided by explicitly checking whether width is 0.
         */
        if (nkmp->n.width)
                n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
                                 nkmp->n.shift);
        if (nkmp->k.width)
                k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
                                 nkmp->k.shift);
        if (nkmp->m.width)
                m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
                                 nkmp->m.shift);
        if (nkmp->p.width)
                p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
                                 nkmp->p.shift);

        spin_lock_irqsave(nkmp->common.lock, flags);

        reg = readl(nkmp->common.base + nkmp->common.reg);
        reg &= ~(n_mask | k_mask | m_mask | p_mask);

        reg |= ((_nkmp.n - nkmp->n.offset) << nkmp->n.shift) & n_mask;
        reg |= ((_nkmp.k - nkmp->k.offset) << nkmp->k.shift) & k_mask;
        reg |= ((_nkmp.m - nkmp->m.offset) << nkmp->m.shift) & m_mask;
        reg |= (ilog2(_nkmp.p) << nkmp->p.shift) & p_mask;

        writel(reg, nkmp->common.base + nkmp->common.reg);

        spin_unlock_irqrestore(nkmp->common.lock, flags);

        ccu_helper_wait_for_lock(&nkmp->common, nkmp->lock);

        return 0;
}
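/*
 * Illustrative sketch, not part of the driver above: the zero-width guard
 * pattern can be factored into a helper. ccu_field_mask() is a hypothetical
 * name; it returns an empty mask for zero-width fields instead of letting
 * GENMASK() hit the undefined shift described in the comment above.
 */
static inline u32 ccu_field_mask(u8 width, u8 shift)
{
        return width ? GENMASK(width + shift - 1, shift) : 0;
}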
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long parent_rate)
{
        struct clk_fractional_divider *fd = to_clk_fd(hw);
        unsigned long flags = 0;
        unsigned long m, n;
        u32 val;

        rational_best_approximation(rate, parent_rate,
                        GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
                        &m, &n);

        if (fd->lock)
                spin_lock_irqsave(fd->lock, flags);
        else
                __acquire(fd->lock);

        val = clk_readl(fd->reg);
        val &= ~(fd->mmask | fd->nmask);
        val |= (m << fd->mshift) | (n << fd->nshift);
        clk_writel(val, fd->reg);

        if (fd->lock)
                spin_unlock_irqrestore(fd->lock, flags);
        else
                __release(fd->lock);

        return 0;
}
u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr)
{
        u32 base = addr & GENMASK(31, 19);
        u32 offset = addr & GENMASK(18, 0);

        dev->bus_ops->wr(&dev->mt76, MT_MCU_PCIE_REMAP_2, base);

        return MT_PCIE_REMAP_BASE_2 + offset;
}
static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
{
        u32 reg = readl(priv->ioaddr + id_reg);

        if (!reg) {
                dev_info(priv->device, "Version ID not available\n");
                return 0x0;
        }

        dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n",
                 (unsigned int)(reg & GENMASK(15, 8)) >> 8,
                 (unsigned int)(reg & GENMASK(7, 0)));

        return reg & GENMASK(7, 0);
}
static struct clk *rockchip_clk_register_frac_branch(const char *name,
                const char *const *parent_names, u8 num_parents,
                void __iomem *base, int muxdiv_offset, u8 div_flags,
                int gate_offset, u8 gate_shift, u8 gate_flags,
                unsigned long flags, spinlock_t *lock)
{
        struct clk *clk;
        struct clk_gate *gate = NULL;
        struct clk_fractional_divider *div = NULL;
        const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

        if (gate_offset >= 0) {
                gate = kzalloc(sizeof(*gate), GFP_KERNEL);
                if (!gate)
                        return ERR_PTR(-ENOMEM);

                gate->flags = gate_flags;
                gate->reg = base + gate_offset;
                gate->bit_idx = gate_shift;
                gate->lock = lock;
                gate_ops = &clk_gate_ops;
        }

        if (muxdiv_offset < 0) {
                /* don't leak the gate allocated above */
                kfree(gate);
                return ERR_PTR(-EINVAL);
        }

        div = kzalloc(sizeof(*div), GFP_KERNEL);
        if (!div) {
                kfree(gate);
                return ERR_PTR(-ENOMEM);
        }

        div->flags = div_flags;
        div->reg = base + muxdiv_offset;
        div->mshift = 16;
        div->mwidth = 16;
        div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
        div->nshift = 0;
        div->nwidth = 16;
        div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
        div->lock = lock;
        div_ops = &clk_fractional_divider_ops;

        clk = clk_register_composite(NULL, name, parent_names, num_parents,
                                     NULL, NULL,
                                     &div->hw, div_ops,
                                     gate ? &gate->hw : NULL, gate_ops,
                                     flags);

        return clk;
}
int arch_cpu_init_dm(void)
{
        int ret;

        ret = riscv_cpu_probe();
        if (ret)
                return ret;

        /* Enable FPU */
        if (supports_extension('d') || supports_extension('f')) {
                csr_set(MODE_PREFIX(status), MSTATUS_FS);
                csr_write(fcsr, 0);
        }

        if (CONFIG_IS_ENABLED(RISCV_MMODE)) {
                /*
                 * Enable perf counters for cycle, time,
                 * and instret counters only
                 */
                csr_write(mcounteren, GENMASK(2, 0));

                /* Disable paging */
                if (supports_extension('s'))
                        csr_write(satp, 0);
        }

        return 0;
}
static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
                                 int scrambler)
{
        struct mtd_info *mtd = nand_to_mtd(nand);
        struct meson_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
        struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
        u32 bch = meson_chip->bch_mode, cmd;
        int len = mtd->writesize, pagesize, pages;

        pagesize = nand->ecc.size;

        if (raw) {
                len = mtd->writesize + mtd->oobsize;
                cmd = (len & GENMASK(5, 0)) | scrambler | DMA_DIR(dir);
                writel(cmd, nfc->reg_base + NFC_REG_CMD);
                return;
        }

        pages = len / nand->ecc.size;

        cmd = CMDRWGEN(DMA_DIR(dir), scrambler, bch,
                       NFC_CMD_SHORTMODE_DISABLE, pagesize, pages);

        writel(cmd, nfc->reg_base + NFC_REG_CMD);
}
/*
 * The logical block number assigned to a physical block is stored in the OOB
 * of the first page, in 3 16-bit copies with the following layout:
 *
 * 01234567 89abcdef
 * -------- --------
 * ECC BB   xyxyxy
 *
 * When reading we check that the first two copies agree.
 * In case of error, matching is tried using the following pairs.
 * Reserved values 0xffff mean the block is kept for wear leveling.
 *
 * 01234567 89abcdef
 * -------- --------
 * ECC BB   xyxy      oob[8]==oob[10]  && oob[9]==oob[11]  -> byte0=8  byte1=9
 * ECC BB     xyxy    oob[10]==oob[12] && oob[11]==oob[13] -> byte0=10 byte1=11
 * ECC BB   xy  xy    oob[12]==oob[8]  && oob[13]==oob[9]  -> byte0=12 byte1=13
 */
static int sharpsl_nand_get_logical_num(u8 *oob)
{
        u16 us;
        int good0, good1;

        if (oob[NAND_NOOB_LOGADDR_00] == oob[NAND_NOOB_LOGADDR_10] &&
            oob[NAND_NOOB_LOGADDR_01] == oob[NAND_NOOB_LOGADDR_11]) {
                good0 = NAND_NOOB_LOGADDR_00;
                good1 = NAND_NOOB_LOGADDR_01;
        } else if (oob[NAND_NOOB_LOGADDR_10] == oob[NAND_NOOB_LOGADDR_20] &&
                   oob[NAND_NOOB_LOGADDR_11] == oob[NAND_NOOB_LOGADDR_21]) {
                good0 = NAND_NOOB_LOGADDR_10;
                good1 = NAND_NOOB_LOGADDR_11;
        } else if (oob[NAND_NOOB_LOGADDR_20] == oob[NAND_NOOB_LOGADDR_00] &&
                   oob[NAND_NOOB_LOGADDR_21] == oob[NAND_NOOB_LOGADDR_01]) {
                good0 = NAND_NOOB_LOGADDR_20;
                good1 = NAND_NOOB_LOGADDR_21;
        } else {
                return -EINVAL;
        }

        us = oob[good0] | oob[good1] << 8;

        /* parity check */
        if (hweight16(us) & BLOCK_UNMASK_COMPLEMENT)
                return -EINVAL;

        /* reserved */
        if (us == BLOCK_IS_RESERVED)
                return BLOCK_IS_RESERVED;

        return (us >> 1) & GENMASK(9, 0);
}
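/*
 * Worked example (byte values assumed, and assuming BLOCK_UNMASK_COMPLEMENT
 * is 1, i.e. an even-parity check): for oob bytes 0x14 0x80, us = 0x8014,
 * hweight16(0x8014) = 3, an odd bit count, so the copy is rejected. For
 * us = 0x0014 the count is 2 (even), and the logical block number is
 * (0x0014 >> 1) & GENMASK(9, 0) = 0x0a = 10.
 */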
static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
        struct vgic_lr lr_desc;
        u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr];

        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
                lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;
        else
                lr_desc.irq = val & GICH_LR_VIRTUALID;

        lr_desc.source = 0;
        if (lr_desc.irq <= 15 &&
            vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
                lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;

        lr_desc.state = 0;

        if (val & ICH_LR_PENDING_BIT)
                lr_desc.state |= LR_STATE_PENDING;
        if (val & ICH_LR_ACTIVE_BIT)
                lr_desc.state |= LR_STATE_ACTIVE;
        if (val & ICH_LR_EOI)
                lr_desc.state |= LR_EOI_INT;
        if (val & ICH_LR_HW) {
                lr_desc.state |= LR_HW;
                lr_desc.hwirq = (val >> ICH_LR_PHYS_ID_SHIFT) & GENMASK(9, 0);
        }

        return lr_desc;
}
static void xgene_get_extd_stats(struct xgene_enet_pdata *pdata)
{
        u32 rx_drop, tx_drop;
        u32 mask, tmp;
        int i;

        for (i = 0; i < XGENE_EXTD_STATS_LEN; i++) {
                tmp = xgene_enet_rd_stat(pdata, gstrings_extd_stats[i].addr);
                if (gstrings_extd_stats[i].mask) {
                        mask = GENMASK(gstrings_extd_stats[i].mask - 1, 0);
                        pdata->extd_stats[i] += (tmp & mask);
                }
        }

        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
                /* Errata 10GE_10 - SW should interpret RALN as 0 */
                pdata->extd_stats[RALN_IDX] = 0;
        } else {
                /* Errata ENET_15 - Fixes RFCS, RFLR, TFCS counter */
                pdata->extd_stats[RFCS_IDX] -= pdata->extd_stats[RALN_IDX];
                pdata->extd_stats[RFLR_IDX] -= pdata->extd_stats[RUND_IDX];
                pdata->extd_stats[TFCS_IDX] -= pdata->extd_stats[TFRG_IDX];
        }

        pdata->mac_ops->get_drop_cnt(pdata, &rx_drop, &tx_drop);
        pdata->extd_stats[RX_OVERRUN_IDX] += rx_drop;
        pdata->extd_stats[TX_UNDERRUN_IDX] += tx_drop;

        /* Errata 10GE_8 - Update Frame recovered from Errata 10GE_8/ENET_11 */
        pdata->extd_stats[FALSE_RFLR_IDX] = pdata->false_rflr;

        /* Errata ENET_15 - Jabber Frame recovered from Errata 10GE_10/ENET_15 */
        pdata->extd_stats[FALSE_RJBR_IDX] = pdata->vlan_rjbr;
}
static unsigned int get_system_type(void)
{
#define GPIO_PDIR 0x10
        u32 pdir;
        void __iomem *gpio2 = IOMEM(VF610_GPIO2_BASE_ADDR);
        void __iomem *iomux = IOMEM(VF610_IOMUXC_BASE_ADDR);
        unsigned low, high;

        /*
         * System type is encoded as a 4-bit number specified by the
         * following pins (pulled up or down with resistors on the
         * board).
         */
        vf610_setup_pad(iomux, VF610_PAD_PTD16__GPIO_78);
        vf610_setup_pad(iomux, VF610_PAD_PTD17__GPIO_77);
        vf610_setup_pad(iomux, VF610_PAD_PTD18__GPIO_76);
        vf610_setup_pad(iomux, VF610_PAD_PTD19__GPIO_75);

        pdir = readl(gpio2 + GPIO_PDIR);

        low = 75 % 32;
        high = 78 % 32;

        pdir &= GENMASK(high, low);
        pdir >>= low;

        return pdir;
}
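/*
 * Worked example of the mask arithmetic above: GPIOs 75..78 live in the
 * second 32-bit GPIO bank, so low = 75 % 32 = 11 and high = 78 % 32 = 14.
 * GENMASK(14, 11) = 0x7800 selects the four pin bits, and the shift by 11
 * right-justifies them into the 4-bit system type.
 */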
static void __init create_one_pll(struct clockgen *cg, int idx)
{
        u32 __iomem *reg;
        u32 mult;
        struct clockgen_pll *pll = &cg->pll[idx];
        const char *input = "cg-sysclk";
        int i;

        if (!(cg->info.pll_mask & (1 << idx)))
                return;

        if (cg->coreclk && idx != PLATFORM_PLL) {
                if (IS_ERR(cg->coreclk))
                        return;

                input = "cg-coreclk";
        }

        if (cg->info.flags & CG_VER3) {
                switch (idx) {
                case PLATFORM_PLL:
                        reg = cg->regs + 0x60080;
                        break;
                case CGA_PLL1:
                        reg = cg->regs + 0x80;
                        break;
                case CGA_PLL2:
                        reg = cg->regs + 0xa0;
                        break;
                case CGB_PLL1:
                        reg = cg->regs + 0x10080;
                        break;
                case CGB_PLL2:
                        reg = cg->regs + 0x100a0;
                        break;
                default:
                        WARN_ONCE(1, "index %d\n", idx);
                        return;
                }
        } else {
                if (idx == PLATFORM_PLL)
                        reg = cg->regs + 0xc00;
                else
                        reg = cg->regs + 0x800 + 0x20 * (idx - 1);
        }

        /* Get the multiple of PLL */
        mult = cg_in(cg, reg);

        /* Check if this PLL is disabled */
        if (mult & PLL_KILL) {
                pr_debug("%s(): pll %p disabled\n", __func__, reg);
                return;
        }

        if ((cg->info.flags & CG_VER3) ||
            ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
                mult = (mult & GENMASK(8, 1)) >> 1;
        else
                mult = (mult & GENMASK(6, 1)) >> 1;
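/*
 * Worked example of the multiplier extraction above (register value
 * assumed): for the 8-bit field in bits 8:1, GENMASK(8, 1) = 0x1fe, so a
 * raw register value of 0x50 yields mult = (0x50 & 0x1fe) >> 1 = 0x28,
 * i.e. a PLL ratio of 40.
 */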
static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
                             unsigned long parent_rate)
{
        struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
        u32 n_mask, k_mask, m_mask, p_mask;
        struct _ccu_nkmp _nkmp;
        unsigned long flags;
        u32 reg;

        if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate = rate * nkmp->fixed_post_div;

        _nkmp.min_n = nkmp->n.min ?: 1;
        _nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
        _nkmp.min_k = nkmp->k.min ?: 1;
        _nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width;
        _nkmp.min_m = 1;
        _nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
        _nkmp.min_p = 1;
        _nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);

        ccu_nkmp_find_best(parent_rate, rate, &_nkmp);

        n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1, nkmp->n.shift);
        k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1, nkmp->k.shift);
        m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1, nkmp->m.shift);
        p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1, nkmp->p.shift);

        spin_lock_irqsave(nkmp->common.lock, flags);

        reg = readl(nkmp->common.base + nkmp->common.reg);
        reg &= ~(n_mask | k_mask | m_mask | p_mask);

        reg |= ((_nkmp.n - nkmp->n.offset) << nkmp->n.shift) & n_mask;
        reg |= ((_nkmp.k - nkmp->k.offset) << nkmp->k.shift) & k_mask;
        reg |= ((_nkmp.m - nkmp->m.offset) << nkmp->m.shift) & m_mask;
        reg |= (ilog2(_nkmp.p) << nkmp->p.shift) & p_mask;

        writel(reg, nkmp->common.base + nkmp->common.reg);

        spin_unlock_irqrestore(nkmp->common.lock, flags);

        ccu_helper_wait_for_lock(&nkmp->common, nkmp->lock);

        return 0;
}
u32 cpsw_mdio_get_alive(struct mii_dev *bus)
{
        struct cpsw_mdio *mdio = bus->priv;
        u32 val;

        val = readl(&mdio->regs->control);

        return val & GENMASK(15, 0);
}
static void mux_set_sel(const struct mux *mux, u32 sel)
{
        u32 mask = GENMASK(mux->mux_width - 1, 0);
        u32 val = read32(mux->reg);

        val &= ~(mask << mux->mux_shift);
        val |= (sel & mask) << mux->mux_shift;
        write32(mux->reg, val);
}
static void imx_ocotp_field_decode(uint32_t field, unsigned *word,
                                   unsigned *bit, unsigned *mask)
{
        unsigned width;

        *word = FIELD_GET(OCOTP_WORD_MASK, field) * 4;
        *bit = FIELD_GET(OCOTP_BIT_MASK, field);
        width = FIELD_GET(OCOTP_WIDTH_MASK, field);
        *mask = GENMASK(width, 0);
}
/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
        unsigned int bpr;

        if (!grp)
                bpr = __vgic_v3_get_bpr0(vmcr) + 1;
        else
                bpr = __vgic_v3_get_bpr1(vmcr);

        return pri & (GENMASK(7, 0) << bpr);
}
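/*
 * Worked example (BPR value assumed): with an effective bpr of 3, the mask
 * is GENMASK(7, 0) << 3 = 0x7f8, which truncates to 0xf8 for a u8 priority.
 * Priority 0x2b therefore maps to preemption level 0x28: the low three
 * sub-priority bits are zeroed and play no role in preemption.
 */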
static void clk_fd_general_approximation(struct clk_hw *hw, unsigned long rate,
                                         unsigned long *parent_rate,
                                         unsigned long *m, unsigned long *n)
{
        struct clk_fractional_divider *fd = to_clk_fd(hw);
        unsigned long scale;

        /*
         * Get rate closer to *parent_rate to guarantee there is no overflow
         * for m and n. In the result it will be the nearest rate left shifted
         * by (scale - fd->nwidth) bits.
         */
        scale = fls_long(*parent_rate / rate - 1);
        if (scale > fd->nwidth)
                rate <<= scale - fd->nwidth;

        rational_best_approximation(rate, *parent_rate,
                        GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
                        m, n);
}
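/*
 * Worked example of the overflow guard above (rates assumed): with
 * *parent_rate / rate = 1000, the ratio minus one is 999 and
 * fls_long(999) = 10, so representing m/n would need about 10 bits. If
 * fd->nwidth is 8, rate is shifted left by 2 bits, reducing the ratio by
 * a factor of 4 so the best approximation fits the 8-bit numerator and
 * denominator fields.
 */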
static int creg_gpio_validate_pg(struct device *dev, struct creg_gpio *hcg,
                                 int i)
{
        const struct creg_layout *layout = hcg->layout;

        if (layout->bit_per_gpio[i] < 1 || layout->bit_per_gpio[i] > 8)
                return -EINVAL;

        /* Check that the "on" value fits its placeholder */
        if (GENMASK(31, layout->bit_per_gpio[i]) & layout->on[i])
                return -EINVAL;

        /* Check that the "off" value fits its placeholder */
        if (GENMASK(31, layout->bit_per_gpio[i]) & layout->off[i])
                return -EINVAL;

        if (layout->on[i] == layout->off[i])
                return -EINVAL;

        return 0;
}
struct clk_hw *clk_hw_register_fractional_divider(struct device *dev,
                const char *name, const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth,
                u8 clk_divider_flags, spinlock_t *lock)
{
        struct clk_fractional_divider *fd;
        struct clk_init_data init;
        struct clk_hw *hw;
        int ret;

        fd = kzalloc(sizeof(*fd), GFP_KERNEL);
        if (!fd)
                return ERR_PTR(-ENOMEM);

        init.name = name;
        init.ops = &clk_fractional_divider_ops;
        init.flags = flags | CLK_IS_BASIC;
        init.parent_names = parent_name ? &parent_name : NULL;
        init.num_parents = parent_name ? 1 : 0;

        fd->reg = reg;
        fd->mshift = mshift;
        fd->mwidth = mwidth;
        fd->mmask = GENMASK(mwidth - 1, 0) << mshift;
        fd->nshift = nshift;
        fd->nwidth = nwidth;
        fd->nmask = GENMASK(nwidth - 1, 0) << nshift;
        fd->flags = clk_divider_flags;
        fd->lock = lock;
        fd->hw.init = &init;

        hw = &fd->hw;
        ret = clk_hw_register(dev, hw);
        if (ret) {
                kfree(fd);
                hw = ERR_PTR(ret);
        }

        return hw;
}
static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
                                             unsigned long parent_rate)
{
        struct socfpga_periph_clk *socfpgaclk = to_socfpga_periph_clk(hwclk);
        u32 div;

        if (socfpgaclk->fixed_div) {
                div = socfpgaclk->fixed_div;
        } else if (socfpgaclk->div_reg) {
                div = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
                div &= GENMASK(socfpgaclk->width - 1, 0);
                div += 1;
        } else {
                div = 1;
        }

        return parent_rate / div;
}
static u64 aqr107_get_stat(struct phy_device *phydev, int index)
{
        const struct aqr107_hw_stat *stat = aqr107_hw_stats + index;
        int len_l = min(stat->size, 16);
        int len_h = stat->size - len_l;
        u64 ret;
        int val;

        val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg);
        if (val < 0)
                return U64_MAX;

        ret = val & GENMASK(len_l - 1, 0);
        if (len_h) {
                val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg + 1);
                if (val < 0)
                        return U64_MAX;
                ret += (val & GENMASK(len_h - 1, 0)) << 16;
        }

        return ret;
}
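/*
 * Worked example of the two-register recombination above (register
 * contents assumed): for a 26-bit counter, len_l = 16 and len_h = 10.
 * With 0x1234 in the low register and 0x03ff in the high one,
 * ret = 0x1234 + ((0x03ff & GENMASK(9, 0)) << 16) = 0x03ff1234, the
 * recombined counter value.
 */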
static int init_8960(struct tsens_device *tmdev)
{
        int ret, i;
        u32 reg_cntl;

        tmdev->tm_map = dev_get_regmap(tmdev->dev, NULL);
        if (!tmdev->tm_map)
                return -ENODEV;

        /*
         * The status registers for each sensor are discontiguous
         * because some SoCs have 5 sensors while others have more
         * but the control registers stay in the same place, i.e.
         * directly after the first 5 status registers.
         */
        for (i = 0; i < tmdev->num_sensors; i++) {
                if (i >= 5)
                        tmdev->sensor[i].status = S0_STATUS_ADDR + 40;
                tmdev->sensor[i].status += i * 4;
        }

        reg_cntl = SW_RST;
        ret = regmap_update_bits(tmdev->tm_map, CNTL_ADDR, SW_RST, reg_cntl);
        if (ret)
                return ret;

        if (tmdev->num_sensors > 1) {
                reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18);
                reg_cntl &= ~SW_RST;
                ret = regmap_update_bits(tmdev->tm_map, CONFIG_ADDR,
                                         CONFIG_MASK, CONFIG);
        } else {
                reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16);
                reg_cntl &= ~CONFIG_MASK_8660;
                reg_cntl |= CONFIG_8660 << CONFIG_SHIFT_8660;
        }

        reg_cntl |= GENMASK(tmdev->num_sensors - 1, 0) << SENSOR0_SHIFT;
        ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
        if (ret)
                return ret;

        reg_cntl |= EN;
        ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
        if (ret)
                return ret;

        return 0;
}
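/*
 * Worked example of the sensor-enable mask above (sensor count assumed):
 * with num_sensors = 5, GENMASK(4, 0) = 0x1f, so five enable bits are set
 * starting at SENSOR0_SHIFT and every populated sensor is switched on with
 * a single register write.
 */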
static irqreturn_t rockchip_saradc_isr(int irq, void *dev_id)
{
        struct rockchip_saradc *info = dev_id;

        /* Read value */
        info->last_val = readl_relaxed(info->regs + SARADC_DATA);
        info->last_val &= GENMASK(info->data->num_bits - 1, 0);

        /* Clear irq & power down adc */
        writel_relaxed(0, info->regs + SARADC_CTRL);

        complete(&info->completion);

        return IRQ_HANDLED;
}
static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
                                u32 v, u8 bits)
{
        struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);

        xspi->data = (xspi->data << bits) | (v & GENMASK(bits - 1, 0));
        xspi->data_sz += bits;
        if (xspi->data_sz >= 16) {
                xtfpga_spi_write32(xspi, XTFPGA_SPI_DATA,
                                   xspi->data >> (xspi->data_sz - 16));
                xspi->data_sz -= 16;
                xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 1);
                xtfpga_spi_wait_busy(xspi);
                xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
        }

        return 0;
}
static int ad7298_read_raw(struct iio_dev *indio_dev,
                           struct iio_chan_spec const *chan,
                           int *val, int *val2, long m)
{
        int ret;
        struct ad7298_state *st = iio_priv(indio_dev);

        switch (m) {
        case IIO_CHAN_INFO_RAW:
                ret = iio_device_claim_direct_mode(indio_dev);
                if (ret)
                        return ret;

                if (chan->address == AD7298_CH_TEMP)
                        ret = ad7298_scan_temp(st, val);
                else
                        ret = ad7298_scan_direct(st, chan->address);
                iio_device_release_direct_mode(indio_dev);

                if (ret < 0)
                        return ret;

                if (chan->address != AD7298_CH_TEMP)
                        *val = ret & GENMASK(chan->scan_type.realbits - 1, 0);

                return IIO_VAL_INT;
        case IIO_CHAN_INFO_SCALE:
                switch (chan->type) {
                case IIO_VOLTAGE:
                        *val = ad7298_get_ref_voltage(st);
                        *val2 = chan->scan_type.realbits;
                        return IIO_VAL_FRACTIONAL_LOG2;
                case IIO_TEMP:
                        *val = ad7298_get_ref_voltage(st);
                        *val2 = 10;
                        return IIO_VAL_FRACTIONAL;
                default:
                        return -EINVAL;
                }
        case IIO_CHAN_INFO_OFFSET:
                *val = 1093 - 2732500 / ad7298_get_ref_voltage(st);
                return IIO_VAL_INT;
        }
        return -EINVAL;
}
static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
                                             unsigned long parent_rate)
{
        struct socfpga_periph_clk *socfpgaclk = to_socfpga_periph_clk(hwclk);
        u32 div, val;

        if (socfpgaclk->fixed_div) {
                div = socfpgaclk->fixed_div;
        } else {
                if (socfpgaclk->div_reg) {
                        val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
                        val &= GENMASK(socfpgaclk->width - 1, 0);
                        parent_rate /= (val + 1);
                }
                div = ((readl(socfpgaclk->hw.reg) & 0x1ff) + 1);
        }

        return parent_rate / div;
}
/*
 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
 *                                   guest.
 *
 * @vcpu: the offending vcpu
 *
 * Returns:
 *  1: GICV access successfully performed
 *  0: Not a GICV access
 * -1: Illegal GICV access
 */
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);
        struct vgic_dist *vgic = &kvm->arch.vgic;
        phys_addr_t fault_ipa;
        void __iomem *addr;
        int rd;

        /* Build the full address */
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
        fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

        /* If not for GICV, move on */
        if (fault_ipa < vgic->vgic_cpu_base ||
            fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
                return 0;

        /* Reject anything but a 32bit access */
        if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
                return -1;

        /* Not aligned? Don't bother */
        if (fault_ipa & 3)
                return -1;

        rd = kvm_vcpu_dabt_get_rd(vcpu);
        addr = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
        addr += fault_ipa - vgic->vgic_cpu_base;

        if (kvm_vcpu_dabt_iswrite(vcpu)) {
                u32 data = vcpu_get_reg(vcpu, rd);

                if (__is_be(vcpu)) {
                        /* guest pre-swabbed data, undo this for writel() */
                        data = swab32(data);
                }
                writel_relaxed(data, addr);
        } else {
                u32 data = readl_relaxed(addr);

                if (__is_be(vcpu)) {
                        /* guest expects swabbed data */
                        data = swab32(data);
                }
                vcpu_set_reg(vcpu, rd, data);
        }

        return 1;
}
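/*
 * Worked example of the address rebuild above (addresses assumed): the
 * fault IPA reported by the fault syndrome is page aligned, e.g.
 * 0x08010000, while HFAR holds the faulting address including its page
 * offset. OR-ing in hfar & GENMASK(11, 0), e.g. 0x00c, reconstructs the
 * exact word the guest touched: 0x0801000c.
 */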
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
                                     u32 nbuf)
{
        struct sk_buff *skb;
        struct xgene_enet_raw_desc16 *raw_desc;
        struct xgene_enet_pdata *pdata;
        struct net_device *ndev;
        struct device *dev;
        dma_addr_t dma_addr;
        u32 tail = buf_pool->tail;
        u32 slots = buf_pool->slots - 1;
        u16 bufdatalen, len;
        int i;

        ndev = buf_pool->ndev;
        dev = ndev_to_dev(buf_pool->ndev);
        pdata = netdev_priv(ndev);

        bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
        len = XGENE_ENET_MAX_MTU;

        for (i = 0; i < nbuf; i++) {
                raw_desc = &buf_pool->raw_desc16[tail];

                skb = netdev_alloc_skb_ip_align(ndev, len);
                if (unlikely(!skb))
                        return -ENOMEM;

                buf_pool->rx_skb[tail] = skb;

                dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, dma_addr)) {
                        netdev_err(ndev, "DMA mapping error\n");
                        dev_kfree_skb_any(skb);
                        return -EINVAL;
                }

                raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
                                           SET_VAL(BUFDATALEN, bufdatalen) |
                                           SET_BIT(COHERENT));
                tail = (tail + 1) & slots;
        }

        pdata->ring_ops->wr_cmd(buf_pool, nbuf);
        buf_pool->tail = tail;

        return 0;
}