static int stm32_usbphyc_pll_init(struct stm32_usbphyc *usbphyc)
{
	struct pll_params pll_params;
	u32 clk_rate = clk_get_rate(usbphyc->clk);
	u32 ndiv, frac;
	u32 usbphyc_pll;

	if ((clk_rate < PLL_INFF_MIN_RATE_HZ) ||
	    (clk_rate > PLL_INFF_MAX_RATE_HZ)) {
		dev_err(usbphyc->dev, "input clk freq (%dHz) out of range\n",
			clk_rate);
		return -EINVAL;
	}

	stm32_usbphyc_get_pll_params(clk_rate, &pll_params);
	ndiv = FIELD_PREP(PLLNDIV, pll_params.ndiv);
	frac = FIELD_PREP(PLLFRACIN, pll_params.frac);

	usbphyc_pll = PLLDITHEN1 | PLLDITHEN0 | PLLSTRBYP | ndiv;

	if (pll_params.frac)
		usbphyc_pll |= PLLFRACCTL | frac;

	writel_relaxed(usbphyc_pll, usbphyc->base + STM32_USBPHYC_PLL);

	dev_dbg(usbphyc->dev, "input clk freq=%dHz, ndiv=%lu, frac=%lu\n",
		clk_rate, FIELD_GET(PLLNDIV, usbphyc_pll),
		FIELD_GET(PLLFRACIN, usbphyc_pll));

	return 0;
}
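/*
 * Illustrative sketch (not taken from any of the drivers in this collection):
 * every snippet here packs register fields with FIELD_PREP() and unpacks them
 * with FIELD_GET() from <linux/bitfield.h>.  The helpers below show the round
 * trip on a hypothetical 7-bit EXAMPLE_NDIV field.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_NDIV	GENMASK(6, 0)	/* hypothetical field, bits [6:0] */

static u32 example_pack_ndiv(u32 reg, u32 ndiv)
{
	/* Clear the field, then shift the new value into position. */
	reg &= ~EXAMPLE_NDIV;
	reg |= FIELD_PREP(EXAMPLE_NDIV, ndiv);
	return reg;
}

static u32 example_unpack_ndiv(u32 reg)
{
	/* Mask out the field and shift it back down to bit 0. */
	return FIELD_GET(EXAMPLE_NDIV, reg);
}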
static void meson_ao_cec_write(struct meson_ao_cec_device *ao_cec,
			       unsigned long address, u8 data,
			       int *res)
{
	unsigned long flags;
	u32 reg = FIELD_PREP(CEC_RW_ADDR, address) |
		  FIELD_PREP(CEC_RW_WR_DATA, data) |
		  CEC_RW_WRITE_EN;
	int ret = 0;

	if (res && *res)
		return;

	spin_lock_irqsave(&ao_cec->cec_reg_lock, flags);

	ret = meson_ao_cec_wait_busy(ao_cec);
	if (ret)
		goto write_out;

	writel_relaxed(reg, ao_cec->base + CEC_RW_REG);

write_out:
	spin_unlock_irqrestore(&ao_cec->cec_reg_lock, flags);

	if (res)
		*res = ret;
}
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
	struct sk_buff *iter, *last = skb;
	u32 info, pad;

	/* Buffer layout:
	 * |   4B   | xfer len |      pad       |  4B  |
	 * | TXINFO | pkt/cmd  | zero pad to 4B | zero |
	 *
	 * length field of TXINFO should be set to 'xfer len'.
	 */
	info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
	       FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
	put_unaligned_le32(info, skb_push(skb, sizeof(info)));

	/* Add zero pad of 4 - 7 bytes, e.g. len = 10 gives 12 + 4 - 10 = 6 */
	pad = round_up(skb->len, 4) + 4 - skb->len;

	/* First packet of an A-MSDU burst keeps track of the whole burst
	 * length, need to update length of it and the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	/* Append the zero pad to the last fragment so the transfer matches
	 * the layout described above.
	 */
	if (unlikely(pad)) {
		if (skb_pad(last, pad))
			return -ENOMEM;
		__skb_put(last, pad);
	}

	return 0;
}
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
			     unsigned int host_num_mems)
{
	struct nfp_flower_priv *priv = app->priv;
	int err, stats_size;

	hash_init(priv->mask_table);

	err = rhashtable_init(&priv->flow_table, &nfp_flower_table_params);
	if (err)
		return err;

	get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));

	/* Init ring buffer and unallocated mask_ids. */
	priv->mask_ids.mask_id_free_list.buf =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
	if (!priv->mask_ids.mask_id_free_list.buf)
		goto err_free_flow_table;

	priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;

	/* Init timestamps for mask id */
	priv->mask_ids.last_used =
		kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
			      sizeof(*priv->mask_ids.last_used), GFP_KERNEL);
	if (!priv->mask_ids.last_used)
		goto err_free_mask_id;

	/* Init ring buffer and unallocated stats_ids. */
	priv->stats_ids.free_list.buf =
		vmalloc(array_size(NFP_FL_STATS_ELEM_RS,
				   priv->stats_ring_size));
	if (!priv->stats_ids.free_list.buf)
		goto err_free_last_used;

	priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);

	stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
		     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
	priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
				     GFP_KERNEL);
	if (!priv->stats)
		goto err_free_ring_buf;

	spin_lock_init(&priv->stats_lock);

	return 0;

err_free_ring_buf:
	vfree(priv->stats_ids.free_list.buf);
err_free_last_used:
	kfree(priv->mask_ids.last_used);
err_free_mask_id:
	kfree(priv->mask_ids.mask_id_free_list.buf);
err_free_flow_table:
	rhashtable_destroy(&priv->flow_table);
	return -ENOMEM;
}
static int uniphier_pciephy_init(struct phy *phy)
{
	struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	ret = reset_control_deassert(priv->rst);
	if (ret)
		goto out_clk_disable;

	uniphier_pciephy_set_param(priv, PCL_PHY_R00,
				   RX_EQ_ADJ_EN, RX_EQ_ADJ_EN);
	uniphier_pciephy_set_param(priv, PCL_PHY_R06, RX_EQ_ADJ,
				   FIELD_PREP(RX_EQ_ADJ, RX_EQ_ADJ_VAL));
	uniphier_pciephy_set_param(priv, PCL_PHY_R26, VCO_CTRL,
				   FIELD_PREP(VCO_CTRL, VCO_CTRL_INIT_VAL));
	usleep_range(1, 10);

	uniphier_pciephy_deassert(priv);
	usleep_range(1, 10);

	return 0;

out_clk_disable:
	clk_disable_unprepare(priv->clk);

	return ret;
}
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
			    struct tc_cls_flower_offload *flow,
			    u8 key_type, bool mask_version)
{
	struct flow_dissector_key_vlan *flow_vlan;
	u16 tmp_tci;

	/* Populate the metadata frame. */
	frame->nfp_flow_key_layer = key_type;
	frame->mask_id = ~0;

	if (mask_version) {
		frame->tci = cpu_to_be16(~0);
		return;
	}

	flow_vlan = skb_flow_dissector_target(flow->dissector,
					      FLOW_DISSECTOR_KEY_VLAN,
					      flow->key);

	/* Populate the tci field. */
	if (!flow_vlan->vlan_id) {
		tmp_tci = 0;
	} else {
		tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
				     flow_vlan->vlan_priority) |
			  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
				     flow_vlan->vlan_id) |
			  NFP_FLOWER_MASK_VLAN_CFI;
	}

	frame->tci = cpu_to_be16(tmp_tci);
}
/* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	struct cvm_mmc_slot *old_slot;
	u64 emm_sample, emm_switch;

	if (slot->bus_id == host->last_slot)
		return;

	if (host->last_slot >= 0 && host->slot[host->last_slot]) {
		old_slot = host->slot[host->last_slot];
		old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
		old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
	}

	writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
	emm_switch = slot->cached_switch;
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
		     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
	writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

	host->last_slot = slot->bus_id;
}
/* Try to clean up failed DMA. */
static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
{
	u64 emm_dma;

	emm_dma = readq(host->base + MIO_EMM_DMA(host));
	emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
	set_bus_id(&emm_dma, get_bus_id(rsp_sts));

	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
}
static int
__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
			    const void *fw_data, int len, u32 dst_addr)
{
	u8 *data = sg_virt(&buf->urb->sg[0]);
	DECLARE_COMPLETION_ONSTACK(cmpl);
	__le32 info;
	u32 val;
	int err;

	info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
			   FIELD_PREP(MT_MCU_MSG_LEN, len) |
			   MT_MCU_MSG_TYPE_CMD);

	memcpy(data, &info, sizeof(info));
	memcpy(data + sizeof(info), fw_data, len);
	memset(data + sizeof(info) + len, 0, 4);

	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_ADDR, dst_addr);
	len = roundup(len, 4);
	mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
			MT_FCE_DMA_LEN, len << 16);

	buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
	err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT,
			       MT_EP_OUT_INBAND_CMD,
			       buf, GFP_KERNEL,
			       mt76u_mcu_complete_urb, &cmpl);
	if (err < 0)
		return err;

	if (!wait_for_completion_timeout(&cmpl,
					 msecs_to_jiffies(1000))) {
		dev_err(dev->mt76.dev, "firmware upload timed out\n");
		usb_kill_urb(buf->urb);
		return -ETIMEDOUT;
	}

	if (mt76u_urb_error(buf->urb)) {
		dev_err(dev->mt76.dev, "firmware upload failed: %d\n",
			buf->urb->status);
		return buf->urb->status;
	}

	val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
	val++;
	mt76_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val);

	return 0;
}
static int stm32_sai_sync_conf_provider(struct stm32_sai_data *sai, int synco)
{
	u32 prev_synco;
	int ret;

	/* Enable peripheral clock to allow GCR register access */
	ret = clk_prepare_enable(sai->pclk);
	if (ret) {
		dev_err(&sai->pdev->dev, "failed to enable clock: %d\n", ret);
		return ret;
	}

	dev_dbg(&sai->pdev->dev, "Set %pOFn%s as synchro provider\n",
		sai->pdev->dev.of_node,
		synco == STM_SAI_SYNC_OUT_A ? "A" : "B");

	prev_synco = FIELD_GET(SAI_GCR_SYNCOUT_MASK, readl_relaxed(sai->base));
	if (prev_synco != STM_SAI_SYNC_OUT_NONE && synco != prev_synco) {
		dev_err(&sai->pdev->dev, "%pOFn%s already set as sync provider\n",
			sai->pdev->dev.of_node,
			prev_synco == STM_SAI_SYNC_OUT_A ? "A" : "B");
		clk_disable_unprepare(sai->pclk);
		return -EINVAL;
	}

	writel_relaxed(FIELD_PREP(SAI_GCR_SYNCOUT_MASK, synco), sai->base);

	clk_disable_unprepare(sai->pclk);

	return 0;
}
static void sdhci_cdns_set_control_reg(struct sdhci_host *host)
{
	struct mmc *mmc = host->mmc;
	struct sdhci_cdns_plat *plat = dev_get_platdata(mmc->dev);
	unsigned int clock = mmc->clock;
	u32 mode, tmp;

	/*
	 * REVISIT:
	 * The mode should be decided by MMC_TIMING_* like Linux, but
	 * U-Boot does not support timing.  Use the clock frequency instead.
	 */
	if (clock <= 26000000) {
		mode = SDHCI_CDNS_HRS06_MODE_SD; /* use this for Legacy */
	} else if (clock <= 52000000) {
		if (mmc->ddr_mode)
			mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
		else
			mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
	} else {
		if (mmc->ddr_mode)
			mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
		else
			mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
	}

	tmp = readl(plat->hrs_addr + SDHCI_CDNS_HRS06);
	tmp &= ~SDHCI_CDNS_HRS06_MODE;
	tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_MODE, mode);
	writel(tmp, plat->hrs_addr + SDHCI_CDNS_HRS06);
}
static int
mt76x2_efuse_read(struct mt76x2_dev *dev, u16 addr, u8 *data)
{
	u32 val;
	int i;

	val = mt76_rr(dev, MT_EFUSE_CTRL);
	val &= ~(MT_EFUSE_CTRL_AIN | MT_EFUSE_CTRL_MODE);
	val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
	val |= MT_EFUSE_CTRL_KICK;
	mt76_wr(dev, MT_EFUSE_CTRL, val);

	if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
		return -ETIMEDOUT;

	udelay(2);

	val = mt76_rr(dev, MT_EFUSE_CTRL);
	if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) {
		memset(data, 0xff, 16);
		return 0;
	}

	for (i = 0; i < 4; i++) {
		val = mt76_rr(dev, MT_EFUSE_DATA(i));
		put_unaligned_le32(val, data + 4 * i);
	}

	return 0;
}
int sun8i_tcon_top_set_hdmi_src(struct device *dev, int tcon)
{
	struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
	unsigned long flags;
	u32 val;

	if (!sun8i_tcon_top_node_is_tcon_top(dev->of_node)) {
		dev_err(dev, "Device is not TCON TOP!\n");
		return -EINVAL;
	}

	if (tcon < 2 || tcon > 3) {
		dev_err(dev, "TCON index must be 2 or 3!\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&tcon_top->reg_lock, flags);

	val = readl(tcon_top->regs + TCON_TOP_GATE_SRC_REG);
	val &= ~TCON_TOP_HDMI_SRC_MSK;
	val |= FIELD_PREP(TCON_TOP_HDMI_SRC_MSK, tcon - 1);
	writel(val, tcon_top->regs + TCON_TOP_GATE_SRC_REG);

	spin_unlock_irqrestore(&tcon_top->reg_lock, flags);

	return 0;
}
static int mt7615_load_firmware(struct mt7615_dev *dev)
{
	int ret;
	u32 val;

	val = mt76_get_field(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE);

	if (val != FW_STATE_FW_DOWNLOAD) {
		dev_err(dev->mt76.dev, "Firmware is not ready for download\n");
		return -EIO;
	}

	ret = mt7615_load_patch(dev);
	if (ret)
		return ret;

	ret = mt7615_load_ram(dev);
	if (ret)
		return ret;

	if (!mt76_poll_msec(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE,
			    FIELD_PREP(MT_TOP_MISC2_FW_STATE,
				       FW_STATE_CR4_RDY), 500)) {
		dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
		return -EIO;
	}

	dev_dbg(dev->mt76.dev, "Firmware init done\n");

	return 0;
}
void mt76x2_mac_set_tx_protection(struct mt76x2_dev *dev, u32 val)
{
	u32 data = 0;

	if (val != ~0)
		data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) |
		       MT_PROT_CFG_RTS_THRESH;

	mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val);

	mt76_rmw(dev, MT_CCK_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_OFDM_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_MM20_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_MM40_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_GF20_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_GF40_PROT_CFG,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_TX_PROT_CFG6,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_TX_PROT_CFG7,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
	mt76_rmw(dev, MT_TX_PROT_CFG8,
		 MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data);
}
static int stm32_qspi_setup(struct spi_device *spi)
{
	struct spi_controller *ctrl = spi->master;
	struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
	struct stm32_qspi_flash *flash;
	u32 cr, presc;

	if (ctrl->busy)
		return -EBUSY;

	if (!spi->max_speed_hz)
		return -EINVAL;

	presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;

	flash = &qspi->flash[spi->chip_select];
	flash->qspi = qspi;
	flash->cs = spi->chip_select;
	flash->presc = presc;

	mutex_lock(&qspi->lock);
	writel_relaxed(LPTR_DFT_TIMEOUT, qspi->io_base + QSPI_LPTR);
	cr = FIELD_PREP(CR_FTHRES_MASK, 3) | CR_TCEN | CR_SSHIFT | CR_EN;
	writel_relaxed(cr, qspi->io_base + QSPI_CR);

	/* set dcr fsize to max address */
	writel_relaxed(DCR_FSIZE_MASK, qspi->io_base + QSPI_DCR);
	mutex_unlock(&qspi->lock);

	return 0;
}
static void mt7601u_init_usb_dma(struct mt7601u_dev *dev)
{
	u32 val;

	val = FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) |
	      FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT,
			 MT_USB_AGGR_SIZE_LIMIT) |
	      MT_USB_DMA_CFG_RX_BULK_EN |
	      MT_USB_DMA_CFG_TX_BULK_EN;
	if (dev->in_max_packet == 512)
		val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN;
	mt7601u_wr(dev, MT_USB_DMA_CFG, val);

	val |= MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
	mt7601u_wr(dev, MT_USB_DMA_CFG, val);
	val &= ~MT_USB_DMA_CFG_UDMA_RX_WL_DROP;
	mt7601u_wr(dev, MT_USB_DMA_CFG, val);
}
static int
mt76x2_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
	       const struct ieee80211_tx_queue_params *params)
{
	struct mt76x2_dev *dev = hw->priv;
	u8 cw_min = 5, cw_max = 10, qid;
	u32 val;

	qid = dev->mt76.q_tx[queue].hw_idx;

	if (params->cw_min)
		cw_min = fls(params->cw_min);
	if (params->cw_max)
		cw_max = fls(params->cw_max);

	val = FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop) |
	      FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) |
	      FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) |
	      FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max);
	mt76_wr(dev, MT_EDCA_CFG_AC(qid), val);

	val = mt76_rr(dev, MT_WMM_TXOP(qid));
	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(qid));
	val |= params->txop << MT_WMM_TXOP_SHIFT(qid);
	mt76_wr(dev, MT_WMM_TXOP(qid), val);

	val = mt76_rr(dev, MT_WMM_AIFSN);
	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(qid));
	val |= params->aifs << MT_WMM_AIFSN_SHIFT(qid);
	mt76_wr(dev, MT_WMM_AIFSN, val);

	val = mt76_rr(dev, MT_WMM_CWMIN);
	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(qid));
	val |= cw_min << MT_WMM_CWMIN_SHIFT(qid);
	mt76_wr(dev, MT_WMM_CWMIN, val);

	val = mt76_rr(dev, MT_WMM_CWMAX);
	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(qid));
	val |= cw_max << MT_WMM_CWMAX_SHIFT(qid);
	mt76_wr(dev, MT_WMM_CWMAX, val);

	return 0;
}
static int stm32_vrefbuf_set_voltage_sel(struct regulator_dev *rdev,
					 unsigned sel)
{
	struct stm32_vrefbuf *priv = rdev_get_drvdata(rdev);
	u32 val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);

	val = (val & ~STM32_VRS) | FIELD_PREP(STM32_VRS, sel);
	writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);

	return 0;
}
static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
				   bool enable)
{
	struct st_lsm6dsx_hw *hw = sensor->hw;
	u8 data;

	data = hw->enable_mask ? ST_LSM6DSX_MAX_FIFO_ODR_VAL : 0;
	return regmap_update_bits(hw->regmap, ST_LSM6DSX_REG_FIFO_MODE_ADDR,
				  ST_LSM6DSX_FIFO_ODR_MASK,
				  FIELD_PREP(ST_LSM6DSX_FIFO_ODR_MASK, data));
}
static int clk_sccg_pll_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
	u32 val;

	val = readl(pll->base + PLL_CFG0);
	val &= ~SSCG_PLL_BYPASS_MASK;
	val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, pll->setup.bypass);
	writel(val, pll->base + PLL_CFG0);

	return clk_sccg_pll_wait_lock(pll);
}
static int clk_sccg_pll_set_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct clk_sccg_pll *pll = to_clk_sccg_pll(hw);
	struct clk_sccg_pll_setup *setup = &pll->setup;
	u32 val;

	/* set bypass here too since the parent might be the same */
	val = readl(pll->base + PLL_CFG0);
	val &= ~SSCG_PLL_BYPASS_MASK;
	val |= FIELD_PREP(SSCG_PLL_BYPASS_MASK, setup->bypass);
	writel(val, pll->base + PLL_CFG0);

	val = readl_relaxed(pll->base + PLL_CFG2);
	val &= ~(PLL_DIVF1_MASK | PLL_DIVF2_MASK);
	val &= ~(PLL_DIVR1_MASK | PLL_DIVR2_MASK | PLL_DIVQ_MASK);
	val |= FIELD_PREP(PLL_DIVF1_MASK, setup->divf1);
	val |= FIELD_PREP(PLL_DIVF2_MASK, setup->divf2);
	val |= FIELD_PREP(PLL_DIVR1_MASK, setup->divr1);
	val |= FIELD_PREP(PLL_DIVR2_MASK, setup->divr2);
	val |= FIELD_PREP(PLL_DIVQ_MASK, setup->divq);
	writel_relaxed(val, pll->base + PLL_CFG2);

	return clk_sccg_pll_wait_lock(pll);
}
static int
__mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
			int cmd, bool wait_resp)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;
	int ret, sent;
	u8 seq = 0;
	u32 info;

	if (test_bit(MT76_REMOVED, &dev->state))
		return 0;

	pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
	if (wait_resp) {
		seq = ++usb->mcu.msg_seq & 0xf;
		if (!seq)
			seq = ++usb->mcu.msg_seq & 0xf;
	}

	info = FIELD_PREP(MT_MCU_MSG_CMD_SEQ, seq) |
	       FIELD_PREP(MT_MCU_MSG_CMD_TYPE, cmd) |
	       MT_MCU_MSG_TYPE_CMD;
	ret = mt76x02u_skb_dma_info(skb, CPU_TX_PORT, info);
	if (ret)
		return ret;

	ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
	if (ret)
		return ret;

	if (wait_resp)
		ret = mt76x02u_mcu_wait_resp(dev, seq);

	consume_skb(skb);

	return ret;
}
static void uniphier_pciephy_set_param(struct uniphier_pciephy_priv *priv,
				       u32 reg, u32 mask, u32 param)
{
	u32 val;

	/* read previous data */
	val  = FIELD_PREP(TESTI_DAT_MASK, 1);
	val |= FIELD_PREP(TESTI_ADR_MASK, reg);
	uniphier_pciephy_testio_write(priv, val);
	val = readl(priv->base + PCL_PHY_TEST_O);

	/* update value */
	val &= ~FIELD_PREP(TESTI_DAT_MASK, mask);
	val  = FIELD_PREP(TESTI_DAT_MASK, mask & param);
	val |= FIELD_PREP(TESTI_ADR_MASK, reg);
	uniphier_pciephy_testio_write(priv, val);
	uniphier_pciephy_testio_write(priv, val | TESTI_WR_EN);
	uniphier_pciephy_testio_write(priv, val);

	/* read current data as dummy */
	val  = FIELD_PREP(TESTI_DAT_MASK, 1);
	val |= FIELD_PREP(TESTI_ADR_MASK, reg);
	uniphier_pciephy_testio_write(priv, val);
	readl(priv->base + PCL_PHY_TEST_O);
}
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
			    struct nfp_flower_meta_tci *msk,
			    struct tc_cls_flower_offload *flow, u8 key_type)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
	u16 tmp_tci;

	memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
	memset(msk, 0, sizeof(struct nfp_flower_meta_tci));

	/* Populate the metadata frame. */
	ext->nfp_flow_key_layer = key_type;
	ext->mask_id = ~0;

	msk->nfp_flow_key_layer = key_type;
	msk->mask_id = ~0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		/* Populate the tci field. */
		if (match.key->vlan_id || match.key->vlan_priority) {
			tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
					     match.key->vlan_priority) |
				  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
					     match.key->vlan_id) |
				  NFP_FLOWER_MASK_VLAN_CFI;
			ext->tci = cpu_to_be16(tmp_tci);

			tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
					     match.mask->vlan_priority) |
				  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
					     match.mask->vlan_id) |
				  NFP_FLOWER_MASK_VLAN_CFI;
			msk->tci = cpu_to_be16(tmp_tci);
		}
	}
}
static int meson_gxl_read_reg(struct phy_device *phydev,
			      unsigned int bank, unsigned int reg)
{
	int ret;

	ret = meson_gxl_open_banks(phydev);
	if (ret)
		goto out;

	ret = phy_write(phydev, TSTCNTL, TSTCNTL_READ |
			FIELD_PREP(TSTCNTL_REG_BANK_SEL, bank) |
			TSTCNTL_TEST_MODE |
			FIELD_PREP(TSTCNTL_READ_ADDRESS, reg));
	if (ret)
		goto out;

	ret = phy_read(phydev, TSTREAD1);

out:
	/* Close the bank access on our way out */
	meson_gxl_close_banks(phydev);
	return ret;
}
static int meson_ao_cec_adap_enable(struct cec_adapter *adap, bool enable)
{
	struct meson_ao_cec_device *ao_cec = adap->priv;
	int ret;

	meson_ao_cec_irq_setup(ao_cec, false);

	writel_bits_relaxed(CEC_GEN_CNTL_RESET, CEC_GEN_CNTL_RESET,
			    ao_cec->base + CEC_GEN_CNTL_REG);

	if (!enable)
		return 0;

	/* Enable gated clock (Normal mode). */
	writel_bits_relaxed(CEC_GEN_CNTL_CLK_CTRL_MASK,
			    FIELD_PREP(CEC_GEN_CNTL_CLK_CTRL_MASK,
				       CEC_GEN_CNTL_CLK_ENABLE),
			    ao_cec->base + CEC_GEN_CNTL_REG);

	udelay(100);

	/* Release Reset */
	writel_bits_relaxed(CEC_GEN_CNTL_RESET, 0,
			    ao_cec->base + CEC_GEN_CNTL_REG);

	/* Clear buffers */
	ret = meson_ao_cec_clear(ao_cec);
	if (ret)
		return ret;

	/* CEC arbitration 3/5/7 bit time set. */
	ret = meson_ao_cec_arbit_bit_time_set(ao_cec,
					      CEC_SIGNAL_FREE_TIME_RETRY,
					      0x118);
	if (ret)
		return ret;

	ret = meson_ao_cec_arbit_bit_time_set(ao_cec,
					      CEC_SIGNAL_FREE_TIME_NEW_INITIATOR,
					      0x000);
	if (ret)
		return ret;

	ret = meson_ao_cec_arbit_bit_time_set(ao_cec,
					      CEC_SIGNAL_FREE_TIME_NEXT_XFER,
					      0x2aa);
	if (ret)
		return ret;

	meson_ao_cec_irq_setup(ao_cec, true);

	return 0;
}
static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
{
	struct nfp_flower_priv *priv = app->priv;
	u32 freed_stats_id, temp_stats_id;
	struct circ_buf *ring;

	ring = &priv->stats_ids.free_list;
	freed_stats_id = priv->stats_ring_size;

	/* Check for unallocated entries first. */
	if (priv->stats_ids.init_unalloc > 0) {
		if (priv->active_mem_unit == priv->total_mem_units) {
			priv->stats_ids.init_unalloc--;
			priv->active_mem_unit = 0;
		}

		*stats_context_id =
			FIELD_PREP(NFP_FL_STAT_ID_STAT,
				   priv->stats_ids.init_unalloc - 1) |
			FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
				   priv->active_mem_unit);
		priv->active_mem_unit++;
		return 0;
	}

	/* Check if buffer is empty. */
	if (ring->head == ring->tail) {
		*stats_context_id = freed_stats_id;
		return -ENOENT;
	}

	memcpy(&temp_stats_id, &ring->buf[ring->tail], NFP_FL_STATS_ELEM_RS);
	*stats_context_id = temp_stats_id;
	memcpy(&ring->buf[ring->tail], &freed_stats_id, NFP_FL_STATS_ELEM_RS);
	ring->tail = (ring->tail + NFP_FL_STATS_ELEM_RS) %
		     (priv->stats_ring_size * NFP_FL_STATS_ELEM_RS);

	return 0;
}
static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_plat *plat,
				    u8 addr, u8 data)
{
	void __iomem *reg = plat->hrs_addr + SDHCI_CDNS_HRS04;
	u32 tmp;
	int ret;

	tmp = FIELD_PREP(SDHCI_CDNS_HRS04_WDATA, data) |
	      FIELD_PREP(SDHCI_CDNS_HRS04_ADDR, addr);
	writel(tmp, reg);

	tmp |= SDHCI_CDNS_HRS04_WR;
	writel(tmp, reg);

	ret = readl_poll_timeout(reg, tmp, tmp & SDHCI_CDNS_HRS04_ACK, 10);
	if (ret)
		return ret;

	tmp &= ~SDHCI_CDNS_HRS04_WR;
	writel(tmp, reg);

	return 0;
}
int sun8i_tcon_top_de_config(struct device *dev, int mixer, int tcon)
{
	struct sun8i_tcon_top *tcon_top = dev_get_drvdata(dev);
	unsigned long flags;
	u32 reg;

	if (!sun8i_tcon_top_node_is_tcon_top(dev->of_node)) {
		dev_err(dev, "Device is not TCON TOP!\n");
		return -EINVAL;
	}

	if (mixer > 1) {
		dev_err(dev, "Mixer index is too high!\n");
		return -EINVAL;
	}

	if (tcon > 3) {
		dev_err(dev, "TCON index is too high!\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&tcon_top->reg_lock, flags);

	reg = readl(tcon_top->regs + TCON_TOP_PORT_SEL_REG);

	if (mixer == 0) {
		reg &= ~TCON_TOP_PORT_DE0_MSK;
		reg |= FIELD_PREP(TCON_TOP_PORT_DE0_MSK, tcon);
	} else {
		reg &= ~TCON_TOP_PORT_DE1_MSK;
		reg |= FIELD_PREP(TCON_TOP_PORT_DE1_MSK, tcon);
	}

	writel(reg, tcon_top->regs + TCON_TOP_PORT_SEL_REG);

	spin_unlock_irqrestore(&tcon_top->reg_lock, flags);

	return 0;
}