static inline void sst_memcpy32(volatile void __iomem *dest,
				void *src, u32 bytes)
{
	u32 tmp = 0;
	int i, m, n;
	const u8 *src_byte = src;

	m = bytes / 4;
	n = bytes % 4;

	/* __iowrite32_copy() takes the count in 32-bit words, so divide by 4 */
	__iowrite32_copy((void *)dest, src, m);

	/* Pack any 1-3 trailing bytes into one final 32-bit word */
	if (n) {
		for (i = 0; i < n; i++)
			tmp |= (u32)*(src_byte + m * 4 + i) << (i * 8);

		__iowrite32_copy((void *)(dest + m * 4), &tmp, 1);
	}
}
static int alpha_pll_set_rate(struct clk *c, unsigned long rate)
{
	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
	struct alpha_pll_masks *masks = pll->masks;
	unsigned long flags, freq_hz;
	u32 regval, l_val;
	int vco_val;
	u64 a_val;

	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
	if (freq_hz != rate) {
		pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
		return -EINVAL;
	}

	vco_val = find_vco(pll, freq_hz);
	if (IS_ERR_VALUE(vco_val)) {
		pr_err("alpha pll: not in a valid vco range\n");
		return -EINVAL;
	}

	/*
	 * Ensure PLL is off before changing rate. For optimization reasons,
	 * assume no downstream clock is actively using it. No support
	 * for dynamic update at the moment.
	 */
	spin_lock_irqsave(&c->lock, flags);
	if (c->count)
		alpha_pll_disable(c);

	a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);

	writel_relaxed(l_val, L_REG(pll));
	__iowrite32_copy(A_REG(pll), &a_val, 2);

	if (masks->vco_mask) {
		regval = readl_relaxed(VCO_REG(pll));
		regval &= ~(masks->vco_mask << masks->vco_shift);
		regval |= vco_val << masks->vco_shift;
		writel_relaxed(regval, VCO_REG(pll));
	}

	regval = readl_relaxed(ALPHA_EN_REG(pll));
	regval |= masks->alpha_en_mask;
	writel_relaxed(regval, ALPHA_EN_REG(pll));

	if (c->count)
		alpha_pll_enable(c);

	spin_unlock_irqrestore(&c->lock, flags);
	return 0;
}
/**
 * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units
 * @to: destination, in MMIO space (must be 64-bit aligned)
 * @from: source (must be 64-bit aligned)
 * @count: number of 64-bit quantities to copy
 *
 * Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a
 * time.  Order of access is not guaranteed, nor is a memory barrier
 * performed afterwards.
 */
void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
					    const void *from,
					    size_t count)
{
#ifdef CONFIG_64BIT
	u64 __iomem *dst = to;
	const u64 *src = from;
	const u64 *end = src + count;

	while (src < end)
		__raw_writeq(*src++, dst++);
#else
	__iowrite32_copy(to, from, count * 2);
#endif
}
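/*
 * Hypothetical usage sketch for __iowrite64_copy() above (not taken from any
 * driver in this file): the device, the 16-byte descriptor layout, and the
 * 0x100/0x1f0 register offsets are invented for illustration only. The call
 * takes the count in 64-bit quantities, so the byte size is divided by 8,
 * and an explicit wmb() follows because __iowrite64_copy() guarantees
 * neither ordering nor a trailing barrier.
 */
struct example_cmd_desc {
	u64 dma_addr;	/* bus address of the payload */
	u64 len_flags;	/* length in bytes plus command flags */
};

static void example_post_cmd(void __iomem *mmio_base,
			     const struct example_cmd_desc *desc)
{
	/* Descriptor window at +0x100 must be 64-bit aligned, as must *desc */
	__iowrite64_copy(mmio_base + 0x100, desc, sizeof(*desc) / 8);
	wmb();	/* publish the descriptor before ringing the doorbell */
	writel(1, mmio_base + 0x1f0);	/* hypothetical doorbell register */
}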
static int dyna_alpha_pll_set_rate(struct clk *c, unsigned long rate)
{
	struct alpha_pll_clk *pll = to_alpha_pll_clk(c);
	unsigned long freq_hz, flags;
	u32 l_val, vco_val;
	u64 a_val;
	int ret;

	freq_hz = round_rate_up(pll, rate, &l_val, &a_val);
	if (freq_hz != rate) {
		pr_err("alpha_pll: Call clk_set_rate with rounded rates!\n");
		return -EINVAL;
	}

	vco_val = find_vco(pll, freq_hz);

	/*
	 * Dynamic PLL update does not support switching frequencies across
	 * VCO ranges. In that case fall back to the normal alpha set rate.
	 */
	if (pll->current_vco_val != vco_val) {
		ret = alpha_pll_set_rate(c, rate);
		if (ret)
			return ret;
		pll->current_vco_val = vco_val;
		return 0;
	}

	spin_lock_irqsave(&c->lock, flags);

	a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);

	writel_relaxed(l_val, L_REG(pll));
	__iowrite32_copy(A_REG(pll), &a_val, 2);

	/* Ensure that the writes above go through before proceeding. */
	mb();

	if (c->count)
		dyna_alpha_pll_dynamic_update(pll);

	spin_unlock_irqrestore(&c->lock, flags);
	return 0;
}
static void setup_alpha_pll_values(u64 a_val, u32 l_val, u32 vco_val,
				   struct alpha_pll_clk *pll)
{
	struct alpha_pll_masks *masks = pll->masks;
	u32 regval;

	a_val = a_val << (ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH);

	writel_relaxed(l_val, L_REG(pll));
	__iowrite32_copy(A_REG(pll), &a_val, 2);

	if (vco_val != UINT_MAX) {
		regval = readl_relaxed(VCO_REG(pll));
		regval &= ~(masks->vco_mask << masks->vco_shift);
		regval |= vco_val << masks->vco_shift;
		writel_relaxed(regval, VCO_REG(pll));
	}

	regval = readl_relaxed(ALPHA_EN_REG(pll));
	regval |= masks->alpha_en_mask;
	writel_relaxed(regval, ALPHA_EN_REG(pll));
}
/**
 * ipath_diagpkt_write - write an IB packet
 * @fp: the diag data device file pointer
 * @data: ipath_diag_pkt structure saying where to get the packet
 * @count: size of data to write
 * @off: unused by this code
 */
static ssize_t ipath_diagpkt_write(struct file *fp,
				   const char __user *data,
				   size_t count, loff_t *off)
{
	u32 __iomem *piobuf;
	u32 plen, clen, pbufn;
	struct ipath_diag_pkt dp;
	u32 *tmpbuf = NULL;
	struct ipath_devdata *dd;
	ssize_t ret = 0;
	u64 val;

	if (count < sizeof(dp)) {
		ret = -EINVAL;
		goto bail;
	}

	if (copy_from_user(&dp, data, sizeof(dp))) {
		ret = -EFAULT;
		goto bail;
	}

	/* send count must be an exact number of dwords */
	if (dp.len & 3) {
		ret = -EINVAL;
		goto bail;
	}

	clen = dp.len >> 2;

	dd = ipath_lookup(dp.unit);
	if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
	    !dd->ipath_kregbase) {
		ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
			   dp.unit);
		ret = -ENODEV;
		goto bail;
	}

	if (ipath_diag_inuse && !diag_set_link &&
	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
		diag_set_link = 1;
		ipath_cdbg(VERBOSE, "Trying to set link active for "
			   "diag pkt\n");
		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
	}

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
		ret = -ENODEV;
		goto bail;
	}
	val = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
	if (val != IPATH_IBSTATE_INIT && val != IPATH_IBSTATE_ARM &&
	    val != IPATH_IBSTATE_ACTIVE) {
		ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
			   dd->ipath_unit, (unsigned long long) val);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * need total length before first word written; the extra word
	 * is for the qword padding
	 */
	plen = sizeof(u32) + dp.len;

	if ((plen + 4) > dd->ipath_ibmaxlen) {
		ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
			  plen - 4, dd->ipath_ibmaxlen);
		ret = -EINVAL;
		goto bail;	/* before writing pbc */
	}
	tmpbuf = vmalloc(plen);
	if (!tmpbuf) {
		dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
			 "failing\n");
		ret = -ENOMEM;
		goto bail;
	}

	if (copy_from_user(tmpbuf,
			   (const void __user *) (unsigned long) dp.data,
			   dp.len)) {
		ret = -EFAULT;
		goto bail;
	}

	piobuf = ipath_getpiobuf(dd, &pbufn);
	if (!piobuf) {
		ipath_cdbg(VERBOSE, "No PIO buffers avail for unit %u\n",
			   dd->ipath_unit);
		ret = -EBUSY;
		goto bail;
	}

	plen >>= 2;		/* in dwords */

	if (ipath_debug & __IPATH_PKTDBG)
		ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
			   dd->ipath_unit, plen - 1, pbufn);

	/*
	 * We have to flush after the PBC for correctness on some cpus,
	 * or the WC buffer can be written out of order.
	 */
	writeq(plen, piobuf);
	ipath_flush_wc();
	/*
	 * Copy all but the trigger word, then flush, so it's written
	 * to chip before the trigger word, then write the trigger word,
	 * then flush again, so the packet is sent.
	 */
	__iowrite32_copy(piobuf + 2, tmpbuf, clen - 1);
	ipath_flush_wc();
	__raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
	ipath_flush_wc();

	ret = sizeof(dp);

bail:
	vfree(tmpbuf);
	return ret;
}
/**
 * ipath_diagpkt_write - write an IB packet
 * @fp: the diag data device file pointer
 * @data: ipath_diag_pkt structure saying where to get the packet
 * @count: size of data to write
 * @off: unused by this code
 */
static ssize_t ipath_diagpkt_write(struct file *fp,
				   const char __user *data,
				   size_t count, loff_t *off)
{
	u32 __iomem *piobuf;
	u32 plen, clen, pbufn;
	struct ipath_diag_pkt odp;
	struct ipath_diag_xpkt dp;
	u32 *tmpbuf = NULL;
	struct ipath_devdata *dd;
	ssize_t ret = 0;
	u64 val;
	u32 l_state, lt_state;	/* LinkState, LinkTrainingState */

	if (count < sizeof(odp)) {
		ret = -EINVAL;
		goto bail;
	}

	if (count == sizeof(dp)) {
		if (copy_from_user(&dp, data, sizeof(dp))) {
			ret = -EFAULT;
			goto bail;
		}
	} else if (copy_from_user(&odp, data, sizeof(odp))) {
		ret = -EFAULT;
		goto bail;
	}

	/*
	 * Due to padding/alignment issues (lessened with the new struct)
	 * the old and new structs are the same length. We need to
	 * disambiguate them, which we can do because odp.len has never
	 * been less than the total of LRH+BTH+DETH so far, while
	 * dp.unit (same offset) is unlikely to get that high.
	 * Similarly, dp.data, the pointer to user data at the same offset
	 * as odp.unit, is almost certainly at least one (512 byte) page
	 * "above" NULL. The if-block below can be omitted if compatibility
	 * between a new driver and older diagnostic code is unimportant.
	 * Compatibility the other direction (new diags, old driver) is
	 * handled in the diagnostic code, with a warning.
	 */
	if (dp.unit >= 20 && dp.data < 512) {
		/* very probable version mismatch. Fix it up */
		memcpy(&odp, &dp, sizeof(odp));
		/* We got a legacy dp, copy elements to dp */
		dp.unit = odp.unit;
		dp.data = odp.data;
		dp.len = odp.len;
		dp.pbc_wd = 0;	/* Indicate we need to compute PBC wd */
	}

	/* send count must be an exact number of dwords */
	if (dp.len & 3) {
		ret = -EINVAL;
		goto bail;
	}

	clen = dp.len >> 2;

	dd = ipath_lookup(dp.unit);
	if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
	    !dd->ipath_kregbase) {
		ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
			   dp.unit);
		ret = -ENODEV;
		goto bail;
	}

	if (ipath_diag_inuse && !diag_set_link &&
	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
		diag_set_link = 1;
		ipath_cdbg(VERBOSE, "Trying to set link active for "
			   "diag pkt\n");
		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
	}

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
		ret = -ENODEV;
		goto bail;
	}

	/*
	 * Want to skip the l_state check if using a custom PBC,
	 * because we might be trying to force an SM packet out.
	 * As a first cut, skip _all_ state checking in that case.
	 */
	val = ipath_ib_state(dd, dd->ipath_lastibcstat);
	lt_state = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
	l_state = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
	if (!dp.pbc_wd && (lt_state != INFINIPATH_IBCS_LT_STATE_LINKUP ||
			   (val != dd->ib_init && val != dd->ib_arm &&
			    val != dd->ib_active))) {
		ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
			   dd->ipath_unit, (unsigned long long) val);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * need total length before first word written; the extra word
	 * is for the qword padding
	 */
	plen = sizeof(u32) + dp.len;

	if ((plen + 4) > dd->ipath_ibmaxlen) {
		ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
			  plen - 4, dd->ipath_ibmaxlen);
		ret = -EINVAL;
		goto bail;	/* before writing pbc */
	}
	tmpbuf = vmalloc(plen);
	if (!tmpbuf) {
		dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
			 "failing\n");
		ret = -ENOMEM;
		goto bail;
	}

	if (copy_from_user(tmpbuf,
			   (const void __user *) (unsigned long) dp.data,
			   dp.len)) {
		ret = -EFAULT;
		goto bail;
	}

	plen >>= 2;		/* in dwords */

	piobuf = ipath_getpiobuf(dd, plen, &pbufn);
	if (!piobuf) {
		ipath_cdbg(VERBOSE, "No PIO buffers avail for unit %u\n",
			   dd->ipath_unit);
		ret = -EBUSY;
		goto bail;
	}

	/* disarm it just to be extra sure */
	ipath_disarm_piobufs(dd, pbufn, 1);

	if (ipath_debug & __IPATH_PKTDBG)
		ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
			   dd->ipath_unit, plen - 1, pbufn);

	if (dp.pbc_wd == 0)
		dp.pbc_wd = plen;
	writeq(dp.pbc_wd, piobuf);
	/*
	 * Copy all but the trigger word, then flush, so it's written
	 * to chip before the trigger word, then write the trigger word,
	 * then flush again, so the packet is sent.
	 */
	if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
		ipath_flush_wc();
		__iowrite32_copy(piobuf + 2, tmpbuf, clen - 1);
		ipath_flush_wc();
		__raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
	} else
		__iowrite32_copy(piobuf + 2, tmpbuf, clen);
	ipath_flush_wc();

	ret = sizeof(dp);

bail:
	vfree(tmpbuf);
	return ret;
}
static inline void sst_memcpy32(volatile void __iomem *dest, void *src,
				u32 bytes)
{
	/*
	 * __iowrite32_copy() takes the count in 32-bit words, so divide the
	 * byte count by 4. Any trailing 1-3 bytes are silently dropped, so
	 * callers are expected to pass a multiple of 4.
	 */
	__iowrite32_copy((void *)dest, src, bytes / 4);
}
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
				   struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off, int virt_fn,
				   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
						      struct creq_func_event *))
{
	resource_size_t res_base;
	struct cmdq_init init;
	u16 bmap_size;
	int rc;

	/* General */
	rcfw->seq_num = 0;
	rcfw->flags = FIRMWARE_FIRST_FLAG;
	bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
				  sizeof(unsigned long));
	rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
	if (!rcfw->cmdq_bitmap)
		return -ENOMEM;
	rcfw->bmap_size = bmap_size;

	/* CMDQ */
	rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
	if (!res_base)
		return -ENOMEM;

	rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
						   RCFW_COMM_BASE_OFFSET,
						   RCFW_COMM_SIZE);
	if (!rcfw->cmdq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CMDQ BAR region %d mapping failed",
			rcfw->cmdq_bar_reg);
		return -ENOMEM;
	}

	rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
						RCFW_PF_COMM_PROD_OFFSET;
	rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;

	/* CREQ */
	rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
	res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
	if (!res_base)
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CREQ BAR region %d resc start is 0!",
			rcfw->creq_bar_reg);
	rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
						   4);
	if (!rcfw->creq_bar_reg_iomem) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: CREQ BAR region %d mapping failed",
			rcfw->creq_bar_reg);
		return -ENOMEM;
	}
	rcfw->creq_qp_event_processed = 0;
	rcfw->creq_func_event_processed = 0;

	rcfw->vector = msix_vector;
	if (aeq_handler)
		rcfw->aeq_handler = aeq_handler;

	tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
		     (unsigned long)rcfw);

	rcfw->requested = false;
	rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
			 "bnxt_qplib_creq", rcfw);
	if (rc) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
		bnxt_qplib_disable_rcfw_channel(rcfw);
		return rc;
	}
	rcfw->requested = true;

	init_waitqueue_head(&rcfw->waitq);

	CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);

	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
	init.cmdq_size_cmdq_lvl = cpu_to_le16(
		((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
		 CMDQ_INIT_CMDQ_SIZE_MASK) |
		((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
		 CMDQ_INIT_CMDQ_LVL_MASK));
	init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);

	/* Write to the Bono mailbox register */
	__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
	return 0;
}
/**
 * ipath_diagpkt_write - write an IB packet
 * @fp: the diag data device file pointer
 * @data: ipath_diag_pkt structure saying where to get the packet
 * @count: size of data to write
 * @off: unused by this code
 */
static ssize_t ipath_diagpkt_write(struct file *fp,
				   const char __user *data,
				   size_t count, loff_t *off)
{
	u32 __iomem *piobuf;
	u32 plen, pbufn, maxlen_reserve;
	struct ipath_diag_pkt odp;
	struct ipath_diag_xpkt dp;
	u32 *tmpbuf = NULL;
	struct ipath_devdata *dd;
	ssize_t ret = 0;
	u64 val;
	u32 l_state, lt_state;	/* LinkState, LinkTrainingState */

	if (count == sizeof(dp)) {
		if (copy_from_user(&dp, data, sizeof(dp))) {
			ret = -EFAULT;
			goto bail;
		}
	} else if (count == sizeof(odp)) {
		if (copy_from_user(&odp, data, sizeof(odp))) {
			ret = -EFAULT;
			goto bail;
		}
	} else {
		ret = -EINVAL;
		goto bail;
	}

	/* send count must be an exact number of dwords */
	if (dp.len & 3) {
		ret = -EINVAL;
		goto bail;
	}

	plen = dp.len >> 2;

	dd = ipath_lookup(dp.unit);
	if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
	    !dd->ipath_kregbase) {
		ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
			   dp.unit);
		ret = -ENODEV;
		goto bail;
	}

	if (ipath_diag_inuse && !diag_set_link &&
	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
		diag_set_link = 1;
		ipath_cdbg(VERBOSE, "Trying to set link active for "
			   "diag pkt\n");
		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
	}

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
		ret = -ENODEV;
		goto bail;
	}

	/*
	 * Want to skip the l_state check if using a custom PBC,
	 * because we might be trying to force an SM packet out.
	 * As a first cut, skip _all_ state checking in that case.
	 */
	val = ipath_ib_state(dd, dd->ipath_lastibcstat);
	lt_state = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
	l_state = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
	if (!dp.pbc_wd && (lt_state != INFINIPATH_IBCS_LT_STATE_LINKUP ||
			   (val != dd->ib_init && val != dd->ib_arm &&
			    val != dd->ib_active))) {
		ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
			   dd->ipath_unit, (unsigned long long) val);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * need total length before first word written, plus 2 Dwords. One
	 * Dword is for padding so we get the full user data when not aligned
	 * on a word boundary. The other Dword is to make sure we have room
	 * for the ICRC which gets tacked on later.
	 */
	maxlen_reserve = 2 * sizeof(u32);
	if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
		ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
			  dp.len, dd->ipath_ibmaxlen);
		ret = -EINVAL;
		goto bail;
	}

	plen = sizeof(u32) + dp.len;

	tmpbuf = vmalloc(plen);
	if (!tmpbuf) {
		dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
			 "failing\n");
		ret = -ENOMEM;
		goto bail;
	}

	if (copy_from_user(tmpbuf,
			   (const void __user *) (unsigned long) dp.data,
			   dp.len)) {
		ret = -EFAULT;
		goto bail;
	}

	plen >>= 2;		/* in dwords */

	piobuf = ipath_getpiobuf(dd, plen, &pbufn);
	if (!piobuf) {
		ipath_cdbg(VERBOSE, "No PIO buffers avail for unit %u\n",
			   dd->ipath_unit);
		ret = -EBUSY;
		goto bail;
	}
	/* disarm it just to be extra sure */
	ipath_disarm_piobufs(dd, pbufn, 1);

	if (ipath_debug & __IPATH_PKTDBG)
		ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
			   dd->ipath_unit, plen - 1, pbufn);

	if (dp.pbc_wd == 0)
		dp.pbc_wd = plen;
	writeq(dp.pbc_wd, piobuf);
	/*
	 * Copy all but the trigger word, then flush, so it's written
	 * to chip before the trigger word, then write the trigger word,
	 * then flush again, so the packet is sent.
	 */
	if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
		ipath_flush_wc();
		__iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
		ipath_flush_wc();
		__raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
	} else
		__iowrite32_copy(piobuf + 2, tmpbuf, plen);
	ipath_flush_wc();

	ret = sizeof(dp);

bail:
	vfree(tmpbuf);
	return ret;
}
static void write_data(struct mt76_dev *dev, u32 offset,
		       __le32 *data, int len)
{
	/* len is in bytes; __iowrite32_copy() takes a count of 32-bit words */
	__iowrite32_copy(dev->regs + offset, data, len / 4);
}