static int __init i7300_idle_ioat_selftest(u8 *ctl, struct ioat_dma_descriptor *desc, unsigned long desc_phys) { u64 chan_sts; memset(desc, 0, 2048); memset((u8 *) desc + 2048, 0xab, 1024); desc[0].size = 1024; desc[0].ctl = 0; desc[0].src_addr = desc_phys + 2048; desc[0].dst_addr = desc_phys + 1024; desc[0].next = 0; writeb(IOAT_CHANCMD_RESET, ioat_chanbase + IOAT1_CHANCMD_OFFSET); writeb(IOAT_CHANCMD_START, ioat_chanbase + IOAT1_CHANCMD_OFFSET); udelay(1000); chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & IOAT_CHANSTS_STATUS; if (chan_sts != IOAT_CHANSTS_DONE) { writeb(IOAT_CHANCMD_RESET, ioat_chanbase + IOAT1_CHANCMD_OFFSET); return -1; } if (*(u32 *) ((u8 *) desc + 3068) != 0xabababab || *(u32 *) ((u8 *) desc + 2044) != 0xabababab) { dprintk("Data values src 0x%x, dest 0x%x, memset 0x%x\n", *(u32 *) ((u8 *) desc + 2048), *(u32 *) ((u8 *) desc + 1024), *(u32 *) ((u8 *) desc + 3072)); return -1; } return 0; }
static int regmap_mmio_read(void *context, const void *reg, size_t reg_size, void *val, size_t val_size) { struct regmap_mmio_context *ctx = context; u32 offset; BUG_ON(reg_size != 4); offset = be32_to_cpup(reg); while (val_size) { switch (ctx->val_bytes) { case 1: *(u8 *)val = readb(ctx->regs + offset); break; case 2: *(u16 *)val = cpu_to_be16(readw(ctx->regs + offset)); break; case 4: *(u32 *)val = cpu_to_be32(readl(ctx->regs + offset)); break; #ifdef CONFIG_64BIT case 8: *(u64 *)val = cpu_to_be64(readq(ctx->regs + offset)); break; #endif default: /* Should be caught by regmap_mmio_check_config */ BUG(); } val_size -= ctx->val_bytes; val += ctx->val_bytes; offset += ctx->val_bytes; } return 0; }
/* Stop I/O AT memory copy */ static void i7300_idle_ioat_stop(void) { int i; u64 sts; for (i = 0; i < MAX_STOP_RETRIES; i++) { writeb(IOAT_CHANCMD_RESET, ioat_chanbase + IOAT1_CHANCMD_OFFSET); udelay(10); sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & IOAT_CHANSTS_DMA_TRANSFER_STATUS; if (sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) break; } if (i == MAX_STOP_RETRIES) { dprintk("failed to stop I/O AT after %d retries\n", MAX_STOP_RETRIES); } }
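/*
 * Editorial sketch (not part of the original driver): the retry loop above
 * could also be written with the generic MMIO polling helper from
 * <linux/iopoll.h>. Note the behavioural difference: the original re-issues
 * IOAT_CHANCMD_RESET on every retry, while this sketch issues it once and
 * only polls the status register afterwards.
 */
static void i7300_idle_ioat_stop_sketch(void)
{
	u64 sts;
	int err;

	writeb(IOAT_CHANCMD_RESET, ioat_chanbase + IOAT1_CHANCMD_OFFSET);

	/* poll every 10 us, give up after MAX_STOP_RETRIES * 10 us */
	err = readq_poll_timeout_atomic(ioat_chanbase + IOAT1_CHANSTS_OFFSET,
					sts,
					(sts & IOAT_CHANSTS_DMA_TRANSFER_STATUS) !=
					IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE,
					10, 10 * MAX_STOP_RETRIES);
	if (err)
		dprintk("failed to stop I/O AT\n");
}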
/** * ismt_gen_reg_dump() - dump the iSMT General Registers * @priv: iSMT private data */ static void ismt_gen_reg_dump(struct ismt_priv *priv) { struct device *dev = &priv->pci_dev->dev; dev_dbg(dev, "Dump of the iSMT General Registers\n"); dev_dbg(dev, " GCTRL.... : (0x%p)=0x%X\n", priv->smba + ISMT_GR_GCTRL, readl(priv->smba + ISMT_GR_GCTRL)); dev_dbg(dev, " SMTICL... : (0x%p)=0x%016llX\n", priv->smba + ISMT_GR_SMTICL, (long long unsigned int)readq(priv->smba + ISMT_GR_SMTICL)); dev_dbg(dev, " ERRINTMSK : (0x%p)=0x%X\n", priv->smba + ISMT_GR_ERRINTMSK, readl(priv->smba + ISMT_GR_ERRINTMSK)); dev_dbg(dev, " ERRAERMSK : (0x%p)=0x%X\n", priv->smba + ISMT_GR_ERRAERMSK, readl(priv->smba + ISMT_GR_ERRAERMSK)); dev_dbg(dev, " ERRSTS... : (0x%p)=0x%X\n", priv->smba + ISMT_GR_ERRSTS, readl(priv->smba + ISMT_GR_ERRSTS)); dev_dbg(dev, " ERRINFO.. : (0x%p)=0x%X\n", priv->smba + ISMT_GR_ERRINFO, readl(priv->smba + ISMT_GR_ERRINFO)); }
/** * ismt_mstr_reg_dump() - dump the iSMT Master Registers * @priv: iSMT private data */ static void ismt_mstr_reg_dump(struct ismt_priv *priv) { struct device *dev = &priv->pci_dev->dev; dev_dbg(dev, "Dump of the iSMT Master Registers\n"); dev_dbg(dev, " MDBA..... : (0x%p)=0x%016llX\n", priv->smba + ISMT_MSTR_MDBA, (long long unsigned int)readq(priv->smba + ISMT_MSTR_MDBA)); dev_dbg(dev, " MCTRL.... : (0x%p)=0x%X\n", priv->smba + ISMT_MSTR_MCTRL, readl(priv->smba + ISMT_MSTR_MCTRL)); dev_dbg(dev, " MSTS..... : (0x%p)=0x%X\n", priv->smba + ISMT_MSTR_MSTS, readl(priv->smba + ISMT_MSTR_MSTS)); dev_dbg(dev, " MDS...... : (0x%p)=0x%X\n", priv->smba + ISMT_MSTR_MDS, readl(priv->smba + ISMT_MSTR_MDS)); dev_dbg(dev, " RPOLICY.. : (0x%p)=0x%X\n", priv->smba + ISMT_MSTR_RPOLICY, readl(priv->smba + ISMT_MSTR_RPOLICY)); dev_dbg(dev, " SPGT..... : (0x%p)=0x%X\n", priv->smba + ISMT_SPGT, readl(priv->smba + ISMT_SPGT)); }
static u64 cn23xx_vf_msix_interrupt_handler(void *dev) { struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; struct octeon_device *oct = ioq_vector->oct_dev; struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; u64 pkts_sent; u64 ret = 0; dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct); pkts_sent = readq(droq->pkts_sent_reg); /* If our device has interrupted, then proceed. Also check * for all f's if interrupt was triggered on an error * and the PCI read fails. */ if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL)) return ret; /* Write count reg in sli_pkt_cnts to clear these int. */ if ((pkts_sent & CN23XX_INTR_PO_INT) || (pkts_sent & CN23XX_INTR_PI_INT)) { if (pkts_sent & CN23XX_INTR_PO_INT) ret |= MSIX_PO_INT; } if (pkts_sent & CN23XX_INTR_PI_INT) /* We will clear the count when we update the read_index. */ ret |= MSIX_PI_INT; if (pkts_sent & CN23XX_INTR_MBOX_INT) { cn23xx_handle_vf_mbox_intr(ioq_vector); ret |= MSIX_MBOX_INT; } return ret; }
/** * octeon_mbox_write: * @oct: Pointer Octeon Device * @mbox_cmd: Cmd to send to mailbox. * * Populates the queue specific mbox structure * with cmd information. * Write the cmd to mbox register */ int octeon_mbox_write(struct octeon_device *oct, struct octeon_mbox_cmd *mbox_cmd) { struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no]; u32 count, i, ret = OCTEON_MBOX_STATUS_SUCCESS; long timeout = LIO_MBOX_WRITE_WAIT_TIME; unsigned long flags; spin_lock_irqsave(&mbox->lock, flags); if ((mbox_cmd->msg.s.type == OCTEON_MBOX_RESPONSE) && !(mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVED)) { spin_unlock_irqrestore(&mbox->lock, flags); return OCTEON_MBOX_STATUS_FAILED; } if ((mbox_cmd->msg.s.type == OCTEON_MBOX_REQUEST) && !(mbox->state & OCTEON_MBOX_STATE_IDLE)) { spin_unlock_irqrestore(&mbox->lock, flags); return OCTEON_MBOX_STATUS_BUSY; } if (mbox_cmd->msg.s.type == OCTEON_MBOX_REQUEST) { memcpy(&mbox->mbox_resp, mbox_cmd, sizeof(struct octeon_mbox_cmd)); mbox->state = OCTEON_MBOX_STATE_RESPONSE_PENDING; } spin_unlock_irqrestore(&mbox->lock, flags); count = 0; while (readq(mbox->mbox_write_reg) != OCTEON_PFVFSIG) { schedule_timeout_uninterruptible(timeout); if (count++ == LIO_MBOX_WRITE_WAIT_CNT) { ret = OCTEON_MBOX_STATUS_FAILED; break; } } if (ret == OCTEON_MBOX_STATUS_SUCCESS) { writeq(mbox_cmd->msg.u64, mbox->mbox_write_reg); for (i = 0; i < (u32)(mbox_cmd->msg.s.len - 1); i++) { count = 0; while (readq(mbox->mbox_write_reg) != OCTEON_PFVFACK) { schedule_timeout_uninterruptible(timeout); if (count++ == LIO_MBOX_WRITE_WAIT_CNT) { ret = OCTEON_MBOX_STATUS_FAILED; break; } } if (ret == OCTEON_MBOX_STATUS_SUCCESS) writeq(mbox_cmd->data[i], mbox->mbox_write_reg); else break; } } spin_lock_irqsave(&mbox->lock, flags); if (mbox_cmd->msg.s.type == OCTEON_MBOX_RESPONSE) { mbox->state = OCTEON_MBOX_STATE_IDLE; writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); } else { if ((!mbox_cmd->msg.s.resp_needed) || (ret == OCTEON_MBOX_STATUS_FAILED)) { mbox->state &= ~OCTEON_MBOX_STATE_RESPONSE_PENDING; if (!(mbox->state & (OCTEON_MBOX_STATE_REQUEST_RECEIVING | OCTEON_MBOX_STATE_REQUEST_RECEIVED))) mbox->state = OCTEON_MBOX_STATE_IDLE; } } spin_unlock_irqrestore(&mbox->lock, flags); return ret; }
static inline u64 read64(const volatile void __iomem *addr) { return readq(addr); }
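/*
 * For reference (not from the file above): a native readq() only exists on
 * 64-bit architectures. On 32-bit builds, drivers typically include
 * <linux/io-64-nonatomic-lo-hi.h>, which synthesizes a non-atomic readq()
 * from two readl() accesses, roughly like this sketch.
 */
static inline u64 lo_hi_readq_sketch(const volatile void __iomem *addr)
{
	const volatile u32 __iomem *p = addr;
	u32 low, high;

	low = readl(p);		/* low 32 bits first */
	high = readl(p + 1);	/* then the high 32 bits */

	return low + ((u64)high << 32);
}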
static cycle_t read_sn2(struct clocksource *cs) { return (cycle_t)readq(RTC_COUNTER_ADDR); }
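/*
 * Illustrative sketch only: a readq()-based read callback such as read_sn2()
 * is normally hooked into a struct clocksource and registered with the
 * timekeeping core. The field values below (rating, counter width) are
 * assumptions, not taken from the original sn2 code.
 */
static struct clocksource clocksource_sn2_sketch = {
	.name	= "sn2_rtc",
	.rating	= 300,				/* assumed */
	.read	= read_sn2,
	.mask	= CLOCKSOURCE_MASK(55),		/* assumed counter width */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};
/* ...then registered with something like clocksource_register_hz(&clocksource_sn2_sketch, rtc_freq_hz),
 * where rtc_freq_hz stands in for the counter frequency. */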
static uint64_t qvirtio_mmio_config_readq(QVirtioDevice *d, uint64_t addr) { QVirtioMMIODevice *dev = (QVirtioMMIODevice *)d; return readq(dev->addr + addr); }
static u64 tmio_sd_readq(struct tmio_sd_priv *priv, unsigned int reg) { return readq(priv->regbase + (reg << 1)); }
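/*
 * Assumed counterpart, not shown in the excerpt above: the write accessor
 * would mirror the same "reg << 1" address scaling.
 */
static void tmio_sd_writeq_sketch(struct tmio_sd_priv *priv, u64 val, unsigned int reg)
{
	writeq(val, priv->regbase + (reg << 1));
}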
/** * genwqe_health_thread() - Health checking thread * * This thread is only started for the PF of the card. * * This thread monitors the health of the card. A critical situation * is when we read registers which contain -1 (IO_ILLEGAL_VALUE). In * this case we need to be recovered from outside. Writing to * registers will very likely not work either. * * This thread must only exit if kthread_should_stop() becomes true. * * Condition for the health-thread to trigger: * a) when a kthread_stop() request comes in or * b) a critical GFIR occured * * Informational GFIRs are checked and potentially printed in * health_check_interval seconds. */ static int genwqe_health_thread(void *data) { int rc, should_stop = 0; struct genwqe_dev *cd = data; struct pci_dev *pci_dev = cd->pci_dev; u64 gfir, gfir_masked, slu_unitcfg, app_unitcfg; health_thread_begin: while (!kthread_should_stop()) { rc = wait_event_interruptible_timeout(cd->health_waitq, (genwqe_health_check_cond(cd, &gfir) || (should_stop = kthread_should_stop())), genwqe_health_check_interval * HZ); if (should_stop) break; if (gfir == IO_ILLEGAL_VALUE) { dev_err(&pci_dev->dev, "[%s] GFIR=%016llx\n", __func__, gfir); goto fatal_error; } slu_unitcfg = __genwqe_readq(cd, IO_SLU_UNITCFG); if (slu_unitcfg == IO_ILLEGAL_VALUE) { dev_err(&pci_dev->dev, "[%s] SLU_UNITCFG=%016llx\n", __func__, slu_unitcfg); goto fatal_error; } app_unitcfg = __genwqe_readq(cd, IO_APP_UNITCFG); if (app_unitcfg == IO_ILLEGAL_VALUE) { dev_err(&pci_dev->dev, "[%s] APP_UNITCFG=%016llx\n", __func__, app_unitcfg); goto fatal_error; } gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR); if (gfir == IO_ILLEGAL_VALUE) { dev_err(&pci_dev->dev, "[%s] %s: GFIR=%016llx\n", __func__, (gfir & GFIR_ERR_TRIGGER) ? "err" : "info", gfir); goto fatal_error; } gfir_masked = genwqe_fir_checking(cd); if (gfir_masked == IO_ILLEGAL_VALUE) goto fatal_error; /* * GFIR ErrorTrigger bits set => reset the card! * Never do this for old/manufacturing images! */ if ((gfir_masked) && !cd->skip_recovery && genwqe_recovery_on_fatal_gfir_required(cd)) { cd->card_state = GENWQE_CARD_FATAL_ERROR; rc = genwqe_recover_card(cd, 0); if (rc < 0) { /* FIXME Card is unusable and needs unbind! */ goto fatal_error; } } if (cd->card_state == GENWQE_CARD_RELOAD_BITSTREAM) { /* Userspace requested card bitstream reload */ rc = genwqe_reload_bistream(cd); if (rc) goto fatal_error; } cd->last_gfir = gfir; cond_resched(); } return 0; fatal_error: if (cd->use_platform_recovery) { /* * Since we use raw accessors, EEH errors won't be detected * by the platform until we do a non-raw MMIO or config space * read */ readq(cd->mmio + IO_SLC_CFGREG_GFIR); /* We do nothing if the card is going over PCI recovery */ if (pci_channel_offline(pci_dev)) return -EIO; /* * If it's supported by the platform, we try a fundamental reset * to recover from a fatal error. Otherwise, we continue to wait * for an external recovery procedure to take care of it. */ rc = genwqe_platform_recovery(cd); if (!rc) goto health_thread_begin; } dev_err(&pci_dev->dev, "[%s] card unusable. Please trigger unbind!\n", __func__); /* Bring down logical devices to inform user space via udev remove. */ cd->card_state = GENWQE_CARD_FATAL_ERROR; genwqe_stop(cd); /* genwqe_bus_reset failed(). Now wait for genwqe_remove(). */ while (!kthread_should_stop()) cond_resched(); return -EIO; }
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) { struct vnic_devcmd __iomem *devcmd = vdev->devcmd; unsigned int i; int delay; u32 status; int err; status = ioread32(&devcmd->status); if (status == 0xFFFFFFFF) { /* PCI-e target device is gone */ return -ENODEV; } if (status & STAT_BUSY) { pr_err("Busy devcmd %d\n", _CMD_N(cmd)); return -EBUSY; } if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { for (i = 0; i < VNIC_DEVCMD_NARGS; i++) writeq(vdev->args[i], &devcmd->args[i]); wmb(); } iowrite32(cmd, &devcmd->cmd); if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) return 0; for (delay = 0; delay < wait; delay++) { udelay(100); status = ioread32(&devcmd->status); if (status == 0xFFFFFFFF) { /* PCI-e target device is gone */ return -ENODEV; } if (!(status & STAT_BUSY)) { if (status & STAT_ERROR) { err = (int)readq(&devcmd->args[0]); if (err == ERR_EINVAL && cmd == CMD_CAPABILITY) return -err; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) pr_err("Error %d devcmd %d\n", err, _CMD_N(cmd)); return -err; } if (_CMD_DIR(cmd) & _CMD_DIR_READ) { rmb(); for (i = 0; i < VNIC_DEVCMD_NARGS; i++) vdev->args[i] = readq(&devcmd->args[i]); } return 0; } } pr_err("Timedout devcmd %d\n", _CMD_N(cmd)); return -ETIMEDOUT; }
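/*
 * Hedged sketch of the usual caller-side pattern (names and locking are
 * assumptions, not copied from the driver): scalar arguments are staged in
 * vdev->args[] before _vnic_dev_cmd() runs and read back afterwards.
 */
static int vnic_dev_cmd_sketch(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
			       u64 *a0, u64 *a1, int wait)
{
	int err;

	vdev->args[0] = *a0;	/* stage the two scalar arguments */
	vdev->args[1] = *a1;

	err = _vnic_dev_cmd(vdev, cmd, wait);

	*a0 = vdev->args[0];	/* hand back whatever the firmware wrote */
	*a1 = vdev->args[1];

	return err;
}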
static cycle_t read_sn2(void) { return (cycle_t)readq(RTC_COUNTER_ADDR); }
/** * octeon_mbox_read: * @mbox: Pointer to the mailbox * * Reads the 8 bytes of data from the mbox register * Writes back the acknowledgement indicating completion of read */ int octeon_mbox_read(struct octeon_mbox *mbox) { union octeon_mbox_message msg; int ret = 0; spin_lock(&mbox->lock); msg.u64 = readq(mbox->mbox_read_reg); if ((msg.u64 == OCTEON_PFVFACK) || (msg.u64 == OCTEON_PFVFSIG)) { spin_unlock(&mbox->lock); return 0; } if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVING) { mbox->mbox_req.data[mbox->mbox_req.recv_len - 1] = msg.u64; mbox->mbox_req.recv_len++; } else { if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVING) { mbox->mbox_resp.data[mbox->mbox_resp.recv_len - 1] = msg.u64; mbox->mbox_resp.recv_len++; } else { if ((mbox->state & OCTEON_MBOX_STATE_IDLE) && (msg.s.type == OCTEON_MBOX_REQUEST)) { mbox->state &= ~OCTEON_MBOX_STATE_IDLE; mbox->state |= OCTEON_MBOX_STATE_REQUEST_RECEIVING; mbox->mbox_req.msg.u64 = msg.u64; mbox->mbox_req.q_no = mbox->q_no; mbox->mbox_req.recv_len = 1; } else { if ((mbox->state & OCTEON_MBOX_STATE_RESPONSE_PENDING) && (msg.s.type == OCTEON_MBOX_RESPONSE)) { mbox->state &= ~OCTEON_MBOX_STATE_RESPONSE_PENDING; mbox->state |= OCTEON_MBOX_STATE_RESPONSE_RECEIVING; mbox->mbox_resp.msg.u64 = msg.u64; mbox->mbox_resp.q_no = mbox->q_no; mbox->mbox_resp.recv_len = 1; } else { writeq(OCTEON_PFVFERR, mbox->mbox_read_reg); mbox->state |= OCTEON_MBOX_STATE_ERROR; spin_unlock(&mbox->lock); return 1; } } } } if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVING) { if (mbox->mbox_req.recv_len < mbox->mbox_req.msg.s.len) { ret = 0; } else { mbox->state &= ~OCTEON_MBOX_STATE_REQUEST_RECEIVING; mbox->state |= OCTEON_MBOX_STATE_REQUEST_RECEIVED; ret = 1; } } else { if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVING) { if (mbox->mbox_resp.recv_len < mbox->mbox_resp.msg.s.len) { ret = 0; } else { mbox->state &= ~OCTEON_MBOX_STATE_RESPONSE_RECEIVING; mbox->state |= OCTEON_MBOX_STATE_RESPONSE_RECEIVED; ret = 1; } } else { WARN_ON(1); } } writeq(OCTEON_PFVFACK, mbox->mbox_read_reg); spin_unlock(&mbox->lock); return ret; }
int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) { struct vnic_devcmd __iomem *devcmd = vdev->devcmd; int delay; u32 status; int dev_cmd_err[] = { /* convert from fw's version of error.h to host's version */ 0, /* ERR_SUCCESS */ EINVAL, /* ERR_EINVAL */ EFAULT, /* ERR_EFAULT */ EPERM, /* ERR_EPERM */ EBUSY, /* ERR_EBUSY */ }; int err; u64 *a0 = &vdev->args[0]; u64 *a1 = &vdev->args[1]; status = ioread32(&devcmd->status); if (status & STAT_BUSY) { printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd)); return -EBUSY; } if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { writeq(*a0, &devcmd->args[0]); writeq(*a1, &devcmd->args[1]); wmb(); } iowrite32(cmd, &devcmd->cmd); if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) return 0; for (delay = 0; delay < wait; delay++) { udelay(100); status = ioread32(&devcmd->status); if (!(status & STAT_BUSY)) { if (status & STAT_ERROR) { err = dev_cmd_err[(int)readq(&devcmd->args[0])]; printk(KERN_ERR "Error %d devcmd %d\n", err, _CMD_N(cmd)); return -err; } if (_CMD_DIR(cmd) & _CMD_DIR_READ) { rmb(); *a0 = readq(&devcmd->args[0]); *a1 = readq(&devcmd->args[1]); } return 0; } } printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd)); return -ETIMEDOUT; }
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) { #if defined(CONFIG_MIPS) || defined(MGMT_VNIC) return 0; #else struct vnic_devcmd __iomem *devcmd = vdev->devcmd; unsigned int i; int delay; u32 status; int err; status = ioread32(&devcmd->status); if (status == 0xFFFFFFFF) { /* PCI-e target device is gone */ return -ENODEV; } if (status & STAT_BUSY) { #ifndef __WINDOWS__ pr_err("%s: Busy devcmd %d\n", pci_name(vdev->pdev), _CMD_N(cmd)); #else pr_err("Busy devcmd %d\n", _CMD_N(cmd)); #endif return -EBUSY; } if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { for (i = 0; i < VNIC_DEVCMD_NARGS; i++) writeq(vdev->args[i], &devcmd->args[i]); wmb(); } iowrite32(cmd, &devcmd->cmd); if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) return 0; for (delay = 0; delay < wait; delay++) { udelay(100); status = ioread32(&devcmd->status); if (status == 0xFFFFFFFF) { /* PCI-e target device is gone */ return -ENODEV; } if (!(status & STAT_BUSY)) { if (status & STAT_ERROR) { err = -(int)readq(&devcmd->args[0]); if (cmd != CMD_CAPABILITY) #ifndef __WINDOWS__ pr_err("%s: Devcmd %d failed " "with error code %d\n", pci_name(vdev->pdev), _CMD_N(cmd), err); #else pr_err("Devcmd %d failed " "with error code %d\n", _CMD_N(cmd), err); #endif return err; } if (_CMD_DIR(cmd) & _CMD_DIR_READ) { rmb(); for (i = 0; i < VNIC_DEVCMD_NARGS; i++) vdev->args[i] = readq(&devcmd->args[i]); } return 0; } } #ifndef __WINDOWS__ pr_err("%s: Timedout devcmd %d\n", pci_name(vdev->pdev), _CMD_N(cmd)); #else pr_err("Timedout devcmd %d\n", _CMD_N(cmd)); #endif return -ETIMEDOUT; #endif }
/* Read or write an arbitrary-length sequence starting at dev_addr, transferring it to/from * user space at user_addr. If 'reading' is set to 1, do the read. If 0, do * the write. */ static ssize_t aclpci_rw_large (void *dev_addr, void __user* user_addr, ssize_t len, char *buffer, int reading) { size_t bytes_left = len; size_t i, num_missed; u64 *ibuffer = (u64*)buffer; char *cbuffer; size_t offset, num_to_read; size_t chunk = BUF_SIZE; u64 startj, ej; u64 sj = 0, acc_readj = 0, acc_transfj = 0; startj = get_jiffies_64(); /* Read up to BUF_SIZE bytes, one u64 at a time, and then transfer * the buffer at once to user space. Repeat as necessary. */ while (bytes_left > 0) { if (bytes_left < BUF_SIZE) { chunk = bytes_left; } else { chunk = BUF_SIZE; } if (!reading) { sj = get_jiffies_64(); if (copy_from_user (ibuffer, user_addr, chunk)) { return -EFAULT; } acc_transfj += get_jiffies_64() - sj; } /* Read one u64 at a time until the buffer is full. Then copy the whole * buffer at once to user space. */ sj = get_jiffies_64(); num_to_read = chunk / sizeof(u64); for (i = 0; i < num_to_read; i++) { if (reading) { ibuffer[i] = readq ( ((u64*)dev_addr) + i); } else { writeq ( ibuffer[i], ((u64*)dev_addr) + i ); } } /* If length is not a multiple of sizeof(u64), will miss last few bytes. * In that case, read it one byte at a time. This can only happen on * last iteration of the while() loop. */ offset = num_to_read * sizeof(u64); num_missed = chunk - offset; cbuffer = (char*)(ibuffer + num_to_read); for (i = 0; i < num_missed; i++) { if (reading) { cbuffer[i] = readb ( (u8*)(dev_addr) + offset + i ); } else { writeb ( cbuffer[i], (u8*)(dev_addr) + offset + i ); } } acc_readj += get_jiffies_64() - sj; if (reading) { sj = get_jiffies_64(); if (copy_to_user (user_addr, ibuffer, chunk)) { return -EFAULT; } acc_transfj += get_jiffies_64() - sj; } dev_addr += chunk; user_addr += chunk; bytes_left -= chunk; } ej = get_jiffies_64(); ACL_VERBOSE_DEBUG (KERN_DEBUG "Spent %u msec %sing %lu bytes", jiffies_to_msecs(ej - startj), reading ? "read" : "writ", len); ACL_VERBOSE_DEBUG (KERN_DEBUG " Dev access %u msec. User space transfer %u msec", jiffies_to_msecs(acc_readj), jiffies_to_msecs(acc_transfj)); return 0; }
static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt) { struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL; struct nfp_net_ring_set rx = { .n_rings = nn->num_rx_rings, .mtu = nn->netdev->mtu, .dcnt = rxd_cnt, }; struct nfp_net_ring_set tx = { .n_rings = nn->num_tx_rings, .dcnt = txd_cnt, }; if (nn->rxd_cnt != rxd_cnt) reconfig_rx = &rx; if (nn->txd_cnt != txd_cnt) reconfig_tx = &tx; return nfp_net_ring_reconfig(nn, &nn->xdp_prog, reconfig_rx, reconfig_tx); } static int nfp_net_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct nfp_net *nn = netdev_priv(netdev); u32 rxd_cnt, txd_cnt; /* We don't have separate queues/rings for small/large frames. */ if (ring->rx_mini_pending || ring->rx_jumbo_pending) return -EINVAL; /* Round up to supported values */ rxd_cnt = roundup_pow_of_two(ring->rx_pending); txd_cnt = roundup_pow_of_two(ring->tx_pending); if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS || txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS) return -EINVAL; if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt) return 0; nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n", nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt); return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); } static void nfp_net_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct nfp_net *nn = netdev_priv(netdev); u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) { memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } for (i = 0; i < nn->num_r_vecs; i++) { sprintf(p, "rvec_%u_rx_pkts", i); p += ETH_GSTRING_LEN; sprintf(p, "rvec_%u_tx_pkts", i); p += ETH_GSTRING_LEN; sprintf(p, "rvec_%u_tx_busy", i); p += ETH_GSTRING_LEN; } strncpy(p, "hw_rx_csum_ok", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; strncpy(p, "hw_rx_csum_inner_ok", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; strncpy(p, "hw_rx_csum_err", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; strncpy(p, "hw_tx_csum", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; strncpy(p, "hw_tx_inner_csum", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; strncpy(p, "tx_gather", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; strncpy(p, "tx_lso", ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; for (i = 0; i < nn->num_tx_rings; i++) { sprintf(p, "txq_%u_pkts", i); p += ETH_GSTRING_LEN; sprintf(p, "txq_%u_bytes", i); p += ETH_GSTRING_LEN; } for (i = 0; i < nn->num_rx_rings; i++) { sprintf(p, "rxq_%u_pkts", i); p += ETH_GSTRING_LEN; sprintf(p, "rxq_%u_bytes", i); p += ETH_GSTRING_LEN; } break; } } static void nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {}; struct nfp_net *nn = netdev_priv(netdev); struct rtnl_link_stats64 *netdev_stats; struct rtnl_link_stats64 temp = {}; u64 tmp[NN_ET_RVEC_GATHER_STATS]; u8 __iomem *io_p; int i, j, k; u8 *p; netdev_stats = dev_get_stats(netdev, &temp); for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) { switch (nfp_net_et_stats[i].type) { case NETDEV_ET_STATS: p = (char *)netdev_stats + nfp_net_et_stats[i].off; data[i] = nfp_net_et_stats[i].sz == sizeof(u64) ?
*(u64 *)p : *(u32 *)p; break; case NFP_NET_DEV_ET_STATS: io_p = nn->ctrl_bar + nfp_net_et_stats[i].off; data[i] = readq(io_p); break; } } for (j = 0; j < nn->num_r_vecs; j++) { unsigned int start; do { start = u64_stats_fetch_begin(&nn->r_vecs[j].rx_sync); data[i++] = nn->r_vecs[j].rx_pkts; tmp[0] = nn->r_vecs[j].hw_csum_rx_ok; tmp[1] = nn->r_vecs[j].hw_csum_rx_inner_ok; tmp[2] = nn->r_vecs[j].hw_csum_rx_error; } while (u64_stats_fetch_retry(&nn->r_vecs[j].rx_sync, start)); do { start = u64_stats_fetch_begin(&nn->r_vecs[j].tx_sync); data[i++] = nn->r_vecs[j].tx_pkts; data[i++] = nn->r_vecs[j].tx_busy; tmp[3] = nn->r_vecs[j].hw_csum_tx; tmp[4] = nn->r_vecs[j].hw_csum_tx_inner; tmp[5] = nn->r_vecs[j].tx_gather; tmp[6] = nn->r_vecs[j].tx_lso; } while (u64_stats_fetch_retry(&nn->r_vecs[j].tx_sync, start)); for (k = 0; k < NN_ET_RVEC_GATHER_STATS; k++) gathered_stats[k] += tmp[k]; } for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) data[i++] = gathered_stats[j]; for (j = 0; j < nn->num_tx_rings; j++) { io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j); data[i++] = readq(io_p); io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8; data[i++] = readq(io_p); } for (j = 0; j < nn->num_rx_rings; j++) { io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j); data[i++] = readq(io_p); io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8; data[i++] = readq(io_p); } } static int nfp_net_get_sset_count(struct net_device *netdev, int sset) { struct nfp_net *nn = netdev_priv(netdev); switch (sset) { case ETH_SS_STATS: return NN_ET_STATS_LEN; default: return -EOPNOTSUPP; } } /* RX network flow classification (RSS, filters, etc) */ static u32 ethtool_flow_to_nfp_flag(u32 flow_type) { static const u32 xlate_ethtool_to_nfp[IPV6_FLOW + 1] = { [TCP_V4_FLOW] = NFP_NET_CFG_RSS_IPV4_TCP, [TCP_V6_FLOW] = NFP_NET_CFG_RSS_IPV6_TCP, [UDP_V4_FLOW] = NFP_NET_CFG_RSS_IPV4_UDP, [UDP_V6_FLOW] = NFP_NET_CFG_RSS_IPV6_UDP, [IPV4_FLOW] = NFP_NET_CFG_RSS_IPV4, [IPV6_FLOW] = NFP_NET_CFG_RSS_IPV6, }; if (flow_type >= ARRAY_SIZE(xlate_ethtool_to_nfp)) return 0; return xlate_ethtool_to_nfp[flow_type]; }
static unsigned int regmap_mmio_read64le(struct regmap_mmio_context *ctx, unsigned int reg) { return readq(ctx->regs + reg); }
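/*
 * Assumed mirror image of the accessor above (sketch, not from the file):
 * the little-endian 64-bit write path would simply use writeq().
 */
static void regmap_mmio_write64le_sketch(struct regmap_mmio_context *ctx,
					 unsigned int reg, unsigned int val)
{
	writeq(val, ctx->regs + reg);
}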
static struct _vop_vdev *vop_dc_to_vdev(struct mic_device_ctrl *dc) { return (struct _vop_vdev *)(unsigned long)readq(&dc->vdev); }
int __init init_cyclone_clock(void) { u64* reg; u64 base; /* saved cyclone base address */ u64 offset; /* offset from pageaddr to cyclone_timer register */ int i; u32* volatile cyclone_timer; /* Cyclone MPMC0 register */ if (!use_cyclone) return -ENODEV; printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n"); /* find base address */ offset = (CYCLONE_CBAR_ADDR); reg = (u64*)ioremap_nocache(offset, sizeof(u64)); if(!reg){ printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n"); use_cyclone = 0; return -ENODEV; } base = readq(reg); if(!base){ printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n"); use_cyclone = 0; return -ENODEV; } iounmap(reg); /* setup PMCC */ offset = (base + CYCLONE_PMCC_OFFSET); reg = (u64*)ioremap_nocache(offset, sizeof(u64)); if(!reg){ printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n"); use_cyclone = 0; return -ENODEV; } writel(0x00000001,reg); iounmap(reg); /* setup MPCS */ offset = (base + CYCLONE_MPCS_OFFSET); reg = (u64*)ioremap_nocache(offset, sizeof(u64)); if(!reg){ printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n"); use_cyclone = 0; return -ENODEV; } writel(0x00000001,reg); iounmap(reg); /* map in cyclone_timer */ offset = (base + CYCLONE_MPMC_OFFSET); cyclone_timer = (u32*)ioremap_nocache(offset, sizeof(u32)); if(!cyclone_timer){ printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n"); use_cyclone = 0; return -ENODEV; } /*quick test to make sure its ticking*/ for(i=0; i<3; i++){ u32 old = readl(cyclone_timer); int stall = 100; while(stall--) barrier(); if(readl(cyclone_timer) == old){ printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n"); iounmap(cyclone_timer); cyclone_timer = 0; use_cyclone = 0; return -ENODEV; } } /* initialize last tick */ cyclone_interpolator.addr = cyclone_timer; register_time_interpolator(&cyclone_interpolator); return 0; }
void zlist_uncompress(OSTask_t *task) { int z; static int cnt=1; static int lastframe=-1; static int firsttime=1; readdata(&zdata,task->m_data_ptr,sizeof(ZData)); if((zdata.m_pic&0x7f000000)!=0 || (zdata.m_q1 &0x7f000000)!=0 || (zdata.m_q2 &0x7f000000)!=0 || (zdata.m_q3 &0x7f000000)!=0 || !zdata.m_pic || !zdata.m_q1 || !zdata.m_q2 || !zdata.m_q3) { print("zelda: JPEG-DCT (buffer %08X, quant %08X) INVALID, ignored.\n", zdata.m_pic,zdata.m_q1,zdata.m_q2,zdata.m_q3); return; } readq(q1,zdata.m_q1); readq(q2,zdata.m_q2); readq(q3,zdata.m_q3); if(firsttime) { firsttime=0; disasm_dumpucode("rspzlist.log", task->m_ucode ,task->ucode_size, task->m_ucode_data,task->ucode_data_size,0x80); logd("RSP microcode/data dumped to RSPZLIST.LOG\n"); } if(st.frames!=lastframe) { print("zelda: JPEG-DCT (buffer %08X, quant %08X %08X %08X)\n", zdata.m_pic,zdata.m_q1,zdata.m_q2,zdata.m_q3); lastframe=st.frames; } for(z=0;z<4;z++) { readshort ((int *)in ,zdata.m_pic+z*768,8*8*2*6); uncompress(); writeshort((int *)out,zdata.m_pic+z*768,16*16*2); } /* if(cnt==1) { FILE *f1; f1=fopen("bgin.dat","wb"); for(z=0;z<3072*2;z+=4) { d=mem_read32(zdata.m_pic+z); d=FLIP32(d); fwrite(&d,1,4,f1); } fclose(f1); } d1=(cnt)+(cnt<<8)+(cnt<<16)+(cnt<<24); d2=d1^-1; for(z=0;z<4;z++) { for(y=0;y<16;y++) for(x=0;x<16;x++) { if(x==0 || y==0 || x==15 || y==15) { mem_write16(zdata.m_pic+z*768+x*2+y*2*16,d1); } } } */ cnt++; }
static unsigned long bgpio_read64(void __iomem *reg) { return readq(reg); }
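/*
 * Assumed counterpart (sketch): gpio-mmio pairs each read accessor with a
 * write accessor of the same width.
 */
static void bgpio_write64_sketch(void __iomem *reg, unsigned long data)
{
	writeq(data, reg);
}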
static int __init ibm_rtl_init(void) { unsigned long ebda_addr, ebda_size; unsigned int ebda_kb; int ret = -ENODEV, i; if (force) pr_warn("module loaded by force\n"); /* first ensure that we are running on IBM HW */ else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table)) return -ENODEV; /* Get the address for the Extended BIOS Data Area */ ebda_addr = get_bios_ebda(); if (!ebda_addr) { RTL_DEBUG("no BIOS EBDA found\n"); return -ENODEV; } ebda_map = ioremap(ebda_addr, 4); if (!ebda_map) return -ENOMEM; /* First word in the EDBA is the Size in KB */ ebda_kb = ioread16(ebda_map); RTL_DEBUG("EBDA is %d kB\n", ebda_kb); if (ebda_kb == 0) goto out; iounmap(ebda_map); ebda_size = ebda_kb*1024; /* Remap the whole table */ ebda_map = ioremap(ebda_addr, ebda_size); if (!ebda_map) return -ENOMEM; /* search for the _RTL_ signature at the start of the table */ for (i = 0 ; i < ebda_size/sizeof(unsigned int); i++) { struct ibm_rtl_table __iomem * tmp; tmp = (struct ibm_rtl_table __iomem *) (ebda_map+i); if ((readq(&tmp->signature) & RTL_MASK) == RTL_SIGNATURE) { phys_addr_t addr; unsigned int plen; RTL_DEBUG("found RTL_SIGNATURE at %p\n", tmp); rtl_table = tmp; /* The address, value, width and offset are platform * dependent and found in the ibm_rtl_table */ rtl_cmd_width = ioread8(&rtl_table->cmd_granularity); rtl_cmd_type = ioread8(&rtl_table->cmd_address_type); RTL_DEBUG("rtl_cmd_width = %u, rtl_cmd_type = %u\n", rtl_cmd_width, rtl_cmd_type); addr = ioread32(&rtl_table->cmd_port_address); RTL_DEBUG("addr = %#llx\n", (unsigned long long)addr); plen = rtl_cmd_width/sizeof(char); rtl_cmd_addr = rtl_port_map(addr, plen); RTL_DEBUG("rtl_cmd_addr = %p\n", rtl_cmd_addr); if (!rtl_cmd_addr) { ret = -ENOMEM; break; } ret = rtl_setup_sysfs(); break; } } out: if (ret) { iounmap(ebda_map); rtl_port_unmap(rtl_cmd_addr); } return ret; }
/* Clear All Port Errors. */ int port_err_clear(struct ifpga_port_hw *port, u64 err) { struct feature_port_header *port_hdr; struct feature_port_error *port_err; struct feature_port_err_key mask; struct feature_port_first_err_key first; struct feature_port_status status; int ret = 0; port_err = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_ERROR); port_hdr = get_port_feature_ioaddr_by_index(port, PORT_FEATURE_ID_HEADER); /* * Clear All Port Errors * * - Check for AP6 State * - Halt Port by keeping Port in reset * - Set PORT Error mask to all 1 to mask errors * - Clear all errors * - Set Port mask to all 0 to enable errors * - All errors start capturing new errors * - Enable Port by pulling the port out of reset */ /* If device is still in AP6 state, can not clear any error.*/ status.csr = readq(&port_hdr->status); if (status.power_state == PORT_POWER_STATE_AP6) { dev_err(dev, "Could not clear errors, device in AP6 state.\n"); return -EBUSY; } /* Halt Port by keeping Port in reset */ ret = __fpga_port_disable(port); if (ret) return ret; /* Mask all errors */ port_err_mask(port, true); /* Clear errors if err input matches with current port errors.*/ mask.csr = readq(&port_err->port_error); if (mask.csr == err) { writeq(mask.csr, &port_err->port_error); first.csr = readq(&port_err->port_first_error); writeq(first.csr, &port_err->port_first_error); } else { ret = -EBUSY; } /* Clear mask */ port_err_mask(port, false); /* Enable the Port by clear the reset */ __fpga_port_enable(port); return ret; }
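/*
 * port_err_mask() is called above but not shown in this excerpt. A hedged
 * sketch of what such a helper typically does follows; the register and
 * field names (error_mask) are assumptions.
 */
static void port_err_mask_sketch(struct ifpga_port_hw *port, bool mask)
{
	struct feature_port_error *port_err;
	struct feature_port_err_key err_mask;

	port_err = get_port_feature_ioaddr_by_index(port,
						    PORT_FEATURE_ID_ERROR);

	/* all-ones masks every error source; zero re-enables reporting */
	err_mask.csr = mask ? ~0ULL : 0;

	writeq(err_mask.csr, &port_err->error_mask);
}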