/*
 * sysfs write handler: reconfigure the on-board CPLD.
 *
 * Parses an image-select value 0..7 from @buf and writes
 * (val << 8) | 1 — bit 0 being the "Configure Go" bit — to
 * REG_CPLD_CONFIG.
 *
 * Returns @count on success, a negative kstrtoul() error for
 * unparseable input, or -EINVAL for an out-of-range value.
 */
static ssize_t cpld_reconfigure(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct kp2000_device *pcard = dev_get_drvdata(dev);
	unsigned long wr_val;
	int rv;

	/*
	 * Parse as unsigned: with the old kstrtol()+long a negative value
	 * such as "-1" passed the "> 7" check and was written to hardware.
	 */
	rv = kstrtoul(buf, 0, &wr_val);
	if (rv < 0)
		return rv;
	if (wr_val > 7)
		return -EINVAL;

	wr_val = wr_val << 8;
	wr_val |= 0x1; // Set the "Configure Go" bit

	writeq(wr_val, pcard->sysinfo_regs_base + REG_CPLD_CONFIG);

	return count;
}
/*
 * Reset the eMMC bus for @slot: re-run the SWITCH operation with the
 * execute/error bits cleared and this slot's bus id, then restore the
 * watchdog register.
 */
static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch, wdog;

	emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
	/* Clear the execute bit and all three switch-error status bits. */
	emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
			MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
	set_bus_id(&emm_switch, slot->bus_id);

	/*
	 * Save the watchdog before the switch and write it back afterwards —
	 * NOTE(review): presumably do_switch() disturbs MIO_EMM_WDOG;
	 * confirm against the hardware documentation.
	 */
	wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
	do_switch(slot->host, emm_switch);

	slot->cached_switch = emm_switch;

	msleep(20);

	writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
}
/*
 * Complete a scatter-gather DMA transfer: warn about any requests still
 * queued in the DMA FIFO, mark the full payload as transferred, clear
 * and disable the FIFO, and unmap the scatterlist.  Always returns 1.
 */
static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 fifo_cfg;
	int count;

	/* Check if there are any pending requests left */
	fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
	if (count)
		dev_err(host->dev, "%u requests still pending\n", count);

	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;

	/* Clear and disable FIFO */
	/*
	 * NOTE(review): BIT_ULL(16) is presumably the FIFO clear/disable
	 * bit of MIO_EMM_DMA_FIFO_CFG — confirm against the register spec.
	 */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}
/*
 * MMC interrupt handler (fragment — the remainder of this function is
 * outside the visible chunk; the "out" label it jumps to is defined
 * further down).  Acknowledges the interrupt cause bits and, for
 * non-DMA buffer-done events, services the pending PIO read/write.
 */
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
	struct cvm_mmc_host *host = dev_id;
	struct mmc_request *req;
	unsigned long flags = 0;
	u64 emm_int, rsp_sts;
	bool host_done;

	/* Some hosts share the line and need a real lock; others only
	 * annotate for sparse. */
	if (host->need_irq_handler_lock)
		spin_lock_irqsave(&host->irq_handler_lock, flags);
	else
		__acquire(&host->irq_handler_lock);

	/* Clear interrupt bits (write 1 clears ). */
	emm_int = readq(host->base + MIO_EMM_INT(host));
	writeq(emm_int, host->base + MIO_EMM_INT(host));

	if (emm_int & MIO_EMM_INT_SWITCH_ERR)
		check_switch_errors(host);

	req = host->current_req;
	if (!req)
		goto out;

	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	/*
	 * dma_val set means DMA is still in progress. Don't touch
	 * the request and wait for the interrupt indicating that
	 * the DMA is finished.
	 */
	if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
		goto out;

	if (!host->dma_active && req->data &&
	    (emm_int & MIO_EMM_INT_BUF_DONE)) {
		/* Bits 8:7 of RSP_STS encode the transfer direction —
		 * NOTE(review): 1 = read, 2 = write presumed from the
		 * handlers invoked; confirm against the register spec. */
		unsigned int type = (rsp_sts >> 7) & 3;

		if (type == 1)
			do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
		else if (type == 2)
			do_write(req);
	}
static int mlxbf_gpio_resume(struct platform_device *pdev) { struct mlxbf_gpio_state *gs = platform_get_drvdata(pdev); writeq(gs->csave_regs.scratchpad, gs->base + MLXBF_GPIO_SCRATCHPAD); writeq(gs->csave_regs.pad_control[0], gs->base + MLXBF_GPIO_PAD_CONTROL_FIRST_WORD); writeq(gs->csave_regs.pad_control[1], gs->base + MLXBF_GPIO_PAD_CONTROL_1_FIRST_WORD); writeq(gs->csave_regs.pad_control[2], gs->base + MLXBF_GPIO_PAD_CONTROL_2_FIRST_WORD); writeq(gs->csave_regs.pad_control[3], gs->base + MLXBF_GPIO_PAD_CONTROL_3_FIRST_WORD); writeq(gs->csave_regs.pin_dir_i, gs->base + MLXBF_GPIO_PIN_DIR_I); writeq(gs->csave_regs.pin_dir_o, gs->base + MLXBF_GPIO_PIN_DIR_O); return 0; }
// Emit the HTML chapter documenting an activity partition: description,
// dimension/external flags, the element it represents, dependencies,
// associated diagram, properties and children.
void UmlActivityPartition::html(Q3CString pfix, unsigned int rank, unsigned int level) {
  define();
  chapter("Activity partition", pfix, rank, "activitypartition", level);

  Q3CString descr = description();

  if (!descr.isEmpty()) {
    fw.write("<p>");
    writeq(descr);
    fw.write("<br /></p>");
  }

  if (isDimension()) {
    if (isExternal())
      fw.write("<p>is dimension, is external</p>\n");
    else
      fw.write("<p>is dimension</p>\n");
  }
  else if (isExternal())
    fw.write("<p>is external</p>\n");

  if (represents() != 0) {
    fw.write("<p>represents ");
    represents()->write();
    fw.write("</p>");
  }

  write_dependencies();

  UmlDiagram * diag = associatedDiagram();

  if (diag != 0) {
    fw.write("<p>Diagram : ");
    diag->write();
    fw.write("</p>\n");
  }

  write_properties();
  write_children(pfix, rank, level);

  unload(FALSE, FALSE);
}
/*
 * Write @value to physical address @phys_addr with the given bit @width
 * (8, 16, 32 or 64).  A cached mapping is used when available (accessed
 * under RCU); otherwise a transient ioremap is created for just this
 * access and torn down afterwards.
 *
 * Returns AE_OK, or AE_BAD_ADDRESS if no mapping could be established.
 */
acpi_status acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;	/* transient mapping, undone below */
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();	/* callers must pass a supported width */
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();	/* cached mapping stays valid under RCU */

	return AE_OK;
}
void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, unsigned int fetch_index, unsigned int posted_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { u64 paddr; unsigned int count = wq->ring.desc_count; paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; writeq(paddr, &wq->ctrl->ring_base); iowrite32(count, &wq->ctrl->ring_size); iowrite32(fetch_index, &wq->ctrl->fetch_index); iowrite32(posted_index, &wq->ctrl->posted_index); iowrite32(cq_index, &wq->ctrl->cq_index); iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); iowrite32(0, &wq->ctrl->error_status); wq->head_idx = fetch_index; wq->tail_idx = wq->head_idx; }
/*
 * Program the work-queue control registers for @wq and point the buffer
 * cursors (to_use/to_clean) at the buffer matching the hardware fetch
 * index.
 */
void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int posted_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;

	/*
	 * NOTE(review): VNIC_PADDR_TARGET is presumably a tag telling the
	 * adapter how to interpret the ring address — confirm its meaning.
	 */
	paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &wq->ctrl->ring_base);
	iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
	iowrite32(fetch_index, &wq->ctrl->fetch_index);
	iowrite32(posted_index, &wq->ctrl->posted_index);
	iowrite32(cq_index, &wq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
	iowrite32(0, &wq->ctrl->error_status);

	/* Both cursors start at the buffer block/entry for fetch_index. */
	wq->to_use = wq->to_clean =
		&wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES]
			[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES];
}
int octeon_mbox_cancel(struct octeon_device *oct, int q_no) { struct octeon_mbox *mbox = oct->mbox[q_no]; struct octeon_mbox_cmd *mbox_cmd; unsigned long flags = 0; spin_lock_irqsave(&mbox->lock, flags); mbox_cmd = &mbox->mbox_resp; if (!(mbox->state & OCTEON_MBOX_STATE_RESPONSE_PENDING)) { spin_unlock_irqrestore(&mbox->lock, flags); return 1; } mbox->state = OCTEON_MBOX_STATE_IDLE; memset(mbox_cmd, 0, sizeof(*mbox_cmd)); writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); spin_unlock_irqrestore(&mbox->lock, flags); return 0; }
// Insert @xt into the writer-side deque at logical offset @off.
// Appending exactly at the tail grows the queue contiguously; inserting
// past the tail first extends the deque, leaving a hole.  An insert at
// offset 0 makes the queue readable and signals the reader.
void ReaderQueue::insertQueL(Xtray *xt, Xtray::QueId off)
{
	Xdeque &dq = writeq();

	if (dq.size() == off) {
		// contiguous growth at the tail
		dq.push_back(xt);
		lastId_++;
	} else {
		if (dq.size() < off) {
			dq.resize(off + 1);
			lastId_ = prevId_ + off + 1;
			wrhole_ = true; // a gap now exists in the middle
		}
		dq[off] = xt;
	}

	if (off == 0) {
		// the front became readable: set the flag before signaling
		wrReady_ = true;
		qev_->signal();
	}
}
/*
 * Allocate an indirect descriptor table of @elem entries in guest
 * memory and pre-chain every entry to its successor via the NEXT flag
 * (the final entry is left unchained for the caller to fill in).
 */
QVRingIndirectDesc *qvring_indirect_desc_setup(QVirtioDevice *d, QGuestAllocator *alloc, uint16_t elem)
{
    QVRingIndirectDesc *indirect = g_malloc(sizeof(*indirect));
    int idx;

    indirect->index = 0;
    indirect->elem = elem;
    indirect->desc = guest_alloc(alloc, sizeof(QVRingDesc) * elem);

    for (idx = 0; idx < elem - 1; ++idx) {
        uint64_t entry = indirect->desc + (16 * idx);

        writeq(entry, 0);                       /* desc[idx].addr */
        writew(entry + 12, QVRING_DESC_F_NEXT); /* desc[idx].flags */
        writew(entry + 14, idx + 1);            /* desc[idx].next */
    }

    return indirect;
}
static int cn23xx_setup_vf_mbox(struct octeon_device *oct) { struct octeon_mbox *mbox = NULL; mbox = vmalloc(sizeof(*mbox)); if (!mbox) return 1; memset(mbox, 0, sizeof(struct octeon_mbox)); spin_lock_init(&mbox->lock); mbox->oct_dev = oct; mbox->q_no = 0; mbox->state = OCTEON_MBOX_STATE_IDLE; /* VF mbox interrupt reg */ mbox->mbox_int_reg = (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_PKT_MBOX_INT(0); /* VF reads from SIG0 reg */ mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0); /* VF writes into SIG1 reg */ mbox->mbox_write_reg = (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1); INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work, cn23xx_vf_mbox_thread); mbox->mbox_poll_wk.ctxptr = mbox; oct->mbox[0] = mbox; writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); return 0; }
/*
 * Fill the next free entry of @indirect with (@data, @len).  The flags
 * pre-seeded by qvring_indirect_desc_setup() are preserved; WRITE is
 * OR-ed in when @write is set.  Asserts there is still a free entry.
 */
void qvring_indirect_desc_add(QVRingIndirectDesc *indirect, uint64_t data, uint32_t len, bool write)
{
    uint64_t entry;
    uint16_t flags;

    g_assert_cmpint(indirect->index, <, indirect->elem);

    entry = indirect->desc + (16 * indirect->index);

    flags = readw(entry + 12);
    if (write) {
        flags |= QVRING_DESC_F_WRITE;
    }

    writeq(entry, data);       /* desc[index].addr */
    writel(entry + 8, len);    /* desc[index].len */
    writew(entry + 12, flags); /* desc[index].flags */

    indirect->index++;
}
/*
 * Write a small number of bytes taken from user space.
 *
 * @write_addr: MMIO destination.
 * @src_addr:   user-space source buffer.
 * @len:        transfer size; must be 1, 2, 4 or 8.
 *
 * Returns 0 on success, -EFAULT if the user copy fails, -EINVAL for an
 * unsupported length.  The copy result is checked *before* touching the
 * device: the original wrote the (then-uninitialized) value to hardware
 * first and checked afterwards, and silently returned 0 for unsupported
 * lengths.
 */
ssize_t aclpci_write_small (void *write_addr, void __user* src_addr, ssize_t len)
{
	switch (len) {
	case 1: {
		u8 d;

		if (copy_from_user(&d, src_addr, sizeof(d)))
			return -EFAULT;
		writeb(d, write_addr);
		break;
	}
	case 2: {
		u16 d;

		if (copy_from_user(&d, src_addr, sizeof(d)))
			return -EFAULT;
		writew(d, write_addr);
		break;
	}
	case 4: {
		u32 d;

		if (copy_from_user(&d, src_addr, sizeof(d)))
			return -EFAULT;
		writel(d, write_addr);
		break;
	}
	case 8: {
		u64 d;

		if (copy_from_user(&d, src_addr, sizeof(d)))
			return -EFAULT;
		writeq(d, write_addr);
		break;
	}
	default:
		return -EINVAL;	/* unsupported size: was a silent "success" */
	}

	return 0;
}
/*
 * regmap MMIO gather-write: write @val_size bytes of big-endian values
 * to the register window, starting at the offset encoded in @reg
 * (always a 4-byte big-endian quantity) and advancing by the context's
 * configured value width per element.  Always returns 0.
 */
static int regmap_mmio_gather_write(void *context,
				    const void *reg, size_t reg_size,
				    const void *val, size_t val_size)
{
	struct regmap_mmio_context *ctx = context;
	u32 offset;

	BUG_ON(reg_size != 4);	/* register addresses are fixed at 32 bits */

	offset = be32_to_cpup(reg);

	while (val_size) {
		switch (ctx->val_bytes) {
		case 1:
			writeb(*(u8 *)val, ctx->regs + offset);
			break;
		case 2:
			writew(be16_to_cpup(val), ctx->regs + offset);
			break;
		case 4:
			writel(be32_to_cpup(val), ctx->regs + offset);
			break;
#ifdef CONFIG_64BIT
		case 8:
			writeq(be64_to_cpup(val), ctx->regs + offset);
			break;
#endif
		default:
			/* Should be caught by regmap_mmio_check_config */
			BUG();
		}
		/* Step to the next value and the next register. */
		val_size -= ctx->val_bytes;
		val += ctx->val_bytes;
		offset += ctx->val_bytes;
	}

	return 0;
}
/*
 * Program the receive-queue control registers for @rq and point the
 * buffer cursors (to_use/to_clean) at the buffer matching the hardware
 * fetch index.
 */
void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int fetch_index, unsigned int posted_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset)
{
	u64 paddr;
	unsigned int count = rq->ring.desc_count;

	paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
	writeq(paddr, &rq->ctrl->ring_base);
	iowrite32(count, &rq->ctrl->ring_size);
	iowrite32(cq_index, &rq->ctrl->cq_index);
	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
	iowrite32(0, &rq->ctrl->dropped_packet_count);
	iowrite32(0, &rq->ctrl->error_status);
	iowrite32(fetch_index, &rq->ctrl->fetch_index);
	iowrite32(posted_index, &rq->ctrl->posted_index);

	/* Both cursors start at the buffer block/entry for fetch_index. */
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
}
/*
 * Append one descriptor (@data, @len) at the queue's free head, setting
 * the WRITE and/or NEXT flags as requested, and return the index used
 * (free_head is advanced afterwards).
 */
uint32_t qvirtqueue_add(QVirtQueue *vq, uint64_t data, uint32_t len, bool write, bool next)
{
    uint64_t entry = vq->desc + (16 * vq->free_head);
    uint16_t flags = (write ? QVRING_DESC_F_WRITE : 0) |
                     (next ? QVRING_DESC_F_NEXT : 0);

    vq->num_free--;

    writeq(entry, data);       /* desc[free_head].addr */
    writel(entry + 8, len);    /* desc[free_head].len */
    writew(entry + 12, flags); /* desc[free_head].flags */

    return vq->free_head++; /* Return and increase, in this order */
}
/**
 * octeon_mbox_write:
 * @oct: Pointer Octeon Device
 * @mbox_cmd: Cmd to send to mailbox.
 *
 * Populates the queue specific mbox structure
 * with cmd information.
 * Write the cmd to mbox register
 */
int octeon_mbox_write(struct octeon_device *oct,
		      struct octeon_mbox_cmd *mbox_cmd)
{
	struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no];
	u32 count, i, ret = OCTEON_MBOX_STATUS_SUCCESS;
	long timeout = LIO_MBOX_WRITE_WAIT_TIME;
	unsigned long flags;

	spin_lock_irqsave(&mbox->lock, flags);

	/* A response may only be sent after a request has been received. */
	if ((mbox_cmd->msg.s.type == OCTEON_MBOX_RESPONSE) &&
	    !(mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVED)) {
		spin_unlock_irqrestore(&mbox->lock, flags);
		return OCTEON_MBOX_STATUS_FAILED;
	}

	/* A new request requires an idle mailbox. */
	if ((mbox_cmd->msg.s.type == OCTEON_MBOX_REQUEST) &&
	    !(mbox->state & OCTEON_MBOX_STATE_IDLE)) {
		spin_unlock_irqrestore(&mbox->lock, flags);
		return OCTEON_MBOX_STATUS_BUSY;
	}

	if (mbox_cmd->msg.s.type == OCTEON_MBOX_REQUEST) {
		/* Save the outgoing request so the eventual response can be
		 * matched against it. */
		memcpy(&mbox->mbox_resp, mbox_cmd,
		       sizeof(struct octeon_mbox_cmd));
		mbox->state = OCTEON_MBOX_STATE_RESPONSE_PENDING;
	}

	spin_unlock_irqrestore(&mbox->lock, flags);

	/* Wait (bounded) for the peer's SIG marker before the first word. */
	count = 0;
	while (readq(mbox->mbox_write_reg) != OCTEON_PFVFSIG) {
		schedule_timeout_uninterruptible(timeout);
		if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
			ret = OCTEON_MBOX_STATUS_FAILED;
			break;
		}
	}

	if (ret == OCTEON_MBOX_STATUS_SUCCESS) {
		writeq(mbox_cmd->msg.u64, mbox->mbox_write_reg);
		/* Send the remaining (len - 1) data words, waiting for the
		 * peer's ACK marker before each one. */
		for (i = 0; i < (u32)(mbox_cmd->msg.s.len - 1); i++) {
			count = 0;
			while (readq(mbox->mbox_write_reg) !=
			       OCTEON_PFVFACK) {
				schedule_timeout_uninterruptible(timeout);
				if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
					ret = OCTEON_MBOX_STATUS_FAILED;
					break;
				}
			}
			if (ret == OCTEON_MBOX_STATUS_SUCCESS)
				writeq(mbox_cmd->data[i],
				       mbox->mbox_write_reg);
			else
				break;
		}
	}

	spin_lock_irqsave(&mbox->lock, flags);
	if (mbox_cmd->msg.s.type == OCTEON_MBOX_RESPONSE) {
		/* Response fully sent: return to idle and re-post SIG. */
		mbox->state = OCTEON_MBOX_STATE_IDLE;
		writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
	} else {
		/* Request path: if no response is expected (or the send
		 * failed), drop the pending-response state. */
		if ((!mbox_cmd->msg.s.resp_needed) ||
		    (ret == OCTEON_MBOX_STATUS_FAILED)) {
			mbox->state &= ~OCTEON_MBOX_STATE_RESPONSE_PENDING;
			if (!(mbox->state &
			      (OCTEON_MBOX_STATE_REQUEST_RECEIVING |
			       OCTEON_MBOX_STATE_REQUEST_RECEIVED)))
				mbox->state = OCTEON_MBOX_STATE_IDLE;
		}
	}
	spin_unlock_irqrestore(&mbox->lock, flags);

	return ret;
}
/* Store @data to @reg as a single 64-bit MMIO write. */
static void bgpio_write64(void __iomem *reg, unsigned long data)
{
	writeq(data, reg);
}
/* Write the 64-bit @value to @chan's register at offset @reg. */
static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	writeq(value, chan->xdev->regs + reg);
}
/*
 * Issue devcmd @cmd to the firmware through the devcmd register window.
 * Write-direction args are posted first; unless the command is
 * fire-and-forget, the status register is polled (up to @wait * 100us)
 * for completion and read-direction results are fetched back.
 *
 * Returns 0 on success, -EBUSY if a command is already in flight, the
 * negated converted firmware error on failure, or -ETIMEDOUT.
 */
int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	int delay;
	u32 status;
	int dev_cmd_err[] = {
		/* convert from fw's version of error.h to host's version */
		0,	/* ERR_SUCCESS */
		EINVAL,	/* ERR_EINVAL */
		EFAULT,	/* ERR_EFAULT */
		EPERM,	/* ERR_EPERM */
		EBUSY,	/* ERR_EBUSY */
	};
	int err;
	u64 *a0 = &vdev->args[0];
	u64 *a1 = &vdev->args[1];

	status = ioread32(&devcmd->status);
	if (status & STAT_BUSY) {
		printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		writeq(*a0, &devcmd->args[0]);
		writeq(*a1, &devcmd->args[1]);
		wmb();	/* args must be visible before the command doorbell */
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {
		udelay(100);
		status = ioread32(&devcmd->status);
		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				/*
				 * NOTE(review): args[0] indexes dev_cmd_err[]
				 * without a bounds check — a firmware error
				 * code >= 5 would read past the table.
				 * Confirm the firmware can only return 0..4.
				 */
				err = dev_cmd_err[(int)readq(&devcmd->args[0])];
				printk(KERN_ERR "Error %d devcmd %d\n",
					err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();	/* status must be read before results */
				*a0 = readq(&devcmd->args[0]);
				*a1 = readq(&devcmd->args[1]);
			}

			return 0;
		}
	}

	printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
/*
 * Clear All Port Errors.
 *
 * @port: the port whose error registers are cleared.
 * @err:  the error snapshot the caller observed; clearing only proceeds
 *        when it still matches the live register (otherwise -EBUSY).
 *
 * Returns 0 on success, -EBUSY if the device is in AP6 state or the
 * caller's error view is stale, or the __fpga_port_disable() error.
 */
int port_err_clear(struct ifpga_port_hw *port, u64 err)
{
	struct feature_port_header *port_hdr;
	struct feature_port_error *port_err;
	struct feature_port_err_key mask;
	struct feature_port_first_err_key first;
	struct feature_port_status status;
	int ret = 0;

	port_err = get_port_feature_ioaddr_by_index(port,
						    PORT_FEATURE_ID_ERROR);
	port_hdr = get_port_feature_ioaddr_by_index(port,
						    PORT_FEATURE_ID_HEADER);

	/*
	 * Clear All Port Errors
	 *
	 * - Check for AP6 State
	 * - Halt Port by keeping Port in reset
	 * - Set PORT Error mask to all 1 to mask errors
	 * - Clear all errors
	 * - Set Port mask to all 0 to enable errors
	 * - All errors start capturing new errors
	 * - Enable Port by pulling the port out of reset
	 */

	/* If device is still in AP6 state, can not clear any error.*/
	status.csr = readq(&port_hdr->status);
	if (status.power_state == PORT_POWER_STATE_AP6) {
		dev_err(dev, "Could not clear errors, device in AP6 state.\n");
		return -EBUSY;
	}

	/* Halt Port by keeping Port in reset */
	ret = __fpga_port_disable(port);
	if (ret)
		return ret;

	/* Mask all errors */
	port_err_mask(port, true);

	/* Clear errors if err input matches with current port errors.*/
	mask.csr = readq(&port_err->port_error);
	if (mask.csr == err) {
		/*
		 * NOTE(review): writing the read value back suggests these
		 * registers are write-1-to-clear — confirm against the FPGA
		 * feature documentation.
		 */
		writeq(mask.csr, &port_err->port_error);

		first.csr = readq(&port_err->port_first_error);
		writeq(first.csr, &port_err->port_first_error);
	} else {
		ret = -EBUSY;	/* live errors changed since caller's read */
	}

	/* Clear mask */
	port_err_mask(port, false);

	/* Enable the Port by clear the reset */
	__fpga_port_enable(port);

	return ret;
}
/*
 * Issue devcmd @cmd to the firmware and poll (up to @wait * 100us) for
 * completion.  All VNIC_DEVCMD_NARGS args are posted for write-direction
 * commands and read back for read-direction commands.  A status read of
 * all-ones means the PCIe device has disappeared.
 *
 * Returns 0 on success, -ENODEV if the device is gone, -EBUSY if a
 * command is already in flight, the negated firmware error code on
 * failure, or -ETIMEDOUT.
 */
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		wmb();	/* args must be visible before the command doorbell */
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {
		udelay(100);
		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = (int)readq(&devcmd->args[0]);
				/* CMD_CAPABILITY probes are expected to fail
				 * on older firmware; keep those quiet. */
				if (err == ERR_EINVAL &&
				    cmd == CMD_CAPABILITY)
					return -err;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					pr_err("Error %d devcmd %d\n",
						err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();	/* status before results */
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
// Emit the HTML chapter for a state (titled "StateMachine" when directly
// owned by a class view): description, reference/active/specification
// info, the entry/exit/do behaviors in OCL, C++ and Java forms, the
// associated diagram, properties and children.
void UmlState::html(Q3CString pfix, unsigned int rank, unsigned int level) {
  define();
  chapter((parent()->kind() == aClassView) ? "StateMachine" : "State",
          pfix, rank, "state", level);

  Q3CString s = description();

  if (!s.isEmpty()) {
    fw.write("<p>");
    // NOTE(review): description() is re-evaluated although s already holds
    // the same value; sibling html() generators pass s here.
    writeq(description());
    fw.write("<br /></p>");
  }

  UmlState * ref = reference();

  if (ref != 0) {
    // This state merely refers to another one: emit the link only.
    fw.write("<p>References ");
    ref->write();
    fw.write("</p>");
  }
  else {
    if (isActive())
      fw.write("<p>Active state</p>\n");

    UmlOperation * beh = specification();

    if (beh != 0) {
      fw.write("<p>Implements ");
      beh->write();
      fw.write("</p>");
    }

    Q3CString scpp, sjava;

    // Entry behavior in each supported notation.
    s = entryBehavior();
    scpp = cppEntryBehavior();
    sjava = javaEntryBehavior();

    if (!s.isEmpty() || !scpp.isEmpty() || !sjava.isEmpty()) {
      fw.write("<p>Entry Behavior :</p><ul>");
      if (!s.isEmpty()) {
        fw.write("<li>OCL : <pre>\n");
        writeq(s);
        fw.write("</pre></li>");
      }
      if (!scpp.isEmpty()) {
        fw.write("<li>C++ : <pre>\n");
        writeq(scpp);
        fw.write("</pre></li>");
      }
      if (!sjava.isEmpty()) {
        fw.write("<li>Java : <pre>\n");
        writeq(sjava);
        fw.write("</pre></li>");
      }
      fw.write("</ul>");
    }

    // Exit behavior in each supported notation.
    s = exitBehavior();
    scpp = cppExitBehavior();
    sjava = javaExitBehavior();

    if (!s.isEmpty() || !scpp.isEmpty() || !sjava.isEmpty()) {
      fw.write("<p>Exit Behavior :</p><ul>");
      if (!s.isEmpty()) {
        fw.write("<li>OCL : <pre>\n");
        writeq(s);
        fw.write("</pre></li>");
      }
      if (!scpp.isEmpty()) {
        fw.write("<li>C++ : <pre>\n");
        writeq(scpp);
        fw.write("</pre></li>");
      }
      if (!sjava.isEmpty()) {
        fw.write("<li>Java : <pre>\n");
        writeq(sjava);
        fw.write("</pre></li>");
      }
      fw.write("</ul>");
    }

    // Do activity in each supported notation.
    s = doActivity();
    scpp = cppDoActivity();
    sjava = javaDoActivity();

    if (!s.isEmpty() || !scpp.isEmpty() || !sjava.isEmpty()) {
      fw.write("<p>Do activity :</p><ul>");
      if (!s.isEmpty()) {
        fw.write("<li>OCL : <pre>\n");
        writeq(s);
        fw.write("</pre></li>");
      }
      if (!scpp.isEmpty()) {
        fw.write("<li>C++ : <pre>\n");
        writeq(scpp);
        fw.write("</pre></li>");
      }
      if (!sjava.isEmpty()) {
        fw.write("<li>Java : <pre>\n");
        writeq(sjava);
        fw.write("</pre></li>");
      }
      fw.write("</ul>");
    }
  }

  UmlStateDiagram * d = associatedDiagram();

  if (d != 0) {
    fw.write("<p>Diagram : ");
    d->write();
    fw.write("</p>");
  }

  write_properties();
  write_children(pfix, rank, level);

  unload(FALSE, FALSE);
}
/**
 * octeon_mbox_read:
 * @mbox: Pointer mailbox
 *
 * Reads the 8-bytes of data from the mbox register
 * Writes back the acknowledgement indicating completion of read
 */
int octeon_mbox_read(struct octeon_mbox *mbox)
{
	union octeon_mbox_message msg;
	int ret = 0;

	spin_lock(&mbox->lock);

	msg.u64 = readq(mbox->mbox_read_reg);

	/* SIG/ACK values are protocol markers, not message payload. */
	if ((msg.u64 == OCTEON_PFVFACK) || (msg.u64 == OCTEON_PFVFSIG)) {
		spin_unlock(&mbox->lock);
		return 0;
	}

	if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVING) {
		/*
		 * Continuation word of an in-flight request.
		 * NOTE(review): recv_len is not bounds-checked against the
		 * data[] array before this store — confirm the peer cannot
		 * force an overflow with an oversized msg.s.len.
		 */
		mbox->mbox_req.data[mbox->mbox_req.recv_len - 1] = msg.u64;
		mbox->mbox_req.recv_len++;
	} else {
		if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVING) {
			/* Continuation word of an in-flight response. */
			mbox->mbox_resp.data[mbox->mbox_resp.recv_len - 1] =
				msg.u64;
			mbox->mbox_resp.recv_len++;
		} else {
			if ((mbox->state & OCTEON_MBOX_STATE_IDLE) &&
			    (msg.s.type == OCTEON_MBOX_REQUEST)) {
				/* First word of a new incoming request. */
				mbox->state &= ~OCTEON_MBOX_STATE_IDLE;
				mbox->state |=
					OCTEON_MBOX_STATE_REQUEST_RECEIVING;
				mbox->mbox_req.msg.u64 = msg.u64;
				mbox->mbox_req.q_no = mbox->q_no;
				mbox->mbox_req.recv_len = 1;
			} else {
				if ((mbox->state &
				     OCTEON_MBOX_STATE_RESPONSE_PENDING) &&
				    (msg.s.type == OCTEON_MBOX_RESPONSE)) {
					/* First word of the awaited response. */
					mbox->state &=
					    ~OCTEON_MBOX_STATE_RESPONSE_PENDING;
					mbox->state |=
					    OCTEON_MBOX_STATE_RESPONSE_RECEIVING;
					mbox->mbox_resp.msg.u64 = msg.u64;
					mbox->mbox_resp.q_no = mbox->q_no;
					mbox->mbox_resp.recv_len = 1;
				} else {
					/* Unexpected word: signal a protocol
					 * error to the peer and bail out. */
					writeq(OCTEON_PFVFERR,
					       mbox->mbox_read_reg);
					mbox->state |= OCTEON_MBOX_STATE_ERROR;
					spin_unlock(&mbox->lock);
					return 1;
				}
			}
		}
	}

	/* Return 1 once all msg.s.len words of the message have arrived. */
	if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVING) {
		if (mbox->mbox_req.recv_len < mbox->mbox_req.msg.s.len) {
			ret = 0;
		} else {
			mbox->state &= ~OCTEON_MBOX_STATE_REQUEST_RECEIVING;
			mbox->state |= OCTEON_MBOX_STATE_REQUEST_RECEIVED;
			ret = 1;
		}
	} else {
		if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVING) {
			if (mbox->mbox_resp.recv_len <
			    mbox->mbox_resp.msg.s.len) {
				ret = 0;
			} else {
				mbox->state &=
					~OCTEON_MBOX_STATE_RESPONSE_RECEIVING;
				mbox->state |=
					OCTEON_MBOX_STATE_RESPONSE_RECEIVED;
				ret = 1;
			}
		} else {
			WARN_ON(1);
		}
	}

	/* Ack the word so the peer can post the next one. */
	writeq(OCTEON_PFVFACK, mbox->mbox_read_reg);

	spin_unlock(&mbox->lock);

	return ret;
}
/**
 * octeon_mbox_process_message:
 *
 * Process the received mbox message.
 */
int octeon_mbox_process_message(struct octeon_mbox *mbox)
{
	struct octeon_mbox_cmd mbox_cmd;
	unsigned long flags;

	spin_lock_irqsave(&mbox->lock, flags);

	if (mbox->state & OCTEON_MBOX_STATE_ERROR) {
		if (mbox->state & (OCTEON_MBOX_STATE_RESPONSE_PENDING |
				   OCTEON_MBOX_STATE_RESPONSE_RECEIVING)) {
			/* Error with a response outstanding: deliver the
			 * (possibly partial) response to the requester's
			 * callback with recv_status = 1. */
			memcpy(&mbox_cmd, &mbox->mbox_resp,
			       sizeof(struct octeon_mbox_cmd));
			mbox->state = OCTEON_MBOX_STATE_IDLE;
			writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
			spin_unlock_irqrestore(&mbox->lock, flags);
			mbox_cmd.recv_status = 1;
			/* Callback runs outside the lock. */
			if (mbox_cmd.fn)
				mbox_cmd.fn(mbox->oct_dev, &mbox_cmd,
					    mbox_cmd.fn_arg);
			return 0;
		}

		/* Error with nothing outstanding: just reset to idle. */
		mbox->state = OCTEON_MBOX_STATE_IDLE;
		writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
		spin_unlock_irqrestore(&mbox->lock, flags);
		return 0;
	}

	if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVED) {
		/* Complete response: hand it to the requester's callback. */
		memcpy(&mbox_cmd, &mbox->mbox_resp,
		       sizeof(struct octeon_mbox_cmd));
		mbox->state = OCTEON_MBOX_STATE_IDLE;
		writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
		spin_unlock_irqrestore(&mbox->lock, flags);
		mbox_cmd.recv_status = 0;
		if (mbox_cmd.fn)
			mbox_cmd.fn(mbox->oct_dev, &mbox_cmd,
				    mbox_cmd.fn_arg);
		return 0;
	}

	if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVED) {
		/* Complete request from the peer: if no response is needed,
		 * release the mailbox immediately; the command itself is
		 * processed outside the lock. */
		memcpy(&mbox_cmd, &mbox->mbox_req,
		       sizeof(struct octeon_mbox_cmd));
		if (!mbox_cmd.msg.s.resp_needed) {
			mbox->state &= ~OCTEON_MBOX_STATE_REQUEST_RECEIVED;
			if (!(mbox->state &
			      OCTEON_MBOX_STATE_RESPONSE_PENDING))
				mbox->state = OCTEON_MBOX_STATE_IDLE;
			writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
		}

		spin_unlock_irqrestore(&mbox->lock, flags);
		octeon_mbox_process_cmd(mbox, &mbox_cmd);
		return 0;
	}

	spin_unlock_irqrestore(&mbox->lock, flags);
	WARN_ON(1);

	return 0;
}
/**
 * ipath_diagpkt_write - write an IB packet
 * @fp: the diag data device file pointer
 * @data: ipath_diag_pkt structure saying where to get the packet
 * @count: size of data to write
 * @off: unused by this code
 */
static ssize_t ipath_diagpkt_write(struct file *fp,
				   const char __user *data,
				   size_t count, loff_t *off)
{
	u32 __iomem *piobuf;
	u32 plen, clen, pbufn;
	struct ipath_diag_pkt odp;	/* legacy (old) user struct */
	struct ipath_diag_xpkt dp;	/* extended (new) user struct */
	u32 *tmpbuf = NULL;
	struct ipath_devdata *dd;
	ssize_t ret = 0;
	u64 val;
	u32 l_state, lt_state; /* LinkState, LinkTrainingState */

	if (count < sizeof(odp)) {
		ret = -EINVAL;
		goto bail;
	}

	/* Copy into whichever struct matches the given size. */
	if (count == sizeof(dp)) {
		if (copy_from_user(&dp, data, sizeof(dp))) {
			ret = -EFAULT;
			goto bail;
		}
	} else if (copy_from_user(&odp, data, sizeof(odp))) {
		ret = -EFAULT;
		goto bail;
	}

	/*
	 * Due to padding/alignment issues (lessened with new struct)
	 * the old and new structs are the same length. We need to
	 * disambiguate them, which we can do because odp.len has never
	 * been less than the total of LRH+BTH+DETH so far, while
	 * dp.unit (same offset) unit is unlikely to get that high.
	 * Similarly, dp.data, the pointer to user at the same offset
	 * as odp.unit, is almost certainly at least one (512byte)page
	 * "above" NULL. The if-block below can be omitted if compatibility
	 * between a new driver and older diagnostic code is unimportant.
	 * compatibility the other direction (new diags, old driver) is
	 * handled in the diagnostic code, with a warning.
	 */
	/*
	 * NOTE(review): if count matched sizeof(odp) but not sizeof(dp)
	 * above, dp is still uninitialized when inspected here — confirm
	 * the two struct sizes are always equal as the comment asserts.
	 */
	if (dp.unit >= 20 && dp.data < 512) {
		/* very probable version mismatch. Fix it up */
		memcpy(&odp, &dp, sizeof(odp));
		/* We got a legacy dp, copy elements to dp */
		dp.unit = odp.unit;
		dp.data = odp.data;
		dp.len = odp.len;
		dp.pbc_wd = 0; /* Indicate we need to compute PBC wd */
	}

	/* send count must be an exact number of dwords */
	if (dp.len & 3) {
		ret = -EINVAL;
		goto bail;
	}

	clen = dp.len >> 2;

	dd = ipath_lookup(dp.unit);
	if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
	    !dd->ipath_kregbase) {
		ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
			   dp.unit);
		ret = -ENODEV;
		goto bail;
	}

	if (ipath_diag_inuse && !diag_set_link &&
	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
		diag_set_link = 1;
		ipath_cdbg(VERBOSE, "Trying to set to set link active for "
			   "diag pkt\n");
		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
	}

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
		ret = -ENODEV;
		goto bail;
	}

	/*
	 * Want to skip check for l_state if using custom PBC,
	 * because we might be trying to force an SM packet out.
	 * first-cut, skip _all_ state checking in that case.
	 */
	val = ipath_ib_state(dd, dd->ipath_lastibcstat);
	lt_state = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
	l_state = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
	if (!dp.pbc_wd && (lt_state != INFINIPATH_IBCS_LT_STATE_LINKUP ||
	    (val != dd->ib_init && val != dd->ib_arm &&
	     val != dd->ib_active))) {
		ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
			   dd->ipath_unit, (unsigned long long) val);
		ret = -EINVAL;
		goto bail;
	}

	/* need total length before first word written */
	/* +1 word is for the qword padding */
	plen = sizeof(u32) + dp.len;

	if ((plen + 4) > dd->ipath_ibmaxlen) {
		ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
			  plen - 4, dd->ipath_ibmaxlen);
		ret = -EINVAL;
		goto bail;	/* before writing pbc */
	}

	tmpbuf = vmalloc(plen);
	if (!tmpbuf) {
		dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
			 "failing\n");
		ret = -ENOMEM;
		goto bail;
	}

	if (copy_from_user(tmpbuf,
			   (const void __user *) (unsigned long) dp.data,
			   dp.len)) {
		ret = -EFAULT;
		goto bail;
	}

	plen >>= 2;		/* in dwords */

	piobuf = ipath_getpiobuf(dd, plen, &pbufn);
	if (!piobuf) {
		ipath_cdbg(VERBOSE, "No PIO buffers avail unit for %u\n",
			   dd->ipath_unit);
		ret = -EBUSY;
		goto bail;
	}

	/* disarm it just to be extra sure */
	ipath_disarm_piobufs(dd, pbufn, 1);

	if (ipath_debug & __IPATH_PKTDBG)
		ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
			   dd->ipath_unit, plen - 1, pbufn);

	/* No custom PBC word supplied: use the packet length. */
	if (dp.pbc_wd == 0)
		dp.pbc_wd = plen;
	writeq(dp.pbc_wd, piobuf);
	/*
	 * Copy all by the trigger word, then flush, so it's written
	 * to chip before trigger word, then write trigger word, then
	 * flush again, so packet is sent.
	 */
	if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
		ipath_flush_wc();
		__iowrite32_copy(piobuf + 2, tmpbuf, clen - 1);
		ipath_flush_wc();
		__raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
	} else
		__iowrite32_copy(piobuf + 2, tmpbuf, clen);

	ipath_flush_wc();

	ret = sizeof(dp);

bail:
	vfree(tmpbuf);	/* vfree(NULL) is a no-op on the early-error paths */
	return ret;
}
/* Store the 64-bit @val to MMIO address @addr with a single writeq(). */
static inline void write64(u64 val, volatile void __iomem *addr)
{
	writeq(val, addr);
}
/**
 * ipath_diagpkt_write - write an IB packet
 * @fp: the diag data device file pointer
 * @data: ipath_diag_pkt structure saying where to get the packet
 * @count: size of data to write
 * @off: unused by this code
 */
static ssize_t ipath_diagpkt_write(struct file *fp,
				   const char __user *data,
				   size_t count, loff_t *off)
{
	u32 __iomem *piobuf;
	u32 plen, clen, pbufn;
	struct ipath_diag_pkt dp;
	u32 *tmpbuf = NULL;
	struct ipath_devdata *dd;
	ssize_t ret = 0;
	u64 val;

	if (count < sizeof(dp)) {
		ret = -EINVAL;
		goto bail;
	}

	if (copy_from_user(&dp, data, sizeof(dp))) {
		ret = -EFAULT;
		goto bail;
	}

	/* send count must be an exact number of dwords */
	if (dp.len & 3) {
		ret = -EINVAL;
		goto bail;
	}

	clen = dp.len >> 2;

	dd = ipath_lookup(dp.unit);
	if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
	    !dd->ipath_kregbase) {
		ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
			   dp.unit);
		ret = -ENODEV;
		goto bail;
	}

	if (ipath_diag_inuse && !diag_set_link &&
	    !(dd->ipath_flags & IPATH_LINKACTIVE)) {
		diag_set_link = 1;
		ipath_cdbg(VERBOSE, "Trying to set to set link active for "
			   "diag pkt\n");
		ipath_set_linkstate(dd, IPATH_IB_LINKARM);
		ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
	}

	if (!(dd->ipath_flags & IPATH_INITTED)) {
		/* no hardware, freeze, etc. */
		ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
		ret = -ENODEV;
		goto bail;
	}

	/* Link must be in INIT, ARM or ACTIVE to accept a packet. */
	val = dd->ipath_lastibcstat & IPATH_IBSTATE_MASK;
	if (val != IPATH_IBSTATE_INIT && val != IPATH_IBSTATE_ARM &&
	    val != IPATH_IBSTATE_ACTIVE) {
		ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
			   dd->ipath_unit, (unsigned long long) val);
		ret = -EINVAL;
		goto bail;
	}

	/* need total length before first word written */
	/* +1 word is for the qword padding */
	plen = sizeof(u32) + dp.len;

	if ((plen + 4) > dd->ipath_ibmaxlen) {
		ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
			  plen - 4, dd->ipath_ibmaxlen);
		ret = -EINVAL;
		goto bail;	/* before writing pbc */
	}

	tmpbuf = vmalloc(plen);
	if (!tmpbuf) {
		dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
			 "failing\n");
		ret = -ENOMEM;
		goto bail;
	}

	if (copy_from_user(tmpbuf,
			   (const void __user *) (unsigned long) dp.data,
			   dp.len)) {
		ret = -EFAULT;
		goto bail;
	}

	piobuf = ipath_getpiobuf(dd, &pbufn);
	if (!piobuf) {
		ipath_cdbg(VERBOSE, "No PIO buffers avail unit for %u\n",
			   dd->ipath_unit);
		ret = -EBUSY;
		goto bail;
	}

	plen >>= 2;		/* in dwords */

	if (ipath_debug & __IPATH_PKTDBG)
		ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
			   dd->ipath_unit, plen - 1, pbufn);

	/* we have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order */
	writeq(plen, piobuf);
	ipath_flush_wc();
	/* copy all by the trigger word, then flush, so it's written
	 * to chip before trigger word, then write trigger word, then
	 * flush again, so packet is sent. */
	__iowrite32_copy(piobuf + 2, tmpbuf, clen - 1);
	ipath_flush_wc();
	__raw_writel(tmpbuf[clen - 1], piobuf + clen + 1);
	ipath_flush_wc();

	ret = sizeof(dp);

bail:
	vfree(tmpbuf);	/* vfree(NULL) is a no-op on the early-error paths */
	return ret;
}