/*
 * vnic_dev_cmd - issue a firmware devcmd through the memory-mapped
 * devcmd register window and poll for completion.
 *
 * @vdev: device whose devcmd window is used
 * @cmd:  devcmd opcode
 * @a0:   in/out argument register 0 (written for _CMD_DIR_WRITE
 *        commands, read back for _CMD_DIR_READ commands)
 * @a1:   in/out argument register 1, same direction rules as @a0
 * @wait: poll budget, in units of 100 usec
 *
 * Returns 0 on success, -EBUSY if the window is already busy,
 * -ETIMEDOUT if the poll budget is exhausted, or a negative host
 * errno translated from the firmware's error code.
 */
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	int delay;
	u32 status;
	u32 fw_err;
	/* convert from fw's version of error.h to host's version */
	static const int dev_cmd_err[] = {
		0,	/* ERR_SUCCESS */
		EINVAL,	/* ERR_EINVAL */
		EFAULT,	/* ERR_EFAULT */
		EPERM,	/* ERR_EPERM */
		EBUSY,	/* ERR_EBUSY */
	};
	int err;

	status = ioread32(&devcmd->status);
	if (status & STAT_BUSY) {
		printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		writeq(*a0, &devcmd->args[0]);
		writeq(*a1, &devcmd->args[1]);
		wmb();	/* args must reach the device before the cmd write */
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				fw_err = (u32)readq(&devcmd->args[0]);
				/* Fix: bound-check the firmware error code
				 * before indexing the translation table; an
				 * unexpected code previously read out of
				 * bounds. Unknown codes map to EIO.
				 */
				if (fw_err < sizeof(dev_cmd_err) /
				    sizeof(dev_cmd_err[0]))
					err = dev_cmd_err[fw_err];
				else
					err = EIO;
				printk(KERN_ERR "Error %d devcmd %d\n",
					err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();	/* read results only after !BUSY seen */
				*a0 = readq(&devcmd->args[0]);
				*a1 = readq(&devcmd->args[1]);
			}

			return 0;
		}
	}

	printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
/*
 * vnic_dev_cmd - issue a firmware devcmd through the memory-mapped
 * devcmd register window and poll for completion.
 *
 * @vdev: device whose devcmd window is used
 * @cmd:  devcmd opcode
 * @a0:   in/out argument register 0 (written for _CMD_DIR_WRITE
 *        commands, read back for _CMD_DIR_READ commands)
 * @a1:   in/out argument register 1, same direction rules as @a0
 * @wait: poll budget, in units of 100 usec
 *
 * Returns 0 on success, -EBUSY if the window is already busy,
 * -ETIMEDOUT if the poll budget is exhausted, or the firmware's
 * error code negated.
 */
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status & STAT_BUSY) {
		printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		writeq(*a0, &devcmd->args[0]);
		writeq(*a1, &devcmd->args[1]);
		wmb();	/* args must reach the device before the cmd write */
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				err = (int)readq(&devcmd->args[0]);
				/* CMD_CAPABILITY probes are expected to fail
				 * with ERR_ECMDUNKNOWN on older firmware;
				 * don't log those. */
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					printk(KERN_ERR "Error %d devcmd %d\n",
						err, _CMD_N(cmd));
				/* Fix: negate the firmware code so this path
				 * follows the same negative-error convention
				 * as -EBUSY/-ETIMEDOUT; it was previously
				 * returned positive. */
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();	/* read results only after !BUSY seen */
				*a0 = readq(&devcmd->args[0]);
				*a1 = readq(&devcmd->args[1]);
			}

			return 0;
		}
	}

	printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) { struct devcmd2_controller *dc2c = vdev->devcmd2; struct devcmd2_result *result; u8 color; unsigned int i; int delay; int err; u32 fetch_index; u32 posted; u32 new_posted; posted = ioread32(&dc2c->wq_ctrl->posted_index); fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index); if (posted == 0xFFFFFFFF || fetch_index == 0xFFFFFFFF) { /* Hardware surprise removal: return error */ pr_err("%s: devcmd2 invalid posted or fetch index on cmd %d\n", pci_name(vdev->pdev), _CMD_N(cmd)); pr_err("%s: fetch index: %u, posted index: %u\n", pci_name(vdev->pdev), fetch_index, posted); return -ENODEV; } new_posted = (posted + 1) % DEVCMD2_RING_SIZE; if (new_posted == fetch_index) { pr_err("%s: devcmd2 wq full while issuing cmd %d\n", pci_name(vdev->pdev), _CMD_N(cmd)); pr_err("%s: fetch index: %u, posted index: %u\n", pci_name(vdev->pdev), fetch_index, posted); return -EBUSY; } dc2c->cmd_ring[posted].cmd = cmd; dc2c->cmd_ring[posted].flags = 0; if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT; if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { for (i = 0; i < VNIC_DEVCMD_NARGS; i++) dc2c->cmd_ring[posted].args[i] = vdev->args[i]; } /* Adding write memory barrier prevents compiler and/or CPU * reordering, thus avoiding descriptor posting before * descriptor is initialized. Otherwise, hardware can read * stale descriptor fields. */ wmb(); iowrite32(new_posted, &dc2c->wq_ctrl->posted_index); if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) return 0; result = dc2c->result + dc2c->next_result; color = dc2c->color; dc2c->next_result++; if (dc2c->next_result == dc2c->result_size) { dc2c->next_result = 0; dc2c->color = dc2c->color ? 
0 : 1; } for (delay = 0; delay < wait; delay++) { udelay(100); if (result->color == color) { if (result->error) { err = -(int) result->error; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) pr_err("%s:Error %d devcmd %d\n", pci_name(vdev->pdev), err, _CMD_N(cmd)); return err; } if (_CMD_DIR(cmd) & _CMD_DIR_READ) { rmb(); /*prevent reorder while reding result*/ for (i = 0; i < VNIC_DEVCMD_NARGS; i++) vdev->args[i] = result->results[i]; } return 0; } } pr_err("%s:Timed out devcmd %d\n", pci_name(vdev->pdev), _CMD_N(cmd)); return -ETIMEDOUT; }
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) { struct vnic_devcmd __iomem *devcmd = vdev->devcmd; unsigned int i; int delay; u32 status; int err; status = ioread32(&devcmd->status); if (status == 0xFFFFFFFF) { /* PCI-e target device is gone */ return -ENODEV; } if (status & STAT_BUSY) { pr_err("Busy devcmd %d\n", _CMD_N(cmd)); return -EBUSY; } if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) { for (i = 0; i < VNIC_DEVCMD_NARGS; i++) writeq(vdev->args[i], &devcmd->args[i]); wmb(); } iowrite32(cmd, &devcmd->cmd); if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) return 0; for (delay = 0; delay < wait; delay++) { udelay(100); status = ioread32(&devcmd->status); if (status == 0xFFFFFFFF) { /* PCI-e target device is gone */ return -ENODEV; } if (!(status & STAT_BUSY)) { if (status & STAT_ERROR) { err = (int)readq(&devcmd->args[0]); if (err == ERR_EINVAL && cmd == CMD_CAPABILITY) return -err; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) pr_err("Error %d devcmd %d\n", err, _CMD_N(cmd)); return -err; } if (_CMD_DIR(cmd) & _CMD_DIR_READ) { rmb(); for (i = 0; i < VNIC_DEVCMD_NARGS; i++) vdev->args[i] = readq(&devcmd->args[i]); } return 0; } } pr_err("Timedout devcmd %d\n", _CMD_N(cmd)); return -ETIMEDOUT; }
/*
 * _vnic_dev_cmd - run one firmware devcmd via the memory-mapped
 * register window and poll for completion.  Multi-platform variant:
 * compiled out to a no-op on CONFIG_MIPS/MGMT_VNIC builds, and
 * selects pci_name()-prefixed log messages on non-Windows builds.
 *
 * @vdev: device owning the devcmd window; arguments are taken from,
 *        and results written back to, vdev->args[]
 * @cmd:  devcmd opcode
 * @wait: poll budget, in units of 100 usec
 *
 * Returns 0 on success, -ENODEV if the PCI-e device disappears,
 * -EBUSY if the window is busy, -ETIMEDOUT on poll exhaustion, or
 * the firmware's error code negated.
 */
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
#if defined(CONFIG_MIPS) || defined(MGMT_VNIC)
	/* No devcmd register window on these builds; report success. */
	return 0;
#else
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
#ifndef __WINDOWS__
		pr_err("%s: Busy devcmd %d\n",
			pci_name(vdev->pdev), _CMD_N(cmd));
#else
		pr_err("Busy devcmd %d\n", _CMD_N(cmd));
#endif
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		/* arguments must land before the command register write */
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	/* Fire-and-forget commands produce no completion to poll for. */
	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {
			if (status & STAT_ERROR) {
				/* fw error code comes back in args[0];
				 * returned negated.  CMD_CAPABILITY failures
				 * are expected while probing, so not logged.
				 */
				err = -(int)readq(&devcmd->args[0]);
				if (cmd != CMD_CAPABILITY)
#ifndef __WINDOWS__
					pr_err("%s: Devcmd %d failed "
						"with error code %d\n",
						pci_name(vdev->pdev),
						_CMD_N(cmd), err);
#else
					pr_err("Devcmd %d failed "
						"with error code %d\n",
						_CMD_N(cmd), err);
#endif
				return err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				/* results are read only after !BUSY seen */
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] =
						readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

#ifndef __WINDOWS__
	pr_err("%s: Timedout devcmd %d\n",
		pci_name(vdev->pdev), _CMD_N(cmd));
#else
	pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
#endif
	return -ETIMEDOUT;
#endif
}
static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) { struct devcmd2_controller *dc2c = vdev->devcmd2; struct devcmd2_result *result; u8 color; unsigned int i; int delay, err; u32 fetch_index, new_posted; u32 posted = dc2c->posted; fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index); if (fetch_index == 0xFFFFFFFF) return -ENODEV; new_posted = (posted + 1) % DEVCMD2_RING_SIZE; if (new_posted == fetch_index) { vdev_neterr(vdev, "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n", _CMD_N(cmd), fetch_index, posted); return -EBUSY; } dc2c->cmd_ring[posted].cmd = cmd; dc2c->cmd_ring[posted].flags = 0; if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT)) dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT; if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) for (i = 0; i < VNIC_DEVCMD_NARGS; i++) dc2c->cmd_ring[posted].args[i] = vdev->args[i]; /* Adding write memory barrier prevents compiler and/or CPU reordering, * thus avoiding descriptor posting before descriptor is initialized. * Otherwise, hardware can read stale descriptor fields. */ wmb(); iowrite32(new_posted, &dc2c->wq_ctrl->posted_index); dc2c->posted = new_posted; if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) return 0; result = dc2c->result + dc2c->next_result; color = dc2c->color; dc2c->next_result++; if (dc2c->next_result == dc2c->result_size) { dc2c->next_result = 0; dc2c->color = dc2c->color ? 0 : 1; } for (delay = 0; delay < wait; delay++) { if (result->color == color) { if (result->error) { err = result->error; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) vdev_neterr(vdev, "Error %d devcmd %d\n", err, _CMD_N(cmd)); return -err; } if (_CMD_DIR(cmd) & _CMD_DIR_READ) for (i = 0; i < VNIC_DEVCMD2_NARGS; i++) vdev->args[i] = result->results[i]; return 0; } udelay(100); } vdev_neterr(vdev, "devcmd %d timed out\n", _CMD_N(cmd)); return -ETIMEDOUT; }