/*
 * Manage a single eventfd trigger context according to the data type
 * carried in @flags (VFIO_IRQ_SET_DATA_*).
 *
 * DATA_NONE and DATA_BOOL act as loopback tests against an already
 * registered eventfd; DATA_EVENTFD installs (fd >= 0), replaces, or
 * releases (fd == -1) the trigger.
 *
 * Returns 0 on success, -EINVAL on bad arguments, or the error from
 * eventfd_ctx_fdget().
 */
static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
					   unsigned int count, uint32_t flags,
					   void *data)
{
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		/* Loopback testing requires a registered trigger */
		if (!*ctx)
			return -EINVAL;
		if (count) {
			/* non-zero count fires the trigger... */
			eventfd_signal(*ctx, 1);
		} else {
			/* ...count == 0 tears it down */
			eventfd_ctx_put(*ctx);
			*ctx = NULL;
		}
		return 0;
	}

	if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger;

		if (!count)
			return -EINVAL;

		trigger = *(uint8_t *)data;
		if (trigger && *ctx)
			eventfd_signal(*ctx, 1);
		return 0;
	}

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t fd;

		if (!count)
			return -EINVAL;

		fd = *(int32_t *)data;
		if (fd == -1) {
			/* -1 releases any existing trigger */
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = NULL;
		} else if (fd >= 0) {
			struct eventfd_ctx *efdctx = eventfd_ctx_fdget(fd);

			if (IS_ERR(efdctx))
				return PTR_ERR(efdctx);
			/* Swap in the new context, dropping any old one */
			if (*ctx)
				eventfd_ctx_put(*ctx);
			*ctx = efdctx;
		}
		/* fd < -1 is silently accepted, as in the original contract */
		return 0;
	}

	return -EINVAL;
}
/*
 * Configure MSI or MSI-X triggering for @count vectors starting at @start.
 *
 * With DATA_EVENTFD the vectors are (re)bound to user supplied eventfds,
 * enabling the MSI/MSI-X block first if it is not the current irq_type.
 * With DATA_NONE/DATA_BOOL, already-registered triggers are fired as a
 * loopback test.  A DATA_NONE call with count == 0 disables the block.
 *
 * Returns 0 on success or a negative errno.
 */
static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int i;
	bool msix = (index == VFIO_PCI_MSIX_IRQ_INDEX) ? true : false;

	/* count == 0 with DATA_NONE means disable the whole MSI/MSI-X block */
	if (irq_is(vdev, index) && !count && (flags & VFIO_IRQ_SET_DATA_NONE)) {
		vfio_msi_disable(vdev, msix);
		return 0;
	}

	/* Only valid if this index is active or no irq type is set up yet */
	if (!(irq_is(vdev, index) || is_irq_none(vdev)))
		return -EINVAL;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int32_t *fds = data;
		int ret;

		/* Already enabled for this index: just (re)bind the vectors */
		if (vdev->irq_type == index)
			return vfio_msi_set_block(vdev, start, count, fds,
						  msix);

		/* Enable enough vectors to cover the requested range */
		ret = vfio_msi_enable(vdev, start + count, msix);
		if (ret)
			return ret;

		ret = vfio_msi_set_block(vdev, start, count, fds, msix);
		/* Roll back the enable if binding failed */
		if (ret)
			vfio_msi_disable(vdev, msix);

		return ret;
	}

	/* Loopback paths below require active vectors within range */
	if (!irq_is(vdev, index) || start + count > vdev->num_ctx)
		return -EINVAL;

	for (i = start; i < start + count; i++) {
		if (!vdev->ctx[i].trigger)
			continue;
		if (flags & VFIO_IRQ_SET_DATA_NONE) {
			eventfd_signal(vdev->ctx[i].trigger, 1);
		} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
			uint8_t *bools = data;
			/* bools[] is indexed relative to @start */
			if (bools[i - start])
				eventfd_signal(vdev->ctx[i].trigger, 1);
		}
	}
	return 0;
}
/*
 * MSI/MSI-X
 */
static irqreturn_t vfio_msihandler(int irq, void *arg)
{
	/* The dev_id cookie registered with request_irq() is the eventfd */
	struct eventfd_ctx *ctx = arg;

	eventfd_signal(ctx, 1);

	return IRQ_HANDLED;
}
/* After we've used one of their buffers, we tell them about it. We'll then
 * want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
	struct vring_used_elem *used;

	/* The virtqueue contains a ring of used buffers. Get a pointer to the
	 * next entry in that used ring. */
	used = &vq->used->ring[vq->last_used_idx % vq->num];
	/* Publish the descriptor head and the written length to the guest;
	 * the used ring lives in guest/user memory, hence put_user(). */
	if (put_user(head, &used->id)) {
		vq_err(vq, "Failed to write used id");
		return -EFAULT;
	}
	if (put_user(len, &used->len)) {
		vq_err(vq, "Failed to write used len");
		return -EFAULT;
	}
	/* Make sure buffer is written before we update index. */
	smp_wmb();
	if (put_user(vq->last_used_idx + 1, &vq->used->idx)) {
		vq_err(vq, "Failed to increment used idx");
		return -EFAULT;
	}
	/* Dirty-page logging for live migration, if enabled */
	if (unlikely(vq->log_used)) {
		/* Make sure data is seen before log. */
		smp_wmb();
		/* Log used ring entry write. */
		log_write(vq->log_base,
			  vq->log_addr +
			  ((void *)used - (void *)vq->used),
			  sizeof *used);
		/* Log used index update. */
		log_write(vq->log_base,
			  vq->log_addr + offsetof(struct vring_used, idx),
			  sizeof vq->used->idx);
		/* Kick the log eventfd so userspace re-reads the dirty log */
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	/* NOTE(review): the function appears truncated in this chunk — the
	 * in-memory vq->last_used_idx increment, the final return, and the
	 * closing brace are not visible here; confirm against the full file. */
/*
 * Fire every eventfd registered on this IOMMU object's event list.
 * NOTE(review): assumes the caller holds whatever protects event_list —
 * confirm the locking convention at the call sites.
 */
void eventfd_notification(struct iommu *obj)
{
	struct iommu_event_ntfy *reg;

	list_for_each_entry(reg, &obj->event_list, list)
		eventfd_signal(reg->evt_ctx, 1);
}
/*
 * INTx
 */
static void vfio_send_intx_eventfd(void *opaque, void *unused)
{
	struct vfio_pci_device *vdev = opaque;

	/* Nothing to do unless INTx is active and not masked by the user */
	if (unlikely(!is_intx(vdev) || vdev->virq_disabled))
		return;

	/* INTx always uses the single context slot 0 */
	eventfd_signal(vdev->ctx[0].trigger, 1);
}
static void irq_work_func(struct work_struct *work) { irq_work_t * irq_work = (irq_work_t *)work; struct file * efd_file = NULL; mailbox_lo = (int)read_mailbox_lo(); mailbox_hi = (int)read_mailbox_hi(); // current file is always used at the time of the interrupt if (0 < mailbox_notifier) { rcu_read_lock(); efd_file = fcheck_files(irq_work->userspace_task->files, mailbox_notifier); rcu_read_unlock(); // printk(KERN_INFO "EPIPHANY_IOC_MB_NOTIFIER: %p\n", efd_file); efd_ctx = eventfd_ctx_fileget(efd_file); if (!efd_ctx) { printk(KERN_ERR "EPIPHANY_IOC_MB_NOTIFIER: failed to get eventfd file\n"); // TODO consider setting mailbox_notifier back to default // this might complicate the user side // mailbox_notifier = -1; return; } // send the event eventfd_signal(efd_ctx, 1); } }
/*
 * Set the device error IRQ (VFIO_PCI_ERR_IRQ_INDEX) eventfd trigger.
 *
 * DATA_NONE and DATA_BOOL act as loopback tests, firing any registered
 * err_trigger; DATA_EVENTFD installs (fd >= 0), replaces, or releases
 * (fd == -1) the trigger.
 *
 * Fix: the old code read *(int32_t *)data before looking at @flags, but
 * for DATA_NONE the caller may supply no payload at all, so that read
 * could dereference a NULL/short buffer.  The fd is now read only on the
 * DATA_EVENTFD path, once @data is known to carry one.
 *
 * Returns 0 on success or a negative errno.
 */
static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
				    unsigned index, unsigned start,
				    unsigned count, uint32_t flags, void *data)
{
	int32_t fd;

	if ((index != VFIO_PCI_ERR_IRQ_INDEX) ||
	    !(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
		return -EINVAL;

	/* DATA_NONE/DATA_BOOL enables loopback testing */
	if (flags & VFIO_IRQ_SET_DATA_NONE) {
		if (vdev->err_trigger)
			eventfd_signal(vdev->err_trigger, 1);
		return 0;
	} else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
		uint8_t trigger = *(uint8_t *)data;

		if (trigger && vdev->err_trigger)
			eventfd_signal(vdev->err_trigger, 1);
		return 0;
	}

	/* Handle SET_DATA_EVENTFD: only now is @data known to hold an fd */
	fd = *(int32_t *)data;

	if (fd == -1) {
		/* Release any existing trigger */
		if (vdev->err_trigger)
			eventfd_ctx_put(vdev->err_trigger);
		vdev->err_trigger = NULL;
		return 0;
	} else if (fd >= 0) {
		struct eventfd_ctx *efdctx;

		efdctx = eventfd_ctx_fdget(fd);
		if (IS_ERR(efdctx))
			return PTR_ERR(efdctx);
		/* Swap in the new context, dropping any old one */
		if (vdev->err_trigger)
			eventfd_ctx_put(vdev->err_trigger);
		vdev->err_trigger = efdctx;
		return 0;
	} else
		return -EINVAL;
}
/*
 * Record dirty-log entries for up to @len bytes described by @log[],
 * then kick the log eventfd so userspace re-reads the dirty bitmap.
 *
 * Fix: the eventfd_signal() call had been hoisted out of the "!len"
 * success path to sit just before BUG(), so a normal, fully-consumed
 * write never notified vq->log_ctx, while the only path that did notify
 * then crashed.  Signal inside the completion branch instead (matching
 * the upstream vhost implementation).
 *
 * Returns 0 on success or a negative errno from log_write().
 */
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();
	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);

		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len) {
			/* All of @len accounted for: notify userspace */
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx, 1);
			return 0;
		}
	}
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}