/*
 * Reset a virtual device back to its pristine state: clear the feature
 * bits the Guest acknowledged, terminate and reap any virtqueue service
 * threads, and zero the virtqueue rings.
 */
static void reset_device(struct device *dev)
{
	struct virtqueue *vq;

	verbose("Resetting device %s\n", dev->name);

	/*
	 * Clear the acked-feature bits, which sit feature_len bytes past
	 * the start of the feature area (after the device's own bits).
	 */
	memset(get_feature_bits(dev) + dev->feature_len, 0, dev->feature_len);

	/*
	 * We're about to kill the service threads deliberately, so the
	 * SIGCHLD they raise must not be treated as a launcher failure.
	 */
	signal(SIGCHLD, SIG_IGN);

	/* Kill each virtqueue's thread (if any) and wipe its ring. */
	for (vq = dev->vq; vq; vq = vq->next) {
		if (vq->thread != (pid_t)-1) {
			/* Terminate the thread and reap it synchronously. */
			kill(vq->thread, SIGTERM);
			waitpid(vq->thread, NULL, 0);
			vq->thread = (pid_t)-1;
		}
		/* Zero the descriptor/avail/used ring area. */
		memset(vq->vring.desc, 0,
		       vring_size(vq->config.num, LGUEST_VRING_ALIGN));
		/* Forget how far we'd consumed the avail ring. */
		lg_last_avail(vq) = 0;
	}
	dev->running = false;

	/* From now on, an unexpected thread death should kill the launcher. */
	signal(SIGCHLD, (void *)kill_launcher);
}
static void reset_device(struct device *dev) { struct virtqueue *vq; verbose("Resetting device %s\n", dev->name); /* Clear any features they've acked. */ memset(get_feature_bits(dev) + dev->feature_len, 0, dev->feature_len); /* We're going to be explicitly killing threads, so ignore them. */ signal(SIGCHLD, SIG_IGN); /* Zero out the virtqueues, get rid of their threads */ for (vq = dev->vq; vq; vq = vq->next) { if (vq->thread != (pid_t)-1) { kill(vq->thread, SIGTERM); waitpid(vq->thread, NULL, 0); vq->thread = (pid_t)-1; } memset(vq->vring.desc, 0, vring_size(vq->config.num, LGUEST_VRING_ALIGN)); lg_last_avail(vq) = 0; } dev->running = false; /* Now we care if threads die. */ signal(SIGCHLD, (void *)kill_launcher); }
/*
 * Find the first available descriptor chain in the virtqueue and convert it
 * into an iovec.  Output descriptors come first, then input descriptors;
 * both are packed into iov[] with their counts returned via *out_num and
 * *in_num.  Sleeps on the virtqueue's eventfd if nothing is available.
 * Returns the head descriptor index.
 */
static unsigned wait_for_vq_desc(struct virtqueue *vq,
				 struct iovec iov[],
				 unsigned int *out_num, unsigned int *in_num)
{
	unsigned int i, head, max;
	struct vring_desc *desc;
	u16 last_avail = lg_last_avail(vq);

	/* Nothing new in the avail ring? */
	while (last_avail == vq->vring.avail->idx) {
		u64 event;

		/* About to sleep: flush our used-buffer news to the Guest. */
		trigger_irq(vq);

		/* Ask the Guest to notify us of newly added descriptors. */
		vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;

		/*
		 * They could have slipped one in as we were doing that: make
		 * sure it's written, then check again.
		 */
		mb();
		if (last_avail != vq->vring.avail->idx) {
			/* Got one after all; notifications off again. */
			vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
			break;
		}

		/* Still nothing: block on the eventfd until they notify us. */
		if (read(vq->eventfd, &event, sizeof(event)) != sizeof(event))
			errx(1, "Event read failed?");

		/* We're awake; no need for further notifications. */
		vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
	}

	/* Sanity-check: the Guest can't advertise more than ring-size entries. */
	if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
		errx(1, "Guest moved used index from %u to %u",
		     last_avail, vq->vring.avail->idx);

	/* Grab the advertised head index and advance our avail cursor. */
	head = vq->vring.avail->ring[last_avail % vq->vring.num];
	lg_last_avail(vq)++;

	/* A head outside the ring is a fatal Guest bug. */
	if (head >= vq->vring.num)
		errx(1, "Guest says index %u is available", head);

	/* Start with no output and no input buffers counted. */
	*out_num = *in_num = 0;

	max = vq->vring.num;
	desc = vq->vring.desc;
	i = head;

	/*
	 * An indirect descriptor points at a separate table of descriptors;
	 * walk that table as if it were a normal chain.
	 */
	if (desc[i].flags & VRING_DESC_F_INDIRECT) {
		if (desc[i].len % sizeof(struct vring_desc))
			errx(1, "Invalid size for indirect buffer table");

		max = desc[i].len / sizeof(struct vring_desc);
		desc = check_pointer(desc[i].addr, desc[i].len);
		i = 0;
	}

	do {
		/* Map this descriptor's buffer into our address space. */
		iov[*out_num + *in_num].iov_len = desc[i].len;
		iov[*out_num + *in_num].iov_base
			= check_pointer(desc[i].addr, desc[i].len);
		if (desc[i].flags & VRING_DESC_F_WRITE)
			/* Guest-writable buffer: it's an input for us. */
			(*in_num)++;
		else {
			/* Outputs must all precede inputs in the chain. */
			if (*in_num)
				errx(1, "Descriptor has out after in");
			(*out_num)++;
		}

		/* More entries than the table holds means a descriptor loop. */
		if (*out_num + *in_num > max)
			errx(1, "Looped descriptor");
	} while ((i = next_desc(desc, i, max)) != max);

	return head;
}
/*
 * This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function waits if necessary, and returns the descriptor number found.
 */
static unsigned wait_for_vq_desc(struct virtqueue *vq,
				 struct iovec iov[],
				 unsigned int *out_num, unsigned int *in_num)
{
	unsigned int i, head, max;
	struct vring_desc *desc;
	u16 last_avail = lg_last_avail(vq);

	/* There's nothing available? */
	while (last_avail == vq->vring.avail->idx) {
		u64 event;

		/*
		 * Since we're about to sleep, now is a good time to tell the
		 * Guest about what we've used up to now.
		 */
		trigger_irq(vq);

		/* OK, now we need to know about added descriptors. */
		vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;

		/*
		 * They could have slipped one in as we were doing that: make
		 * sure it's written, then check again.
		 */
		mb();
		if (last_avail != vq->vring.avail->idx) {
			vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
			break;
		}

		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (read(vq->eventfd, &event, sizeof(event)) != sizeof(event))
			errx(1, "Event read failed?");

		/* We don't need to be notified again. */
		vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
	}

	/* Check it isn't doing very strange things with descriptor numbers. */
	if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
		errx(1, "Guest moved used index from %u to %u",
		     last_avail, vq->vring.avail->idx);

	/*
	 * Grab the next descriptor number they're advertising, and increment
	 * the index we've seen.
	 */
	head = vq->vring.avail->ring[last_avail % vq->vring.num];
	lg_last_avail(vq)++;

	/* If their number is silly, that's a fatal mistake. */
	if (head >= vq->vring.num)
		errx(1, "Guest says index %u is available", head);

	/* When we start there are none of either input nor output. */
	*out_num = *in_num = 0;

	max = vq->vring.num;
	desc = vq->vring.desc;
	i = head;

	/*
	 * If this is an indirect entry, then this buffer contains a descriptor
	 * table which we handle as if it's any normal descriptor chain.
	 */
	if (desc[i].flags & VRING_DESC_F_INDIRECT) {
		if (desc[i].len % sizeof(struct vring_desc))
			errx(1, "Invalid size for indirect buffer table");

		max = desc[i].len / sizeof(struct vring_desc);
		desc = check_pointer(desc[i].addr, desc[i].len);
		i = 0;
	}

	do {
		/* Grab the first descriptor, and check it's OK. */
		iov[*out_num + *in_num].iov_len = desc[i].len;
		iov[*out_num + *in_num].iov_base
			= check_pointer(desc[i].addr, desc[i].len);
		/* If this is an input descriptor, increment that count. */
		if (desc[i].flags & VRING_DESC_F_WRITE)
			(*in_num)++;
		else {
			/*
			 * If it's an output descriptor, they're all supposed
			 * to come before any input descriptors.
			 */
			if (*in_num)
				errx(1, "Descriptor has out after in");
			(*out_num)++;
		}

		/* If we've got too many, that implies a descriptor loop. */
		if (*out_num + *in_num > max)
			errx(1, "Looped descriptor");
	} while ((i = next_desc(desc, i, max)) != max);

	return head;
}