void SA1::tick() { scheduler.addclocks_cop(2); if(++status.tick_counter == 0) scheduler.sync_copcpu(); //adjust counters: //note that internally, status counters are in clocks; //whereas MMIO register counters are in dots (4 clocks = 1 dot) if(mmio.hvselb == 0) { //HV timer status.hcounter += 2; if(status.hcounter >= 1364) { status.hcounter = 0; if(++status.vcounter >= status.scanlines) status.vcounter = 0; } } else { //linear timer status.hcounter += 2; status.vcounter += (status.hcounter >> 11); status.hcounter &= 0x07ff; status.vcounter &= 0x01ff; } //test counters for timer IRQ switch((mmio.ven << 1) + (mmio.hen << 0)) { case 0: break; case 1: if(status.hcounter == (mmio.hcnt << 2)) trigger_irq(); break; case 2: if(status.vcounter == mmio.vcnt && status.hcounter == 0) trigger_irq(); break; case 3: if(status.vcounter == mmio.hcnt && status.hcounter == (mmio.hcnt << 2)) trigger_irq(); break; } }
static void net_input(struct virtqueue *vq) { int len; unsigned int head, out, in; struct iovec iov[vq->vring.num]; struct net_info *net_info = vq->dev->priv; head = wait_for_vq_desc(vq, iov, &out, &in); if (out) errx(1, "Output buffers in net input queue?"); if (vq->pending_used && will_block(net_info->tunfd)) trigger_irq(vq); len = readv(net_info->tunfd, iov, in); if (len <= 0) warn("Failed to read from tun (%d).", errno); add_used(vq, head, len); }
/* * This handles packets coming in from the tun device to our Guest. Like all * service routines, it gets called again as soon as it returns, so you don't * see a while(1) loop here. */ static void net_input(struct virtqueue *vq) { int len; unsigned int head, out, in; struct iovec iov[vq->vring.num]; struct net_info *net_info = vq->dev->priv; /* * Get a descriptor to write an incoming packet into. This will also * send an interrupt if they're out of descriptors. */ head = wait_for_vq_desc(vq, iov, &out, &in); if (out) errx(1, "Output buffers in net input queue?"); /* * If it looks like we'll block reading from the tun device, send them * an interrupt. */ if (vq->pending_used && will_block(net_info->tunfd)) trigger_irq(vq); /* * Read in the packet. This is where we normally wait (when there's no * incoming network traffic). */ len = readv(net_info->tunfd, iov, in); if (len <= 0) warn("Failed to read from tun (%d).", errno); /* * Mark that packet buffer as used, but don't interrupt here. We want * to wait until we've done as much work as we can. */ add_used(vq, head, len); }
/*
 * Convenience helper: retire a used buffer and interrupt the Guest right
 * away, instead of batching the notification for later.
 */
static void add_used_and_trigger(struct virtqueue *vq, unsigned head, int len)
{
	add_used(vq, head, len);
	trigger_irq(vq);
}
/*
 * Find the first available descriptor chain in the virtqueue and convert it
 * into an iovec (all output descriptors first, then all inputs), recording
 * how many of each there were.  Waits if nothing is queued.  Returns the
 * head descriptor index.
 */
static unsigned wait_for_vq_desc(struct virtqueue *vq,
				 struct iovec iov[],
				 unsigned int *out_num, unsigned int *in_num)
{
	unsigned int i, head, max;
	struct vring_desc *desc;
	u16 last_avail = lg_last_avail(vq);

	/* Nothing available?  We'll have to sleep until the Guest refills. */
	while (last_avail == vq->vring.avail->idx) {
		u64 event;

		/* Since we're about to sleep, flush our used buffers now. */
		trigger_irq(vq);

		/* Ask the Guest to notify us when it adds descriptors. */
		vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;

		/*
		 * They could have slipped one in as we were doing that: make
		 * sure it's written, then check again.
		 */
		mb();
		if (last_avail != vq->vring.avail->idx) {
			vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
			break;
		}

		/* Still nothing: block until the eventfd says "refilled". */
		if (read(vq->eventfd, &event, sizeof(event)) != sizeof(event))
			errx(1, "Event read failed?");

		/* We don't need to be notified again. */
		vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
	}

	/* Sanity-check the index movement (the u16 cast handles wraparound). */
	if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
		errx(1, "Guest moved used index from %u to %u",
		     last_avail, vq->vring.avail->idx);

	/*
	 * Grab the next advertised descriptor number and bump the avail
	 * index we've seen.
	 */
	head = vq->vring.avail->ring[last_avail % vq->vring.num];
	lg_last_avail(vq)++;

	/* An out-of-range descriptor number is a fatal Guest bug. */
	if (head >= vq->vring.num)
		errx(1, "Guest says index %u is available", head);

	/* No output nor input descriptors counted yet. */
	*out_num = *in_num = 0;

	max = vq->vring.num;
	desc = vq->vring.desc;
	i = head;

	/*
	 * An indirect entry points at a separate descriptor table; walk that
	 * table as if it were a normal descriptor chain.
	 */
	if (desc[i].flags & VRING_DESC_F_INDIRECT) {
		if (desc[i].len % sizeof(struct vring_desc))
			errx(1, "Invalid size for indirect buffer table");
		max = desc[i].len / sizeof(struct vring_desc);
		desc = check_pointer(desc[i].addr, desc[i].len);
		i = 0;
	}

	do {
		/* Translate this descriptor into an iovec entry. */
		iov[*out_num + *in_num].iov_len = desc[i].len;
		iov[*out_num + *in_num].iov_base = check_pointer(desc[i].addr, desc[i].len);
		if (desc[i].flags & VRING_DESC_F_WRITE)
			(*in_num)++;	/* Guest-writable: an input buffer. */
		else {
			/* Outputs are required to precede all inputs. */
			if (*in_num)
				errx(1, "Descriptor has out after in");
			(*out_num)++;
		}

		/* More entries than the table holds implies a loop. */
		if (*out_num + *in_num > max)
			errx(1, "Looped descriptor");
	} while ((i = next_desc(desc, i, max)) != max);

	return head;
}
/*
 * This looks in the virtqueue for the first available buffer, and converts
 * it to an iovec for convenient access.  Since descriptors consist of some
 * number of output then some number of input descriptors, it's actually two
 * iovecs, but we pack them into one and note how many of each there were.
 *
 * This function waits if necessary, and returns the descriptor number found.
 */
static unsigned wait_for_vq_desc(struct virtqueue *vq,
				 struct iovec iov[],
				 unsigned int *out_num, unsigned int *in_num)
{
	unsigned int i, head, max;
	struct vring_desc *desc;
	u16 last_avail = lg_last_avail(vq);

	/* There's nothing available? */
	while (last_avail == vq->vring.avail->idx) {
		u64 event;

		/*
		 * Since we're about to sleep, now is a good time to tell the
		 * Guest about what we've used up to now.
		 */
		trigger_irq(vq);

		/* OK, now we need to know about added descriptors. */
		vq->vring.used->flags &= ~VRING_USED_F_NO_NOTIFY;

		/*
		 * They could have slipped one in as we were doing that: make
		 * sure it's written, then check again.
		 */
		mb();
		if (last_avail != vq->vring.avail->idx) {
			vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
			break;
		}

		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (read(vq->eventfd, &event, sizeof(event)) != sizeof(event))
			errx(1, "Event read failed?");

		/* We don't need to be notified again. */
		vq->vring.used->flags |= VRING_USED_F_NO_NOTIFY;
	}

	/* Check it isn't doing very strange things with descriptor numbers. */
	if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num)
		errx(1, "Guest moved used index from %u to %u",
		     last_avail, vq->vring.avail->idx);

	/*
	 * Grab the next descriptor number they're advertising, and increment
	 * the index we've seen.
	 */
	head = vq->vring.avail->ring[last_avail % vq->vring.num];
	lg_last_avail(vq)++;

	/* If their number is silly, that's a fatal mistake. */
	if (head >= vq->vring.num)
		errx(1, "Guest says index %u is available", head);

	/* When we start, we have seen no input nor output descriptors. */
	*out_num = *in_num = 0;

	max = vq->vring.num;
	desc = vq->vring.desc;
	i = head;

	/*
	 * If this is an indirect entry, then this buffer contains a descriptor
	 * table which we handle as if it's any normal descriptor chain.
	 */
	if (desc[i].flags & VRING_DESC_F_INDIRECT) {
		if (desc[i].len % sizeof(struct vring_desc))
			errx(1, "Invalid size for indirect buffer table");
		max = desc[i].len / sizeof(struct vring_desc);
		desc = check_pointer(desc[i].addr, desc[i].len);
		i = 0;
	}

	do {
		/* Grab the first descriptor, and check it's OK. */
		iov[*out_num + *in_num].iov_len = desc[i].len;
		iov[*out_num + *in_num].iov_base = check_pointer(desc[i].addr, desc[i].len);
		/* If this is an input descriptor, increment that count. */
		if (desc[i].flags & VRING_DESC_F_WRITE)
			(*in_num)++;
		else {
			/*
			 * If it's an output descriptor, they're all supposed
			 * to come before any input descriptors.
			 */
			if (*in_num)
				errx(1, "Descriptor has out after in");
			(*out_num)++;
		}

		/* If we've got too many, that implies a descriptor loop. */
		if (*out_num + *in_num > max)
			errx(1, "Looped descriptor");
	} while ((i = next_desc(desc, i, max)) != max);

	return head;
}