int xenfb_pv_display_start(void *data) { DisplayState *ds; struct fbfront_dev *fb_dev; int kbd_fd, fb_fd; int offset = 0; unsigned long *mfns; int n = VGA_RAM_SIZE / PAGE_SIZE; int i; if (!fb_path || !kbd_path) return 0; ds = xs->ds; xs->vga_vram = data; mfns = malloc(2 * n * sizeof(*mfns)); for (i = 0; i < n; i++) mfns[i] = virtual_to_mfn(xs->vga_vram + i * PAGE_SIZE); for (i = 0; i < n; i++) mfns[n + i] = virtual_to_mfn(xs->nonshared_vram + i * PAGE_SIZE); fb_dev = init_fbfront(fb_path, mfns, ds->width, ds->height, ds->depth, ds->linesize, 2 * n); free(mfns); if (!fb_dev) { fprintf(stderr,"can't open frame buffer\n"); exit(1); } free(fb_path); if (ds->shared_buf) { offset = (void*) ds->data - xs->vga_vram; } else { offset = VGA_RAM_SIZE; ds->data = xs->nonshared_vram; } if (offset) fbfront_resize(fb_dev, ds->width, ds->height, ds->linesize, ds->depth, offset); down(&xs->kbd_sem); free(kbd_path); kbd_fd = kbdfront_open(xs->kbd_dev); qemu_set_fd_handler(kbd_fd, xenfb_kbd_handler, NULL, xs); fb_fd = fbfront_open(fb_dev); qemu_set_fd_handler(fb_fd, xenfb_fb_handler, NULL, xs); xs->fb_dev = fb_dev; return 0; }
struct fbfront_dev *fb_open(void *fb, int width, int height, int depth) { unsigned long *mfns; int linesize = width * (depth / 8); int memsize = linesize * height; int numpages = (memsize + PAGE_SIZE - 1) / PAGE_SIZE; int i; create_thread("kbdfront", kbd_thread, &kbd_sem); mfns = malloc(numpages * sizeof(*mfns)); for (i = 0; i < numpages; i++) { memset(fb + i * PAGE_SIZE, 0, PAGE_SIZE); mfns[i] = virtual_to_mfn(fb + i * PAGE_SIZE); } fb_dev = init_fbfront(NULL, mfns, width, height, depth, linesize, numpages); free(mfns); if (!fb_dev) return NULL; down(&kbd_sem); if (!kbd_dev) return NULL; return fb_dev; }
/*
 * Issue an aio: queue one asynchronous block I/O request (read or write,
 * per 'write') on the blkfront shared ring and notify the backend if
 * required. The request id carries 'aiocbp' so completion handling can
 * recover it; the granted page references are recorded in aiocbp->gref[]
 * and aiocbp->n for later release.
 */
void blkfront_aio(struct blkfront_aiocb *aiocbp, int write)
{
    struct blkfront_dev *dev = aiocbp->aio_dev;
    struct blkif_request *req;
    RING_IDX i;
    int notify;
    int n, j;
    uintptr_t start, end;

    // Can't io at non-sector-aligned location
    ASSERT(!(aiocbp->aio_offset & (dev->info.sector_size-1)));
    // Can't io non-sector-sized amounts
    ASSERT(!(aiocbp->aio_nbytes & (dev->info.sector_size-1)));
    // Can't io non-sector-aligned buffer
    ASSERT(!((uintptr_t) aiocbp->aio_buf & (dev->info.sector_size-1)));

    /* Page-align the buffer span and count the pages (= ring segments)
     * it touches; the count is also saved for the completion path. */
    start = (uintptr_t)aiocbp->aio_buf & PAGE_MASK;
    end = ((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes + PAGE_SIZE - 1) & PAGE_MASK;
    aiocbp->n = n = (end - start) / PAGE_SIZE;

    /* qemu's IDE max multsect is 16 (8KB) and SCSI max DMA was set to 32KB,
     * so max 44KB can't happen */
    ASSERT(n <= BLKIF_MAX_SEGMENTS_PER_REQUEST);

    /* Block until the ring has a free request slot, then claim it. */
    blkfront_wait_slot(dev);
    i = dev->ring.req_prod_pvt;
    req = RING_GET_REQUEST(&dev->ring, i);

    req->operation = write ? BLKIF_OP_WRITE : BLKIF_OP_READ;
    req->nr_segments = n;
    req->handle = dev->handle;
    req->id = (uintptr_t) aiocbp;
    /* sector_number is in 512-byte units per the blkif protocol. */
    req->sector_number = aiocbp->aio_offset / 512;

    /* Default every segment to a full page of 512-byte sectors ... */
    for (j = 0; j < n; j++) {
        req->seg[j].first_sect = 0;
        req->seg[j].last_sect = PAGE_SIZE / 512 - 1;
    }
    /* ... then trim the first and last segments to the actual buffer
     * start/end offsets within their pages. */
    req->seg[0].first_sect = ((uintptr_t)aiocbp->aio_buf & ~PAGE_MASK) / 512;
    req->seg[n-1].last_sect = (((uintptr_t)aiocbp->aio_buf + aiocbp->aio_nbytes - 1) & ~PAGE_MASK) / 512;

    for (j = 0; j < n; j++) {
        uintptr_t data = start + j * PAGE_SIZE;
        if (!write) {
            /* Trigger CoW if needed */
            *(char*)(data + (req->seg[j].first_sect << 9)) = 0;
            barrier();
        }
        /* Grant the backend access to each page; writable only for
         * reads (the backend writes the data into our buffer). */
        aiocbp->gref[j] = req->seg[j].gref = gnttab_grant_access(dev->dom, virtual_to_mfn(data), write);
    }

    dev->ring.req_prod_pvt = i + 1;

    /* Make the request body visible before publishing the producer index. */
    wmb();
    RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&dev->ring, notify);

    if(notify) notify_remote_via_evtchn(dev->evtchn);
}
/*
 * Fill TX ring requests for every page touched by the pbuf chain 'p',
 * granting each page read-only to the backend. 'tx' is the first
 * (already claimed) request; additional slots are pulled with
 * netfront_get_page() and counted via '*slots'. Returns the last
 * request written, so the caller can finalize/close the chain.
 *
 * NOTE(review): assumes the caller pre-checked that enough TX slots are
 * available — netfront_get_page() returning NULL is treated as a bug here.
 */
static inline struct netif_tx_request *netfront_make_txreqs(struct netfront_dev *dev,
                                                            struct netif_tx_request *tx,
                                                            struct pbuf *p, int *slots)
{
    struct netif_tx_request *first_tx = tx;
    struct net_txbuffer *buf;
    struct pbuf *first_p = p;
    struct pbuf *q;
    unsigned long tot_len;
    unsigned long s;
    void *page;
    int q_slots;
    size_t plen, left;

    tot_len = 0;
    buf = &dev->tx_buffers[tx->id];

    /* map pages of pbuf */
    for (q = p; q != NULL; q = q->next) {
        left = q->len;
        /* Number of pages (= ring slots) this pbuf's payload spans. */
        q_slots = (int) _count_pages(q->payload, q->len);

        /* grant pages of pbuf */
        for (s = 0; s < q_slots; ++s) {
            /* read only mapping */
            page = (void *)((((unsigned long) q->payload) & PAGE_MASK) + (s * PAGE_SIZE));
            tx->gref = buf->gref = gnttab_grant_access(dev->dom, virtual_to_mfn(page), 0);
            BUG_ON(tx->gref == GRANT_INVALID_REF);

            /* Only the first slot of a pbuf starts mid-page; only the
             * last slot may end mid-page. */
            if (s == 0) /* first slot */
                tx->offset = ((unsigned long) q->payload) & ~PAGE_MASK;
            else
                tx->offset = 0;

            if ((s + 1) == q_slots) /* last slot */
                tx->size = ((((unsigned long) q->payload) + q->len) & ~PAGE_MASK) - tx->offset;
            else
                tx->size = PAGE_SIZE - tx->offset;

            tot_len += tx->size;

            if ((s + 1) < q_slots || q->next != NULL) {
                /* there will be a follow-up slot */
                tx->flags |= NETTXF_more_data;
                tx = netfront_get_page(dev); /* next slot */
                BUG_ON(tx == NULL); /* out of memory -> this should have been caught before calling this function */
                (*slots)++;
                buf = &dev->tx_buffers[tx->id];
            }
        }
    }

    /*
     * The first fragment has the entire packet
     * size, subsequent fragments have just the
     * fragment size. The backend works out the
     * true size of the first fragment by
     * subtracting the sizes of the other
     * fragments.
     */
    BUG_ON(first_p->tot_len != tot_len); /* broken pbuf?! */
    first_tx->size = tot_len;
    pbuf_ref(first_p); /* increase ref count */
    buf->pbuf = first_p; /* remember chain for later release on last buf */
    return tx;
}