Example #1
int
viomb_deflate_intr(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;
	struct balloon_req *b;
	u_int64_t nvpages;

	if (viomb_vq_dequeue(vq))
		return(1);

	b = &sc->sc_req;
	nvpages = b->bl_nentries;
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
			sizeof(u_int32_t) * nvpages,
			BUS_DMASYNC_POSTWRITE);

	if (vsc->sc_features & VIRTIO_BALLOON_F_MUST_TELL_HOST)
		uvm_pglistfree(&b->bl_pglist);

	VIOMBDEBUG(sc, "updating sc->sc_actual from %u to %llu\n",
		sc->sc_actual, sc->sc_actual - nvpages);
	virtio_write_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_ACTUAL,
				     sc->sc_actual - nvpages);
	viomb_read_config(sc);

	/* if we have more work to do, add it to the task list */
	if (sc->sc_npages < sc->sc_actual)
		task_add(sc->sc_taskq, &sc->sc_task);

	return(1);
}
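The handler above returns early when viomb_vq_dequeue() reports a failure. That helper is not part of these examples; a minimal sketch of what it might look like, assuming the stock OpenBSD virtio_dequeue()/virtio_dequeue_commit() interface (not the driver's actual code), is:

int
viomb_vq_dequeue(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;
	int r, slot;

	/* pull the completed request off the ring */
	r = virtio_dequeue(vsc, vq, &slot, NULL);
	if (r != 0) {
		printf("%s: dequeue failed, errno %d\n", DEVNAME(sc), r);
		return (1);
	}
	virtio_dequeue_commit(vq, slot);
	return (0);
}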
Example #2
/*
 * interrupt handling for the virtqueues
 */
int
viomb_inflate_intr(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;
	struct balloon_req *b;
	struct vm_page *p;
	u_int64_t nvpages;

	if (viomb_vq_dequeue(vq))
		return(1);

	b = &sc->sc_req;
	nvpages = b->bl_nentries;
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
			sizeof(u_int32_t) * nvpages,
			BUS_DMASYNC_POSTWRITE);
	while (!TAILQ_EMPTY(&b->bl_pglist)) {
		p = TAILQ_FIRST(&b->bl_pglist);
		TAILQ_REMOVE(&b->bl_pglist, p, pageq);
		TAILQ_INSERT_TAIL(&sc->sc_balloon_pages, p, pageq);
	}
	VIOMBDEBUG(sc, "updating sc->sc_actual from %u to %llu\n",
		   sc->sc_actual, sc->sc_actual + nvpages);
	virtio_write_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_ACTUAL,
				     sc->sc_actual + nvpages);
	viomb_read_config(sc);

	/* if we have more work to do, add it to the task list */
	if (sc->sc_npages > sc->sc_actual)
		task_add(sc->sc_taskq, &sc->sc_task);

	return (1);
}
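Both interrupt handlers finish by calling viomb_read_config() to refresh sc_npages and sc_actual from the device configuration space. The helper is not included above; a plausible sketch, assuming the virtio_read_device_config_4() accessor and the little-endian balloon config layout (again, an illustration rather than the driver's actual code), could be:

void
viomb_read_config(struct viomb_softc *sc)
{
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
	u_int32_t reg;

	/* both balloon config fields are defined as little-endian */
	reg = virtio_read_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_NUM_PAGES);
	sc->sc_npages = letoh32(reg);
	reg = virtio_read_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_ACTUAL);
	sc->sc_actual = letoh32(reg);
}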
Example #3
void
viomb_worker(void *arg1, void *arg2)
{
	struct viomb_softc *sc = (struct viomb_softc *)arg1;
	int s;

	s = splbio();
	viomb_read_config(sc);
	if (sc->sc_npages > sc->sc_actual) {
		VIOMBDEBUG(sc, "inflating balloon from %u to %u.\n",
			   sc->sc_actual, sc->sc_npages);
		viomb_inflate(sc);
	} else if (sc->sc_npages < sc->sc_actual) {
		viomb_deflate(sc);
		VIOMBDEBUG(sc, "deflating balloon from %u to %u.\n",
			   sc->sc_actual, sc->sc_npages);
	}
	splx(s);
}
Example #4
void
viomb_worker(void *arg1)
{
	struct viomb_softc *sc = (struct viomb_softc *)arg1;
	int s;

	s = splbio();
	viomb_read_config(sc);
	if (sc->sc_npages > sc->sc_actual) {
		VIOMBDEBUG(sc, "inflating balloon from %u to %u.\n",
			   sc->sc_actual, sc->sc_npages);
		viomb_inflate(sc);
	} else if (sc->sc_npages < sc->sc_actual) {
		VIOMBDEBUG(sc, "deflating balloon from %u to %u.\n",
			   sc->sc_actual, sc->sc_npages);
		viomb_deflate(sc);
	}

	sc->sc_sens[0].value = sc->sc_npages << PAGE_SHIFT;
	sc->sc_sens[1].value = sc->sc_actual << PAGE_SHIFT;

	splx(s);
}
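This variant of the worker also publishes the target and actual balloon sizes (in bytes) through sc_sens[0] and sc_sens[1]. Those sensors would need to be registered once at attach time; a sketch using the OpenBSD sensor framework (sensor_attach()/sensordev_install()) is shown below, where the sc_sensdev field and the "desired"/"current" descriptions are assumptions for illustration, not taken from these examples:

	/* hypothetical sensor setup in viomb_attach() */
	strlcpy(sc->sc_sensdev.xname, DEVNAME(sc),
	    sizeof(sc->sc_sensdev.xname));

	strlcpy(sc->sc_sens[0].desc, "desired", sizeof(sc->sc_sens[0].desc));
	sc->sc_sens[0].type = SENSOR_INTEGER;
	sensor_attach(&sc->sc_sensdev, &sc->sc_sens[0]);

	strlcpy(sc->sc_sens[1].desc, "current", sizeof(sc->sc_sens[1].desc));
	sc->sc_sens[1].type = SENSOR_INTEGER;
	sensor_attach(&sc->sc_sensdev, &sc->sc_sens[1]);

	sensordev_install(&sc->sc_sensdev);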
Example #5
void
viomb_attach(struct device *parent, struct device *self, void *aux)
{
	struct viomb_softc *sc = (struct viomb_softc *)self;
	struct virtio_softc *vsc = (struct virtio_softc *)parent;
	u_int32_t features;
	int i;

	if (vsc->sc_child != NULL) {
		printf("child already attached for %s; something wrong...\n",
		    parent->dv_xname);
		return;
	}

	/* fail on non-4K page size archs */
	if (VIRTIO_PAGE_SIZE != PAGE_SIZE){
		printf("non-4K page size arch found, needs %d, got %d\n",
		    VIRTIO_PAGE_SIZE, PAGE_SIZE);
		return;
	}

	sc->sc_virtio = vsc;
	vsc->sc_vqs = &sc->sc_vq[VQ_INFLATE];
	vsc->sc_nvqs = 0;
	vsc->sc_child = self;
	vsc->sc_ipl = IPL_BIO;
	vsc->sc_config_change = viomb_config_change;
	vsc->sc_intrhand = virtio_vq_intr;

	/* negotiate features */
	features = VIRTIO_F_RING_INDIRECT_DESC;
	features = virtio_negotiate_features(vsc, features,
					     viomb_feature_names);

	if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_INFLATE], VQ_INFLATE,
	     sizeof(u_int32_t) * PGS_PER_REQ, 1, "inflate") != 0))
		goto err;
	vsc->sc_nvqs++;
	if ((virtio_alloc_vq(vsc, &sc->sc_vq[VQ_DEFLATE], VQ_DEFLATE,
	     sizeof(u_int32_t) * PGS_PER_REQ, 1, "deflate") != 0))
		goto err;
	vsc->sc_nvqs++;

	sc->sc_vq[VQ_INFLATE].vq_done = viomb_inflate_intr;
	sc->sc_vq[VQ_DEFLATE].vq_done = viomb_deflate_intr;
	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_INFLATE]);
	virtio_start_vq_intr(vsc, &sc->sc_vq[VQ_DEFLATE]);

	viomb_read_config(sc);
	TAILQ_INIT(&sc->sc_balloon_pages);

	if ((sc->sc_req.bl_pages = dma_alloc(sizeof(u_int32_t) * PGS_PER_REQ,
	    PR_NOWAIT|PR_ZERO)) == NULL) {
		printf("%s: Can't alloc DMA memory.\n", DEVNAME(sc));
		goto err;
	}
	if (bus_dmamap_create(vsc->sc_dmat, sizeof(u_int32_t) * PGS_PER_REQ,
			      1, sizeof(u_int32_t) * PGS_PER_REQ, 0,
			      BUS_DMA_NOWAIT, &sc->sc_req.bl_dmamap)) {
		printf("%s: dmamap creation failed.\n", DEVNAME(sc));
		goto err;
	}
	if (bus_dmamap_load(vsc->sc_dmat, sc->sc_req.bl_dmamap,
			    &sc->sc_req.bl_pages[0],
			    sizeof(uint32_t) * PGS_PER_REQ,
			    NULL, BUS_DMA_NOWAIT)) {
		printf("%s: dmamap load failed.\n", DEVNAME(sc));
		goto err_dmamap;
	}

	sc->sc_taskq = taskq_create("viomb", 1, IPL_BIO);
	if (sc->sc_taskq == NULL)
		goto err_dmamap;
	task_set(&sc->sc_task, viomb_worker, sc, NULL);

	printf("\n");
	return;
err_dmamap:
	bus_dmamap_destroy(vsc->sc_dmat, sc->sc_req.bl_dmamap);
err:
	if (sc->sc_req.bl_pages)
		dma_free(sc->sc_req.bl_pages, sizeof(u_int32_t) * PGS_PER_REQ);
	for (i = 0; i < vsc->sc_nvqs; i++)
		virtio_free_vq(vsc, &sc->sc_vq[i]);
	vsc->sc_nvqs = 0;
	vsc->sc_child = VIRTIO_CHILD_ERROR;
	return;
}
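The attach routine registers viomb_config_change() as the config-change callback, but that function is not among these examples. Since the worker already re-reads the device configuration, a minimal sketch (an assumption about the callback, not the driver's confirmed code) only needs to reschedule the task:

int
viomb_config_change(struct virtio_softc *vsc)
{
	struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;

	/* the host changed its page target; let the worker react */
	task_add(sc->sc_taskq, &sc->sc_task);
	return (1);
}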