Example #1
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

#ifdef DEBUG_DMA
	printf("dmamem_free: t=%p segs=%p nsegs=%x\n", t, segs, nsegs);
#endif	/* DEBUG_DMA */

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}
	uvm_pglistfree(&mlist);
}
Example #2
int
viomb_deflate_intr(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viomb_softc *sc = (struct viomb_softc *)vsc->sc_child;
	struct balloon_req *b;
	u_int64_t nvpages;

	if (viomb_vq_dequeue(vq))
		return(1);

	b = &sc->sc_req;
	nvpages = b->bl_nentries;
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
			sizeof(u_int32_t) * nvpages,
			BUS_DMASYNC_POSTWRITE);

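	/*
	 * With MUST_TELL_HOST set, the pages had to stay on bl_pglist
	 * until the host acknowledged the deflate; free them only now.
	 */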
	if (vsc->sc_features & VIRTIO_BALLOON_F_MUST_TELL_HOST)
		uvm_pglistfree(&b->bl_pglist);

	VIOMBDEBUG(sc, "updating sc->sc_actual from %u to %llu\n",
		sc->sc_actual, sc->sc_actual - nvpages);
	virtio_write_device_config_4(vsc, VIRTIO_BALLOON_CONFIG_ACTUAL,
				     sc->sc_actual - nvpages);
	viomb_read_config(sc);

	/* if we have more work to do, add it to tasks list */
	if (sc->sc_npages < sc->sc_actual)
		task_add(sc->sc_taskq, &sc->sc_task);

	return(1);
}
Example #3
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	DPRINTF(("bus_dmamem_free: t = %p, segs = %p, nsegs = %d\n", t, segs, nsegs));

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		DPRINTF(("bus_dmamem_free: segs[%d]: ds_addr = 0x%08lx, ds_len = %ld\n", curseg, segs[curseg].ds_addr, segs[curseg].ds_len));
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			DPRINTF(("bus_dmamem_free: m = %p\n", m));
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}
Example #4
void
__free_page(struct page *page)
{
	struct pglist pglist = TAILQ_HEAD_INITIALIZER(pglist);

	TAILQ_INSERT_TAIL(&pglist, &page->p_vmp, pageq.queue);

	uvm_pglistfree(&pglist);
}
Example #5
void
viomb_deflate(struct viomb_softc *sc)
{
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
	struct balloon_req *b;
	struct vm_page *p;
	struct virtqueue *vq = &sc->sc_vq[VQ_DEFLATE];
	u_int64_t nvpages;
	int i, slot;

	nvpages = sc->sc_actual - sc->sc_npages;
	if (nvpages > PGS_PER_REQ)
		nvpages = PGS_PER_REQ;
	b = &sc->sc_req;
	b->bl_nentries = nvpages;

	TAILQ_INIT(&b->bl_pglist);
	for (i = 0; i < nvpages; i++) {
		p = TAILQ_FIRST(&sc->sc_balloon_pages);
		if (p == NULL) {
			/* Ran out of balloon pages; entries 0..i-1 are valid. */
			b->bl_nentries = i;
			break;
		}
		TAILQ_REMOVE(&sc->sc_balloon_pages, p, pageq);
		TAILQ_INSERT_TAIL(&b->bl_pglist, p, pageq);
		b->bl_pages[i] = p->phys_addr / VIRTIO_PAGE_SIZE;
	}

	if (virtio_enqueue_prep(vq, &slot)) {
		printf("%s:virtio_get_slot(def) vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	if (virtio_enqueue_reserve(vq, slot, 1)) {
		printf("%s:virtio_enqueue_reserve() vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
		    sizeof(u_int32_t) * nvpages,
		    BUS_DMASYNC_PREWRITE);
	virtio_enqueue_p(vq, slot, b->bl_dmamap, 0,
			 sizeof(u_int32_t) * nvpages, VRING_READ);

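	/*
	 * Without MUST_TELL_HOST the host does not have to acknowledge
	 * the deflate first, so the pages can be freed immediately.
	 */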
	if (!(vsc->sc_features & VIRTIO_BALLOON_F_MUST_TELL_HOST))
		uvm_pglistfree(&b->bl_pglist);
	virtio_enqueue_commit(vsc, vq, slot, VRING_NOTIFY);
	return;
err:
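	/* Unwind: put the collected pages back on the balloon list. */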
	while ((p = TAILQ_LAST(&b->bl_pglist, pglist))) {
		TAILQ_REMOVE(&b->bl_pglist, p, pageq);
		TAILQ_INSERT_HEAD(&sc->sc_balloon_pages, p, pageq);
	}
	return;
}
Example #6
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	if (nsegs != 1)
		panic("bus_dmamem_free: nsegs = %d", nsegs);

	/*
	 * Return the list of physical pages back to the VM system.
	 */
	uvm_pglistfree(segs[0]._ds_mlist);
	free(segs[0]._ds_mlist, M_DEVBUF);
}
Example #7
void
viomb_inflate(struct viomb_softc *sc)
{
	struct virtio_softc *vsc = (struct virtio_softc *)sc->sc_virtio;
	struct balloon_req *b;
	struct vm_page *p;
	struct virtqueue *vq = &sc->sc_vq[VQ_INFLATE];
	u_int32_t nvpages;
	int slot, error, i = 0;

	nvpages = sc->sc_npages - sc->sc_actual;
	if (nvpages > PGS_PER_REQ)
		nvpages = PGS_PER_REQ;
	b = &sc->sc_req;

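	/*
	 * Grab nvpages physical pages below the DMA-reachable limit
	 * without sleeping; uvm_pglistalloc() links them onto bl_pglist.
	 */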
	if ((error = uvm_pglistalloc(nvpages * PAGE_SIZE, 0,
				     dma_constraint.ucr_high,
				     0, 0, &b->bl_pglist, nvpages,
				     UVM_PLA_NOWAIT))) {
		printf("%s unable to allocate %u physmem pages,"
		    "error %d\n", DEVNAME(sc), nvpages, error);
		return;
	}

	b->bl_nentries = nvpages;
	TAILQ_FOREACH(p, &b->bl_pglist, pageq)
		b->bl_pages[i++] = p->phys_addr / VIRTIO_PAGE_SIZE;

	KASSERT(i == nvpages);

	if ((virtio_enqueue_prep(vq, &slot)) > 0) {
		printf("%s:virtio_enqueue_prep() vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	if (virtio_enqueue_reserve(vq, slot, 1)) {
		printf("%s:virtio_enqueue_reserve vq_num %d\n",
		       DEVNAME(sc), vq->vq_num);
		goto err;
	}
	bus_dmamap_sync(vsc->sc_dmat, b->bl_dmamap, 0,
			sizeof(u_int32_t) * nvpages, BUS_DMASYNC_PREWRITE);
	virtio_enqueue_p(vq, slot, b->bl_dmamap, 0,
			 sizeof(u_int32_t) * nvpages, VRING_READ);
	virtio_enqueue_commit(vsc, vq, slot, VRING_NOTIFY);
	return;
err:
	uvm_pglistfree(&b->bl_pglist);
	return;
}
Example #8
void
mbus_dmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
{
	struct pglist pglist;
	paddr_t pa, epa;

	TAILQ_INIT(&pglist);
	for (; nsegs--; segs++)
		for (pa = segs->ds_addr, epa = pa + segs->ds_len;
		     pa < epa; pa += PAGE_SIZE) {
			struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
			if (!pg)
				panic("mbus_dmamem_free: no page for pa");
			TAILQ_INSERT_TAIL(&pglist, pg, pageq);
			pdcache(HPPA_SID_KERNEL, pa, PAGE_SIZE);
			pdtlb(HPPA_SID_KERNEL, pa);
			pitlb(HPPA_SID_KERNEL, pa);
		}
	uvm_pglistfree(&pglist);
}
Example #9
/*
 * _bus_dmamem_free_common --
 *	Free memory allocated with _bus_dmamem_alloc_range_common()
 *	back to the VM system.
 */
void
_bus_dmamem_free_common(bus_dma_tag_t t,
			bus_dma_segment_t *segs,
			int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		     addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		     addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}
Example #10
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	vm_page_t m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
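			/* Translate the bus address back to a CPU physical address. */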
			m = PHYS_TO_VM_PAGE((*t->_device_to_pa)(addr));
			TAILQ_INSERT_TAIL(&mlist, m, pageq);
		}
	}

	uvm_pglistfree(&mlist);
}
Example #11
/*
 * Common function for freeing DMA-safe memory.  May be called by
 * bus-specific DMA memory free functions.
 */
void
_hpcmips_bd_mem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{
	struct vm_page *m;
	bus_addr_t addr;
	struct pglist mlist;
	int curseg;

	/*
	 * Build a list of pages to free back to the VM system.
	 */
	TAILQ_INIT(&mlist);
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE) {
			m = PHYS_TO_VM_PAGE(addr);
			TAILQ_INSERT_TAIL(&mlist, m, pageq.queue);
		}
	}

	uvm_pglistfree(&mlist);
}
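Every example above releases memory the same way: the pages are linked onto a struct pglist and handed back to UVM with a single uvm_pglistfree() call, either by rebuilding the list from DMA segments via PHYS_TO_VM_PAGE() or by reusing the list that uvm_pglistalloc() filled. Below is a minimal sketch of that allocate/free pairing, assuming the OpenBSD interface (UVM_PLA_* flags, as used in Example #7); the function name and size handling are hypothetical.

/*
 * Minimal sketch, not taken from the examples above.  The function
 * name is hypothetical; uvm_pglistalloc()/uvm_pglistfree() and the
 * UVM_PLA_WAITOK flag follow the OpenBSD interface.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <uvm/uvm_extern.h>

int
example_pglist_roundtrip(psize_t size)
{
	struct pglist mlist;
	int error;

	TAILQ_INIT(&mlist);
	/* One contiguous segment anywhere in physical memory. */
	error = uvm_pglistalloc(round_page(size), 0, (paddr_t)-1, 0, 0,
	    &mlist, 1, UVM_PLA_WAITOK);
	if (error)
		return (error);

	/* ... map and use the pages ... */

	/* Hand every page on the list back to the VM system. */
	uvm_pglistfree(&mlist);
	return (0);
}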