Code Example #1
File: swap_pager.c  Project: UnitedMarsupials/kame
void
swap_pager_swap_init(void)
{
	swp_clean_t spc;
	struct buf *bp;
	int i;

	/*
	 * KVAs are allocated up front so that we don't need to keep
	 * calling kmem_alloc_pageable() at runtime.
	 */
	for (i = 0, spc = swcleanlist; i < npendingio; i++, spc++) {
		spc->spc_kva = kmem_alloc_pageable(pager_map, PAGE_SIZE * MAX_PAGEOUT_CLUSTER);
		if (!spc->spc_kva) {
			break;
		}
		spc->spc_bp = malloc(sizeof(*bp), M_TEMP, M_KERNEL);
		if (!spc->spc_bp) {
			/* Free the full cluster-sized kva allocated above. */
			kmem_free_wakeup(pager_map, spc->spc_kva,
			    PAGE_SIZE * MAX_PAGEOUT_CLUSTER);
			break;
		}
		spc->spc_flags = 0;
		TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
		swap_pager_free_count++;
	}
}
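
For context, a minimal sketch of how one of these preallocated entries might be claimed and recycled at runtime. The helper names (swap_pager_get_clean / swap_pager_put_clean) are assumptions, not part of the project above, and the synchronization (e.g. splbio()) a real pager would need is omitted:

/*
 * Illustrative sketch only: helper names are assumptions and
 * synchronization is omitted.
 */
static swp_clean_t
swap_pager_get_clean(void)
{
	swp_clean_t spc;

	/* Take a preallocated pageout cluster off the free list. */
	spc = TAILQ_FIRST(&swap_pager_free);
	if (spc != NULL) {
		TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
		swap_pager_free_count--;
	}
	return (spc);
}

static void
swap_pager_put_clean(swp_clean_t spc)
{
	/* Return the cluster so its kva can be reused. */
	spc->spc_flags = 0;
	TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
	swap_pager_free_count++;
}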
Code Example #2
File: vfs__bio.c  Project: dank101/386BSD
/*
 * Exchange a buffer's underlying storage for storage of a different
 * size, taking care to preserve contents where possible. When the
 * buffer grows, the caller is responsible for filling in the added
 * space. When the buffer shrinks, data past the new size is lost, so
 * the caller must return it to backing store before shrinking; no
 * implied I/O is done here.
 *
 * The buffer header is updated in place to describe the new storage.
 */
void
allocbuf(register struct buf *bp, int size)
{
	caddr_t newcontents;

	/* get new memory buffer */
#ifndef notyet
	newcontents = (caddr_t) malloc (size, M_TEMP, M_WAITOK);
#else /* notyet */
	if (round_page(size) == size)
		newcontents = (caddr_t) kmem_alloc_wired_wait(buffer_map, size);
	else
		newcontents = (caddr_t) malloc (size, M_TEMP, M_WAITOK);
#endif /* notyet */

	/* copy the old into the new, up to the maximum that will fit */
	bcopy (bp->b_un.b_addr, newcontents, min(bp->b_bufsize, size));

	/* return old contents to free heap */
#ifndef notyet
	free (bp->b_un.b_addr, M_TEMP);
#else /* notyet */
	if (round_page(bp->b_bufsize) == bp->b_bufsize)
		kmem_free_wakeup(buffer_map, bp->b_un.b_addr, bp->b_bufsize);
	else
		free (bp->b_un.b_addr, M_TEMP);
#endif /* notyet */

	/* adjust buffer cache's idea of memory allocated to buffer contents */
	freebufspace -= size - bp->b_bufsize;
	allocbufspace += size - bp->b_bufsize;

	/* update buffer header */
	bp->b_un.b_addr = newcontents;
	bp->b_bcount = bp->b_bufsize = size;
}
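
A short usage sketch of the grow/shrink contract documented above; allocbuf_example and the choice of DEV_BSIZE are assumptions for illustration only:

/*
 * Sketch: grow bp by one disk block, fill the new tail, then shrink
 * it back.  Per the contract above, bytes past the new size are lost
 * on shrink, so real callers would flush them to backing store first.
 */
static void
allocbuf_example(struct buf *bp)
{
	int oldsize = bp->b_bufsize;

	/* Grow: the added DEV_BSIZE bytes are uninitialized. */
	allocbuf(bp, oldsize + DEV_BSIZE);
	bzero(bp->b_un.b_addr + oldsize, DEV_BSIZE);

	/* Shrink: contents past oldsize are discarded, no I/O done. */
	allocbuf(bp, oldsize);
}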
Code Example #3
File: mem.c  Project: dcui/FreeBSD-9.3_kernel
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	vm_offset_t eva;
	vm_offset_t off;
	vm_offset_t ova;
	vm_offset_t va;
	vm_prot_t prot;
	vm_paddr_t pa;
	vm_size_t cnt;
	vm_page_t m;
	int error;
	int i;
	uint32_t colors;

	cnt = 0;
	colors = 1;
	error = 0;
	ova = 0;

	GIANT_REQUIRED;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		/* /dev/mem: access memory by physical address. */
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			pa = uio->uio_offset & ~PAGE_MASK;
			if (!is_physical_memory(pa)) {
				error = EFAULT;
				break;
			}

			off = uio->uio_offset & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = ulmin(cnt, PAGE_SIZE - off);
			cnt = ulmin(cnt, iov->iov_len);

			m = NULL;
			for (i = 0; phys_avail[i] != 0; i += 2) {
				if (pa >= phys_avail[i] &&
				    pa < phys_avail[i + 1]) {
					m = PHYS_TO_VM_PAGE(pa);
					break;
				}
			}

			if (m != NULL) {
				/*
				 * Managed page: map it into a temporary KVA
				 * window.  On first use, allocate one window
				 * page per data-cache color so the virtual
				 * color can be made to match the physical
				 * page's color on this VIPT cache.
				 */
				if (ova == 0) {
					if (dcache_color_ignore == 0)
						colors = DCACHE_COLORS;
					ova = kmem_alloc_wait(kernel_map,
					    PAGE_SIZE * colors);
				}
				if (colors != 1 && m->md.color != -1)
					va = ova + m->md.color * PAGE_SIZE;
				else
					va = ova;
				pmap_qenter(va, &m, 1);
				error = uiomove((void *)(va + off), cnt,
				    uio);
				pmap_qremove(va, 1);
			} else {
				/* No vm_page for this range; use the direct mapping. */
				va = TLB_PHYS_TO_DIRECT(pa);
				error = uiomove((void *)(va + off), cnt,
				    uio);
			}
			break;
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			/* /dev/kmem: access memory by kernel virtual address. */
			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset + iov->iov_len);

			/*
			 * Make sure that all of the pages are currently
			 * resident so we don't create any zero fill pages.
			 */
			for (; va < eva; va += PAGE_SIZE)
				if (pmap_kextract(va) == 0)
					return (EFAULT);

			prot = (uio->uio_rw == UIO_READ) ? VM_PROT_READ :
			    VM_PROT_WRITE;
			va = uio->uio_offset;
			if (va < VM_MIN_DIRECT_ADDRESS &&
			    kernacc((void *)va, iov->iov_len, prot) == FALSE)
				return (EFAULT);

			error = uiomove((void *)va, iov->iov_len, uio);
			break;
		}
		/* else panic! */
	}
	if (ova != 0)
		kmem_free_wakeup(kernel_map, ova, PAGE_SIZE * colors);
	return (error);
}
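
To isolate the allocation pattern above: kmem_alloc_wait() sleeps until address space in the map becomes available, and kmem_free_wakeup() both frees the range and wakes any such sleepers. A hedged sketch with assumed wrapper names (color_window_alloc / color_window_free are not FreeBSD API):

/*
 * Illustrative wrappers only; the names are assumptions.  The window
 * holds one page per data-cache color so memrw() can pick a virtual
 * address whose color matches the physical page being mapped.
 */
static vm_offset_t
color_window_alloc(uint32_t colors)
{
	/* May sleep until kernel_map space frees up. */
	return (kmem_alloc_wait(kernel_map, PAGE_SIZE * colors));
}

static void
color_window_free(vm_offset_t va, uint32_t colors)
{
	/* Frees the range and wakes kmem_alloc_wait() sleepers. */
	kmem_free_wakeup(kernel_map, va, PAGE_SIZE * colors);
}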