Example #1
static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	int i, s;

	s = splvm();
	/*
	 * Fill as many pages as vm_fault has allocated for us.
	 */
	for (i = 0; i < count; i++) {
		if ((m[i]->flags & PG_ZERO) == 0)
			pmap_zero_page(m[i]);
		vm_page_flag_set(m[i], PG_ZERO);
		/* Switch off pv_entries */
		vm_page_lock_queues();
		vm_page_unmanage(m[i]);
		vm_page_unlock_queues();
		m[i]->valid = VM_PAGE_BITS_ALL;
		m[i]->dirty = 0;
		/* The requested page must remain busy, the others not. */
		if (reqpage != i) {
			vm_page_flag_clear(m[i], PG_BUSY);
			m[i]->busy = 0;
		}
	}
	splx(s);

	return (VM_PAGER_OK);
}
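
The zeroing logic above follows a check-then-mark pattern: zero the page only when PG_ZERO is clear, then set the flag so later consumers can skip the work. A minimal user-space sketch of that same pattern; fake_page, FAKE_PG_ZERO, and fill_zeroed are illustrative names, not kernel API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FAKE_PAGE_SIZE	4096
#define FAKE_PG_ZERO	0x01	/* illustrative flag, not the kernel's PG_ZERO */

struct fake_page {
	uint32_t	flags;
	uint8_t		data[FAKE_PAGE_SIZE];
};

/* Zero the page only if it is not already known to be zeroed. */
static void
fill_zeroed(struct fake_page *p)
{
	if ((p->flags & FAKE_PG_ZERO) == 0)
		memset(p->data, 0, FAKE_PAGE_SIZE);
	p->flags |= FAKE_PG_ZERO;
}

int
main(void)
{
	struct fake_page p = { 0 };

	p.data[0] = 0xff;
	fill_zeroed(&p);	/* zeroes the page, sets the flag */
	fill_zeroed(&p);	/* flag already set: memset is skipped */
	printf("data[0]=%u flags=%u\n", p.data[0], p.flags);
	return (0);
}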
Example #2
/*
 * Fill as many pages as vm_fault has allocated for us.
 */
static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	int i;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	for (i = 0; i < count; i++) {
		if (m[i]->valid == 0) {
			if ((m[i]->flags & PG_ZERO) == 0)
				pmap_zero_page(m[i]);
			m[i]->valid = VM_PAGE_BITS_ALL;
		}
		KASSERT(m[i]->valid == VM_PAGE_BITS_ALL,
		    ("phys_pager_getpages: partially valid page %p", m[i]));
	}
	vm_page_lock_queues();
	for (i = 0; i < count; i++) {
		/* Switch off pv_entries */
		vm_page_unmanage(m[i]);
		m[i]->dirty = 0;
		/* The requested page must remain busy, the others not. */
		if (reqpage != i) {
			vm_page_flag_clear(m[i], PG_BUSY);
			m[i]->busy = 0;
		}
	}
	vm_page_unlock_queues();
	return (VM_PAGER_OK);
}
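
Compared with Example #1, this version drops the splvm()/splx() interrupt protection in favor of asserting that the caller already holds the VM object lock, and batches the queue manipulation under a single vm_page_lock_queues()/vm_page_unlock_queues() pair. A rough user-space analogue of the assert-the-caller-holds-the-lock convention, assuming pthreads; owned_lock and its helpers are entirely illustrative:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/*
 * Illustrative lock with an owner field so ownership can be asserted,
 * loosely mimicking what VM_OBJECT_LOCK_ASSERT(object, MA_OWNED)
 * checks.  None of these names are kernel API.
 */
struct owned_lock {
	pthread_mutex_t	mtx;
	pthread_t	owner;
	int		held;
};

static void
ol_lock(struct owned_lock *l)
{
	pthread_mutex_lock(&l->mtx);
	l->owner = pthread_self();
	l->held = 1;
}

static void
ol_unlock(struct owned_lock *l)
{
	l->held = 0;
	pthread_mutex_unlock(&l->mtx);
}

#define OL_ASSERT_OWNED(l) \
	assert((l)->held && pthread_equal((l)->owner, pthread_self()))

/* Like phys_pager_getpages above, demand the lock instead of taking it. */
static void
do_batched_work(struct owned_lock *l)
{
	OL_ASSERT_OWNED(l);
	/* ... per-item loop runs under the single lock hold ... */
}

int
main(void)
{
	struct owned_lock l;

	pthread_mutex_init(&l.mtx, NULL);
	l.held = 0;
	ol_lock(&l);
	do_batched_work(&l);
	ol_unlock(&l);
	puts("ok");
	return (0);
}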
Example #3
/*
 * vm_contig_pg_alloc:
 *
 * Allocate contiguous pages from the VM.  This function does not
 * map the allocated pages into the kernel map, otherwise it is
 * impossible to make large allocations (i.e. >2G).
 *
 * Malloc()'s data structures have been used for collection of
 * statistics and for allocations of less than a page.
 *
 * The caller must hold vm_token.
 */
static int
vm_contig_pg_alloc(unsigned long size, vm_paddr_t low, vm_paddr_t high,
		   unsigned long alignment, unsigned long boundary, int mflags)
{
	int i, start, pass;
	vm_offset_t phys;
	vm_page_t pga = vm_page_array;
	vm_page_t m;
	int pqtype;

	size = round_page(size);
	if (size == 0)
		panic("vm_contig_pg_alloc: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("vm_contig_pg_alloc: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("vm_contig_pg_alloc: boundary must be a power of 2");

	start = 0;
	crit_enter();

	/*
	 * Three passes (0, 1, 2).  Each pass scans the VM page list for
	 * free or cached pages.  After each pass if the entire scan failed
	 * we attempt to flush inactive pages and reset the start index back
	 * to 0.  For passes 1 and 2 we also attempt to flush active pages.
	 */
	for (pass = 0; pass < 3; pass++) {
		/*
		 * Find first page in array that is free, within range, 
		 * aligned, and such that the boundary won't be crossed.
		 */
again:
		for (i = start; i < vmstats.v_page_count; i++) {
			m = &pga[i];
			phys = VM_PAGE_TO_PHYS(m);
			pqtype = m->queue - m->pc;
			if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
			    (phys >= low) && (phys < high) &&
			    ((phys & (alignment - 1)) == 0) &&
			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0) &&
			    m->busy == 0 && m->wire_count == 0 &&
			    m->hold_count == 0 &&
			    (m->flags & PG_BUSY) == 0) {
				break;
			}
		}

		/*
		 * If we could not find a page in the given range, or the
		 * run would extend past the high limit, call
		 * vm_contig_pg_flush() to flush out the queues and return
		 * them to a normal state.
		 */
		if ((i == vmstats.v_page_count) ||
		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {

			/*
			 * Best effort flush of all inactive pages.
			 * This is quite quick; for now, stall all
			 * callers, even if they've specified M_NOWAIT.
			 */
			vm_contig_pg_flush(PQ_INACTIVE, 
					    vmstats.v_inactive_count);

			crit_exit(); /* give interrupts a chance */
			crit_enter();

			/*
			 * Best effort flush of active pages.
			 *
			 * This is very, very slow.
			 * Only do this if the caller has agreed to M_WAITOK.
			 *
			 * If enough pages are flushed, we may succeed on
			 * the next (final) pass; if not, the caller,
			 * contigmalloc(), will fail in the index < 0 case.
			 */
			if (pass > 0 && (mflags & M_WAITOK)) {
				vm_contig_pg_flush(PQ_ACTIVE,
						    vmstats.v_active_count);
			}

		/*
		 * We're already too high in the address space
		 * to succeed; reset to 0 for the next iteration.
		 */
			start = 0;
			crit_exit(); /* give interrupts a chance */
			crit_enter();
			continue;	/* next pass */
		}
		start = i;

		/*
		 * Check that successive pages are contiguous and free.
		 *
		 * (still in critical section)
		 */
		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
			m = &pga[i];
			pqtype = m->queue - m->pc;
			if ((VM_PAGE_TO_PHYS(&m[0]) !=
			    (VM_PAGE_TO_PHYS(&m[-1]) + PAGE_SIZE)) ||
			    ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE)) ||
			    m->busy || m->wire_count ||
			    m->hold_count || (m->flags & PG_BUSY)
			) {
				start++;
				goto again;
			}
		}

		/*
		 * (still in critical section)
		 */
		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			m = &pga[i];
			pqtype = m->queue - m->pc;
			if (pqtype == PQ_CACHE) {
				vm_page_busy(m);
				vm_page_free(m);
			}
			KKASSERT(m->object == NULL);
			vm_page_unqueue_nowakeup(m);
			m->valid = VM_PAGE_BITS_ALL;
			if (m->flags & PG_ZERO)
				vm_page_zero_count--;
			KASSERT(m->dirty == 0,
				("vm_contig_pg_alloc: page %p was dirty", m));
			m->wire_count = 0;
			m->busy = 0;

			/*
			 * Clear all flags except PG_ZERO and PG_WANTED.  This
			 * also clears PG_BUSY.
			 */
			vm_page_flag_clear(m, ~(PG_ZERO|PG_WANTED));
		}

		/*
		 * Our job is done; return the starting index into vm_page_array.
		 */
		crit_exit();
		return (start); /* aka &pga[start] */
	}

	/*
	 * Failed.
	 */
	crit_exit();
	return (-1);
}
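
The candidate test above leans on three power-of-two bit tricks: (x & (x - 1)) == 0 verifies that x is a power of two, (phys & (alignment - 1)) == 0 tests alignment, and ((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0 verifies that the run [phys, phys + size) does not cross a boundary-sized line, since the XOR has bits set above the boundary mask exactly when the first and last byte fall in different boundary windows. A small self-contained check of those expressions; vm_paddr_t is a stand-in typedef here, and the helper names are illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t vm_paddr_t;	/* stand-in for the kernel typedef */

/* x is a nonzero power of two; mirrors the panic checks above. */
static int
is_pow2(uint64_t x)
{
	return (x != 0 && (x & (x - 1)) == 0);
}

/* phys starts on an alignment boundary (alignment a power of two). */
static int
is_aligned(vm_paddr_t phys, uint64_t alignment)
{
	return ((phys & (alignment - 1)) == 0);
}

/*
 * Nonzero iff [phys, phys + size) stays within one boundary-sized
 * window; the XOR has bits above the boundary mask exactly when the
 * first and last byte land in different windows.  (With boundary == 0
 * the kernel expression wraps around to an all-pass check, i.e. no
 * boundary restriction; this sketch assumes a nonzero power of two.)
 */
static int
no_boundary_cross(vm_paddr_t phys, uint64_t size, uint64_t boundary)
{
	return (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0);
}

int
main(void)
{
	assert(is_pow2(0x1000) && !is_pow2(0x1800));
	assert(is_aligned(0x4000, 0x1000) && !is_aligned(0x4800, 0x1000));
	/* 0x3000..0x4fff sits inside the first 64K window. */
	assert(no_boundary_cross(0x3000, 0x2000, 0x10000));
	/* 0xf000..0x10fff straddles the 64K line at 0x10000. */
	assert(!no_boundary_cross(0xf000, 0x2000, 0x10000));
	puts("all checks pass");
	return (0);
}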
Example #4
/*
 * vm_contig_pg_alloc:
 *
 * Allocate contiguous pages from the VM.  This function does not
 * map the allocated pages into the kernel map, otherwise it is
 * impossible to make large allocations (i.e. >2G).
 *
 * Malloc()'s data structures have been used for collection of
 * statistics and for allocations of less than a page.
 */
static int
vm_contig_pg_alloc(unsigned long size, vm_paddr_t low, vm_paddr_t high,
		   unsigned long alignment, unsigned long boundary, int mflags)
{
	int i, q, start, pass;
	vm_offset_t phys;
	vm_page_t pga = vm_page_array;
	vm_page_t m;
	int pqtype;

	size = round_page(size);
	if (size == 0)
		panic("vm_contig_pg_alloc: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("vm_contig_pg_alloc: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("vm_contig_pg_alloc: boundary must be a power of 2");

	/*
	 * See if we can get the pages from the contiguous page reserve
	 * alist.  The returned pages will be allocated and wired but not
	 * busied.
	 */
	m = vm_page_alloc_contig(
		low, high, alignment, boundary, size, VM_MEMATTR_DEFAULT);
	if (m)
		return (m - &pga[0]);

	/*
	 * Three passes (0, 1, 2).  Each pass scans the VM page list for
	 * free or cached pages.  After each pass if the entire scan failed
	 * we attempt to flush inactive pages and reset the start index back
	 * to 0.  For passes 1 and 2 we also attempt to flush active pages.
	 */
	start = 0;
	for (pass = 0; pass < 3; pass++) {
		/*
		 * Find first page in array that is free, within range, 
		 * aligned, and such that the boundary won't be crossed.
		 */
again:
		for (i = start; i < vmstats.v_page_count; i++) {
			m = &pga[i];
			phys = VM_PAGE_TO_PHYS(m);
			pqtype = m->queue - m->pc;
			if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
			    (phys >= low) && (phys < high) &&
			    ((phys & (alignment - 1)) == 0) &&
			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0) &&
			    m->busy == 0 && m->wire_count == 0 &&
			    m->hold_count == 0 &&
			    (m->flags & (PG_BUSY | PG_NEED_COMMIT)) == 0)
			{
				break;
			}
		}

		/*
		 * If we could not find a page in the given range, or the
		 * run would extend past the high limit, call
		 * vm_contig_pg_clean() to flush out the queues and return
		 * them to a normal state.
		 */
		if ((i == vmstats.v_page_count) ||
		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {

			/*
			 * Best effort flush of all inactive pages.
			 * This is quite quick; for now, stall all
			 * callers, even if they've specified M_NOWAIT.
			 */
			for (q = 0; q < PQ_L2_SIZE; ++q) {
				vm_contig_pg_clean(PQ_INACTIVE + q,
						   vmstats.v_inactive_count);
				lwkt_yield();
			}

			/*
			 * Best effort flush of active pages.
			 *
			 * This is very, very slow.
			 * Only do this if the caller has agreed to M_WAITOK.
			 *
			 * If enough pages are flushed, we may succeed on
			 * the next (final) pass; if not, the caller,
			 * contigmalloc(), will fail in the index < 0 case.
			 */
			if (pass > 0 && (mflags & M_WAITOK)) {
				for (q = 0; q < PQ_L2_SIZE; ++q) {
					vm_contig_pg_clean(PQ_ACTIVE + q,
						       vmstats.v_active_count);
				}
				lwkt_yield();
			}

			/*
			 * We're already too high in the address space
			 * to succeed; reset to 0 for the next iteration.
			 */
			start = 0;
			continue;	/* next pass */
		}
		start = i;

		/*
		 * Check that successive pages are contiguous and free.
		 */
		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
			m = &pga[i];
			pqtype = m->queue - m->pc;
			if ((VM_PAGE_TO_PHYS(&m[0]) !=
			    (VM_PAGE_TO_PHYS(&m[-1]) + PAGE_SIZE)) ||
			    ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE)) ||
			    m->busy || m->wire_count ||
			    m->hold_count ||
			    (m->flags & (PG_BUSY | PG_NEED_COMMIT)))
			{
				start++;
				goto again;
			}
		}

		/*
		 * Try to allocate the pages, wiring them as we go.
		 */
		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			m = &pga[i];

			if (vm_page_busy_try(m, TRUE)) {
				vm_contig_pg_free(start,
						  (i - start) * PAGE_SIZE);
				start++;
				goto again;
			}
			pqtype = m->queue - m->pc;
			if (pqtype == PQ_CACHE &&
			    m->hold_count == 0 &&
			    m->wire_count == 0 &&
			    (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) == 0) {
				vm_page_protect(m, VM_PROT_NONE);
				KKASSERT((m->flags & PG_MAPPED) == 0);
				KKASSERT(m->dirty == 0);
				vm_page_free(m);
				--i;
				continue;	/* retry the page */
			}
			if (pqtype != PQ_FREE || m->hold_count) {
				vm_page_wakeup(m);
				vm_contig_pg_free(start,
						  (i - start) * PAGE_SIZE);
				start++;
				goto again;
			}
			KKASSERT((m->valid & m->dirty) == 0);
			KKASSERT(m->wire_count == 0);
			KKASSERT(m->object == NULL);
			vm_page_unqueue_nowakeup(m);
			m->valid = VM_PAGE_BITS_ALL;
			if (m->flags & PG_ZERO)
				vm_page_zero_count--;
			KASSERT(m->dirty == 0,
				("vm_contig_pg_alloc: page %p was dirty", m));
			KKASSERT(m->wire_count == 0);
			KKASSERT(m->busy == 0);

			/*
			 * Clear all flags except PG_BUSY, PG_SBUSY,
			 * PG_ZERO, and PG_WANTED, then wire and unbusy
			 * the now-allocated page.
			 */
			vm_page_flag_clear(m, ~(PG_BUSY | PG_SBUSY |
						PG_ZERO | PG_WANTED));
			vm_page_wire(m);
			vm_page_wakeup(m);
		}

		/*
		 * Our job is done; return the starting index into vm_page_array.
		 */
		return (start); /* aka &pga[start] */
	}

	/*
	 * Failed.
	 */
	return (-1);
}
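
Both versions of vm_contig_pg_alloc share the same scan skeleton: find the first qualifying page, verify that each of the following size / PAGE_SIZE - 1 pages is physically contiguous and also qualifies, and on any mismatch advance the start index and rescan via goto again. A minimal sketch of that two-loop structure over a plain free map; find_free_run and its arguments are illustrative, not from the listings:

#include <stdio.h>

/*
 * Find the first free slot, verify the next k-1 slots are also free,
 * and on any mismatch advance the start and rescan, mirroring the
 * "goto again" structure above.  Returns the start index or -1.
 */
static int
find_free_run(const int *free_map, int n, int k)
{
	int i, start = 0;

again:
	for (i = start; i < n; i++) {
		if (free_map[i])
			break;		/* first candidate */
	}
	if (i > n - k)
		return (-1);		/* no room left for a run of k */
	start = i;
	for (i = start + 1; i < start + k; i++) {
		if (!free_map[i]) {
			start++;	/* candidate failed, rescan */
			goto again;
		}
	}
	return (start);
}

int
main(void)
{
	int map[] = { 0, 1, 1, 0, 1, 1, 1, 0 };

	/* prints "run of 3 starts at 4" */
	printf("run of 3 starts at %d\n", find_free_run(map, 8, 3));
	return (0);
}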