Example No. 1
/*
 * Speed up the reclamation of up to "distance" pages that precede the
 * faulting pindex within the first object of the shadow chain.
 */
static void
vm_fault_cache_behind(const struct faultstate *fs, int distance)
{
	vm_object_t first_object, object;
	vm_page_t m, m_prev;
	vm_pindex_t pindex;

	object = fs->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	first_object = fs->first_object;
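	/*
	 * If the shadow chain's first object is not the one we faulted in,
	 * we need its lock as well.  Try it opportunistically; on failure,
	 * drop "object" and take both locks in the canonical order
	 * (first_object before object) to avoid deadlock.
	 */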
	if (first_object != object) {
		if (!VM_OBJECT_TRYWLOCK(first_object)) {
			VM_OBJECT_WUNLOCK(object);
			VM_OBJECT_WLOCK(first_object);
			VM_OBJECT_WLOCK(object);
		}
	}
	/* Neither fictitious nor unmanaged pages can be cached. */
	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
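		/*
		 * Compute the lowest pindex to scan: at most "distance"
		 * pages behind the faulting pindex, clamped to the start
		 * of the map entry's backing range.
		 */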
		if (fs->first_pindex < distance)
			pindex = 0;
		else
			pindex = fs->first_pindex - distance;
		if (pindex < OFF_TO_IDX(fs->entry->offset))
			pindex = OFF_TO_IDX(fs->entry->offset);
		m = first_object != object ? fs->first_m : fs->m;
		KASSERT((m->oflags & VPO_BUSY) != 0,
		    ("vm_fault_cache_behind: page %p is not busy", m));
		m_prev = vm_page_prev(m);
		while ((m = m_prev) != NULL && m->pindex >= pindex &&
		    m->valid == VM_PAGE_BITS_ALL) {
			m_prev = vm_page_prev(m);
			if (m->busy != 0 || (m->oflags & VPO_BUSY) != 0)
				continue;
			vm_page_lock(m);
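			/*
			 * Leave held or wired pages alone.  Otherwise remove
			 * all mappings; dirty pages are deactivated, clean
			 * pages go straight to the cache.
			 */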
			if (m->hold_count == 0 && m->wire_count == 0) {
				pmap_remove_all(m);
				vm_page_aflag_clear(m, PGA_REFERENCED);
				if (m->dirty != 0)
					vm_page_deactivate(m);
				else
					vm_page_cache(m);
			}
			vm_page_unlock(m);
		}
	}
	if (first_object != object)
		VM_OBJECT_WUNLOCK(first_object);
}
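
The trylock-then-back-off sequence at the top of this function (and of the older version that follows) is a general deadlock-avoidance pattern: when you already hold the "wrong" lock of an ordered pair, try the other one opportunistically and, on failure, release and reacquire both in the agreed order. A minimal user-space sketch of the same idea is shown below, using POSIX mutexes; lock_second_in_order and the parameters a/b are illustrative names, not kernel identifiers.

#include <pthread.h>

/*
 * Sketch only: the caller already holds "b", but the agreed lock order
 * is "a" before "b".  Try "a" opportunistically; if that fails, drop
 * "b" and take both locks in order.
 */
static void
lock_second_in_order(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (pthread_mutex_trylock(a) != 0) {
		pthread_mutex_unlock(b);
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	}
}

Whatever "b" protects may have changed while it was briefly released on the slow path, so callers of this pattern generally have to tolerate or re-validate that state.
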
Example No. 2
/*
 * Speed up the reclamation of up to "distance" pages that precede the
 * faulting pindex within the first object of the shadow chain.
 */
static void
vm_fault_cache_behind(const struct faultstate *fs, int distance)
{
	vm_object_t first_object, object;
	vm_page_t m, m_prev;
	vm_pindex_t pindex;

	object = fs->object;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	first_object = fs->first_object;
	if (first_object != object) {
		if (!VM_OBJECT_TRYLOCK(first_object)) {
			VM_OBJECT_UNLOCK(object);
			VM_OBJECT_LOCK(first_object);
			VM_OBJECT_LOCK(object);
		}
	}
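	/* Device, physical, and SG objects contain pages that cannot be cached. */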
	if (first_object->type != OBJT_DEVICE &&
	    first_object->type != OBJT_PHYS && first_object->type != OBJT_SG) {
		if (fs->first_pindex < distance)
			pindex = 0;
		else
			pindex = fs->first_pindex - distance;
		if (pindex < OFF_TO_IDX(fs->entry->offset))
			pindex = OFF_TO_IDX(fs->entry->offset);
		m = first_object != object ? fs->first_m : fs->m;
		KASSERT((m->oflags & VPO_BUSY) != 0,
		    ("vm_fault_cache_behind: page %p is not busy", m));
		m_prev = vm_page_prev(m);
		while ((m = m_prev) != NULL && m->pindex >= pindex &&
		    m->valid == VM_PAGE_BITS_ALL) {
			m_prev = vm_page_prev(m);
			if (m->busy != 0 || (m->oflags & VPO_BUSY) != 0)
				continue;
			vm_page_lock(m);
			if (m->hold_count == 0 && m->wire_count == 0) {
				pmap_remove_all(m);
				vm_page_aflag_clear(m, PGA_REFERENCED);
				if (m->dirty != 0)
					vm_page_deactivate(m);
				else
					vm_page_cache(m);
			}
			vm_page_unlock(m);
		}
	}
	if (first_object != object)
		VM_OBJECT_UNLOCK(first_object);
}
Example No. 3
/*
 * vm_contig_pg_clean:
 * 
 * Do a thorough cleanup of the specified 'queue', which can be either
 * PQ_ACTIVE or PQ_INACTIVE, by walking through it.  If a page is not
 * marked dirty, it is moved into the page cache, provided no one has
 * currently acquired it; otherwise a localized action per object type
 * is taken for cleanup:
 *
 * 	In the OBJT_VNODE case, the whole page range is cleaned up
 * 	using the vm_object_page_clean() routine, by specifying a
 * 	start and end of '0'.
 *
 * 	Otherwise, if the object is of any other type, the generic
 * 	pageout (daemon) flush routine is invoked.
 *
 * The caller must hold vm_token.
 */
static int
vm_contig_pg_clean(int queue)
{
	vm_object_t object;
	vm_page_t m, m_tmp, next;

	ASSERT_LWKT_TOKEN_HELD(&vm_token);

	for (m = TAILQ_FIRST(&vm_page_queues[queue].pl); m != NULL; m = next) {
		KASSERT(m->queue == queue,
			("vm_contig_clean: page %p's queue is not %d", 
			m, queue));
		next = TAILQ_NEXT(m, pageq);

		if (m->flags & PG_MARKER)
			continue;
		
		if (vm_page_sleep_busy(m, TRUE, "vpctw0"))
			return (TRUE);
		
		vm_page_test_dirty(m);
		if (m->dirty) {
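			/*
			 * A dirty page must be flushed before it can be
			 * cached.  The flush can block, so return TRUE to
			 * make the caller rescan the queue.
			 */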
			object = m->object;
			if (object->type == OBJT_VNODE) {
				vn_lock(object->handle, LK_EXCLUSIVE|LK_RETRY);
				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
				vn_unlock(((struct vnode *)object->handle));
				return (TRUE);
			} else if (object->type == OBJT_SWAP ||
					object->type == OBJT_DEFAULT) {
				m_tmp = m;
				vm_pageout_flush(&m_tmp, 1, 0);
				return (TRUE);
			}
		}
		KKASSERT(m->busy == 0);
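		/* Clean, unheld pages can be moved straight to the cache. */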
		if (m->dirty == 0 && m->hold_count == 0) {
			vm_page_busy(m);
			vm_page_cache(m);
		}
	}
	return (FALSE);
}
Example No. 4
/*
 * vm_contig_pg_clean:
 *
 * Do a thorough cleanup of the specified 'queue', which can be either
 * PQ_ACTIVE or PQ_INACTIVE, by walking through it.  If a page is not
 * marked dirty, it is moved into the page cache, provided no one has
 * currently acquired it; otherwise a localized action per object type
 * is taken for cleanup:
 *
 * 	In the OBJT_VNODE case, the whole page range is cleaned up
 * 	using the vm_object_page_clean() routine, by specifying a
 * 	start and end of '0'.
 *
 * 	Otherwise, if the object is of any other type, the generic
 * 	pageout (daemon) flush routine is invoked.
 */
static void
vm_contig_pg_clean(int queue, int count)
{
    vm_object_t object;
    vm_page_t m, m_tmp;
    struct vm_page marker;
    struct vpgqueues *pq = &vm_page_queues[queue];

    /*
     * Setup a local marker
     */
    bzero(&marker, sizeof(marker));
    marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
    marker.queue = queue;
    marker.wire_count = 1;

    vm_page_queues_spin_lock(queue);
    TAILQ_INSERT_HEAD(&pq->pl, &marker, pageq);
    vm_page_queues_spin_unlock(queue);

    /*
     * Iterate the queue.  Note that the vm_page spinlock must be
     * acquired before the pageq spinlock so it's easiest to simply
     * not hold it in the loop iteration.
     */
    while (count-- > 0 && (m = TAILQ_NEXT(&marker, pageq)) != NULL) {
        vm_page_and_queue_spin_lock(m);
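        /*
         * The queue may have shifted while we were acquiring the
         * spinlocks; if m is no longer right after our marker, retry
         * without consuming a count.
         */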
        if (m != TAILQ_NEXT(&marker, pageq)) {
            vm_page_and_queue_spin_unlock(m);
            ++count;
            continue;
        }
        KKASSERT(m->queue == queue);

        TAILQ_REMOVE(&pq->pl, &marker, pageq);
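        /* Re-insert the marker after m so the scan position survives dropping the spinlocks. */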
        TAILQ_INSERT_AFTER(&pq->pl, m, &marker, pageq);

        if (m->flags & PG_MARKER) {
            vm_page_and_queue_spin_unlock(m);
            continue;
        }
        if (vm_page_busy_try(m, TRUE)) {
            vm_page_and_queue_spin_unlock(m);
            continue;
        }
        vm_page_and_queue_spin_unlock(m);

        /*
         * We've successfully busied the page
         */
        if (m->queue - m->pc != queue) {
            vm_page_wakeup(m);
            continue;
        }
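        /* Wired, held, or orphaned (no object) pages cannot be reclaimed; skip them. */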
        if (m->wire_count || m->hold_count) {
            vm_page_wakeup(m);
            continue;
        }
        if ((object = m->object) == NULL) {
            vm_page_wakeup(m);
            continue;
        }
        vm_page_test_dirty(m);
        if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
            vm_object_hold(object);
            KKASSERT(m->object == object);

            if (object->type == OBJT_VNODE) {
                vm_page_wakeup(m);
                vn_lock(object->handle, LK_EXCLUSIVE|LK_RETRY);
                vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
                vn_unlock(((struct vnode *)object->handle));
            } else if (object->type == OBJT_SWAP ||
                       object->type == OBJT_DEFAULT) {
                m_tmp = m;
                vm_pageout_flush(&m_tmp, 1, 0);
            } else {
                vm_page_wakeup(m);
            }
            vm_object_drop(object);
        } else if (m->hold_count == 0) {
            vm_page_cache(m);
        } else {
            vm_page_wakeup(m);
        }
    }

    /*
     * Scrap our local marker
     */
    vm_page_queues_spin_lock(queue);
    TAILQ_REMOVE(&pq->pl, &marker, pageq);
    vm_page_queues_spin_unlock(queue);
}
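
The newer vm_contig_pg_clean() relies on a common queue-scanning trick: a dummy marker entry holds the scan position so the queue lock can be dropped while each real page is processed, and other threads can insert or remove pages around it without invalidating the walk. A minimal user-space sketch of that technique with <sys/queue.h> follows; struct node, scan_with_marker(), and visit() are illustrative names, not DragonFly APIs.

#include <sys/queue.h>
#include <stdbool.h>

struct node {
	TAILQ_ENTRY(node) link;
	bool is_marker;		/* skip markers owned by other scanners */
};
TAILQ_HEAD(nodeq, node);

/* Visit every real node, re-anchoring at the marker after each step. */
static void
scan_with_marker(struct nodeq *q, void (*visit)(struct node *))
{
	struct node marker = { .is_marker = true };
	struct node *n;

	/* lock(q) would be held here */
	TAILQ_INSERT_HEAD(q, &marker, link);
	while ((n = TAILQ_NEXT(&marker, link)) != NULL) {
		/* Move the marker past n so the lock can be dropped. */
		TAILQ_REMOVE(q, &marker, link);
		TAILQ_INSERT_AFTER(q, n, &marker, link);
		if (n->is_marker)
			continue;
		/* unlock(q); visit(n) may block; relock(q) */
		visit(n);
	}
	TAILQ_REMOVE(q, &marker, link);
	/* unlock(q) */
}

The important property is that the marker, not the current element, anchors the iteration: even if visit() blocks and the node it was handed is removed or requeued, TAILQ_NEXT(&marker, link) still yields a valid next position.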