Example #1
/*
 * vm_contig_pg_clean:
 * 
 * Do a thorough cleanup of the specified 'queue', which can be either
 * PQ_ACTIVE or PQ_INACTIVE, by walking through it.  If the page is not
 * marked dirty, it is shoved into the page cache, provided no one has
 * currently acquired it; otherwise, cleanup action specific to the
 * page's object type is taken:
 *
 * 	In the OBJT_VNODE case, the whole page range is cleaned up
 * 	using the vm_object_page_clean() routine, by specifying a
 * 	start and end of '0'.
 *
 * 	If the object is of any other type, the generic
 * 	pageout (daemon) flush routine is invoked.
 *
 * The caller must hold vm_token.
 */
static int
vm_contig_pg_clean(int queue)
{
	vm_object_t object;
	vm_page_t m, m_tmp, next;

	ASSERT_LWKT_TOKEN_HELD(&vm_token);

	for (m = TAILQ_FIRST(&vm_page_queues[queue].pl); m != NULL; m = next) {
		KASSERT(m->queue == queue,
			("vm_contig_clean: page %p's queue is not %d", 
			m, queue));
		next = TAILQ_NEXT(m, pageq);

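		/*
		 * Ignore dummy marker pages inserted by other queue scans.
		 */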
		if (m->flags & PG_MARKER)
			continue;
		
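		/*
		 * If the page is busy, sleep on it and return TRUE so the
		 * caller knows we blocked and can restart the scan.
		 */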
		if (vm_page_sleep_busy(m, TRUE, "vpctw0"))
			return (TRUE);
		
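		/*
		 * Flush dirty pages according to their object type.  Vnode
		 * objects are synchronously cleaned in their entirety via
		 * the vnode; swap and default objects are flushed one page
		 * at a time through the generic pageout code.  Return TRUE
		 * in either case since we may have blocked.
		 */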
		vm_page_test_dirty(m);
		if (m->dirty) {
			object = m->object;
			if (object->type == OBJT_VNODE) {
				vn_lock(object->handle, LK_EXCLUSIVE|LK_RETRY);
				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
				vn_unlock(((struct vnode *)object->handle));
				return (TRUE);
			} else if (object->type == OBJT_SWAP ||
					object->type == OBJT_DEFAULT) {
				m_tmp = m;
				vm_pageout_flush(&m_tmp, 1, 0);
				return (TRUE);
			}
		}
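		/*
		 * The page cannot be busy here.  A clean page which is
		 * not held can be moved directly into the page cache.
		 */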
		KKASSERT(m->busy == 0);
		if (m->dirty == 0 && m->hold_count == 0) {
			vm_page_busy(m);
			vm_page_cache(m);
		}
	}
	return (FALSE);
}
Example #2
/*
 * vm_contig_pg_clean:
 *
 * Do a thorough cleanup of the specified 'queue', which can be either
 * PQ_ACTIVE or PQ_INACTIVE, by walking through it.  If the page is not
 * marked dirty, it is shoved into the page cache, provided no one has
 * currently acquired it; otherwise, cleanup action specific to the
 * page's object type is taken:
 *
 * 	In the OBJT_VNODE case, the whole page range is cleaned up
 * 	using the vm_object_page_clean() routine, by specifying a
 * 	start and end of '0'.
 *
 * 	If the object is of any other type, the generic
 * 	pageout (daemon) flush routine is invoked.
 */
static void
vm_contig_pg_clean(int queue, int count)
{
    vm_object_t object;
    vm_page_t m, m_tmp;
    struct vm_page marker;
    struct vpgqueues *pq = &vm_page_queues[queue];

    /*
     * Setup a local marker
     */
    bzero(&marker, sizeof(marker));
    marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
    marker.queue = queue;
    marker.wire_count = 1;

    vm_page_queues_spin_lock(queue);
    TAILQ_INSERT_HEAD(&pq->pl, &marker, pageq);
    vm_page_queues_spin_unlock(queue);

    /*
     * Iterate the queue.  Note that the vm_page spinlock must be
     * acquired before the pageq spinlock so it's easiest to simply
     * not hold it in the loop iteration.
     */
    while (count-- > 0 && (m = TAILQ_NEXT(&marker, pageq)) != NULL) {
        vm_page_and_queue_spin_lock(m);
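        /*
         * If the page was ripped out from under the marker while we
         * were acquiring its spinlock, retry without consuming count.
         */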
        if (m != TAILQ_NEXT(&marker, pageq)) {
            vm_page_and_queue_spin_unlock(m);
            ++count;
            continue;
        }
        KKASSERT(m->queue == queue);

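        /*
         * Advance the marker past the page being examined so the next
         * iteration resumes from the correct position.
         */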
        TAILQ_REMOVE(&pq->pl, &marker, pageq);
        TAILQ_INSERT_AFTER(&pq->pl, m, &marker, pageq);

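        /*
         * Skip other scanners' markers and any page we cannot busy
         * without blocking.
         */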
        if (m->flags & PG_MARKER) {
            vm_page_and_queue_spin_unlock(m);
            continue;
        }
        if (vm_page_busy_try(m, TRUE)) {
            vm_page_and_queue_spin_unlock(m);
            continue;
        }
        vm_page_and_queue_spin_unlock(m);

        /*
         * We've successfully busied the page.  Re-verify that it has
         * not been requeued and that it is not wired, held, or
         * orphaned before operating on it.
         */
        if (m->queue - m->pc != queue) {
            vm_page_wakeup(m);
            continue;
        }
        if (m->wire_count || m->hold_count) {
            vm_page_wakeup(m);
            continue;
        }
        if ((object = m->object) == NULL) {
            vm_page_wakeup(m);
            continue;
        }
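        /*
         * Flush dirty or commit-pending pages according to the backing
         * object's type; clean pages which are not held are moved to
         * the cache.
         */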
        vm_page_test_dirty(m);
        if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
            vm_object_hold(object);
            KKASSERT(m->object == object);

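            /*
             * For a vnode object, release the page and synchronously
             * clean the entire object through its vnode.
             */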
            if (object->type == OBJT_VNODE) {
                vm_page_wakeup(m);
                vn_lock(object->handle, LK_EXCLUSIVE|LK_RETRY);
                vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
                vn_unlock(((struct vnode *)object->handle));
            } else if (object->type == OBJT_SWAP ||
                       object->type == OBJT_DEFAULT) {
                m_tmp = m;
                vm_pageout_flush(&m_tmp, 1, 0);
            } else {
                vm_page_wakeup(m);
            }
            vm_object_drop(object);
        } else if (m->hold_count == 0) {
            vm_page_cache(m);
        } else {
            vm_page_wakeup(m);
        }
    }

    /*
     * Scrap our local marker
     */
    vm_page_queues_spin_lock(queue);
    TAILQ_REMOVE(&pq->pl, &marker, pageq);
    vm_page_queues_spin_unlock(queue);
}