Example #1
/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;
	int found = 0;

	DEFINE_LOCK_COUNT();

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;
		if (!atomic_read(&dentry->d_count)) {
			list_del(&dentry->d_lru);
			list_add(&dentry->d_lru, dentry_unused.prev);
			found++;
		}
		/*
		 * Lock break: after many entries scanned under dcache_lock,
		 * bail out if a reschedule is pending rather than hold the
		 * lock indefinitely.
		 */
		if (TEST_LOCK_COUNT(500) && found > 10) {
			debug_lock_break(1);
			if (conditional_schedule_needed())
				goto out;
			RESET_LOCK_COUNT();
		}
		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
#ifdef DCACHE_DEBUG
			printk(KERN_DEBUG "select_parent: descending to %s/%s, found=%d\n",
			       dentry->d_parent->d_name.name, dentry->d_name.name, found);
#endif
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_child.next; 
		this_parent = this_parent->d_parent;
#ifdef DCACHE_DEBUG
		printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n",
		       this_parent->d_parent->d_name.name, this_parent->d_name.name, found);
#endif
		goto resume;
	}
out:
	spin_unlock(&dcache_lock);
	return found;
}
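
The lock-break helpers used above (DEFINE_LOCK_COUNT, TEST_LOCK_COUNT,
RESET_LOCK_COUNT, debug_lock_break) are not defined in this excerpt. A
minimal sketch of what they might look like, assuming a simple
per-function iteration counter; the names and semantics here are
illustrative, not the actual definitions:

/*
 * Hypothetical lock-break counter: declare a local counter, bump and
 * test it against a threshold each pass, and reset it once the lock
 * has been (or could have been) dropped.
 */
#define DEFINE_LOCK_COUNT()	int _lock_break_count = 0
#define TEST_LOCK_COUNT(n)	(++_lock_break_count > (n))
#define RESET_LOCK_COUNT()	do { _lock_break_count = 0; } while (0)
#define debug_lock_break(n)	do { } while (0)	/* debug hook; assumed no-op */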
Example #2
/* mm->page_table_lock is held. mmap_sem is not held */
static inline int swap_out_pmd(struct mm_struct * mm, struct vm_area_struct * vma, pmd_t *dir, unsigned long address, unsigned long end, int count, zone_t * classzone)
{
	pte_t * pte;
	unsigned long pmd_end;

	DEFINE_LOCK_COUNT();

	if (pmd_none(*dir))
		return count;
	if (pmd_bad(*dir)) {
		pmd_ERROR(*dir);
		pmd_clear(dir);
		return count;
	}
	
	pte = pte_offset(dir, address);
	
	pmd_end = (address + PMD_SIZE) & PMD_MASK;
	if (end > pmd_end)
		end = pmd_end;

	/*
	 * Scan the ptes in this pmd, trying to swap out each present,
	 * swappable page until the target count is reached.
	 */
	do {
		if (pte_present(*pte)) {
			struct page *page = pte_page(*pte);

			if (VALID_PAGE(page) && !PageReserved(page)) {
				count -= try_to_swap_out(mm, vma, address, pte, page, classzone);
				if (!count) {
					address += PAGE_SIZE;
					break;
				}
				/*
				 * We reach this with a lock depth of 1 or 2,
				 * which is presumably why the lock break
				 * below is compiled out.
				 */
#if 0
				if (TEST_LOCK_COUNT(4)) {
					if (conditional_schedule_needed())
						return count;
					RESET_LOCK_COUNT();
				}
#endif
			}
		}
		address += PAGE_SIZE;
		pte++;
	} while (address && (address < end));
	mm->swap_address = address;
	return count;
}
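
conditional_schedule_needed(), used in Examples #1 and #2, is likewise
external to this excerpt. On a 2.4-era kernel it would plausibly just
test the current task's reschedule flag; this is an assumed sketch, not
the actual definition:

/* Sketch: has the scheduler asked the current task to yield? */
static inline int conditional_schedule_needed(void)
{
	return current->need_resched;
}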
Example #3
void prune_dcache(int count)
{
	DEFINE_LOCK_COUNT();

	spin_lock(&dcache_lock);

redo:
	for (;;) {
		struct dentry *dentry;
		struct list_head *tmp;

		/*
		 * Lock break: periodically drop dcache_lock if a reschedule
		 * is pending, then restart the scan, since the unused list
		 * may have changed while the lock was dropped.
		 */
		if (TEST_LOCK_COUNT(100)) {
			RESET_LOCK_COUNT();
			debug_lock_break(1);
			if (conditional_schedule_needed()) {
				break_spin_lock(&dcache_lock);
				goto redo;
			}
		}

		tmp = dentry_unused.prev;

		if (tmp == &dentry_unused)
			break;
		list_del_init(tmp);
		dentry = list_entry(tmp, struct dentry, d_lru);

		/* If the dentry was recently referenced, don't free it. */
		if (dentry->d_vfs_flags & DCACHE_REFERENCED) {
			dentry->d_vfs_flags &= ~DCACHE_REFERENCED;
			list_add(&dentry->d_lru, &dentry_unused);
			continue;
		}
		dentry_stat.nr_unused--;

		/* Unused dentry with a count? */
		if (atomic_read(&dentry->d_count))
			BUG();

		prune_one_dentry(dentry);
		if (!--count)
			break;
	}
	spin_unlock(&dcache_lock);
}
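
break_spin_lock(), used in the redo path above, is what actually drops
the lock so a pending reschedule can run. A minimal sketch under the
same assumptions; note that any state derived under the lock is stale
afterwards, which is why the caller restarts with "goto redo":

/*
 * Sketch: release the lock, schedule if the current task has been
 * asked to yield, then re-take the lock.
 */
static inline void break_spin_lock(spinlock_t *lock)
{
	spin_unlock(lock);
	if (current->need_resched)
		schedule();
	spin_lock(lock);
}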