Example #1
/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
			XFS_STATS_INC(xs_qm_dquot_unused);
	}
	xfs_dqunlock(dqp);
}
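
Example #1 shows the most common list_lru_add() shape: once the last reference to a cached object is dropped, while the object's own lock is still held, the object is parked on a per-mount LRU so a shrinker can reclaim it later, and a statistics counter is bumped only when list_lru_add() reports that the item was not already on a list. Below is a minimal sketch of that pattern, assuming the historical list_lru_add(lru, item) API these examples use; my_obj, my_cache_lru and my_unused_counter are hypothetical names, not part of XFS.

/*
 * Minimal sketch of the pattern above, assuming the historical
 * list_lru_add(lru, item) API used by these examples. The names
 * my_obj, my_cache_lru and my_unused_counter are hypothetical,
 * not part of XFS.
 */
#include <linux/atomic.h>
#include <linux/list_lru.h>
#include <linux/spinlock.h>

struct my_obj {
	spinlock_t		lock;		/* protects refcount */
	int			refcount;
	struct list_head	lru;		/* linked into my_cache_lru */
};

static struct list_lru my_cache_lru;		/* assumed: list_lru_init() at setup */
static atomic_long_t my_unused_counter;		/* assumed statistics counter */

static void my_obj_put(struct my_obj *obj)
{
	spin_lock(&obj->lock);
	if (--obj->refcount == 0) {
		/*
		 * Keep the object cached instead of freeing it; the
		 * shrinker reclaims it from the LRU under pressure.
		 * list_lru_add() returns true only for a new insertion,
		 * so the "unused" statistic is not double-counted.
		 */
		if (list_lru_add(&my_cache_lru, &obj->lru))
			atomic_long_inc(&my_unused_counter);
	}
	spin_unlock(&obj->lock);
}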
Example #2
STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;

	trace_xfs_dqput_free(dqp);

	if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
		XFS_STATS_INC(xs_qm_dquot_unused);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot/pdquot reference that it (probably) has. Otherwise it'll
	 * keep the gdquot/pdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}

	pdqp = dqp->q_pdquot;
	if (pdqp) {
		xfs_dqlock(pdqp);
		dqp->q_pdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group/project quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
	if (pdqp)
		xfs_qm_dqput(pdqp);
}
Example #3
static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	struct radix_tree_node *node;
	unsigned long index;
	unsigned int offset;
	unsigned int tag;
	void **slot;

	VM_BUG_ON(!PageLocked(page));

	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);

	if (shadow) {
		mapping->nrshadows++;
		/*
		 * Make sure the nrshadows update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages--;

	if (!node) {
		/* Clear direct pointer tags in root node */
		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
		radix_tree_replace_slot(slot, shadow);
		return;
	}

	/* Clear tree tags for the removed page */
	index = page->index;
	offset = index & RADIX_TREE_MAP_MASK;
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (test_bit(offset, node->tags[tag]))
			radix_tree_tag_clear(&mapping->page_tree, index, tag);
	}

	/* Delete page, swap shadow entry */
	radix_tree_replace_slot(slot, shadow);
	workingset_node_pages_dec(node);
	if (shadow)
		workingset_node_shadows_inc(node);
	else
		if (__radix_tree_delete_node(&mapping->page_tree, node))
			return;

	/*
	 * Track node that only contains shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already tracked.  The
	 * list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_pages(node) &&
	    list_empty(&node->private_list)) {
		node->private_data = mapping;
		list_lru_add(&workingset_shadow_nodes, &node->private_list);
	}
}
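
The tail of Example #3 shows how a caller can avoid touching the list_lru's internal lock for nodes that are already tracked: node->private_list is only manipulated under mapping->tree_lock, so a plain list_empty() check is enough to tell whether the node is already on workingset_shadow_nodes. A small sketch of that test follows; my_node and my_shadow_lru are hypothetical names.

/*
 * Sketch of the "avoid the LRU lock if already tracked" test above.
 * my_node and my_shadow_lru are hypothetical names. The list_empty()
 * check is only safe because node->link is exclusively manipulated
 * while the caller's own lock is held (mapping->tree_lock in the
 * real code).
 */
#include <linux/list_lru.h>

struct my_node {
	unsigned int		nr_live;	/* live entries left in the node */
	struct list_head	link;		/* linked into my_shadow_lru */
};

static struct list_lru my_shadow_lru;		/* assumed: list_lru_init() at setup */

/* Caller holds the lock that protects @node and @node->link. */
static void my_node_track_if_idle(struct my_node *node)
{
	if (!node->nr_live && list_empty(&node->link))
		list_lru_add(&my_shadow_lru, &node->link);
}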
Example #4
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %pK-%pK\n", alloc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
				alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
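
Example #4 pairs list_lru_del() with list_lru_add(): a binder page that is still allocated is pulled off binder_alloc_lru before it is reused, and pages in a freed range are parked back on the LRU rather than released immediately, leaving the actual freeing to the shrinker. The boolean return values double as sanity checks, with WARN_ON firing if a page is unexpectedly missing from, or already on, the LRU. A condensed sketch of that pairing is below; my_page_slot, my_page_lru, my_slot_reuse and my_slot_park are hypothetical names.

/*
 * Condensed sketch of the reuse/park pairing above. my_page_slot,
 * my_page_lru, my_slot_reuse and my_slot_park are hypothetical names.
 */
#include <linux/bug.h>
#include <linux/list_lru.h>

struct my_page_slot {
	struct list_head	lru;	/* linked into my_page_lru while idle */
};

static struct list_lru my_page_lru;	/* assumed: list_lru_init() at setup */

/* About to reuse a cached slot: it must currently be parked on the LRU. */
static void my_slot_reuse(struct my_page_slot *slot)
{
	WARN_ON(!list_lru_del(&my_page_lru, &slot->lru));
}

/* Done with the slot: park it so the shrinker can reclaim it later. */
static void my_slot_park(struct my_page_slot *slot)
{
	WARN_ON(!list_lru_add(&my_page_lru, &slot->lru));
}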