Example #1
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}
Example #2
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
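	/* The pgd list is threaded through struct page: page->index holds the
	 * next element, and page_private() points back at the previous
	 * element's next pointer. */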
	next = (struct page *) page->index;
	pprev = (struct page **) page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long) pprev);
}
Example #3
void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

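	/* The bounce page's page_private() was set to point at the
	 * fscrypt context when the page was set up. */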
	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
Example #4
static void put_compound_page(struct page *page)
{
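	/* page_private() of a compound subpage points at the head page;
	 * the compound destructor is stashed in page[1].lru.next. */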
	page = (struct page *)page_private(page);
	if (put_page_testzero(page)) {
		void (*dtor)(struct page *page);

		dtor = (void (*)(struct page *))page[1].lru.next;
		(*dtor)(page);
	}
}
Example #5
File: kexec_core.c  Project: mdamt/linux
static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

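	/* The allocation order was stored in page_private() when this
	 * block of pages was allocated. */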
	order = page_private(page);
	count = 1 << order;
	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}
Example #6
void ext4_restore_control_page(struct page *data_page)
{
	struct ext4_crypto_ctx *ctx =
		(struct ext4_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	ext4_release_crypto_ctx(ctx);
}
Example #7
static struct nfs_page *nfs_page_find_request_locked(struct page *page)
{
	struct nfs_page *req = NULL;

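	/* When PG_private is set, page_private() caches the nfs_page
	 * request covering this page. */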
	if (PagePrivate(page)) {
		req = (struct nfs_page *)page_private(page);
		if (req != NULL)
			kref_get(&req->wb_kref);
	}
	return req;
}
Example #8
/**
 * __delete_from_swap_cache: remove a page from the swap cache
 */
void __delete_from_swap_cache(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}
Example #9
/**
 * delete_from_swap_cache: remove a page from the swap cache
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	spin_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&swapper_space.tree_lock);

	swapcache_free(entry, page);
	page_cache_release(page);
}
Example #10
/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
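			/* On 32-bit, the value does not fit in page_private()
			 * itself, so it points at kmalloc'ed storage that
			 * must be freed here. */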
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
	free_xenballooned_pages(nr_pages, pages);
}
Example #11
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(!PageSwapCache(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(PagePrivate(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	pagecache_acct(-1);
	INC_CACHE_INFO(del_total);
}
Example #12
/* Convert DMA address to offset into virtually contiguous buffer. */
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
{
	int i;
	dma_addr_t address;
	ssize_t offset;

	for (i = 0; i < buffer->page_count; i++) {
		address = page_private(buffer->pages[i]);
		offset = (ssize_t)completed - (ssize_t)address;
		if (offset > 0 && offset <= PAGE_SIZE)
			return (i << PAGE_SHIFT) + offset;
	}

	return 0;
}
Example #13
/*
 * How many references to page are currently swapped out?
 */
static inline int page_swapcount(struct page *page)
{
	int count = 0;
	struct swap_info_struct *p;
	swp_entry_t entry;

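	/* For a page in the swap cache, page_private() holds its
	 * swp_entry_t value. */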
	entry.val = page_private(page);
	p = swap_info_get(entry);
	if (p) {
		/* Subtract the 1 for the swap cache itself */
		count = p->swap_map[swp_offset(entry)] - 1;
		spin_unlock(&swap_lock);
	}
	return count;
}
Example #14
void put_page(struct page *page)
{
	if (unlikely(PageCompound(page))) {
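		/* For a compound page, page_private() points at the head
		 * page, which owns the refcount and the destructor. */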
		page = (struct page *)page_private(page);
		if (put_page_testzero(page)) {
			void (*dtor)(struct page *page);

			dtor = (void (*)(struct page *))page[1].mapping;
			(*dtor)(page);
		}
		return;
	}
	if (put_page_testzero(page))
		__page_cache_release(page);
}
Example #15
File: swap_state.c  Project: oldzhu/linux
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	put_page(page);
}
Example #16
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
			   struct fw_card *card)
{
	int i;
	dma_addr_t address;

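	/* fw_iso_buffer_init() stored each page's DMA address in
	 * page_private(); recover it here to unmap the page. */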
	for (i = 0; i < buffer->page_count; i++) {
		address = page_private(buffer->pages[i]);
		dma_unmap_page(card->device, address,
			       PAGE_SIZE, buffer->direction);
		__free_page(buffer->pages[i]);
	}

	kfree(buffer->pages);
	buffer->pages = NULL;
}
Example #17
void f2fs_restore_and_release_control_page(struct page **page)
{
	struct f2fs_crypto_ctx *ctx;
	struct page *bounce_page;

	/* The bounce data pages are unmapped. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct f2fs_crypto_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	f2fs_restore_control_page(bounce_page);
}
Example #18
File: bio.c  Project: AlexShiLucky/linux
void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* The bounce data pages are unmapped. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
Example #19
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
		       int page_count, enum dma_data_direction direction)
{
	int i, j;
	dma_addr_t address;

	buffer->page_count = page_count;
	buffer->direction = direction;

	buffer->pages = kmalloc(page_count * sizeof(buffer->pages[0]),
				GFP_KERNEL);
	if (buffer->pages == NULL)
		goto out;

	for (i = 0; i < buffer->page_count; i++) {
		buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		if (buffer->pages[i] == NULL)
			goto out_pages;

		address = dma_map_page(card->device, buffer->pages[i],
				       0, PAGE_SIZE, direction);
		if (dma_mapping_error(card->device, address)) {
			__free_page(buffer->pages[i]);
			goto out_pages;
		}
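		/* Save the DMA address in page_private() so that
		 * fw_iso_buffer_lookup() and fw_iso_buffer_destroy()
		 * can retrieve it later. */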
		set_page_private(buffer->pages[i], address);
	}

	return 0;

 out_pages:
	for (j = 0; j < i; j++) {
		address = page_private(buffer->pages[j]);
		dma_unmap_page(card->device, address,
			       PAGE_SIZE, direction);
		__free_page(buffer->pages[j]);
	}
	kfree(buffer->pages);
 out:
	buffer->pages = NULL;

	return -ENOMEM;
}
Example #20
/*
 * Work out if there are any other processes sharing this
 * swap cache page. Free it if you can. Return success.
 */
int remove_exclusive_swap_page(struct page *page)
{
	int retval;
	struct swap_info_struct * p;
	swp_entry_t entry;

	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));

	if (!PageSwapCache(page))
		return 0;
	if (PageWriteback(page))
		return 0;
	if (page_count(page) != 2) /* 2: us + cache */
		return 0;

	entry.val = page_private(page);
	p = swap_info_get(entry);
	if (!p)
		return 0;

	/* Is the only swap cache user the cache itself? */
	retval = 0;
	if (p->swap_map[swp_offset(entry)] == 1) {
		/* Recheck the page count with the swapcache lock held.. */
		write_lock_irq(&swapper_space.tree_lock);
		if ((page_count(page) == 2) && !PageWriteback(page)) {
			__delete_from_swap_cache(page);
			SetPageDirty(page);
			retval = 1;
		}
		write_unlock_irq(&swapper_space.tree_lock);
	}
	spin_unlock(&swap_lock);

	if (retval) {
		swap_free(entry);
		page_cache_release(page);
	}

	return retval;
}
Example #21
/*
 * release a page and cleanup its private data
 */
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
	struct afs_writeback *wb;

	_enter("{{%x:%u}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
	       gfp_flags);

	if (PagePrivate(page)) {
		wb = (struct afs_writeback *) page_private(page);
		ASSERT(wb != NULL);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		afs_put_writeback(wb);
	}

	_leave(" = 0");
	return 0;
}
Example #22
struct page *m2p_find_override(unsigned long mfn)
{
	unsigned long flags;
	struct list_head *bucket = &m2p_overrides[mfn_hash(mfn)];
	struct page *p, *ret;

	ret = NULL;

	spin_lock_irqsave(&m2p_override_lock, flags);

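	/* Pages on the override list record the mfn they map in
	 * page_private(). */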
	list_for_each_entry(p, bucket, lru) {
		if (page_private(p) == mfn) {
			ret = p;
			break;
		}
	}

	spin_unlock_irqrestore(&m2p_override_lock, flags);

	return ret;
}
Example #23
void end_swap_bio_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
	} else {
		/*
		 * There is no reason to keep both uncompressed data and
		 * compressed data in memory.
		 */
		struct swap_info_struct *sis;

		SetPageUptodate(page);
		sis = page_swap_info(page);
		if (sis->flags & SWP_BLKDEV) {
			struct gendisk *disk = sis->bdev->bd_disk;
			if (disk->fops->swap_slot_free_notify) {
				swp_entry_t entry;
				unsigned long offset;

				entry.val = page_private(page);
				offset = swp_offset(entry);

				SetPageDirty(page);
				disk->fops->swap_slot_free_notify(sis->bdev,
						offset);
			}
		}
	}

	unlock_page(page);
	bio_put(bio);
}
Example #24
/*
 * release a page and cleanup its private data
 */
static int afs_file_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct cachefs_page *pageio;

	_enter("{%lu},%x", page->index, gfp_flags);

	if (PagePrivate(page)) {
#ifdef AFS_CACHING_SUPPORT
		struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
		cachefs_uncache_page(vnode->cache, page);
#endif

		pageio = (struct cachefs_page *) page_private(page);
		set_page_private(page, 0);
		ClearPagePrivate(page);

		kfree(pageio);
	}

	_leave(" = 0");
	return 0;
} /* end afs_file_releasepage() */
Example #25
/*
 * release a page and clean up its private state if it's not busy
 * - return true if the page can now be released, false if not
 */
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);

	_enter("{{%x:%u}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
	       gfp_flags);

	/* deny if page is being written to the cache and the caller hasn't
	 * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		if (fscache_check_page_write(vnode->cache, page)) {
			if (!(gfp_flags & __GFP_WAIT)) {
				_leave(" = F [cache busy]");
				return 0;
			}
			fscache_wait_on_page_write(vnode->cache, page);
		}

		fscache_uncache_page(vnode->cache, page);
		ClearPageFsCache(page);
	}
#endif

	if (PagePrivate(page)) {
		if (wb) {
			set_page_private(page, 0);
			afs_put_writeback(wb);
		}
		ClearPagePrivate(page);
	}

	/* indicate that the page can be released */
	_leave(" = T");
	return 1;
}
Example #26
/*
 * Test whether all pages in the range are free (i.e., isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
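		/* Pages freed to the per-cpu lists cache their migratetype
		 * in page_private(); MIGRATE_ISOLATE marks an isolated one. */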
		if (PageBuddy(page))
			pfn += 1 << page_order(page);
		else if (page_count(page) == 0 &&
				page_private(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}
Example #27
File: swap_state.c  Project: mdamt/linux
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	struct address_space *address_space;
	int i, nr = hpage_nr_pages(page);
	swp_entry_t entry;
	pgoff_t idx;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	idx = swp_offset(entry);
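	/* A huge page occupies nr consecutive swap cache slots; remove each
	 * subpage's radix tree entry and clear its private field. */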
	for (i = 0; i < nr; i++) {
		radix_tree_delete(&address_space->page_tree, idx + i);
		set_page_private(page + i, 0);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}
Example #28

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t ent = {.val = page_private(page)};

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
	mem_cgroup_uncharge_swapcache(page, ent);
}

Example #29
static int pohmelfs_trans_iter(struct netfs_trans *t, struct pohmelfs_crypto_engine *e,
		int (*iterator) (struct pohmelfs_crypto_engine *e,
				  struct scatterlist *dst,
				  struct scatterlist *src))
{
	void *data = t->iovec.iov_base + sizeof(struct netfs_cmd) + t->psb->crypto_attached_size;
	unsigned int size = t->iovec.iov_len - sizeof(struct netfs_cmd) - t->psb->crypto_attached_size;
	struct netfs_cmd *cmd = data;
	unsigned int sz, pages = t->attached_pages, i, csize, cmd_cmd, dpage_idx;
	struct scatterlist sg_src, sg_dst;
	int err;

	while (size) {
		cmd = data;
		cmd_cmd = __be16_to_cpu(cmd->cmd);
		csize = __be32_to_cpu(cmd->size);
		cmd->iv = __cpu_to_be64(e->iv);

		if (cmd_cmd == NETFS_READ_PAGES || cmd_cmd == NETFS_READ_PAGE)
			csize = __be16_to_cpu(cmd->ext);

		sz = csize + __be16_to_cpu(cmd->cpad) + sizeof(struct netfs_cmd);

		dprintk("%s: size: %u, sz: %u, cmd_size: %u, cmd_cpad: %u.\n",
				__func__, size, sz, __be32_to_cpu(cmd->size), __be16_to_cpu(cmd->cpad));

		data += sz;
		size -= sz;

		sg_init_one(&sg_src, cmd->data, sz - sizeof(struct netfs_cmd));
		sg_init_one(&sg_dst, cmd->data, sz - sizeof(struct netfs_cmd));

		err = iterator(e, &sg_dst, &sg_src);
		if (err)
			return err;
	}

	if (!pages)
		return 0;

	dpage_idx = 0;
	for (i = 0; i < t->page_num; ++i) {
		struct page *page = t->pages[i];
		struct page *dpage = e->pages[dpage_idx];

		if (!page)
			continue;

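		/* page_private() of an attached page holds the number of
		 * bytes of data stored in it, used as the sg length below. */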
		sg_init_table(&sg_src, 1);
		sg_init_table(&sg_dst, 1);
		sg_set_page(&sg_src, page, page_private(page), 0);
		sg_set_page(&sg_dst, dpage, page_private(page), 0);

		err = iterator(e, &sg_dst, &sg_src);
		if (err)
			return err;

		pages--;
		if (!pages)
			break;
		dpage_idx++;
	}

	return 0;
}
Example #30
static void ext4_finish_bio(struct bio *bio)
{
	int i;
	int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		struct page *data_page = NULL;
		struct ext4_crypto_ctx *ctx = NULL;
#endif
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;
		unsigned long flags;

		if (!page)
			continue;

#ifdef CONFIG_EXT4_FS_ENCRYPTION
		if (!page->mapping) {
			/* The bounce data pages are unmapped. */
			data_page = page;
			ctx = (struct ext4_crypto_ctx *)page_private(data_page);
			page = ctx->w.control_page;
		}
#endif

		if (error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under BH_Uptodate_Lock
		 * to avoid races with other end io clearing async_write flags
		 */
		local_irq_save(flags);
		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (error)
				buffer_io_error(bh);
		} while ((bh = bh->b_this_page) != head);
		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
		local_irq_restore(flags);
		if (!under_io) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
			if (ctx)
				ext4_restore_control_page(data_page);
#endif
			end_page_writeback(page);
		}
	}
}