Example #1
/**
 * page_remove_pte - free the Page struct related to linear address la
 *                 - and clean (invalidate) the pte related to linear address la
 * @param pgdir page directory
 * @param la    linear address of the page to be removed
 * @param ptep  page table entry of the page to be removed
 * note: the page table is changed, so the TLB entry needs to be invalidated
 */
void
page_remove_pte(pgd_t *pgdir, uintptr_t la, pte_t *ptep) {
    if (ptep_present(ptep)) {
        struct Page *page = pte2page(*ptep);
        if (!PageSwap(page)) {
            if (page_ref_dec(page) == 0) {
                //Don't free dma pages
                if (!PageIO(page))
                  free_page(page);
            }
        }
        else {
            if (ptep_dirty(ptep)) {
                SetPageDirty(page);
            }
            page_ref_dec(page);
        }
        ptep_unmap(ptep);
        mp_tlb_invalidate(pgdir, la);
    }
    else if (! ptep_invalid(ptep)) {
#ifndef CONFIG_NO_SWAP
        swap_remove_entry(*ptep);
#endif
        ptep_unmap(ptep);
    }
}
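
For context, a minimal sketch of a typical caller follows. It assumes a get_pte() lookup helper with the (pgdir, la, create) signature used in Example #4; the wrapper name is illustrative, not code from the project.

// page_remove_sketch - hedged illustration only: look up the pte for la and,
//                      if the page table exists, hand it to page_remove_pte,
//                      which frees the page and invalidates the TLB entry.
static void
page_remove_sketch(pgd_t *pgdir, uintptr_t la) {
    pte_t *ptep = get_pte(pgdir, la, 0);    // 0: do not create a missing page table
    if (ptep != NULL) {
        page_remove_pte(pgdir, la, ptep);
    }
}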
Example #2
/**
 * crystalhd_unmap_dio - Release mapped resources
 * @adp: Adapter instance
 * @dio: DIO request instance
 *
 * Return:
 *	Status.
 *
 * This routine unmaps and releases the user buffer pages.
 */
BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *adp, struct crystalhd_dio_req *dio)
{
	struct page *page = NULL;
	int j = 0;

	if (!adp || !dio) {
		printk(KERN_ERR "%s: Invalid arg\n", __func__);
		return BC_STS_INV_ARG;
	}

	if ((dio->page_cnt > 0) && (dio->sig != crystalhd_dio_inv)) {
		for (j = 0; j < dio->page_cnt; j++) {
			page = dio->pages[j];
			if (page) {
				if (!PageReserved(page) &&
				    (dio->direction == DMA_FROM_DEVICE))
					SetPageDirty(page);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
				put_page(page);
#else
				page_cache_release(page);
#endif
			}
		}
	}
	if (dio->sig == crystalhd_dio_sg_mapped)
		pci_unmap_sg(adp->pdev, dio->sg, dio->page_cnt, dio->direction);

	crystalhd_free_dio(adp, dio);

	return BC_STS_SUCCESS;
}
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page))) {
			swapcache_free(entry, NULL);
			return 0;
		}

	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		swapcache_free(entry, NULL);
		return 0;
	}
}
Example #4
// swap_out_vma - try to unmap ptes in the vma and move their pages onto the swap active list.
static int
swap_out_vma(struct mm_struct *mm, struct vma_struct *vma, uintptr_t addr, size_t require) {
    if (require == 0 || !(addr >= vma->vm_start && addr < vma->vm_end)) {
        return 0;
    }
    uintptr_t end;
    size_t free_count = 0;
    addr = ROUNDDOWN(addr, PGSIZE), end = ROUNDUP(vma->vm_end, PGSIZE);
    while (addr < end && require != 0) {
        pte_t *ptep = get_pte(mm->pgdir, addr, 0);
        if (ptep == NULL) {
            if (get_pud(mm->pgdir, addr, 0) == NULL) {
                addr = ROUNDDOWN(addr + PUSIZE, PUSIZE);
            }
            else if (get_pmd(mm->pgdir, addr, 0) == NULL) {
                addr = ROUNDDOWN(addr + PMSIZE, PMSIZE);
            }
            else {
                addr = ROUNDDOWN(addr + PTSIZE, PTSIZE);
            }
            continue ;
        }
        if (ptep_present(ptep)) {
            struct Page *page = pte2page(*ptep);
            assert(!PageReserved(page));
            if (ptep_accessed(ptep)) {
                ptep_unset_accessed(ptep);
                mp_tlb_invalidate(mm->pgdir, addr);
                goto try_next_entry;
            }
            if (!PageSwap(page)) {
                if (!swap_page_add(page, 0)) {
                    goto try_next_entry;
                }
                swap_active_list_add(page);
            }
            else if (ptep_dirty(ptep)) {
                SetPageDirty(page);
            }
            swap_entry_t entry = page->index;
            swap_duplicate(entry);
            page_ref_dec(page);
            ptep_copy(ptep, &entry);
            mp_tlb_invalidate(mm->pgdir, addr);
            mm->swap_address = addr + PGSIZE;
            free_count ++, require --;
            if ((vma->vm_flags & VM_SHARE) && page_ref(page) == 1) {
                uintptr_t shmem_addr = addr - vma->vm_start + vma->shmem_off;
                pte_t *sh_ptep = shmem_get_entry(vma->shmem, shmem_addr, 0);
                assert(sh_ptep != NULL && ! ptep_invalid(sh_ptep));
                if (ptep_present(sh_ptep)) {
                    shmem_insert_entry(vma->shmem, shmem_addr, entry);
                }
            }
        }
    try_next_entry:
        addr += PGSIZE;
    }
    return free_count;
}
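
Since swap_out_vma() relies on the caller to pass an address inside the vma and uses mm->swap_address as a resume cursor, a hedged sketch of a per-vma driver is shown below; the wrapper name and flow are assumptions for illustration, not project code.

// swap_out_one_vma_sketch - assumed illustration only: resume the scan of a
//                           single vma from mm->swap_address (advanced by
//                           swap_out_vma on its last pass), falling back to
//                           the start of the vma when the cursor points elsewhere.
static int
swap_out_one_vma_sketch(struct mm_struct *mm, struct vma_struct *vma, size_t require) {
    uintptr_t addr = mm->swap_address;
    if (addr < vma->vm_start || addr >= vma->vm_end) {
        addr = vma->vm_start;       // cursor is outside this vma: restart at its base
    }
    return swap_out_vma(mm, vma, addr, require);
}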
Example #5
static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) 
{
	struct page *page;
	int i;

	switch(vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
		/* fall through */
	case dr_via_desc_pages_alloc:
		for (i=0; i<vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
			  free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
		/* fall through */
	case dr_via_pages_locked:
		for (i=0; i<vsg->num_pages; ++i) {
			if ( NULL != (page = vsg->pages[i])) {
				if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction)) 
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
		/* fall through */
	case dr_via_pages_alloc:
		vfree(vsg->pages);
		/* fall through */
	default:
		vsg->state = dr_via_sg_init;
	}
	if (vsg->bounce_buffer) {
		vfree(vsg->bounce_buffer);
		vsg->bounce_buffer = NULL;
	}
	vsg->free_on_sequence = 0;
}		
Example #6
/*
 * Free the swap entry (as swap_free() does), but also try to
 * free the page cache entry if it is the last user.
 */
void free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct * p;
	struct page *page = NULL;

	if (is_migration_entry(entry))
		return;

	p = swap_info_get(entry);
	if (p) {
		if (swap_entry_free(p, swp_offset(entry)) == 1) {
			page = find_get_page(&swapper_space, entry.val);
			if (page && unlikely(TestSetPageLocked(page))) {
				page_cache_release(page);
				page = NULL;
			}
		}
		spin_unlock(&swap_lock);
	}
	if (page) {
		int one_user;

		BUG_ON(PagePrivate(page));
		one_user = (page_count(page) == 2);
		/* Only cache user (+us), or swap space full? Free it! */
		/* Also recheck PageSwapCache after page is locked (above) */
		if (PageSwapCache(page) && !PageWriteback(page) &&
					(one_user || vm_swap_full())) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
		unlock_page(page);
		page_cache_release(page);
	}
}
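
For orientation, a hedged sketch of the usual call pattern follows: when address-space teardown meets a pte that is not present but not empty, the pte encodes a swap entry, which is what free_swap_and_cache() releases. The helper name and the teardown context are assumptions, not code from this file.

/* zap_swap_pte_sketch - assumed illustration only: release the swap entry
 * referenced by a non-present, non-empty pte and clear the pte itself.
 * pte_present()/pte_none()/pte_to_swp_entry()/pte_clear() are standard
 * helpers of this kernel era. (Kernels with nonlinear file mappings would
 * also need a pte_file() check before treating the pte as a swap entry.) */
static inline void zap_swap_pte_sketch(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t ptent = *ptep;

	if (!pte_present(ptent) && !pte_none(ptent)) {
		free_swap_and_cache(pte_to_swp_entry(ptent));
		pte_clear(mm, addr, ptep);
	}
}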
Example #7
static inline void shmem_remove_entry_pte(pte_t * ptep)
{
	assert(ptep != NULL);
	if (ptep_present(ptep)) {
		struct Page *page = pte2page(*ptep);
#ifdef UCONFIG_SWAP
		if (!PageSwap(page)) {
			if (page_ref_dec(page) == 0) {
				free_page(page);
			}
		} else {
			if (ptep_dirty(ptep)) {
				SetPageDirty(page);
			}
			page_ref_dec(page);
		}
#else
		if (page_ref_dec(page) == 0) {
			free_page(page);
		}
#endif /* UCONFIG_SWAP */
		ptep_unmap(ptep);
	} else if (!ptep_invalid(ptep)) {
#ifdef UCONFIG_SWAP
		swap_remove_entry(*ptep);
		ptep_unmap(ptep);
#else
		assert(0);
#endif
	}
}
Example #8
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	char *ptr_page;
	int i;
	struct page *mmap_page;

	mmap_page = alloc_pages(GFP_KERNEL, get_order(size));
	if (!mmap_page)
		return -ENOMEM;

	printk(KERN_NOTICE "mmap:pgoff=%x size=%zu pfn=%x\n",
	       (int)vma->vm_pgoff, size, (int)page_to_pfn(mmap_page));

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    page_to_pfn(mmap_page),
			    size,
			    vma->vm_page_prot)) {
		__free_pages(mmap_page, get_order(size));	/* don't leak on failure */
		return -EAGAIN;
	}
	ptr_page = kmap_atomic(mmap_page);
	for (i = 0; i < 100; i++)
		ptr_page[i] = (char)vma->vm_pgoff;
	kunmap_atomic(ptr_page);
	SetPageDirty(mmap_page); /* only the first page */
	flush_dcache_page(mmap_page);

	vma->vm_ops = &my_mem_vm_ops;
	vma->vm_ops->open(vma);
	
	return 0;
}
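
A hedged user-space sketch of exercising such a mapping is shown below; the device node path is hypothetical, and the single-page mmap(2) pattern is standard practice rather than something taken from this driver.

/* user-space illustration only: map one page of the (hypothetical) device
 * node and read back the byte pattern the driver wrote into the first page. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/simple_mem", O_RDWR);	/* hypothetical node name */
	if (fd < 0)
		return 1;

	unsigned char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	printf("first byte: %u\n", p[0]);	/* written by mmap_mem() */

	munmap(p, 4096);
	close(fd);
	return 0;
}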
Example #9
static inline void pages_dirty(void)
{
	unsigned int i;

	/* set the pages as dirty */
	for (i = 0; i < nr_pages; i++)
		SetPageDirty(pages[i]);
}
void end_swap_bio_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
		goto out;
	}

	SetPageUptodate(page);

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (likely(PageSwapCache(page))) {
		/*
		 * The swap subsystem performs lazy swap slot freeing,
		 * expecting that the page will be swapped out again.
		 * So we can avoid an unnecessary write if the page
		 * isn't redirtied.
		 * This is good for real swap storage because we can
		 * reduce unnecessary I/O and enhance wear-leveling
		 * if an SSD is used as the swap device.
		 * But if an in-memory swap device (e.g. zram) is used,
		 * this causes a duplicated copy between uncompressed
		 * data in VM-owned memory and compressed data in
		 * zram-owned memory.  So let's free the zram-owned memory
		 * and make the VM-owned decompressed page *dirty*,
		 * so the page should be swapped out somewhere again if
		 * we again wish to reclaim it.
		 */
		struct gendisk *disk = bio->bi_bdev->bd_disk;
		if (disk->fops->swap_slot_free_notify) {
			swp_entry_t entry;
			unsigned long offset;

			entry.val = page_private(page);
			offset = swp_offset(entry);

			SetPageDirty(page);
			disk->fops->swap_slot_free_notify(bio->bi_bdev,
					offset);
		}
	}

out:
	unlock_page(page);
	bio_put(bio);
}
Example #11
/*
 * ->writepage to the blockdev's mapping has to redirty the page so that the
 * VM doesn't go and steal it.  We return AOP_WRITEPAGE_ACTIVATE so that the VM
 * won't try to (pointlessly) write the page again for a while.
 *
 * Really, these pages should not be on the LRU at all.
 */
static int ramdisk_writepage(struct page *page, struct writeback_control *wbc)
{
	if (!PageUptodate(page))
		make_page_uptodate(page);
	SetPageDirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;
	unlock_page(page);
	return 0;
}
Example #12
static inline void pages_unmap(void)
{
	unsigned int i;

	/* mark the pages dirty and release them */
	for (i = 0; i < nr_pages; i++) {
		if (!PageReserved(pages[i]))
			SetPageDirty(pages[i]);
		put_page(pages[i]);
	}
}
Example #13
void unlock_pages(struct page **page_list)
{
	if (!page_list)
		return;
	while (*page_list) {
		if (!PageReserved(*page_list))
			SetPageDirty(*page_list);
		put_page(*page_list);
		page_list++;
	}
}
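
As a counterpart to the release helpers above, a hedged sketch of the pinning side follows. It assumes the pre-5.2 get_user_pages_fast(start, nr_pages, write, pages) signature; the function name and the NULL-terminated array convention (matching unlock_pages) are illustrative.

/* lock_pages_sketch - assumed illustration only: pin 'nr' user pages for I/O.
 * The array is NULL-terminated so a helper like unlock_pages() can walk it.
 * Uses the pre-5.2 get_user_pages_fast(start, nr_pages, write, pages) form. */
static struct page **lock_pages_sketch(unsigned long uaddr, int nr, int write)
{
	struct page **pages;
	int pinned;

	pages = kcalloc(nr + 1, sizeof(*pages), GFP_KERNEL);	/* extra slot stays NULL */
	if (!pages)
		return NULL;

	pinned = get_user_pages_fast(uaddr & PAGE_MASK, nr, write, pages);
	if (pinned != nr) {
		while (pinned > 0)
			put_page(pages[--pinned]);	/* undo a partial pin */
		kfree(pages);
		return NULL;
	}
	return pages;
}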
Example #14
File: rd.c  Project: nhanh0/hah
static int ramdisk_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	if (!Page_Uptodate(page)) {
		void *addr = page_address(page);
		memset(addr, 0, PAGE_CACHE_SIZE);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	SetPageDirty(page);
	return 0;
}
Example #15
void vfi_dealloc_pages(struct vfi_smb *smb)
{
	while(smb->num_pages--) {
		struct page *page = smb->pages[smb->num_pages];
		if (smb->address) {
			if (! PageReserved(page))
				SetPageDirty(page);
			page_cache_release(page);
		}
		else 
			__free_pages(smb->pages[smb->num_pages],0);
	}
	kfree(smb->pages);
}
Example #16
/*
 * Strange swizzling function only for use by shmem_writepage
 */
int move_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
	if (!err) {
		remove_from_page_cache(page);
		page_cache_release(page);	/* pagecache ref */
		if (!swap_duplicate(entry))
			BUG();
		SetPageDirty(page);
		INC_CACHE_INFO(add_total);
	} else if (err == -EEXIST)
		INC_CACHE_INFO(exist_race);
	return err;
}
Example #17
/*
 * Try to make the page cache and handle ready for the inline data case.
 * We can call this function in 2 cases:
 * 1. The inode is created and the first write exceeds inline size. We can
 *    clear the inode state safely.
 * 2. The inode has inline data, then we need to read the data, make it
 *    uptodate and dirty so that ext4_da_writepages can handle it. We don't
 *    need to start the journal since the file's metadata isn't changed now.
 */
static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
						 struct inode *inode,
						 unsigned flags,
						 void **fsdata)
{
	int ret = 0, inline_size;
	struct page *page;

	page = grab_cache_page_write_begin(mapping, 0, flags);
	if (!page)
		return -ENOMEM;

	down_read(&EXT4_I(inode)->xattr_sem);
	if (!ext4_has_inline_data(inode)) {
		ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
		goto out;
	}

	inline_size = ext4_get_inline_size(inode);

	if (!PageUptodate(page)) {
		ret = ext4_read_inline_page(inode, page);
		if (ret < 0)
			goto out;
	}

	ret = __block_write_begin(page, 0, inline_size,
				  ext4_da_get_block_prep);
	if (ret) {
		up_read(&EXT4_I(inode)->xattr_sem);
		unlock_page(page);
		page_cache_release(page);
		ext4_truncate_failed_write(inode);
		return ret;
	}

	SetPageDirty(page);
	SetPageUptodate(page);
	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
	*fsdata = (void *)CONVERT_INLINE_DATA;

out:
	up_read(&EXT4_I(inode)->xattr_sem);
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	return ret;
}
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock. 
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

#ifdef CONFIG_HSWAP
	if (!current_is_kswapd())
		entry = get_lowest_prio_swap_page();
	else
#endif
	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page))) {
			swapcache_free(entry, NULL);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}
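
A hedged sketch of the shrink-side caller follows; it mirrors the usual vmscan pattern (anonymous page, locked, not yet in swap cache) against the one-argument add_to_swap() shown here, but the surrounding flow is a simplified assumption rather than the kernel's actual reclaim loop.

/* reclaim_anon_sketch - assumed illustration only: with 'page' locked, an
 * anonymous page must be given swap space before it can be paged out.
 * add_to_swap() marks it dirty so the writeback path will actually write it. */
static int reclaim_anon_sketch(struct page *page)
{
	if (PageAnon(page) && !PageSwapCache(page)) {
		if (!add_to_swap(page))
			return 0;	/* no swap space: keep the page in memory */
	}
	/* ...the page now belongs to swapper_space; proceed to unmap and write it out... */
	return 1;
}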
Example #19
// swap_page_add - set the PG_swap flag on page, set page->index = entry, and add page to hash_list.
//               - if entry == 0, a new swap entry is allocated for the page and the page is marked dirty.
static bool
swap_page_add(struct Page *page, swap_entry_t entry) {
    assert(!PageSwap(page));
    if (entry == 0) {
        if ((entry = try_alloc_swap_entry()) == 0) {
            return 0;
        }
        assert(mem_map[swap_offset(entry)] == SWAP_UNUSED);
        mem_map[swap_offset(entry)] = 0;
        SetPageDirty(page);
    }
    SetPageSwap(page);
    page->index = entry;
    list_add(hash_list + entry_hashfn(entry), &(page->page_link));
    return 1;
}
int
mic_unpin_user_pages(struct page **pages, uint32_t nf_pages)
{
	uint32_t j = 0;
	uint32_t status = 0;
	if (pages) {
		for (j = 0; j < nf_pages; j++) {
			if (pages[j]) {
				SetPageDirty(pages[j]);
				page_cache_release(pages[j]);
			}
		}
		kfree(pages);
	}

	return status;
}
Example #21
void release_user_pages(struct csession *ses)
{
	unsigned int i;

	for (i = 0; i < ses->used_pages; i++) {
		if (!PageReserved(ses->pages[i]))
			SetPageDirty(ses->pages[i]);

		if (ses->readonly_pages == 0)
			flush_dcache_page(ses->pages[i]);
		else
			ses->readonly_pages--;

		page_cache_release(ses->pages[i]);
	}
	ses->used_pages = 0;
}
Example #22
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock. 
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;
	struct user_beancounter *ub;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));


	ub = pb_grab_page_ub(page);
	if (IS_ERR(ub))
		return 0;

	entry = get_swap_page(ub);
	put_beancounter(ub);
	if (!entry.val)
		return 0;

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock. 
 */
int add_to_swap(struct page * page, gfp_t gfp_mask)
{
	swp_entry_t entry;
	int err;

	if (!PageLocked(page))
		BUG();

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator. __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 */
		/*
		 * Add it to the swap cache and mark it dirty
		 */
		err = __add_to_swap_cache(page, entry,
				gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);

		switch (err) {
		case 0:				/* Success */
			SetPageUptodate(page);
			SetPageDirty(page);
			INC_CACHE_INFO(add_total);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			INC_CACHE_INFO(exist_race);
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}
Example #24
/**
 * sgl_unmap_user_pages() - Release the scatter gather list pages
 * @sgl: The scatter gather list
 * @nr_pages: Number of pages in the list
 * @dirty: Flag indicating whether the pages should be marked dirty
 * @to_user: 1 when transfer is to/from user-space (0 for to/from kernel)
 *
 */
static void sgl_unmap_user_pages(struct scatterlist *sgl,
				 const unsigned int nr_pages, int dirty,
				 int to_user)
{
	int i;

	if (!to_user)
		return;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = sg_page(&sgl[i]);

		if (dirty && !PageReserved(page))
			SetPageDirty(page);

		page_cache_release (page);
	}
}
Example #25
static int pixel_data(struct ioctl_struct *ctrl, struct device_extension *pdx)
{
	int i;

	if (!pdx->gotPixelData)
		return 0;

	pdx->gotPixelData = 0;
	ctrl->numbytes = pdx->bulk_in_size_returned;
	pdx->bulk_in_size_returned -= pdx->frameSize;

	for (i = 0; i < pdx->maplist_numPagesMapped[pdx->active_frame]; i++)
		SetPageDirty(sg_page(&pdx->sgl[pdx->active_frame][i]));

	pdx->active_frame = ((pdx->active_frame + 1) % pdx->num_frames);

	return ctrl->numbytes;
}
Example #26
void CGsCachedArea::Invalidate(uint32 memoryStart, uint32 memorySize)
{
	uint32 areaSize = GetSize();

	if(DoMemoryRangesOverlap(memoryStart, memorySize, m_bufPtr, areaSize))
	{
		//Find the pages that are touched by this transfer
		uint32 pageStart = (memoryStart < m_bufPtr) ? 0 : ((memoryStart - m_bufPtr) / CGsPixelFormats::PAGESIZE);
		uint32 pageCount = std::min<uint32>(GetPageCount(), (memorySize + CGsPixelFormats::PAGESIZE - 1) / CGsPixelFormats::PAGESIZE);

		for(unsigned int i = 0; i < pageCount; i++)
		{
			SetPageDirty(pageStart + i);
		}

		//Wouldn't make sense to go through here and not have at least a dirty page
		assert(HasDirtyPages());
	}
}
Example #27
/*
 * Work out if there are any other processes sharing this
 * swap cache page. Free it if you can. Return success.
 */
int remove_exclusive_swap_page(struct page *page)
{
	int retval;
	struct swap_info_struct * p;
	swp_entry_t entry;

	BUG_ON(PagePrivate(page));
	BUG_ON(!PageLocked(page));

	if (!PageSwapCache(page))
		return 0;
	if (PageWriteback(page))
		return 0;
	if (page_count(page) != 2) /* 2: us + cache */
		return 0;

	entry.val = page_private(page);
	p = swap_info_get(entry);
	if (!p)
		return 0;

	/* Is the only swap cache user the cache itself? */
	retval = 0;
	if (p->swap_map[swp_offset(entry)] == 1) {
		/* Recheck the page count with the swapcache lock held.. */
		write_lock_irq(&swapper_space.tree_lock);
		if ((page_count(page) == 2) && !PageWriteback(page)) {
			__delete_from_swap_cache(page);
			SetPageDirty(page);
			retval = 1;
		}
		write_unlock_irq(&swapper_space.tree_lock);
	}
	spin_unlock(&swap_lock);

	if (retval) {
		swap_free(entry);
		page_cache_release(page);
	}

	return retval;
}
Example #28
int simple_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{	
	size_t size = vma->vm_end - vma->vm_start;
	int i;
	int ret;
	struct page *map_page;
	char *ptr;

	printk(KERN_NOTICE "vmf:fault=%p flag=%x offset=%lx\n",
	       vmf->virtual_address,
	       vmf->flags,
	       vmf->pgoff);
	printk(KERN_NOTICE "vma:start=0x%lx size=%x pgoff=%lx\n",
	       vma->vm_start,
	       (int)size,
	       vma->vm_pgoff);
	       
	vma->vm_flags |= VM_MIXEDMAP;
	map_page = data_pages[vmf->pgoff];

	ret = vm_insert_mixed(vma,
			      (unsigned long)vmf->virtual_address,
			      page_to_pfn(map_page));
	printk(KERN_NOTICE "ret=%d pfn=%x vaddr=0x%lx cnt=%d\n",
	       ret,
	       (int)page_to_pfn(map_page),
	       (unsigned long)vmf->virtual_address,
	       page_count(map_page));

	/*
	 * Example of the "Direct I/O"
	 */
	ptr = kmap_atomic(map_page);
	for (i = 0; i < 100; i++) {
		ptr[i] = (unsigned char)vmf->pgoff;
	}
	kunmap_atomic(ptr);
	SetPageDirty(map_page);

	return VM_FAULT_NOPAGE;
}
Example #29
/*
 * "Get" data from frontswap associated with swaptype and offset that were
 * specified when the data was put to frontswap and use it to fill the
 * specified page with data. Page must be locked and in the swap cache.
 */
int __frontswap_load(struct page *page)
{
	int ret = -1;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	if (frontswap_test(sis, offset))
		ret = frontswap_ops.load(type, offset, page);
	if (ret == 0) {
		inc_frontswap_loads();
		if (frontswap_tmem_exclusive_gets_enabled) {
			SetPageDirty(page);
			frontswap_clear(sis, offset);
		}
	}
	return ret;
}
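
For context, a hedged sketch of the read-side caller follows; frontswap_load() is the wrapper that ends up in __frontswap_load(), while the surrounding swap_readpage() logic is abbreviated and partly assumed.

/* swap_readpage_sketch - assumed illustration only: if frontswap can satisfy
 * the read, the page is complete without touching the block device; with
 * exclusive gets enabled, __frontswap_load() has also marked it dirty so the
 * data is not lost when the frontswap copy is dropped. */
static int swap_readpage_sketch(struct page *page)
{
	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		return 0;
	}
	/* ...otherwise fall back to submitting a real bio for the swap slot... */
	return -EIO;
}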
Example #30
/*
 * Work out if there are any other processes sharing this
 * swap cache page. Free it if you can. Return success.
 */
int remove_exclusive_swap_page(struct page *page)
{
	int retval;
	struct swap_info_struct * p;
	swp_entry_t entry;

	if (!PageLocked(page))
		BUG();
	if (!PageSwapCache(page))
		return 0;
	if (page_count(page) - !!page->buffers != 2)	/* 2: us + cache */
		return 0;

	entry.val = page->index;
	p = swap_info_get(entry);
	if (!p)
		return 0;

	/* Is the only swap cache user the cache itself? */
	retval = 0;
	if (p->swap_map[SWP_OFFSET(entry)] == 1) {
		/* Recheck the page count with the pagecache lock held.. */
		spin_lock(&pagecache_lock);
		if (page_count(page) - !!page->buffers == 2) {
			__delete_from_swap_cache(page);
			SetPageDirty(page);
			retval = 1;
		}
		spin_unlock(&pagecache_lock);
	}
	swap_info_put(p);

	if (retval) {
		block_flushpage(page, 0);
		swap_free(entry);
		page_cache_release(page);
	}

	return retval;
}