Example #1
/**
 * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
 * @pipe:	the pipe that the buffer belongs to
 * @buf:	the buffer to attempt to steal
 *
 * Description:
 *	This function attempts to steal the &struct page attached to
 *	@buf. If successful, this function returns 0 and returns with
 *	the page locked. The caller may then reuse the page for whatever
 *	they wish; the typical use is insertion into a different file's
 *	page cache.
 */
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
			   struct pipe_buffer *buf)
{
	struct page *page = buf->page;

	/*
	 * A reference count of one is golden: it means that the owner of this
	 * page is the only one holding a reference to it. Lock the page
	 * and return 0 (success).
	 */
	if (page_count(page) == 1) {
		lock_page(page);
		return 0;
	}

	return 1;
}
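A caller of generic_pipe_buf_steal() only owns the page if the function returned 0. The wrapper below is a hypothetical illustration (not taken from the kernel source above); it merely shows the contract: on success the page comes back locked with a single reference, on failure the buffer is left untouched and the caller must copy the data instead.
/* Hypothetical helper, for illustration only. */
static struct page *try_steal_pipe_page(struct pipe_inode_info *pipe,
					struct pipe_buffer *buf)
{
	if (generic_pipe_buf_steal(pipe, buf) != 0)
		return NULL;		/* still shared; fall back to copying */
	return buf->page;		/* locked, page_count() == 1 */
}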
Example #2
static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}
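For context, this retry loop is normally entered through a small wrapper that initialises the delayed work and the retry age. The sketch below approximates that setup; it is an assumption modelled on the Xen grant-table code, not a verbatim copy of it.
/* Work callback: bump the age so the retry delay grows, then retry. */
static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data *unmap_data =
		container_of(work, struct gntab_unmap_queue_data,
			     gnttab_work.work);

	unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;
	__gnttab_unmap_refs_async(item);
}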
Example #3
static void vnic_destroy_allocator(struct vnic_rx_ring *ring)
{
	struct vnic_rx_alloc *page_alloc;
	int i;

	if (vnic_rx_linear)
		return;

	for (i = 0; i < ring->num_frags; i++) {
		page_alloc = &ring->page_alloc[i];
		if (page_alloc->page) {
			vnic_dbg_data(ring->port->name,
				      "Freeing allocator:%d count:%d\n",
				      i, page_count(page_alloc->page));
			put_page(page_alloc->page);
			page_alloc->page = NULL;
		}
	}
}
Example #4
/*
 * do_no_page() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the "write_access" parameter is true in order to avoid the next
 * page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * This is called with the MM semaphore held.
 */
static int do_no_page(struct mm_struct * mm, struct vm_area_struct * vma,
	unsigned long address, int write_access, pte_t *page_table)
{
	struct page * new_page;
	pte_t entry;

	if (!vma->vm_ops || !vma->vm_ops->nopage)
		return do_anonymous_page(mm, vma, page_table, write_access, address);

	/*
	 * The third argument is "no_share", which tells the low-level code
	 * to copy, not share the page even if sharing is possible.  It's
	 * essentially an early COW detection.
	 */
	new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, (vma->vm_flags & VM_SHARED)?0:write_access);
	if (new_page == NULL)	/* no page was available -- SIGBUS */
		return 0;
	if (new_page == NOPAGE_OOM)
		return -1;
	++mm->rss;
	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if write_access is true, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	flush_page_to_ram(new_page);
	flush_icache_page(vma, new_page);
	entry = mk_pte(new_page, vma->vm_page_prot);
	if (write_access) {
		entry = pte_mkwrite(pte_mkdirty(entry));
	} else if (page_count(new_page) > 1 &&
		   !(vma->vm_flags & VM_SHARED))
		entry = pte_wrprotect(entry);
	set_pte(page_table, entry);
	/* no need to invalidate: a not-present page shouldn't be cached */
	update_mmu_cache(vma, address, entry);
	return 2;	/* Major fault */
}
Example #5
void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
{
	unsigned long flags;
	int i;

	if (pagevec == NULL)
		return;

	balloon_lock(flags);
	for (i = 0; i < nr_pages; i++) {
		BUG_ON(page_count(pagevec[i]) != 1);
		balloon_append(pagevec[i]);
	}
	balloon_unlock(flags);

	kfree(pagevec);

	schedule_work(&balloon_worker);
}
Example #6
/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON(PageCompound(page));
	VM_BUG_ON(!page_count(page));

#ifdef CONFIG_KMEMCHECK
	/*
	 * Split shadow pages too, because free(page[0]) would
	 * otherwise free the whole shadow.
	 */
	if (kmemcheck_page_is_tracked(page))
		split_page(virt_to_page(page[0].shadow), order);
#endif

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
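As a usage illustration (the helper names below are hypothetical, not part of the source above): a driver that needs a physically contiguous block but wants to hand out and free the pages individually can allocate a higher-order page, split it, and later release each sub-page on its own.
static struct page *alloc_split_block(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL, order);

	if (!page)
		return NULL;
	/* After this, page[0] .. page[(1 << order) - 1] are independent pages. */
	split_page(page, order);
	return page;
}

static void free_split_block(struct page *page, unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1U << order); i++)
		__free_page(page + i);	/* each sub-page is freed individually */
}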
Example #7
void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_pte(NULL, kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		*ptep = pte_set_home(pteval, home);
	}
}
Example #8
int simple_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{	
	size_t size = vma->vm_end - vma->vm_start;
	int i;
	int ret;
	struct page *map_page;
	char *ptr;

	printk(KERN_NOTICE "vmf:fault=%p flag=%x offset=%lx\n",
	       vmf->virtual_address,
	       vmf->flags,
	       vmf->pgoff);
	printk(KERN_NOTICE "vma:start=0x%lx size=%x pgoff=%lx\n",
	       vma->vm_start,
	       (int)size,
	       vma->vm_pgoff);
	       
	vma->vm_flags |= VM_MIXEDMAP;
	map_page = data_pages[vmf->pgoff];

	ret = vm_insert_mixed(vma,
			      (unsigned long)vmf->virtual_address,
			      page_to_pfn(map_page));
	printk(KERN_NOTICE "ret=%d pfn=%x vaddr=0x%lx cnt=%d\n",
	       ret,
	       (int)page_to_pfn(map_page),
	       (unsigned long)vmf->virtual_address,
	       page_count(map_page));

	/*
	 * Example of "direct I/O": write into the page via a kernel mapping.
	 */
	ptr = kmap_atomic(map_page);
	for (i = 0; i < 100; i++) {
		ptr[i] = (unsigned char)vmf->pgoff;
	}
	kunmap_atomic(ptr);
	SetPageDirty(map_page);

	return VM_FAULT_NOPAGE;
}
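To put the handler above in context, it would normally be installed through a vm_operations_struct from the driver's mmap method. The names simple_vma_ops and simple_mmap below are assumptions for illustration, not part of the source above.
static const struct vm_operations_struct simple_vma_ops = {
	.fault = simple_vma_fault,
};

static int simple_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* Page faults in this VMA are resolved by simple_vma_fault(). */
	vma->vm_ops = &simple_vma_ops;
	return 0;
}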
Example #9
    void * allocate( std::size_t size) const
    {
        BOOST_ASSERT( minimum_stacksize() <= size);
        BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= size) );

        const std::size_t pages( page_count( size) + 1); // add one guard page
        const std::size_t size_ = pages * pagesize();
        BOOST_ASSERT( 0 < size && 0 < size_);

        void * limit = ::VirtualAlloc( 0, size_, MEM_COMMIT, PAGE_READWRITE);
        if ( ! limit) throw std::bad_alloc();

        std::memset( limit, '\0', size_);

        DWORD old_options;
        const BOOL result = ::VirtualProtect(
            limit, pagesize(), PAGE_READWRITE | PAGE_GUARD /*PAGE_NOACCESS*/, & old_options);
        BOOST_ASSERT( FALSE != result);

        return static_cast< char * >( limit) + size_;
    }
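Note that allocate() above returns the high end of the committed region because the stack grows downwards. A matching release (a sketch under that assumption, not taken from the library) therefore steps back by the committed size to recover the base address that VirtualFree() expects:
#include <windows.h>

/* Hypothetical counterpart to allocate(); size_ is the committed size in bytes. */
static void deallocate_stack(void *stack_top, size_t size_)
{
    char *base = (char *)stack_top - size_;
    VirtualFree(base, 0, MEM_RELEASE);  /* MEM_RELEASE requires the original base and size 0 */
}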
Example #10
/*
 * Check if we're the only user of a swap page,
 * when the page is locked.
 */
static int exclusive_swap_page(struct page *page)
{
	int retval = 0;
	struct swap_info_struct * p;
	swp_entry_t entry;

	entry.val = page->index;
	p = swap_info_get(entry);
	if (p) {
		/* Is the only swap cache user the cache itself? */
		if (p->swap_map[SWP_OFFSET(entry)] == 1) {
			/* Recheck the page count with the pagecache lock held.. */
			spin_lock(&pagecache_lock);
			if (page_count(page) - !!page->buffers == 2)
				retval = 1;
			spin_unlock(&pagecache_lock);
		}
		swap_info_put(p);
	}
	return retval;
}
Example #11
/**
 * iscsi_tcp_segment_map - map the current S/G page
 * @segment: iscsi_segment
 * @recv: 1 if called from recv path
 *
 * We only need to possibly kmap data if scatter lists are being used,
 * because the iscsi passthrough and internal IO paths will never use high
 * mem pages.
 */
static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
{
	struct scatterlist *sg;

	if (segment->data != NULL || !segment->sg)
		return;

	sg = segment->sg;
	BUG_ON(segment->sg_mapped);
	BUG_ON(sg->length == 0);

	/*
	 * If the page count is greater than one it is ok to send
	 * to the network layer's zero copy send path. If not we
	 * have to go the slow sendmsg path. We always map for the
	 * recv path.
	 */
	if (page_count(sg_page(sg)) >= 1 && !recv)
		return;

	if (recv) {
		segment->atomic_mapped = true;
		segment->sg_mapped = kmap_atomic(sg_page(sg));
	} else {
		segment->atomic_mapped = false;
		/* the xmit path can sleep with the page mapped, so use kmap */
		segment->sg_mapped = kmap(sg_page(sg));
	}

	segment->data = segment->sg_mapped + sg->offset + segment->sg_offset;
}
Example #12
static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
{
	int i;
	struct drm_buffer_manager *bm = &ttm->dev->bm;
	struct page **cur_page;

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages + i;
		if (*cur_page) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
			ClearPageReserved(*cur_page);
#endif
			if (page_count(*cur_page) != 1)
				DRM_ERROR("Erroneous page count. Leaking pages.\n");
			if (page_mapped(*cur_page))
				DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
			__free_page(*cur_page);
			--bm->cur_pages;
		}
	}
}
Example #13
/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
void free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct * p;
	struct page *page = NULL;

	p = swap_info_get(entry);
	if (p) {
		if (swap_entry_free(p, SWP_OFFSET(entry)) == 1)
			page = find_trylock_page(&swapper_space, entry.val);
		swap_info_put(p);
	}
	if (page) {
		page_cache_get(page);
		/* Only cache user (+us), or swap space full? Free it! */
		if (page_count(page) - !!page->buffers == 2 || vm_swap_full()) {
			delete_from_swap_cache(page);
			SetPageDirty(page);
		}
		UnlockPage(page);
		page_cache_release(page);
	}
}
Example #14
static int bc_io_show(struct seq_file *f, void *v)
{
	struct list_head *lh;
	struct page_beancounter *pb;
	struct page *pg;

	lh = (struct list_head *)v;
	if (lh == &pb_io_list) {
		seq_printf(f, "Races: anon %lu missed %lu\n",
				anon_pages, not_released);

		seq_printf(f, "%-*s %-1s %-*s %-4s %*s %*s "
				"%-*s %-*s %-1s %-*s %-*s\n",
				PTR_SIZE, "pb", "",
				PTR_SIZE, "page", "flg",
				INT_SIZE, "cnt", INT_SIZE, "mcnt",
				PTR_SIZE, "pb_list",
				PTR_SIZE, "page_pb", "",
				PTR_SIZE, "mapping",
				INT_SIZE, "ub");
		return 0;
	}

	pb = list_entry(lh, struct page_beancounter, io_list);
	pg = pb->page;
	seq_printf(f, "%p %c %p %c%c%c%c %*d %*d %p %p %c %p %d\n",
			pb, pb->io_debug ? 'e' : 'm', pg,
			PageDirty(pg) ? 'D' : 'd',
			PageAnon(pg) ? 'A' : 'a',
			PageWriteback(pg) ? 'W' : 'w',
			PageLocked(pg) ? 'L' : 'l',
			INT_SIZE, page_count(pg),
			INT_SIZE, page_mapcount(pg),
			pb->page_pb_list, page_pbc(pg),
			iopb_to_pb(page_pbc(pg)) == pb ? ' ' : '!',
			pg->mapping, pb->ub->ub_uid);
	return 0;
}
Example #15
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page)) {
			/*
			 * If race between isolatation and allocation happens,
			 * some free pages could be in MIGRATE_MOVABLE list
			 * although pageblock's migratation type of the page
			 * is MIGRATE_ISOLATE. Catch it and move the page into
			 * MIGRATE_ISOLATE list.
			 */
			if (get_freepage_migratetype(page) != MIGRATE_ISOLATE) {
				struct page *end_page;

				end_page = page + (1 << page_order(page)) - 1;
				move_freepages(page_zone(page), page, end_page,
						MIGRATE_ISOLATE);
			}
			pfn += 1 << page_order(page);
		}
		else if (page_count(page) == 0 &&
			get_freepage_migratetype(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}
Example #16
/*
 * Test whether all pages in the range are free (i.e. isolated).
 * All pages in [start_pfn...end_pfn) must be in the same zone.
 * zone->lock must be held before calling this.
 *
 * Returns 1 if all pages in the range are isolated.
 */
static int
__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
{
	struct page *page;

	while (pfn < end_pfn) {
		if (!pfn_valid_within(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		if (PageBuddy(page))
			pfn += 1 << page_order(page);
		else if (page_count(page) == 0 &&
				page_private(page) == MIGRATE_ISOLATE)
			pfn += 1;
		else
			break;
	}
	if (pfn < end_pfn)
		return 0;
	return 1;
}
Example #17
/*
 * We can use this swap cache entry directly
 * if there are no other references to it.
 *
 * Here "exclusive_swap_page()" does the real
 * work, but we opportunistically check whether
 * we need to get all the locks first..
 */
int can_share_swap_page(struct page *page)
{
	int retval = 0;

	if (!PageLocked(page))
		BUG();
	switch (page_count(page)) {
	case 3:
		if (!page->buffers)
			break;
		/* Fallthrough */
	case 2:
		if (!PageSwapCache(page))
			break;
		retval = exclusive_swap_page(page);
		break;
	case 1:
		if (PageReserved(page))
			break;
		retval = 1;
	}
	return retval;
}
Example #18
static int __init hello_init(void)
{
    int i, total, countUsedPage;
    struct page* curPage;

    char path[] = "./hash.bin";
    printk(KERN_INFO "Starting module. You have %lu pages to play with!\n", get_num_physpages());
    logFile = file_open(path, O_CREAT | O_WRONLY, S_IRWXU);
    curPage = pfn_to_page(node_data[0]->node_start_pfn);
    total = get_num_physpages();
    countUsedPage = 0;
    for(i=0; i<total; i++) {
        curPage = pfn_to_page(node_data[0]->node_start_pfn + i);
        if(page_count(curPage) > 0) {
            write_hash_to_file(countUsedPage, kmap(curPage));
            kunmap(curPage); /* balance the kmap() above */
            countUsedPage++;
        }
    }
    file_sync(logFile);
    file_close(logFile);
    printk(KERN_INFO "Save the world! countUsedPage:%d\n", countUsedPage);
    return 0;
}
Example #19
static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	struct mlx4_en_rx_alloc *page_alloc;
	int i;

	for (i = 0; i < priv->num_frags; i++) {
		const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];

		page_alloc = &ring->page_alloc[i];
		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
		       i, page_count(page_alloc->page));

		dma_unmap_page(priv->ddev, page_alloc->dma,
			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
		while (page_alloc->page_offset + frag_info->frag_stride <
		       page_alloc->page_size) {
			put_page(page_alloc->page);
			page_alloc->page_offset += frag_info->frag_stride;
		}
		page_alloc->page = NULL;
	}
}
Example #20
/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_cache(): we recheck the
 * page count inside the lock to see whether shrink_cache grabbed the page
 * via the LRU.  If it did, give up: shrink_cache will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];
		struct zone *pagezone;

		if (!put_page_testzero(page))
			continue;

		pagezone = page_zone(page);
		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		if (TestClearPageLRU(page))
			del_page_from_lru(zone, page);
		if (page_count(page) == 0) {
			if (!pagevec_add(&pages_to_free, page)) {
				spin_unlock_irq(&zone->lru_lock);
				__pagevec_free(&pages_to_free);
				pagevec_reinit(&pages_to_free);
				zone = NULL;	/* No lock is held */
			}
		}
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);

	pagevec_free(&pages_to_free);
}
Example #21
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;
	void *addr;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);

			if ((ttm->page_flags & TTM_PAGE_FLAG_DMA32) &&
				xen_pv_domain()) {
				addr = page_address(cur_page);
				WARN_ON(!addr);
				if (addr)
					dma_free_coherent(NULL, PAGE_SIZE, addr,
						  virt_to_bus(addr));
			} else
				__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
Example #22
File: elfldr.c Project: tdz/opsys
static int
elf_loader_construct_phdr_load(const struct vmem *as,
                               const Elf32_Phdr * elf_phdr,
                               const unsigned char *img,
                               struct vmem *dst_as)
{
        int err;

        err = vmem_alloc_frames(dst_as,
                                   pageframe_index(elf_phdr->p_offset + img),
                                   page_index((void *)elf_phdr->p_vaddr),
                                   page_count((void *)elf_phdr->p_vaddr,
                                                      elf_phdr->p_filesz),
                                   PTE_FLAG_PRESENT|PTE_FLAG_WRITEABLE|
                                   PTE_FLAG_USERMODE);
        if (err < 0)
        {
                goto err_vmem_alloc_pageframes;
        }

        /*
         * set remaining bytes to zero
         */

        if (elf_phdr->p_filesz < elf_phdr->p_memsz)
        {
                unsigned char *vaddr = (unsigned char *)elf_phdr->p_vaddr;
                memset(vaddr + elf_phdr->p_filesz, 0, elf_phdr->p_memsz -
                       elf_phdr->p_filesz);
        }

        return 0;

err_vmem_alloc_pageframes:
        return err;
}
Example #23
static coffee_page_t
create_log(struct file *file, struct file_header *hdr)
{
  uint16_t log_record_size, log_records;
  cfs_offset_t size;
  struct file *log_file;

  adjust_log_config(hdr, &log_record_size, &log_records);

  /* Log index size + log data size. */
  size = log_records * (sizeof(uint16_t) + log_record_size);

  log_file = reserve(hdr->name, page_count(size), 1, HDR_FLAG_LOG);
  if(log_file == NULL) {
    return INVALID_PAGE;
  }

  hdr->flags |= HDR_FLAG_MODIFIED;
  hdr->log_page = log_file->page;
  write_header(hdr, file->page);

  file->flags |= COFFEE_FILE_MODIFIED;
  return log_file->page;
}
Example #24
/*
 * For inode and page debug
 */
int nilfs_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode;
	int verbose = (nilfs_debug_info.verbose[NILFS_VERBOSE_PAGE] > 1);
	int ret;

	if (!verbose && mapping) {
		inode = NILFS_AS_I(mapping);
		if (inode->i_sb && !(inode->i_sb->s_flags & MS_ACTIVE))
			verbose = 1;
	}
	if (unlikely(!PagePrivate(page)))
		NILFS_PAGE_BUG(page, "no buffers");

	if (buffer_nilfs_allocated(page_buffers(page)))
		NILFS_PAGE_BUG(page, "nilfs allocated page");

	/*
	 * Note that non-busy buffer heads may be discarded though the
	 * try_to_free_buffers() call.  This may happen when the page is not
	 * dirty, not in writeback, not locked, and belongs to a mapping.
	 * Before changing the state of buffer heads to busy, the page lock
	 * must be held to protect them.
	 */
	ret = try_to_free_buffers(page);
	if (verbose && ret && mapping && mapping->host) {
		if (page_count(page) > 2 + !PageLRU(page))
			/*
			 * This may happen when the other task just happen to
			 * find and get the page during this invalidation.
			 */
			PAGE_DEBUG(page, "too many page count");
	}
	return ret;
}
Example #25
static bool page_empty(void *ptr)
{
	struct page *ptr_page = virt_to_page(ptr);
	return page_count(ptr_page) == 1;
}
Example #26
int MMapPMR(struct file *pFile, struct vm_area_struct *ps_vma)
{
	PVRSRV_ERROR eError;
	IMG_HANDLE hSecurePMRHandle;
	IMG_SIZE_T uiLength;
	IMG_DEVMEM_OFFSET_T uiOffset;
	unsigned long uiPFN;
	IMG_HANDLE hPMRResmanHandle;
	PMR *psPMR;
	PMR_FLAGS_T ulPMRFlags;
	IMG_UINT32 ui32CPUCacheFlags;
	unsigned long ulNewFlags = 0;
	pgprot_t sPageProt;
#if defined(SUPPORT_DRM)
	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(PVR_DRM_FILE_FROM_FILE(pFile));
#else
	CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
#endif

#if defined(PVR_MMAP_USE_VM_INSERT)
	IMG_BOOL bMixedMap = IMG_FALSE;
#endif
	/*
	 * The pmr lock used here to protect both handle related operations and PMR
	 * operations.
	 * This was introduced to fix lockdep issue.
	 */
	mutex_lock(&g_sMMapMutex);
	PMRLock();

#if defined(SUPPORT_DRM_DC_MODULE)
	psPMR = PVRSRVGEMMMapLookupPMR(pFile, ps_vma);
	if (!psPMR)
#endif
	{
		hSecurePMRHandle = (IMG_HANDLE)((IMG_UINTPTR_T)ps_vma->vm_pgoff);

		eError = PVRSRVLookupHandle(psConnection->psHandleBase,
					    (IMG_HANDLE *) &hPMRResmanHandle,
					    hSecurePMRHandle,
					    PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
		if (eError != PVRSRV_OK)
		{
			goto e0;
		}

		eError = ResManFindPrivateDataByPtr(hPMRResmanHandle,
						    (void **)&psPMR);
		if (eError != PVRSRV_OK)
		{
			goto e0;
		}
	}

	/*
	 * Take a reference on the PMR; this makes sure that it can't be freed
	 * while it's mapped into the user process.
	 */
	PMRRefPMR(psPMR);

	PMRUnlock();

	eError = PMRLockSysPhysAddresses(psPMR, PAGE_SHIFT);
	if (eError != PVRSRV_OK)
	{
		goto e1;
	}

	if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
	    ((ps_vma->vm_flags & VM_SHARED) == 0))
	{
		eError = PVRSRV_ERROR_INVALID_PARAMS;
		goto e1;
	}

	/*
	 * We ought to call PMR_Flags() here to check the permissions
	 * against the requested mode, and possibly to set up the cache
	 * control protflags
	 */
	eError = PMR_Flags(psPMR, &ulPMRFlags);
	if (eError != PVRSRV_OK)
	{
		goto e1;
	}

	ulNewFlags = ps_vma->vm_flags;
#if 0
	/* Discard user read/write request, we will pull these flags from the PMR */
	ulNewFlags &= ~(VM_READ | VM_WRITE);

	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_READABLE)
	{
		ulNewFlags |= VM_READ;
	}
	if (ulPMRFlags & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE)
	{
		ulNewFlags |= VM_WRITE;
	}
#endif

	ps_vma->vm_flags = ulNewFlags;

#if defined (CONFIG_ARM64)
	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, 0, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_ARM)
	sPageProt = __pgprot_modify(ps_vma->vm_page_prot, L_PTE_MT_MASK, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_X86)
	sPageProt = pgprot_modify(ps_vma->vm_page_prot, vm_get_page_prot(ulNewFlags));
#elif defined(CONFIG_METAG) || defined(CONFIG_MIPS)
	sPageProt = vm_get_page_prot(ulNewFlags);
#else
#error Please add pgprot_modify equivalent for your system
#endif
	ui32CPUCacheFlags = DevmemCPUCacheMode(ulPMRFlags);
	switch (ui32CPUCacheFlags)
	{
		case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
				sPageProt = pgprot_noncached(sPageProt);
				break;

		case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
				sPageProt = pgprot_writecombine(sPageProt);
				break;

		case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
				break;

		default:
				eError = PVRSRV_ERROR_INVALID_PARAMS;
				goto e1;
	}
	ps_vma->vm_page_prot = sPageProt;

    uiLength = ps_vma->vm_end - ps_vma->vm_start;

    ps_vma->vm_flags |= VM_IO;

/* Don't include the mapping in core dumps */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0))
    ps_vma->vm_flags |= VM_DONTDUMP;
#else
    ps_vma->vm_flags |= VM_RESERVED;
#endif

    /*
     * Disable mremap because our nopage handler assumes all
     * page requests have already been validated.
     */
    ps_vma->vm_flags |= VM_DONTEXPAND;
    
    /* Don't allow mapping to be inherited across a process fork */
    ps_vma->vm_flags |= VM_DONTCOPY;

#if defined(PVR_MMAP_USE_VM_INSERT)
	{
		/* Scan the map range for pfns without struct page* handling. If we find
		 * one, this is a mixed map, and we can't use vm_insert_page().
		 */
		for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
		{
			IMG_CPU_PHYADDR sCpuPAddr;
			IMG_BOOL bValid;

			eError = PMR_CpuPhysAddr(psPMR, uiOffset, &sCpuPAddr, &bValid);
			PVR_ASSERT(eError == PVRSRV_OK);
			if (eError)
			{
				goto e2;
			}

			if (bValid)
			{
				uiPFN = sCpuPAddr.uiAddr >> PAGE_SHIFT;
				PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == sCpuPAddr.uiAddr);

				if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
				{
					bMixedMap = IMG_TRUE;
				}
			}
		}

		if (bMixedMap)
		{
		    ps_vma->vm_flags |= VM_MIXEDMAP;
		}
	}
#endif /* defined(PVR_MMAP_USE_VM_INSERT) */

    for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<PAGE_SHIFT)
    {
        IMG_SIZE_T uiNumContiguousBytes;
        IMG_INT32 iStatus;
        IMG_CPU_PHYADDR sCpuPAddr;
        IMG_BOOL bValid;

        uiNumContiguousBytes = 1ULL<<PAGE_SHIFT;
        eError = PMR_CpuPhysAddr(psPMR,
                                 uiOffset,
                                 &sCpuPAddr,
                                 &bValid);
        PVR_ASSERT(eError == PVRSRV_OK);
        if (eError)
        {
            goto e2;
        }

		/*
			Only map in pages that are valid, any that aren't will be picked up
			by the nopage handler which will return a zeroed page for us
		*/
		if (bValid)
		{
	        uiPFN = sCpuPAddr.uiAddr >> PAGE_SHIFT;
	        PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == sCpuPAddr.uiAddr);

#if defined(PVR_MMAP_USE_VM_INSERT)
			if (bMixedMap)
			{
				/* This path is just for debugging. It should be equivalent
				 * to the remap_pfn_range() path.
				 */
				iStatus = vm_insert_mixed(ps_vma,
										  ps_vma->vm_start + uiOffset,
										  uiPFN);
			}
			else
			{
				iStatus = vm_insert_page(ps_vma,
										 ps_vma->vm_start + uiOffset,
										 pfn_to_page(uiPFN));
			}
#else /* defined(PVR_MMAP_USE_VM_INSERT) */
	        iStatus = remap_pfn_range(ps_vma,
	                                  ps_vma->vm_start + uiOffset,
	                                  uiPFN,
	                                  uiNumContiguousBytes,
	                                  ps_vma->vm_page_prot);
#endif /* defined(PVR_MMAP_USE_VM_INSERT) */

	        PVR_ASSERT(iStatus == 0);
	        if(iStatus)
	        {
	            // N.B. not the right error code, but, it doesn't get propagated anyway... :(
	            eError = PVRSRV_ERROR_OUT_OF_MEMORY;
	
	            goto e2;
	        }

#if defined(PVRSRV_ENABLE_PROCESS_STATS)
    /* USER MAPPING*/
#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
	    PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, PAGE_SIZE);
#else
    	PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
    			 	 	 	 	 (IMG_VOID*)(IMG_UINTPTR_T)(ps_vma->vm_start + uiOffset),
								 sCpuPAddr,
								 PAGE_SIZE,
								 IMG_NULL);
#endif
#endif
		}
        (void)pFile;
    }

    /* let us see the PMR so we can unlock it later */
    ps_vma->vm_private_data = psPMR;

    /* Install open and close handlers for ref-counting */
    ps_vma->vm_ops = &gsMMapOps;

	mutex_unlock(&g_sMMapMutex);

    return 0;

    /*
      error exit paths follow
    */
 e2:
    PVR_DPF((PVR_DBG_ERROR, "don't know how to handle this error.  Abort!"));
    PMRUnlockSysPhysAddresses(psPMR);
 e1:
	PMRUnrefPMR(psPMR);
	goto em1;
 e0:
    PVR_DPF((PVR_DBG_ERROR, "Error in MMapPMR critical section"));
	PMRUnlock();
 em1:
    PVR_ASSERT(eError != PVRSRV_OK);
    PVR_DPF((PVR_DBG_ERROR, "unable to translate error %d", eError));
	mutex_unlock(&g_sMMapMutex);

    return -ENOENT; // -EAGAIN // or what?
}
Example #27
/*---------------------------------------------------------------------------*/
int
cfs_coffee_reserve(const char *name, cfs_offset_t size)
{
  return reserve(name, page_count(size), 0, 0) == NULL ? -1 : 0;
}
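Note that page_count() in the two Coffee file-system examples (#23 and #27) is not the kernel reference-count helper of the same name: it converts a byte size into a number of flash pages, rounding up. A plausible definition, given here only as an assumption about what the real helper in cfs-coffee.c does, is:
static coffee_page_t
page_count(cfs_offset_t size)
{
  /* Round the requested size up to whole Coffee pages (an assumed definition). */
  return (size + COFFEE_PAGE_SIZE - 1) / COFFEE_PAGE_SIZE;
}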
Example #28
/* mm->page_table_lock is held. mmap_sem is not held */
static inline int try_to_swap_out(struct mm_struct * mm, struct vm_area_struct* vma, unsigned long address, pte_t * page_table, struct page *page, zone_t * classzone)
{
	pte_t pte;
	swp_entry_t entry;

	/* Don't look at this pte if it's been accessed recently. */
	if ((vma->vm_flags & VM_LOCKED) || ptep_test_and_clear_young(page_table)) {
		mark_page_accessed(page);
		return 0;
	}

	/* Don't bother unmapping pages that are active */
	if (PageActive(page))
		return 0;

	/* Don't bother replenishing zones not under pressure.. */
	if (!memclass(page->zone, classzone))
		return 0;

	if (TryLockPage(page))
		return 0;

	/* From this point on, the odds are that we're going to
	 * nuke this pte, so read and clear the pte.  This hook
	 * is needed on CPUs which update the accessed and dirty
	 * bits in hardware.
	 */
	flush_cache_page(vma, address);
	pte = ptep_get_and_clear(page_table);
	flush_tlb_page(vma, address);

	if (pte_dirty(pte))
		set_page_dirty(page);

	/*
	 * Is the page already in the swap cache? If so, then
	 * we can just drop our reference to it without doing
	 * any IO - it's already up-to-date on disk.
	 */
	if (PageSwapCache(page)) {
		entry.val = page->index;
		swap_duplicate(entry);
set_swap_pte:
		set_pte(page_table, swp_entry_to_pte(entry));
drop_pte:
		mm->rss--;
		UnlockPage(page);
		{
			int freeable = page_count(page) - !!page->buffers <= 2;
			page_cache_release(page);
			return freeable;
		}
	}

	/*
	 * Is it a clean page? Then it must be recoverable
	 * by just paging it in again, and we can just drop
	 * it..  or if it's dirty but has backing store,
	 * just mark the page dirty and drop it.
	 *
	 * However, this won't actually free any real
	 * memory, as the page will just be in the page cache
	 * somewhere, and as such we should just continue
	 * our scan.
	 *
	 * Basically, this just makes it possible for us to do
	 * some real work in the future in "refill_inactive()".
	 */
	if (page->mapping)
		goto drop_pte;
	if (!PageDirty(page))
		goto drop_pte;

	/*
	 * Anonymous buffercache pages can be left behind by
	 * concurrent truncate and pagefault.
	 */
	if (page->buffers)
		goto preserve;

	/*
	 * This is a dirty, swappable page.  First of all,
	 * get a suitable swap entry for it, and make sure
	 * we have the swap cache set up to associate the
	 * page with that swap entry.
	 */
	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			break;
		/* Add it to the swap cache and mark it dirty
		 * (adding to the page cache will clear the dirty
		 * and uptodate bits, so we need to do it again)
		 */
		if (add_to_swap_cache(page, entry) == 0) {
			SetPageUptodate(page);
			set_page_dirty(page);
			goto set_swap_pte;
		}
		/* Raced with "speculative" read_swap_cache_async */
		swap_free(entry);
	}

	/* No swap space left */
preserve:
	set_pte(page_table, pte);
	UnlockPage(page);
	return 0;
}
Example #29
static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask, int priority)
{
	struct list_head * entry;
	int max_scan = nr_inactive_pages / priority;
	int max_mapped = min((nr_pages << (10 - priority)), max_scan / 10);

	spin_lock(&pagemap_lru_lock);
	while (--max_scan >= 0 && (entry = inactive_list.prev) != &inactive_list) {
		struct page * page;

		/* lock depth is 1 or 2 */
		if (unlikely(current->need_resched)) {
			spin_unlock(&pagemap_lru_lock);
			__set_current_state(TASK_RUNNING);
			schedule();
			spin_lock(&pagemap_lru_lock);
			continue;
		}

		page = list_entry(entry, struct page, lru);

		if (unlikely(!PageLRU(page)))
			BUG();
		if (unlikely(PageActive(page)))
			BUG();

		list_del(entry);
		list_add(entry, &inactive_list);

		/*
		 * Zero page counts can happen because we unlink the pages
		 * _after_ decrementing the usage count..
		 */
		if (unlikely(!page_count(page)))
			continue;

		if (!memclass(page->zone, classzone))
			continue;

		/* Racy check to avoid trylocking when not worthwhile */
		if (!page->buffers && (page_count(page) != 1 || !page->mapping))
			goto page_mapped;

		/*
		 * The page is locked. IO in progress?
		 * Move it to the back of the list.
		 */
		if (unlikely(TryLockPage(page))) {
			if (PageLaunder(page) && (gfp_mask & __GFP_FS)) {
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);
				wait_on_page(page);
				page_cache_release(page);
				spin_lock(&pagemap_lru_lock);
			}
			continue;
		}

		if ((PageDirty(page) || DelallocPage(page)) && is_page_cache_freeable(page) && page->mapping) {
			/*
			 * It is not critical here to write it only if
			 * the page is unmapped because any direct writer
			 * like O_DIRECT would set the PG_dirty bitflag
			 * on the physical page after having successfully
			 * pinned it and after the I/O to the page is finished,
			 * so the direct writes to the page cannot get lost.
			 */
			int (*writepage)(struct page *);

			writepage = page->mapping->a_ops->writepage;
			if ((gfp_mask & __GFP_FS) && writepage) {
				ClearPageDirty(page);
				SetPageLaunder(page);
				page_cache_get(page);
				spin_unlock(&pagemap_lru_lock);

				writepage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 */
		if (page->buffers) {
			spin_unlock(&pagemap_lru_lock);

			/* avoid freeing a locked page */
			page_cache_get(page);

			if (try_to_release_page(page, gfp_mask)) {
				if (!page->mapping) {
					/*
					 * We must not allow an anon page
					 * with no buffers to be visible on
					 * the LRU, so we unlock the page after
					 * taking the lru lock
					 */
					spin_lock(&pagemap_lru_lock);
					UnlockPage(page);
					__lru_cache_del(page);

					/* effectively free the page here */
					page_cache_release(page);

					if (--nr_pages)
						continue;
					break;
				} else {
					/*
					 * The page is still in pagecache so undo the stuff
					 * before the try_to_release_page since we've not
					 * finished and we can now try the next step.
					 */
					page_cache_release(page);

					spin_lock(&pagemap_lru_lock);
				}
			} else {
				/* failed to drop the buffers so stop here */
				UnlockPage(page);
				page_cache_release(page);

				spin_lock(&pagemap_lru_lock);
				continue;
			}
		}

		spin_lock(&pagecache_lock);

		/*
		 * this is the non-racy check for busy page.
		 */
		if (!page->mapping || !is_page_cache_freeable(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
page_mapped:
			if (--max_mapped >= 0)
				continue;

			/*
			 * Alert! We've found too many mapped pages on the
			 * inactive list, so we start swapping out now!
			 */
			spin_unlock(&pagemap_lru_lock);
			swap_out(priority, gfp_mask, classzone);
			return nr_pages;
		}

		/*
		 * It is critical to check PageDirty _after_ we made sure
		 * the page is freeable* so not in use by anybody.
		 */
		if (PageDirty(page)) {
			spin_unlock(&pagecache_lock);
			UnlockPage(page);
			continue;
		}

		/* point of no return */
		if (likely(!PageSwapCache(page))) {
			__remove_inode_page(page);
			spin_unlock(&pagecache_lock);
		} else {
			swp_entry_t swap;
			swap.val = page->index;
			__delete_from_swap_cache(page);
			spin_unlock(&pagecache_lock);
			swap_free(swap);
		}

		__lru_cache_del(page);
		UnlockPage(page);

		/* effectively free the page here */
		page_cache_release(page);

		if (--nr_pages)
			continue;
		break;
	}
	spin_unlock(&pagemap_lru_lock);

	return nr_pages;
}
Example #30
/*
 * This routine handles present pages, when users try to write
 * to a shared page. It is done by copying the page to a new address
 * and decrementing the shared-page counter for the old page.
 *
 * Goto-purists beware: the only reason for goto's here is that it results
 * in better assembly code.. The "default" path will see no jumps at all.
 *
 * Note that this routine assumes that the protection checks have been
 * done by the caller (the low-level page fault routine in most cases).
 * Thus we can safely just mark it writable once we've done any necessary
 * COW.
 *
 * We also mark the page dirty at this point even though the page will
 * change only once the write actually happens. This avoids a few races,
 * and potentially makes it more efficient.
 *
 * We enter with the page table read-lock held, and need to exit without
 * it.
 */
static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma,
	unsigned long address, pte_t *page_table, pte_t pte)
{
	struct page *old_page, *new_page;

	old_page = pte_page(pte);
	if (!VALID_PAGE(old_page))
		goto bad_wp_page;
	
	/*
	 * We can avoid the copy if:
	 * - we're the only user (count == 1)
	 * - the only other user is the swap cache,
	 *   and the only swap cache user is itself,
	 *   in which case we can just continue to
	 *   use the same swap cache (it will be
	 *   marked dirty).
	 */
	switch (page_count(old_page)) {
	case 2:
		/*
		 * Lock the page so that no one can look it up from
		 * the swap cache, grab a reference and start using it.
		 * Can not do lock_page, holding page_table_lock.
		 */
		if (!PageSwapCache(old_page) || TryLockPage(old_page))
			break;
		if (is_page_shared(old_page)) {
			UnlockPage(old_page);
			break;
		}
		UnlockPage(old_page);
		/* FallThrough */
	case 1:
		flush_cache_page(vma, address);
		establish_pte(vma, address, page_table, pte_mkyoung(pte_mkdirty(pte_mkwrite(pte))));
		spin_unlock(&mm->page_table_lock);
		return 1;	/* Minor fault */
	}

	/*
	 * Ok, we need to copy. Oh, well..
	 */
	spin_unlock(&mm->page_table_lock);
	new_page = page_cache_alloc();
	if (!new_page)
		return -1;
	spin_lock(&mm->page_table_lock);

	/*
	 * Re-check the pte - we dropped the lock
	 */
	if (pte_same(*page_table, pte)) {
		if (PageReserved(old_page))
			++mm->rss;
		break_cow(vma, old_page, new_page, address, page_table);

		/* Free the old page.. */
		new_page = old_page;
	}
	spin_unlock(&mm->page_table_lock);
	page_cache_release(new_page);
	return 1;	/* Minor fault */

bad_wp_page:
	spin_unlock(&mm->page_table_lock);
	printk("do_wp_page: bogus page at address %08lx (page 0x%lx)\n",address,(unsigned long)old_page);
	return -1;
}