Example #1
/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		void *paddr = page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
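
The deferred flush described in the header comment is completed once a userspace PTE for the page is actually installed. Below is a minimal sketch of that consumer side, loosely modelled on the ARC update_mmu_cache() path; everything except the helpers already used above (addr_not_cache_congruent(), __flush_dcache_page(), PG_dc_clean) is an assumption, not a quote of the kernel source.

/* Sketch only: complete the flush deferred by flush_dcache_page() above. */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
		      pte_t *ptep)
{
	struct page *page = pfn_to_page(pte_pfn(*ptep));
	void *paddr = page_address(page);

	vaddr &= PAGE_MASK;

	/*
	 * PG_dc_clean was cleared when the kernel wrote to the page; if the
	 * new U-mapping is not congruent with the K-mapping, write back the
	 * kernel alias now and mark the page clean again.
	 */
	if (addr_not_cache_congruent(paddr, vaddr) &&
	    !test_and_set_bit(PG_dc_clean, &page->flags))
		__flush_dcache_page(paddr, vaddr);
}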
Example #2
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the tlb (and thus flushed) anyway.
	 */
	if (PageHighMem(page))
		addr = (unsigned long)__kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		__kunmap_atomic((void *)addr);
}
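
Here, too, the WRITE-side flush is only deferred: the page is tagged with SetPageDcacheDirty() and left alone. A condensed sketch of the consumer that picks this up on the MIPS fault path follows; the PageDcacheDirty()/ClearPageDcacheDirty() accessors are assumed counterparts of the setter used above, and the body is simplified rather than a verbatim copy of __update_cache().

/* Sketch: flush a page that __flush_dcache_page() marked dirty earlier. */
void __update_cache(struct vm_area_struct *vma, unsigned long address,
		    pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping(page) && PageDcacheDirty(page)) {
		/* the writeback that was deferred above happens here */
		flush_data_cache_page((unsigned long)page_address(page));
		ClearPageDcacheDirty(page);
	}
}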
Example #3
/* This function is called by both msync() and fsync().
 * TODO: Check if we can avoid calling pmfs_flush_buffer() for fsync. We use
 * movnti to write data to files, so we may want to avoid doing unnecessary
 * pmfs_flush_buffer() on fsync() */
int pmfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	/* Sync from start to end[inclusive] */
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t isize;
	int error;

	/* if the file is not mmap'ed, there is no need to do clflushes */
	if (mapping_mapped(mapping) == 0)
		goto persist;

	end += 1; /* end is inclusive. We like our indices normal, please! */

	isize = i_size_read(inode);

	if ((unsigned long)end > (unsigned long)isize)
		end = isize;
	if (!isize || (start >= end)) {
		pmfs_dbg_verbose("[%s:%d] : (ERR) isize(%llx), start(%llx),"
			" end(%llx)\n", __func__, __LINE__, isize, start, end);
		return -ENODATA;
	}

	/* Align start and end to cacheline boundaries */
	start = start & CACHELINE_MASK;
	end = CACHELINE_ALIGN(end);
	do {
		void *xip_mem;
		pgoff_t pgoff;
		loff_t offset;
		unsigned long xip_pfn, nr_flush_bytes;

		pgoff = start >> PAGE_CACHE_SHIFT;
		offset = start & ~PAGE_CACHE_MASK;

		nr_flush_bytes = PAGE_CACHE_SIZE - offset;
		if (nr_flush_bytes > (end - start))
			nr_flush_bytes = end - start;

		error = mapping->a_ops->get_xip_mem(mapping, pgoff, 0,
						    &xip_mem, &xip_pfn);

		if (unlikely(error)) {
			/* sparse files could have such holes */
			pmfs_dbg_verbose("[%s:%d] : start(%llx), end(%llx),"
			" pgoff(%lx)\n", __func__, __LINE__, start, end, pgoff);
		} else {
			/* flush the range */
			pmfs_flush_buffer(xip_mem+offset, nr_flush_bytes, 0);
		}

		start += nr_flush_bytes;
	} while (start < end);
persist:
	PERSISTENT_MARK();
	PERSISTENT_BARRIER();
	return 0;
}
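
pmfs_flush_buffer() itself is not part of this excerpt. Conceptually it walks the byte range one cacheline at a time and flushes each line, optionally followed by a fence; the sketch below illustrates that idea under the assumption of an x86 clflush/sfence implementation and a CACHELINE_SIZE constant, and is not the verbatim PMFS helper.

/* Sketch: write back [buf, buf + len) from the CPU caches, line by line. */
static inline void pmfs_flush_buffer(void *buf, uint32_t len, bool fence)
{
	uint32_t i;

	/* widen len so the loop also covers a possibly unaligned first line */
	len += (unsigned long)buf & (CACHELINE_SIZE - 1);
	for (i = 0; i < len; i += CACHELINE_SIZE)
		asm volatile("clflush %0" : "+m" (*((char *)buf + i)));
	if (fence)
		asm volatile("sfence" ::: "memory");
}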
Example #4
/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
				       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
				       CEPH_CAP_FILE_EXCL|
				       CEPH_CAP_FILE_LAZYIO)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	return queue_trunc;
}
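
The truncate_seq comparisons above rely on ceph_seq_cmp() being robust against 32-bit wraparound. A plausible definition is the usual signed-difference idiom, shown here as an assumption rather than a quote of the Ceph sources.

/* Sketch: compare wrapping sequence numbers; >0 means a is newer than b. */
static inline int ceph_seq_cmp(u32 a, u32 b)
{
	return (s32)a - (s32)b;
}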
Example #5
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;

	} else {

		unsigned long phys = page_to_phys(page);
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		if (!alias && !mapping)
			return;

		__flush_invalidate_dcache_page((long)page_address(page));

		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			__invalidate_icache_page_alias(virt, phys);
	}
}
Example #6
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh7705_flush_dcache_page(void *arg)
{
	struct page *page = arg;
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else
		__flush_dcache_page(__pa(page_address(page)));
}
Example #7
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh7705_flush_dcache_page(void *arg)
{
	struct page *page = arg;
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
		__flush_dcache_page(PHYSADDR(page_address(page)));
}
Example #8
/* XXX put nice comment here.  talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte
 */
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
	int rc = -ENOENT;

	LASSERTF(last > first, "last %llu first %llu\n", last, first);
	if (mapping_mapped(mapping)) {
		rc = 0;
		unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
				    last - first + 1, 0);
	}

	return rc;
}
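
For illustration, a hypothetical caller that drops the user mappings covering a byte range of a file could look like the snippet below; the wrapper name and parameters are invented for this example, and the range must span at least two bytes so that the LASSERTF(last > first) above holds.

/* Hypothetical usage: unmap user mappings of [pos, pos + count) in a file. */
static int drop_user_mappings(struct inode *inode, __u64 pos, __u64 count)
{
	/* 'last' is inclusive, hence the -1; caller guarantees count > 1 */
	return ll_teardown_mmaps(inode->i_mapping, pos, pos + count - 1);
}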
Example #9
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping)
			__flush_icache_all();
	}
}
Example #10
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}
Example #11
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/*
	 * If we have a mapping but the page is not mapped to user-space
	 * yet, we simply mark this page dirty and defer flushing the 
	 * caches until update_mmu().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;

	} else {

		unsigned long phys = page_to_phys(page);
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/* 
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(virt, phys);

		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			__invalidate_icache_page_alias(virt, phys);
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}
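
The DCACHE_ALIAS_* helpers used in both Xtensa examples are not included in this listing. Their intent is to compare the cache "colour" of two addresses, i.e. the index bits above PAGE_SHIFT within one cache way. A sketch of what such macros might look like, with DCACHE_WAY_SIZE standing in for the per-way cache size (assumed, inferred from the usage above):

/* Sketch: two addresses alias iff their index bits within a way are equal. */
#define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
#define DCACHE_ALIAS(a)		(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
#define DCACHE_ALIAS_EQ(a, b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)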
Example #12
void __flush_dcache_page(struct page *page)
{
#ifdef CONFIG_RALINK_SOC
	void *addr;

	if (page_mapping(page) && !page_mapped(page)) {
		SetPageDcacheDirty(page);
		return;
	}
#else
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}
#endif

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the tlb (and thus flushed) anyway.
	 */
#ifdef CONFIG_RALINK_SOC
	if (PageHighMem(page)) {
		addr = kmap_atomic(page, KM_PTE1);
		flush_data_cache_page((unsigned long)addr);
		kunmap_atomic(addr, KM_PTE1);
	} else {
		addr = (void *) page_address(page);
		flush_data_cache_page((unsigned long)addr);
	}
	ClearPageDcacheDirty(page);
#else
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
#endif
}
Example #13
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
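
As in the ARC and MIPS examples, the PG_dcache_clean bit cleared here is consumed later, when a PTE for the page is installed. A condensed sketch of that path, loosely modelled on ARM's __sync_icache_dcache() and simplified rather than quoted verbatim:

/* Sketch: finish the flush that flush_dcache_page() deferred above. */
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn = pte_pfn(pteval);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	/* test_and_set returns 0 if the page was still marked dirty */
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(page_mapping(page), page);

	if (pte_exec(pteval))
		__flush_icache_all();
}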
Example #14
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		int i, pc;
		unsigned long vto, kaddr, flags;
		kaddr = (unsigned long)page_address(page);
		cpu_dcache_wbinval_page(kaddr);
		pc = CACHE_SET(DCACHE) * CACHE_LINE_SIZE(DCACHE) / PAGE_SIZE;
		local_irq_save(flags);
		for (i = 0; i < pc; i++) {
			vto = kremap0(kaddr + i * PAGE_SIZE,
				      page_to_phys(page));
			cpu_dcache_wbinval_page(vto);
			kunmap01(vto);
		}
		local_irq_restore(flags);
	}
}
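
To make the loop bound concrete: pc is the number of page-sized slices in one cache way (sets × line size ÷ page size), i.e. the number of possible alias colours. With, say, 256 sets of 64-byte lines per way and 4 KiB pages (illustrative figures, not a claim about a particular part), one way spans 16 KiB, so pc = 4 and the kremap0() window writes back and invalidates the page at each of the four colours.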