Example #1
void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
	struct page *page = pte_page(pte);

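	/*
	 * On the first call for this page, write the D-cache back and
	 * invalidate the whole I-cache so it observes the new contents;
	 * PG_dcache_clean records that this has been done. AIVIVT
	 * I-caches can hold stale virtual aliases of the same physical
	 * line, so they are invalidated on every call regardless.
	 */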
	if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
		__flush_dcache_area(page_address(page),
				PAGE_SIZE << compound_order(page));
		__flush_icache_all();
	} else if (icache_is_aivivt()) {
		__flush_icache_all();
	}
}
Example #2
File: flush.c Project: 0-T-0/ps4-linux
void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
	struct page *page = pte_page(pte);

	/* no flushing needed for anonymous pages */
	if (!page_mapping(page))
		return;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
		__flush_dcache_area(page_address(page),
				PAGE_SIZE << compound_order(page));
		__flush_icache_all();
	} else if (icache_is_aivivt()) {
		__flush_icache_all();
	}
}
Example #3
void __init mach_setup (char **cmdline)
{
	/* Enable the instruction and data caches (if present) */
	#if CONFIG_XILINX_MICROBLAZE0_USE_ICACHE==1
	__flush_icache_all();
	__enable_icache();
	#endif

	#if CONFIG_XILINX_MICROBLAZE0_USE_DCACHE==1
	__flush_dcache_all();
	__enable_dcache();
	#endif

	printk (KERN_INFO "CPU: MICROBLAZE\n");

	/* Now called from tty_io.c:init_console(), where it should be */
	/* xmbrs_console_init(); */

	/*
	 * Enable master control on the interrupt controller. Note this
	 * does not enable interrupts in the processor, nor does it
	 * enable individual IRQs on the controller. It just initialises
	 * the intc in preparation for these things.
	 */
	microblaze_intc_master_enable();

#ifdef CONFIG_XILINX_GPIO_0_INSTANCE
	/* Configure the GPIO */
	/* 8 inputs, 16 outputs */
	/* microblaze_gpio_setdir(CONFIG_XILINX_GPIO_0_BASEADDR,MICROBLAZE_GPIO_DIR); */
#endif
}
Example #4
File: flush.c Project: 0x7f454c46/linux
void sync_icache_aliases(void *kaddr, unsigned long len)
{
	unsigned long addr = (unsigned long)kaddr;

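	/*
	 * An aliasing (VIPT) I-cache cannot be invalidated by a single
	 * virtual range, since other aliases may remain: clean the
	 * D-cache to the point of unification and invalidate the whole
	 * I-cache instead. A non-aliasing I-cache only needs the
	 * affected range.
	 */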
	if (icache_is_aliasing()) {
		__clean_dcache_area_pou(kaddr, len);
		__flush_icache_all();
	} else {
		flush_icache_range(addr, addr + len);
	}
}
Example #5
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	unsigned long flags;

	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
	__tlb_switch_to_guest()(kvm, &flags);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi(ipas2e1is, ipa);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (!has_vhe() && icache_is_vpipt())
		__flush_icache_all();

	__tlb_switch_to_host()(kvm, flags);
}
Example #6
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

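	/*
	 * For a lowmem page whose mapping is not yet visible in
	 * userspace, defer the work: mark the D-cache dirty and flush
	 * later, when the page is actually mapped. Otherwise flush the
	 * D-cache now, plus the I-cache if there is a mapping.
	 */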
	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping)
			__flush_icache_all();
	}
}
Example #7
static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len)
{
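	/*
	 * Only executable mappings can leave stale lines in the
	 * I-cache; writes to non-executable VMAs need no extra
	 * maintenance here.
	 */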
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_aliasing()) {
			__flush_dcache_area(kaddr, len);
			__flush_icache_all();
		} else {
			flush_icache_range(addr, addr + len);
		}
	}
}
Example #8
File: flush.c Project: 0-T-0/ps4-linux
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
Example #9
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
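	/*
	 * The D-cache needs no maintenance when user addresses change
	 * (it is physically tagged); only an executable range requires
	 * I-cache invalidation, and with no cheap way to do that by
	 * user VA here, the whole I-cache is flushed.
	 */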
	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}
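
The common thread in these examples is the write-then-execute hazard: once the kernel stores new instructions through the D-cache, the I-cache may still serve stale lines until maintenance is done. Below is a minimal sketch of the calling side, assuming a writable instruction slot; patch_insn and slot are hypothetical names for illustration, not taken from any of the projects above:

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cacheflush.h>

/*
 * Hypothetical helper: write one instruction word, then synchronise
 * the caches so no CPU can fetch the stale encoding.
 * flush_icache_range() cleans the D-cache and invalidates the I-cache
 * for the given range.
 */
static void patch_insn(u32 *slot, u32 insn)
{
	WRITE_ONCE(*slot, insn);	/* new bytes land in the D-cache */
	flush_icache_range((unsigned long)slot,
			   (unsigned long)slot + sizeof(insn));
}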