Example #1
/**
 * flush_icache_range - Globally flush dcache and invalidate icache for region
 * @start: The starting virtual address of the region.
 * @end: The ending virtual address of the region.
 *
 * This is used by the kernel to globally flush some code it has just written
 * from the dcache back to RAM and then to globally invalidate the icache over
 * that region so that the code can be run on all CPUs in the system.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long start_page, end_page;
	unsigned long flags;

	flags = smp_lock_cache();

	if (end > 0x80000000UL) {
		/* addresses above 0xa0000000 do not go through the cache */
		if (end > 0xa0000000UL) {
			end = 0xa0000000UL;
			if (start >= end)
				goto done;
		}

		/* kernel addresses between 0x80000000 and 0x9fffffff do not
		 * require page tables, so we just map such addresses
		 * directly */
		start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
		mn10300_local_dcache_flush_range(start_page, end);
		mn10300_local_icache_inv_range(start_page, end);
		smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end);
		if (start_page == start)
			goto done;
		end = start_page;
	}

	start_page = start & PAGE_MASK;
	end_page = (end - 1) & PAGE_MASK;

	if (start_page == end_page) {
		/* the first and last bytes are on the same page */
		flush_icache_page_range(start, end);
	} else if (start_page + PAGE_SIZE == end_page) {
		/* split over two virtually contiguous pages */
		flush_icache_page_range(start, end_page);
		flush_icache_page_range(end_page, end);
	} else {
		/* more than 2 pages; just flush the entire cache */
		mn10300_dcache_flush();
		mn10300_icache_inv();
		smp_cache_call(SMP_IDCACHE_INV_FLUSH, 0, 0);
	}

done:
	smp_unlock_cache(flags);
}
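
A minimal caller sketch (the patch_and_flush() helper and its arguments are hypothetical, not part of the kernel): after writing new instructions through the dcache, the writer must call flush_icache_range() before any CPU may execute them.

/* hypothetical helper: copy new code into place, then make it
 * executable on every CPU in the system */
static void patch_and_flush(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);	/* the stores land in the dcache */
	flush_icache_range((unsigned long) dst,
			   (unsigned long) dst + len);
}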
Example #3
/**
 * mn10300_dcache_inv - Globally invalidate data cache
 *
 * Invalidate the data cache on all CPUs.
 */
void mn10300_dcache_inv(void)
{
	unsigned long flags;

	flags = smp_lock_cache();
	mn10300_local_dcache_inv();
	smp_cache_call(SMP_DCACHE_INV, 0, 0);
	smp_unlock_cache(flags);
}
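
The function is a thin SMP wrapper: it invalidates locally, then reaches the other CPUs via smp_cache_call(). A hedged usage sketch, noting that invalidation discards dirty lines without writing them back:

	/* this CPU only */
	mn10300_local_dcache_inv();
	/* all CPUs; any dirty data in the dcache is lost, so flush
	 * first if the cached contents must survive */
	mn10300_dcache_inv();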
Example #4
/**
 * mn10300_icache_inv_range2 - Globally invalidate range of instruction cache
 * @start: The start address of the region to be invalidated.
 * @size: The size of the region to be invalidated.
 *
 * Invalidate a range of addresses in the instruction cache on all CPUs,
 * between start and start+size-1 inclusive.
 */
void mn10300_icache_inv_range2(unsigned long start, unsigned long size)
{
	unsigned long flags;

	flags = smp_lock_cache();
	mn10300_local_icache_inv_range2(start, size);
	smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + size);
	smp_unlock_cache(flags);
}
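
The "2" suffix marks the (start, size) calling convention; as the smp_cache_call() line shows, the broadcast converts it back to the (start, end) form. A sketch of the two equivalent spellings (addr and len are hypothetical):

	/* both invalidate addr .. addr+len-1 in the icache on all CPUs */
	mn10300_icache_inv_range2(addr, len);		/* start + size */
	mn10300_icache_inv_range(addr, addr + len);	/* start + end  */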
Example #5
/**
 * mn10300_dcache_inv_range - Globally invalidate range of data cache
 * @start: The start address of the region to be invalidated.
 * @end: The end address of the region to be invalidated.
 *
 * Invalidate a range of addresses in the data cache on all CPUs, between start
 * and end-1 inclusive.
 */
void mn10300_dcache_inv_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	flags = smp_lock_cache();
	mn10300_local_dcache_inv_range(start, end);
	smp_cache_call(SMP_DCACHE_INV_RANGE, start, end);
	smp_unlock_cache(flags);
}
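
A hedged sketch of the classic use for a range invalidation, receiving a DMA buffer (buf and len are hypothetical):

	/* a device has just DMAed fresh data into buf; drop any stale
	 * cached copies so the CPU reads the new contents from RAM */
	mn10300_dcache_inv_range(buf, buf + len);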
Example #6
/**
 * mn10300_dcache_flush_range2 - Globally flush range of data cache
 * @start: The start address of the region to be flushed.
 * @size: The size of the region to be flushed.
 *
 * Flush a range of addresses in the data cache on all CPUs, between start and
 * start+size-1 inclusive.
 */
void mn10300_dcache_flush_range2(unsigned long start, unsigned long size)
{
	unsigned long flags;

	flags = smp_lock_cache();
	mn10300_local_dcache_flush_range2(start, size);
	smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + size);
	smp_unlock_cache(flags);
}
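
A hedged sketch of the converse DMA direction (buf and len are hypothetical):

	/* push the CPU's dirty lines out to RAM before a device reads
	 * the buffer directly; covers buf .. buf+len-1 */
	mn10300_dcache_flush_range2(buf, len);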
Example #7
/**
 * mn10300_dcache_flush - Globally flush data cache
 *
 * Flush the data cache on all CPUs.
 */
void mn10300_dcache_flush(void)
{
	unsigned long flags;

	flags = smp_lock_cache();
	mn10300_local_dcache_flush();
	smp_cache_call(SMP_DCACHE_FLUSH, 0, 0);
	smp_unlock_cache(flags);
}
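
Unlike mn10300_dcache_inv() in Example #3, a flush writes dirty lines back to RAM and leaves the cached copies valid. A one-line sketch:

	/* make RAM consistent with every CPU's dcache without
	 * discarding anything */
	mn10300_dcache_flush();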
Example #8
/**
 * mn10300_icache_inv_page - Globally invalidate a page of instruction cache
 * @start: The address of the page of memory to be invalidated.
 *
 * Invalidate a range of addresses in the instruction cache on all CPUs
 * covering the page that includes the given address.
 */
void mn10300_icache_inv_page(unsigned long start)
{
	unsigned long flags;

	start &= ~(PAGE_SIZE-1);

	flags = smp_lock_cache();
	mn10300_local_icache_inv_page(start);
	smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + PAGE_SIZE);
	smp_unlock_cache(flags);
}
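
The function aligns the address down itself, so callers may pass any address within the page. A sketch (addr is hypothetical):

	/* equivalent to invalidating the PAGE_SIZE bytes starting at
	 * addr & ~(PAGE_SIZE-1) on every CPU */
	mn10300_icache_inv_page(addr);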
Example #9
/**
 * mn10300_dcache_flush_page - Globally flush a page of data cache
 * @start: The address of the page of memory to be flushed.
 *
 * Flush a range of addresses in the data cache on all CPUs covering
 * the page that includes the given address.
 */
void mn10300_dcache_flush_page(unsigned long start)
{
	unsigned long flags;

	start &= ~(PAGE_SIZE-1);

	flags = smp_lock_cache();
	mn10300_local_dcache_flush_page(start);
	smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + PAGE_SIZE);
	smp_unlock_cache(flags);
}
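
As in Example #8, the page alignment is done internally. A sketch (addr is hypothetical):

	/* write back the whole page containing addr from every
	 * CPU's dcache */
	mn10300_dcache_flush_page(addr);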
Example #10
/**
 * flush_icache_page - Flush a page from the dcache and invalidate the icache
 * @vma: The VMA the page is part of.
 * @page: The page to be flushed.
 *
 * Write a page back from the dcache and invalidate the icache so that we can
 * run code from it that we've just written into it.
 */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long start = page_to_phys(page);
	unsigned long flags;

	flags = smp_lock_cache();

	mn10300_local_dcache_flush_page(start);
	mn10300_local_icache_inv_page(start);

	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE);
	smp_unlock_cache(flags);
}
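
Note that the function operates on the page's physical address (page_to_phys()), so it works regardless of where the page is mapped. A hedged caller sketch (the surrounding context is hypothetical):

	/* after the kernel writes instructions into a user page, e.g.
	 * when installing a breakpoint, make them fetchable */
	flush_icache_page(vma, page);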
Example #11
/**
 * flush_icache_page_range - Flush dcache and invalidate icache for part of a
 *				single page
 * @start: The starting virtual address of the page part.
 * @end: The ending virtual address of the page part.
 *
 * Flush the dcache and invalidate the icache for part of a single page, as
 * determined by the virtual addresses given.  The page must be in the paged
 * area.
 */
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	/* work out how much of the page to flush */
	off = start & ~PAGE_MASK;
	size = end - start;

	/* get the physical address the page is mapped to from the page
	 * tables */
	pgd = pgd_offset(current->mm, start);
	if (!pgd || !pgd_val(*pgd))
		return;

	pud = pud_offset(pgd, start);
	if (!pud || !pud_val(*pud))
		return;

	pmd = pmd_offset(pud, start);
	if (!pmd || !pmd_val(*pmd))
		return;

	ppte = pte_offset_map(pmd, start);
	if (!ppte)
		return;
	pte = *ppte;
	pte_unmap(ppte);

	if (pte_none(pte))
		return;

	page = pte_page(pte);
	if (!page)
		return;

	addr = page_to_phys(page);

	/* flush the dcache and invalidate the icache coverage on that
	 * region */
	mn10300_local_dcache_flush_range2(addr + off, size);
	mn10300_local_icache_inv_range2(addr + off, size);
	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
}
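
A worked instance of the offset arithmetic, with hypothetical numbers (assuming 4 KiB pages):

	/* hypothetical: start = 0x08001f00, end = 0x08001f80 */
	off  = start & ~PAGE_MASK;	/* 0xf00: offset into the page */
	size = end - start;		/* 0x80 bytes to flush */
	/* if the page is mapped at physical 0x04321000, then
	 * addr + off = 0x04321f00, and the local flush/invalidate
	 * covers physical 0x04321f00 .. 0x04321f7f, while the
	 * smp_cache_call() broadcast passes the original virtual range */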