Example #1
bt_paddr_t bt_mmu_extract(bt_pgd_t pgd_h, bt_vaddr_t virt, BT_u32 size) {
	bt_pte_t pte;
	bt_vaddr_t start, end, pg;
	bt_paddr_t pa;
	bt_pgd_t pgd = GET_PGD(pgd_h);

	start = BT_PAGE_TRUNC(virt);
	end = BT_PAGE_TRUNC(virt+size-1);

	// Check all pages exist.
	for(pg = start; pg <= end; pg += BT_PAGE_SIZE) {
		if(!pte_present(pgd, pg)) {
			return 0;
		}

		pte = virt_to_pte(pgd, pg);
		if(!page_present(pte, pg)) {
			return 0;
		}
	}

	pte = virt_to_pte(pgd, start);
	pa = (bt_paddr_t) pte_to_phys(pte, start);

	return pa + (bt_paddr_t) (virt - start);
}
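Example #2
/*
 * Tear down the per-mm stub mappings: clear the PTEs covering the
 * STUB_CODE and STUB_DATA pages, if present, when the address space
 * is destroyed.
 */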
void arch_exit_mmap(struct mm_struct *mm)
{
	pte_t *pte;

	pte = virt_to_pte(mm, STUB_CODE);
	if (pte != NULL)
		pte_clear(mm, STUB_CODE, pte);

	pte = virt_to_pte(mm, STUB_DATA);
	if (pte == NULL)
		return;

	pte_clear(mm, STUB_DATA, pte);
}
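Example #3
/*
 * Look up the PTE for virt in the current mm, faulting the page in
 * (with write access when is_write is set) if it is absent or lacks
 * the needed permission.  Returns NULL if the fault cannot be
 * satisfied or the page is still not present afterwards.
 */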
static pte_t *maybe_map(unsigned long virt, int is_write)
{
	pte_t *pte = virt_to_pte(current->mm, virt);
	int err, dummy_code;

	if ((pte == NULL) || !pte_present(*pte) ||
	    (is_write && !pte_write(*pte))) {
		err = handle_page_fault(virt, 0, is_write, 1, &dummy_code);
		if (err)
			return NULL;
		pte = virt_to_pte(current->mm, virt);
	}
	if (!pte_present(*pte))
		pte = NULL;

	return pte;
}
Example #4
int page_home(struct page *page)
{
	if (PageHighMem(page)) {
		return initial_page_home();
	} else {
		unsigned long kva = (unsigned long)page_address(page);
		return pte_to_home(*virt_to_pte(NULL, kva));
	}
}
Example #5
/*
 * The __w1data area holds data that is only written during initialization,
 * and is read-only and thus freely cacheable thereafter.  Fix the page
 * table entries that cover that region accordingly.
 */
static void mark_w1data_ro(void)
{
	/* Loop over page table entries */
	unsigned long addr = (unsigned long)__w1data_begin;
	BUG_ON((addr & (PAGE_SIZE-1)) != 0);
	for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
		unsigned long pfn = kaddr_to_pfn((void *)addr);
		pte_t *ptep = virt_to_pte(NULL, addr);
		BUG_ON(pte_huge(*ptep));   /* not relevant for kdata_huge */
		set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
	}
}
Example #6
static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
#ifdef CONFIG_HOMECACHE
    int home = initial_heap_home();
#endif
    unsigned long addr = (unsigned long) begin;

    if (kdata_huge && !initfree) {
        pr_warning("Warning: ignoring initfree=0:"
                   " incompatible with kdata=huge\n");
        initfree = 1;
    }
    end = (end + PAGE_SIZE - 1) & PAGE_MASK;
    local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
    for (addr = begin; addr < end; addr += PAGE_SIZE) {
        /*
         * Note we just reset the home here directly in the
         * page table.  We know this is safe because our caller
         * just flushed the caches on all the other cpus,
         * and they won't be touching any of these pages.
         */
        int pfn = kaddr_to_pfn((void *)addr);
        struct page *page = pfn_to_page(pfn);
        pte_t *ptep = virt_to_pte(NULL, addr);
        if (!initfree) {
            /*
             * If debugging page accesses then do not free
             * this memory but mark them not present - any
             * buggy init-section access will create a
             * kernel page fault:
             */
            pte_clear(&init_mm, addr, ptep);
            continue;
        }
#ifdef CONFIG_HOMECACHE
        set_page_home(page, home);
        __clear_bit(PG_homecache_nomigrate, &page->flags);
#endif
        __ClearPageReserved(page);
        init_page_count(page);
        if (pte_huge(*ptep))
            BUG_ON(!kdata_huge);
        else
            set_pte_at(&init_mm, addr, ptep,
                       pfn_pte(pfn, PAGE_KERNEL));
        memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
        free_page(addr);
        totalram_pages++;
    }
    pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
Example #7
// Return true (!= 0) if any referenced bits are set.
static int ref_bits_set (int exclude_irqhandler) {
    void *cur_addr;
    pte_t *pte;
    int i;
    int ret_val = 0;

    for (i = 0; i < cr_num_drivers; i++) {
        if (exclude_irqhandler) uprintk ("i %d: ", i);
        for (cur_addr = cr_base_address[i];
             cur_addr < cr_base_address[i] + cr_module_size[i];
             cur_addr += PAGE_SIZE) {
            
            pte = virt_to_pte (cur_addr);
            if (pte != NULL) {
                // See if we're excluding the interrupt handler
                // from this check.
                if (exclude_irqhandler &&
                    addr_contains_irq_handler (cur_addr)) {
                    pte_unmap(pte);
                    if (exclude_irqhandler) uprintk ("X");
                    continue;
                }

                // See if the page was referenced lately.
                if (pte_young(*pte) != 0) {
                    // kunmap_atomic (page, KM_IRQ1);
                    pte_unmap(pte);
                    if (exclude_irqhandler) uprintk ("1");
                    ret_val = 1;
                    continue;
                }

                if (exclude_irqhandler) uprintk ("0");
                
                // kunmap_atomic (page, KM_IRQ1);
                pte_unmap(pte);
            }
        }

        if (exclude_irqhandler) uprintk ("\n");
    }

    return ret_val;
}
Example #8
void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_pte(NULL, kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		*ptep = pte_set_home(pteval, home);
	}
}
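A minimal caller sketch for the helper above: it allocates a fresh lowmem block (count 1, unmapped, so the BUG_ON checks hold) and re-homes it to the current cpu's cache. The wrapper name, the GFP flags, and the choice of home are illustrative assumptions, not part of the example.
/* Sketch only: alloc_locally_homed(), GFP_KERNEL and the cpu-id home are
 * placeholders chosen for illustration. */
static struct page *alloc_locally_homed(unsigned int order)
{
	/* A freshly allocated lowmem block satisfies the BUG_ON preconditions:
	 * not highmem, page_count == 1, page_mapcount == 0. */
	struct page *page = alloc_pages(GFP_KERNEL, order);

	if (page)
		homecache_change_page_home(page, order, smp_processor_id());
	return page;
}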
Example #9
// Reset all referenced bits to 0.
static void clear_ref_bits (void) {
    void *cur_addr;
    pte_t *pte;
    int i;

    for (i = 0; i < cr_num_drivers; i++) {
        for (cur_addr = cr_base_address[i];
             cur_addr < cr_base_address[i] + cr_module_size[i];
             cur_addr += PAGE_SIZE) {
            
            pte = virt_to_pte (cur_addr);
            if (pte != NULL) {
                *pte = pte_mkold(*pte);
                // kunmap_atomic (page, KM_IRQ1);
                pte_unmap(pte);
            }
        }
    }
    
    global_flush_tlb();
}
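Examples #7 and #9 form a referenced-bit sampling pair. The sketch below shows how such a scan is typically driven; the caller name, the one-second window, and the log message are assumptions, not taken from the surrounding code.
// Sketch only: the function name, sampling interval and message are
// illustrative; clear_ref_bits()/ref_bits_set() are the helpers above.
// msleep() needs <linux/delay.h>.
static void sample_driver_activity (void) {
    clear_ref_bits ();            // reset the accessed bits for all drivers
    msleep (1000);                // sampling window (placeholder length)

    // Pass 0 so the interrupt handler pages are included in the check
    // (the per-page uprintk debug output is also suppressed in that case).
    if (ref_bits_set (0))
        uprintk ("driver pages were referenced during the last second\n");
}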
Example #10
/*
 * Walk the kernel page tables and derive the page_home() from
 * the PTEs, so that set_pte() can properly validate the caching
 * of all PTEs it sees.
 */
void __init set_page_homes(void)
{
#ifdef CONFIG_HOMECACHE
    struct zone *zone;
    int home = initial_heap_home();
    unsigned long address;

    /*
     * First walk the zones and set the pages to all have
     * the default heap caching.
     */
    for_each_zone(zone) {
        unsigned long pfn = zone->zone_start_pfn;
        unsigned long end_pfn = pfn + zone->spanned_pages;
        struct page *page = pfn_to_page(pfn);
        for (; pfn < end_pfn; ++pfn, ++page)
            set_page_home(page, home);
    }

    /*
     * Now walk through the loaded pages, update the page homecache,
     * and mark all pages as non-migrateable.  (Init pages that
     * are freed back to the heap are unmarked when we free them.)
     */
    for (address = PAGE_OFFSET; address < (unsigned long) _end;
            address += PAGE_SIZE) {
        enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };
        struct page *pg = virt_to_page((void *)address);
        pte_t pte = *virt_to_pte(NULL, address);

        /* Adjust page.home on all loaded pages. */
        BUG_ON(!pte_present(pte));
        set_page_home(pg, get_page_home(pte));
        __SetPageHomecacheNomigrate(pg);
    }
#endif
}
Example #11
/*
 * Identify large copies from remotely-cached memory, and copy them
 * via memcpy_multicache() if they look good, otherwise fall back
 * to the particular kind of copying passed as the memcpy_t function.
 */
static unsigned long fast_copy(void *dest, const void *source, int len,
			       memcpy_t func)
{
	/*
	 * Check if it's big enough to bother with.  We may end up doing a
	 * small copy via TLB manipulation if we're near a page boundary,
	 * but presumably we'll make it up when we hit the second page.
	 */
	while (len >= LARGE_COPY_CUTOFF) {
		int copy_size, bytes_left_on_page;
		pte_t *src_ptep, *dst_ptep;
		pte_t src_pte, dst_pte;
		struct page *src_page, *dst_page;

		/* Is the source page oloc'ed to a remote cpu? */
retry_source:
		src_ptep = virt_to_pte(current->mm, (unsigned long)source);
		if (src_ptep == NULL)
			break;
		src_pte = *src_ptep;
		if (!hv_pte_get_present(src_pte) ||
		    !hv_pte_get_readable(src_pte) ||
		    hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3)
			break;
		if (get_remote_cache_cpu(src_pte) == smp_processor_id())
			break;
		src_page = pfn_to_page(pte_pfn(src_pte));
		get_page(src_page);
		if (pte_val(src_pte) != pte_val(*src_ptep)) {
			put_page(src_page);
			goto retry_source;
		}
		if (pte_huge(src_pte)) {
			/* Adjust the PTE to correspond to a small page */
			int pfn = pte_pfn(src_pte);
			pfn += (((unsigned long)source & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			src_pte = pfn_pte(pfn, src_pte);
			src_pte = pte_mksmall(src_pte);
		}

		/* Is the destination page writable? */
retry_dest:
		dst_ptep = virt_to_pte(current->mm, (unsigned long)dest);
		if (dst_ptep == NULL) {
			put_page(src_page);
			break;
		}
		dst_pte = *dst_ptep;
		if (!hv_pte_get_present(dst_pte) ||
		    !hv_pte_get_writable(dst_pte)) {
			put_page(src_page);
			break;
		}
		dst_page = pfn_to_page(pte_pfn(dst_pte));
		if (dst_page == src_page) {
			/*
			 * Source and dest are on the same page; this
			 * potentially exposes us to incoherence if any
			 * part of src and dest overlap on a cache line.
			 * Just give up rather than trying to be precise.
			 */
			put_page(src_page);
			break;
		}
		get_page(dst_page);
		if (pte_val(dst_pte) != pte_val(*dst_ptep)) {
			put_page(dst_page);
			goto retry_dest;
		}
		if (pte_huge(dst_pte)) {
			/* Adjust the PTE to correspond to a small page */
			int pfn = pte_pfn(dst_pte);
			pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			dst_pte = pfn_pte(pfn, dst_pte);
			dst_pte = pte_mksmall(dst_pte);
		}

		/* All looks good: create a cachable PTE and copy from it */
		copy_size = len;
		bytes_left_on_page =
			PAGE_SIZE - (((int)source) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		bytes_left_on_page =
			PAGE_SIZE - (((int)dest) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		memcpy_multicache(dest, source, dst_pte, src_pte, copy_size);

		/* Release the pages */
		put_page(dst_page);
		put_page(src_page);

		/* Continue on the next page */
		dest += copy_size;
		source += copy_size;
		len -= copy_size;
	}

	return func(dest, source, len);
}
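Example #12
/* Variant of the previous example that reads the PFN through
 * hv_pte_get_pfn() instead of pte_pfn(). */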
static unsigned long fast_copy(void *dest, const void *source, int len,
			       memcpy_t func)
{
	/*
	 * Check if it's big enough to bother with.  We may end up doing a
	 * small copy via TLB manipulation if we're near a page boundary,
	 * but presumably we'll make it up when we hit the second page.
	 */
	while (len >= LARGE_COPY_CUTOFF) {
		int copy_size, bytes_left_on_page;
		pte_t *src_ptep, *dst_ptep;
		pte_t src_pte, dst_pte;
		struct page *src_page, *dst_page;

		/* Is the source page oloc'ed to a remote cpu? */
retry_source:
		src_ptep = virt_to_pte(current->mm, (unsigned long)source);
		if (src_ptep == NULL)
			break;
		src_pte = *src_ptep;
		if (!hv_pte_get_present(src_pte) ||
		    !hv_pte_get_readable(src_pte) ||
		    hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3)
			break;
		if (get_remote_cache_cpu(src_pte) == smp_processor_id())
			break;
		src_page = pfn_to_page(hv_pte_get_pfn(src_pte));
		get_page(src_page);
		if (pte_val(src_pte) != pte_val(*src_ptep)) {
			put_page(src_page);
			goto retry_source;
		}
		if (pte_huge(src_pte)) {
			/* Adjust the PTE to correspond to a small page */
			int pfn = hv_pte_get_pfn(src_pte);
			pfn += (((unsigned long)source & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			src_pte = pfn_pte(pfn, src_pte);
			src_pte = pte_mksmall(src_pte);
		}

		/* Is the destination page writable? */
retry_dest:
		dst_ptep = virt_to_pte(current->mm, (unsigned long)dest);
		if (dst_ptep == NULL) {
			put_page(src_page);
			break;
		}
		dst_pte = *dst_ptep;
		if (!hv_pte_get_present(dst_pte) ||
		    !hv_pte_get_writable(dst_pte)) {
			put_page(src_page);
			break;
		}
		dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte));
		if (dst_page == src_page) {
			/*
			 * Source and dest are on the same page; this
			 * potentially exposes us to incoherence if any
			 * part of src and dest overlap on a cache line.
			 * Just give up rather than trying to be precise.
			 */
			put_page(src_page);
			break;
		}
		get_page(dst_page);
		if (pte_val(dst_pte) != pte_val(*dst_ptep)) {
			put_page(dst_page);
			goto retry_dest;
		}
		if (pte_huge(dst_pte)) {
			/* Adjust the PTE to correspond to a small page */
			int pfn = hv_pte_get_pfn(dst_pte);
			pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			dst_pte = pfn_pte(pfn, dst_pte);
			dst_pte = pte_mksmall(dst_pte);
		}

		/* All looks good: create a cachable PTE and copy from it */
		copy_size = len;
		bytes_left_on_page =
			PAGE_SIZE - (((int)source) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		bytes_left_on_page =
			PAGE_SIZE - (((int)dest) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		memcpy_multicache(dest, source, dst_pte, src_pte, copy_size);

		/* Release the pages */
		put_page(dst_page);
		put_page(src_page);

		/* Continue on the next page */
		dest += copy_size;
		source += copy_size;
		len -= copy_size;
	}

	return func(dest, source, len);
}
Example #13
int bt_mmu_map(bt_pgd_t pgd_h, bt_paddr_t pa, bt_vaddr_t va, BT_u32 size, int type) {
	BT_u32 flag = 0;
	bt_pte_t pte;
	bt_paddr_t pg;
	BT_u32 ng = 0;
	bt_pgd_t pgd = GET_PGD(pgd_h);

	if((va + size) < 0xC0000000) {
		ng = MMU_PTE_NG;
	}

	pa = BT_PAGE_TRUNC(pa);		// Ensure correct alignments.
	va = BT_PAGE_TRUNC(va);
	size = BT_PAGE_ALIGN(size);

	switch(type) {				// Build up the ARM MMU flags from BT page types.
	case BT_PAGE_UNMAP:
		flag = 0;
		break;

	case BT_PAGE_READ:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_WBUF | MMU_PTE_CACHE | MMU_PTE_USER_RO);
		break;

	case BT_PAGE_WRITE:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_WBUF | MMU_PTE_CACHE | MMU_PTE_USER_RW);
		break;

	case BT_PAGE_SYSTEM:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_WBUF | MMU_PTE_CACHE | MMU_PTE_SYSTEM);
		break;

	case BT_PAGE_IOMEM:
		flag = (BT_u32) (MMU_PTE_PRESENT | MMU_PTE_SYSTEM);
		break;

	default:
		//do_kernel_panic("bt_mmu_map");
		return -1;
		break;
	}

	bt_mmu_flush_tlb();

	while(size > 0) {
		if(pte_present(pgd, va)) {
			pte = virt_to_pte(pgd, va);		// Get the page table from PGD.
		} else {
			// If it's a section or super-section then return an error! - (Kernel coherent pool?).
			pg = (bt_paddr_t) BT_CacheAlloc(&g_ptCache);
			if(!pg) {
				return -1;
			}

			memset((void *)pg, 0, MMU_L2TBL_SIZE);
			pte = (bt_pte_t) pg;
			pgd[PAGE_DIR(va)] = (BT_u32) bt_virt_to_phys(pte) | MMU_PDE_PRESENT;
		}

		pte[PAGE_TABLE(va)] = (BT_u32) pa | flag | ng;

		pa += BT_PAGE_SIZE;
		va += BT_PAGE_SIZE;
		size -= BT_PAGE_SIZE;
	}

	bt_mmu_flush_tlb();

	return 0;
}
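The map/extract pair (Examples #13 and #1) is easiest to follow end to end. The sketch below is a hypothetical caller: bt_mmu_get_pgd(), both addresses and the single-page size are assumptions for illustration only.
/* Sketch only: bt_mmu_get_pgd() and both addresses are placeholders. */
static void example_map_device(void) {
	bt_pgd_t pgd = bt_mmu_get_pgd();	// assumed accessor for the active PGD handle
	bt_paddr_t dev_phys = 0x20000000;	// placeholder device register base
	bt_vaddr_t dev_virt = 0xE0000000;	// placeholder kernel virtual window
	bt_paddr_t check;

	// Map one uncached page of device registers (BT_PAGE_IOMEM omits MMU_PTE_CACHE/WBUF).
	if(bt_mmu_map(pgd, dev_phys, dev_virt, BT_PAGE_SIZE, BT_PAGE_IOMEM)) {
		return;
	}

	// bt_mmu_extract() walks the same tables back to the physical address,
	// so check == dev_phys once the mapping above has succeeded.
	check = bt_mmu_extract(pgd, dev_virt, BT_PAGE_SIZE);
	(void) check;
}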