/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, VirtualAddress address)
{
	HV_PTE *l1_pgtable = kbt->pgtable;
	HV_PTE *l2_pgtable;
	unsigned long pfn;
	HV_PTE pte;
	struct page *page;

	if (l1_pgtable == NULL)
		return 0;	/* can't read user space in other tasks */

	pte = l1_pgtable[HV_L1_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		if (!pfn_valid(pfn)) {
			pr_err("huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}

	page = pfn_to_page(pfn);
	if (PageHighMem(page)) {
		pr_err("L2 page table not in LOWMEM (%#llx)\n",
		       HV_PFN_TO_CPA(pfn));
		return 0;
	}
	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
	pte = l2_pgtable[HV_L2_INDEX(address)];
	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}
/* Is address valid for reading? */
static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
{
	HV_PTE *l1_pgtable = kbt->pgtable;
	HV_PTE *l2_pgtable;
	unsigned long pfn;
	HV_PTE pte;
	struct page *page;

	if (l1_pgtable == NULL)
		return 0;	/* can't read user space in other tasks */

#ifdef CONFIG_64BIT
	/* Find the real l1_pgtable by looking in the l0_pgtable. */
	pte = l1_pgtable[HV_L0_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		if (!pfn_valid(pfn)) {
			pr_err("L0 huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}
	page = pfn_to_page(pfn);
	BUG_ON(PageHighMem(page));	/* No HIGHMEM on 64-bit. */
	l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
#endif

	pte = l1_pgtable[HV_L1_INDEX(address)];
	if (!hv_pte_get_present(pte))
		return 0;
	pfn = hv_pte_get_pfn(pte);
	if (pte_huge(pte)) {
		if (!pfn_valid(pfn)) {
			pr_err("huge page has bad pfn %#lx\n", pfn);
			return 0;
		}
		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
	}

	page = pfn_to_page(pfn);
	if (PageHighMem(page)) {
		pr_err("L2 page table not in LOWMEM (%#llx)\n",
		       HV_PFN_TO_CPA(pfn));
		return 0;
	}
	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
	pte = l2_pgtable[HV_L2_INDEX(address)];
	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
}
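/*
 * Illustrative sketch (an assumption, not code from this file): how a
 * backtrace read callback might gate reads on valid_address() before
 * touching memory.  in_kernel_stack() is a hypothetical helper assumed
 * to check that "address" lies within this task's kernel stack;
 * __kernel_text_address(), PAGE_OFFSET, and __copy_from_user_inatomic()
 * are standard kernel interfaces.
 */
static bool in_kernel_stack(struct KBacktraceIterator *kbt,
			    unsigned long address);	/* assumed helper */

static int read_memory_func(void *result, unsigned long address,
			    unsigned int size, void *vkbt)
{
	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;

	if (__kernel_text_address(address)) {
		/* OK to read kernel code. */
	} else if (address >= PAGE_OFFSET) {
		/* Only tolerate kernel-space reads of this task's stack. */
		if (!in_kernel_stack(kbt, address))
			return 0;
	} else if (!valid_address(kbt, address)) {
		return 0;	/* invalid user-space address */
	}
	return __copy_from_user_inatomic(result,
					 (void __user __force *)address,
					 size) == 0;
}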
/*
 * Identify large copies from remotely-cached memory, and copy them
 * via memcpy_multicache() if they look good, otherwise fall back
 * to the particular kind of copying passed as the memcpy_t function.
 */
static unsigned long fast_copy(void *dest, const void *source, int len,
			       memcpy_t func)
{
	/*
	 * Check if it's big enough to bother with.  We may end up doing a
	 * small copy via TLB manipulation if we're near a page boundary,
	 * but presumably we'll make it up when we hit the second page.
	 */
	while (len >= LARGE_COPY_CUTOFF) {
		int copy_size, bytes_left_on_page;
		pte_t *src_ptep, *dst_ptep;
		pte_t src_pte, dst_pte;
		struct page *src_page, *dst_page;

		/* Is the source page oloc'ed to a remote cpu? */
retry_source:
		src_ptep = virt_to_pte(current->mm, (unsigned long)source);
		if (src_ptep == NULL)
			break;
		src_pte = *src_ptep;
		if (!hv_pte_get_present(src_pte) ||
		    !hv_pte_get_readable(src_pte) ||
		    hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3)
			break;
		if (get_remote_cache_cpu(src_pte) == smp_processor_id())
			break;
		src_page = pfn_to_page(pte_pfn(src_pte));
		get_page(src_page);
		if (pte_val(src_pte) != pte_val(*src_ptep)) {
			put_page(src_page);
			goto retry_source;
		}
		if (pte_huge(src_pte)) {
			/* Adjust the PTE to correspond to a small page */
			int pfn = pte_pfn(src_pte);
			pfn += (((unsigned long)source & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			src_pte = pfn_pte(pfn, src_pte);
			src_pte = pte_mksmall(src_pte);
		}

		/* Is the destination page writable? */
retry_dest:
		dst_ptep = virt_to_pte(current->mm, (unsigned long)dest);
		if (dst_ptep == NULL) {
			put_page(src_page);
			break;
		}
		dst_pte = *dst_ptep;
		if (!hv_pte_get_present(dst_pte) ||
		    !hv_pte_get_writable(dst_pte)) {
			put_page(src_page);
			break;
		}
		dst_page = pfn_to_page(pte_pfn(dst_pte));
		if (dst_page == src_page) {
			/*
			 * Source and dest are on the same page; this
			 * potentially exposes us to incoherence if any
			 * part of src and dest overlap on a cache line.
			 * Just give up rather than trying to be precise.
			 */
			put_page(src_page);
			break;
		}
		get_page(dst_page);
		if (pte_val(dst_pte) != pte_val(*dst_ptep)) {
			put_page(dst_page);
			goto retry_dest;
		}
		if (pte_huge(dst_pte)) {
			/* Adjust the PTE to correspond to a small page */
			int pfn = pte_pfn(dst_pte);
			pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			dst_pte = pfn_pte(pfn, dst_pte);
			dst_pte = pte_mksmall(dst_pte);
		}

		/* All looks good: create a cachable PTE and copy from it */
		copy_size = len;
		bytes_left_on_page =
			PAGE_SIZE - (((int)source) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		bytes_left_on_page =
			PAGE_SIZE - (((int)dest) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		memcpy_multicache(dest, source, dst_pte, src_pte, copy_size);

		/* Release the pages */
		put_page(dst_page);
		put_page(src_page);

		/* Continue on the next page */
		dest += copy_size;
		source += copy_size;
		len -= copy_size;
	}

	return func(dest, source, len);
}
static unsigned long fast_copy(void *dest, const void *source, int len,
			       memcpy_t func)
{
	/*
	 * Check if it's big enough to bother with.  We may end up doing a
	 * small copy via TLB manipulation if we're near a page boundary,
	 * but presumably we'll make it up when we hit the second page.
	 */
	while (len >= LARGE_COPY_CUTOFF) {
		int copy_size, bytes_left_on_page;
		pte_t *src_ptep, *dst_ptep;
		pte_t src_pte, dst_pte;
		struct page *src_page, *dst_page;

		/* Is the source page oloc'ed to a remote cpu? */
retry_source:
		src_ptep = virt_to_pte(current->mm, (unsigned long)source);
		if (src_ptep == NULL)
			break;
		src_pte = *src_ptep;
		if (!hv_pte_get_present(src_pte) ||
		    !hv_pte_get_readable(src_pte) ||
		    hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3)
			break;
		if (get_remote_cache_cpu(src_pte) == smp_processor_id())
			break;
		src_page = pfn_to_page(hv_pte_get_pfn(src_pte));
		get_page(src_page);
		if (pte_val(src_pte) != pte_val(*src_ptep)) {
			put_page(src_page);
			goto retry_source;
		}
		if (pte_huge(src_pte)) {
			/* Adjust the PTE to correspond to a small page */
			int pfn = hv_pte_get_pfn(src_pte);
			pfn += (((unsigned long)source & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			src_pte = pfn_pte(pfn, src_pte);
			src_pte = pte_mksmall(src_pte);
		}

		/* Is the destination page writable? */
retry_dest:
		dst_ptep = virt_to_pte(current->mm, (unsigned long)dest);
		if (dst_ptep == NULL) {
			put_page(src_page);
			break;
		}
		dst_pte = *dst_ptep;
		if (!hv_pte_get_present(dst_pte) ||
		    !hv_pte_get_writable(dst_pte)) {
			put_page(src_page);
			break;
		}
		dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte));
		if (dst_page == src_page) {
			/*
			 * Source and dest are on the same page; this
			 * potentially exposes us to incoherence if any
			 * part of src and dest overlap on a cache line.
			 * Just give up rather than trying to be precise.
			 */
			put_page(src_page);
			break;
		}
		get_page(dst_page);
		if (pte_val(dst_pte) != pte_val(*dst_ptep)) {
			put_page(dst_page);
			goto retry_dest;
		}
		if (pte_huge(dst_pte)) {
			/* Adjust the PTE to correspond to a small page */
			int pfn = hv_pte_get_pfn(dst_pte);
			pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
				>> PAGE_SHIFT);
			dst_pte = pfn_pte(pfn, dst_pte);
			dst_pte = pte_mksmall(dst_pte);
		}

		/* All looks good: create a cachable PTE and copy from it */
		copy_size = len;
		bytes_left_on_page =
			PAGE_SIZE - (((int)source) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		bytes_left_on_page =
			PAGE_SIZE - (((int)dest) & (PAGE_SIZE-1));
		if (copy_size > bytes_left_on_page)
			copy_size = bytes_left_on_page;
		memcpy_multicache(dest, source, dst_pte, src_pte, copy_size);

		/* Release the pages */
		put_page(dst_page);
		put_page(src_page);

		/* Continue on the next page */
		dest += copy_size;
		source += copy_size;
		len -= copy_size;
	}

	return func(dest, source, len);
}
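/*
 * Illustrative sketch (an assumption, not code from this file): a copy
 * wrapper dispatching through fast_copy().  copy_plain() is a
 * hypothetical memcpy_t-compatible fallback assumed to return the
 * number of bytes not copied; LARGE_COPY_CUTOFF and fast_copy() are as
 * defined above.  Small copies skip the page-table probing entirely;
 * large ones let fast_copy() peel off page-sized chunks via
 * memcpy_multicache() and hand any remainder back to the fallback.
 */
static unsigned long copy_plain(void *dest, const void *source,
				int len);	/* assumed fallback */

static unsigned long copy_large(void *dest, const void *source, int len)
{
	if (len < LARGE_COPY_CUTOFF)
		return copy_plain(dest, source, len);
	return fast_copy(dest, source, len, copy_plain);
}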