/*
 * Install a huge-page mapping: write the (possibly rewritten) pte value
 * into the segment-table entry that @pteptr actually points at.
 *
 * On machines without hardware huge-page support (!MACHINE_HAS_HPAGE) the
 * huge pte is emulated: the value stored in the segment entry is rebuilt
 * to point at a software page table whose address was stashed in
 * pte_page(pteval)[1].index, preserving only the invalid/read-only bits
 * of the original pte.  NOTE(review): the stashing of that pointer in the
 * struct page's .index field happens elsewhere — confirm against the
 * matching huge-page allocation path.
 *
 * If the mm uses a shadow (noexec) address space, an equivalent entry is
 * also written into the shadow segment table; the shadow page table is
 * assumed to sit PTRS_PER_PTE entries after the primary one.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *pteptr, pte_t pteval)
{
	/* @pteptr really addresses a segment-table (pmd) slot here. */
	pmd_t *pmdp = (pmd_t *) pteptr;
	pte_t shadow_pteval = pteval;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		/* Recover the software page table backing this huge pte. */
		pteptr = (pte_t *) pte_page(pteval)[1].index;
		/* Keep only the protection/validity bits of the old value. */
		mask = pte_val(pteval) &
				(_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
		/* Rebuild as a normal segment entry pointing at that table. */
		pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
		if (mm->context.noexec) {
			/* Shadow page table follows the primary one. */
			pteptr += PTRS_PER_PTE;
			pte_val(shadow_pteval) =
					(_SEGMENT_ENTRY + __pa(pteptr)) | mask;
		}
	}

	/* Publish the entry; then mirror it into the shadow table if any. */
	pmd_val(*pmdp) = pte_val(pteval);
	if (mm->context.noexec) {
		pmdp = get_shadow_table(pmdp);
		pmd_val(*pmdp) = pte_val(shadow_pteval);
	}
}
Example no. 2
0
/*
 * Free a region/segment (crst) table, together with its shadow table
 * if the address space has one.
 */
void crst_table_free(unsigned long *table)
{
	unsigned long *shadow;

	/* Release the shadow copy first, if one was allocated. */
	shadow = get_shadow_table(table);
	if (shadow)
		free_pages((unsigned long) shadow, ALLOC_ORDER);

	/* Now release the primary table itself. */
	free_pages((unsigned long) table, ALLOC_ORDER);
}
Example no. 3
0
/*
 * Free a region/segment (crst) table belonging to @mm: unlink its page
 * from the mm's page-table list under the page_table_lock, then release
 * the shadow table (if any) and the table itself.
 */
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page = virt_to_page(table);
	unsigned long *shadow = get_shadow_table(table);

	/* Remove the backing page from the mm's bookkeeping list. */
	spin_lock(&mm->page_table_lock);
	list_del(&page->lru);
	spin_unlock(&mm->page_table_lock);

	/* Shadow copy goes first, then the primary table. */
	if (shadow)
		free_pages((unsigned long) shadow, ALLOC_ORDER);
	free_pages((unsigned long) table, ALLOC_ORDER);
}