/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        /*
         * Fall back to the standard layout if the personality
         * bit is set, or if the expected stack growth is unlimited:
         */
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE;

#ifdef CONFIG_PAX_RANDMMAP
                if (mm->pax_flags & MF_PAX_RANDMMAP)
                        mm->mmap_base += mm->delta_mmap;
#endif

                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base();

#ifdef CONFIG_PAX_RANDMMAP
                if (mm->pax_flags & MF_PAX_RANDMMAP)
                        mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
#endif

                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
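/*
 * For reference, a minimal sketch of the mmap_is_legacy() helper that
 * most of the variants in this listing call. It is modeled on the
 * open-coded test in the sysctl_legacy_va_layout variant further below
 * (personality bit, unlimited stack rlimit, sysctl); individual trees
 * may differ in detail:
 */
static int mmap_is_legacy(void)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}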
/*
 * This helper, called very early during the creation of a new process
 * VM image, picks both the legacy and the preferred mmap base address:
 */
static void arch_pick_mmap_base(unsigned long *base, unsigned long *legacy_base,
                                unsigned long random_factor, unsigned long task_size)
{
        *legacy_base = mmap_legacy_base(random_factor, task_size);
        if (mmap_is_legacy())
                *base = *legacy_base;
        else
                *base = mmap_base(random_factor, task_size);
}
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        if (mmap_is_legacy()) {
                mm->mmap_base = mmap_legacy_base();
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base();
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        /*
         * Fall back to the standard layout if the personality
         * bit is set, or if the expected stack growth is unlimited:
         */
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base();
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        if (!(2 & exec_shield) && mmap_is_legacy()) {
                mm->mmap_base = mmap_legacy_base();
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base();
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                if (!(current->personality & READ_IMPLIES_EXEC) &&
                    mmap_is_ia32())
                        mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE)
                random_factor = arch_mmap_rnd();

        if (mmap_is_legacy(rlim_stack)) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(random_factor, rlim_stack);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}
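/*
 * A sketch of a matching mmap_base() for the rlim_stack variant above,
 * assuming the common top-down calculation: reserve a gap below the
 * stack sized by the stack rlimit, clamp it between MIN_GAP and
 * MAX_GAP, then subtract the random factor. The MIN_GAP/MAX_GAP
 * definitions here are illustrative assumptions, not taken from this
 * listing:
 */
#define MIN_GAP         (128 * 1024 * 1024UL)
#define MAX_GAP         (TASK_SIZE / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
        unsigned long gap = rlim_stack->rlim_cur;

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}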
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE)
                random_factor = arch_mmap_rnd();

        mm->mmap_legacy_base = mmap_legacy_base(random_factor);

        if (mmap_is_legacy()) {
                mm->mmap_base = mm->mmap_legacy_base;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}
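/*
 * A plausible minimal sketch of the mmap_legacy_base() helper cached
 * above, consistent with the variants in this listing that compute
 * TASK_UNMAPPED_BASE + random_factor inline; real implementations may
 * additionally special-case 32-bit tasks:
 */
static unsigned long mmap_legacy_base(unsigned long rnd)
{
        return TASK_UNMAPPED_BASE + rnd;
}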
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        if (mmap_is_legacy()) {
                mm->mmap_base = mmap_legacy_base();
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base();
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
#ifdef CONFIG_X86_32
                if (!(current->personality & READ_IMPLIES_EXEC) &&
                    !(__supported_pte_mask & _PAGE_NX) &&
                    mmap_is_ia32())
                        mm->get_unmapped_exec_area = arch_get_unmapped_exec_area;
#endif
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        /*
         * Fall back to the standard layout if the personality
         * bit is set, or if the expected stack growth is unlimited:
         */
        if (sysctl_legacy_va_layout ||
            (current->personality & ADDR_COMPAT_LAYOUT) ||
            current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY) {
                mm->mmap_base = TASK_UNMAPPED_BASE;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(mm);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if ((current->flags & PF_RANDOMIZE) &&
            !(current->personality & ADDR_NO_RANDOMIZE))
                random_factor = (get_random_int() % (1 << mmap_rnd_bits)) << PAGE_SHIFT;

        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
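/*
 * Worked example for the variant above: with mmap_rnd_bits == 8 and
 * 4KB pages (PAGE_SHIFT == 12), random_factor is one of 256 page-
 * aligned offsets, so the mmap base moves within a 1MB window
 * (2^8 pages * 4KB). ADDR_NO_RANDOMIZE (set e.g. by `setarch -R`)
 * disables the offset entirely.
 */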
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE)
                random_factor = arch_mmap_rnd();

        /*
         * Fall back to the standard layout if the personality
         * bit is set, or if the expected stack growth is unlimited:
         */
        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                random_factor = random_factor << PAGE_SHIFT;
                if (TASK_IS_32BIT_ADDR)
                        random_factor &= 0xfffffful;
                else
                        random_factor &= 0xffffffful;
        }

        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}
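/*
 * Note on the masks above: the random value is shifted into a byte
 * offset before masking, so with 4KB pages (PAGE_SHIFT == 12) the
 * 0xffffff mask keeps the offset below 16MB (12 bits of page-granular
 * entropy for 32-bit tasks) and the 0xfffffff mask keeps it below
 * 256MB (16 bits for 64-bit tasks).
 */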
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

#ifdef CONFIG_PAX_RANDMMAP
        if (!(mm->pax_flags & MF_PAX_RANDMMAP))
#endif
        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                random_factor = random_factor << PAGE_SHIFT;
                if (TASK_IS_32BIT_ADDR)
                        random_factor &= 0xfffffful;
                else
                        random_factor &= 0xffffffful;
        }

        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;

#ifdef CONFIG_PAX_RANDMMAP
                if (mm->pax_flags & MF_PAX_RANDMMAP)
                        mm->mmap_base += mm->delta_mmap;
#endif

                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);

#ifdef CONFIG_PAX_RANDMMAP
                if (mm->pax_flags & MF_PAX_RANDMMAP)
                        mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
#endif

                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
#if !defined(__tilegx__)
        int is_32bit = 1;
#elif defined(CONFIG_COMPAT)
        int is_32bit = is_compat_task();
#else
        int is_32bit = 0;
#endif
        unsigned long random_factor = 0UL;

        /*
         *  8 bits of randomness in 32bit mmaps, 24 address space bits
         * 12 bits of randomness in 64bit mmaps, 28 address space bits
         */
        if (current->flags & PF_RANDOMIZE) {
                if (is_32bit)
                        random_factor = get_random_int() % (1<<8);
                else
                        random_factor = get_random_int() % (1<<12);
                random_factor <<= PAGE_SHIFT;
        }

        /*
         * Use standard layout if the expected stack growth is unlimited
         * or we are running native 64 bits.
         */
        if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
        } else {
                mm->mmap_base = mmap_base(mm);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
        }
}
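/*
 * The "24/28 address space bits" figures in the comment above assume
 * this architecture's default 64KB pages (PAGE_SHIFT == 16): 8 random
 * bits + 16 page bits = 24, and 12 + 16 = 28.
 */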
void arch_pick_mmap_layout(struct mm_struct *mm)
{
#if !defined(__tilegx__)
        int is_32bit = 1;
#elif defined(CONFIG_COMPAT)
        int is_32bit = is_compat_task();
#else
        int is_32bit = 0;
#endif

        /*
         * Use standard layout if the expected stack growth is unlimited
         * or we are running native 64 bits.
         */
        if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) {
                mm->mmap_base = TASK_UNMAPPED_BASE;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(mm);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
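/*
 * All of these variants are invoked from the exec path once the new
 * process image's mm is in place. In recent kernels the caller is
 * setup_new_exec() in fs/exec.c, which passes the saved stack rlimit
 * down, matching the rlim_stack variant earlier in this listing:
 *
 *      arch_pick_mmap_layout(current->mm, &bprm->rlim_stack);
 */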