int exec_setup_stack(struct lwp *l, struct exec_package *epp) { u_long max_stack_size; u_long access_linear_min, access_size; u_long noaccess_linear_min, noaccess_size; #ifndef USRSTACK32 #define USRSTACK32 (0x00000000ffffffffL&~PGOFSET) #endif if (epp->ep_flags & EXEC_32) { epp->ep_minsaddr = USRSTACK32; max_stack_size = MAXSSIZ; } else { epp->ep_minsaddr = USRSTACK; max_stack_size = MAXSSIZ; } epp->ep_ssize = l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur; #ifdef PAX_ASLR pax_aslr_stack(l, epp, &max_stack_size); #endif /* PAX_ASLR */ l->l_proc->p_stackbase = epp->ep_minsaddr; epp->ep_maxsaddr = (u_long)STACK_GROW(epp->ep_minsaddr, max_stack_size); /* * set up commands for stack. note that this takes *two*, one to * map the part of the stack which we can access, and one to map * the part which we can't. * * arguably, it could be made into one, but that would require the * addition of another mapping proc, which is unnecessary */ access_size = epp->ep_ssize; access_linear_min = (u_long)STACK_ALLOC(epp->ep_minsaddr, access_size); noaccess_size = max_stack_size - access_size; noaccess_linear_min = (u_long)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr, access_size), noaccess_size); if (noaccess_size > 0 && noaccess_size <= MAXSSIZ) { NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size, noaccess_linear_min, NULL, 0, VM_PROT_NONE, VMCMD_STACK); } KASSERT(access_size > 0 && access_size <= MAXSSIZ); NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, access_size, access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE, VMCMD_STACK); return 0; }
int darwin_exec_setup_stack(struct lwp *l, struct exec_package *epp) { u_long max_stack_size; u_long access_linear_min, access_size; u_long noaccess_linear_min, noaccess_size; if (epp->ep_flags & EXEC_32) { epp->ep_minsaddr = DARWIN_USRSTACK32; max_stack_size = MAXSSIZ; } else { epp->ep_minsaddr = DARWIN_USRSTACK; max_stack_size = MAXSSIZ; } epp->ep_maxsaddr = (u_long)STACK_GROW(epp->ep_minsaddr, max_stack_size); epp->ep_ssize = l->l_proc->p_rlimit[RLIMIT_STACK].rlim_cur; /* * set up commands for stack. note that this takes *two*, one to * map the part of the stack which we can access, and one to map * the part which we can't. * * arguably, it could be made into one, but that would require the * addition of another mapping proc, which is unnecessary */ access_size = epp->ep_ssize; access_linear_min = (u_long)STACK_ALLOC(epp->ep_minsaddr, access_size); noaccess_size = max_stack_size - access_size; noaccess_linear_min = (u_long)STACK_ALLOC(STACK_GROW(epp->ep_minsaddr, access_size), noaccess_size); if (noaccess_size > 0) { NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, noaccess_size, noaccess_linear_min, NULL, 0, VM_PROT_NONE, VMCMD_STACK); } KASSERT(access_size > 0); NEW_VMCMD2(&epp->ep_vmcmds, vmcmd_map_zero, access_size, access_linear_min, NULL, 0, VM_PROT_READ | VM_PROT_WRITE, VMCMD_STACK); return 0; }
/*
 * Load a psection at the appropriate address
 *
 * Computes the page-aligned mapping base, the protection bits from the
 * segment's PF_* flags, and emits vmcmds to map the file-backed portion
 * (paged where possible, readvn for a writable, non-page-aligned tail)
 * and a zero-fill vmcmd for any bss beyond the file data.  On return,
 * *addr holds the (possibly alignment-truncated) load address and *size
 * the total mapped size including the alignment slack.
 */
void
ELFNAME(load_psection)(struct exec_vmcmd_set *vcset, struct vnode *vp,
    Elf_Phdr *ph, Elf_Addr *addr, Elf_Addr *size, int *prot, int flags)
{
	u_long uaddr, msize, lsize, psize, rm, rf;
	long diff, offset, bdiff;
	Elf_Addr base;

	/*
	 * If the user specified an address, then we load there.
	 */
	if (*addr != ELFDEFNNAME(NO_ADDR)) {
		if (ph->p_align > 1) {
			*addr = ELF_TRUNC(*addr, ph->p_align);
			diff = ph->p_vaddr - ELF_TRUNC(ph->p_vaddr,
			    ph->p_align);
			/* page align vaddr */
			base = *addr + trunc_page(ph->p_vaddr)
			    - ELF_TRUNC(ph->p_vaddr, ph->p_align);
			bdiff = ph->p_vaddr - trunc_page(ph->p_vaddr);
		} else {
			/*
			 * Previously base/bdiff were left uninitialized on
			 * this path and then read below (undefined
			 * behavior).  With no alignment constraint, map at
			 * the page containing the requested address.
			 */
			diff = 0;
			base = trunc_page(*addr);
			bdiff = *addr - base;
		}
	} else {
		*addr = uaddr = ph->p_vaddr;
		if (ph->p_align > 1)
			*addr = ELF_TRUNC(uaddr, ph->p_align);
		base = trunc_page(uaddr);
		bdiff = uaddr - base;
		diff = uaddr - *addr;
	}

	*prot |= (ph->p_flags & PF_R) ? VM_PROT_READ : 0;
	*prot |= (ph->p_flags & PF_W) ? VM_PROT_WRITE : 0;
	*prot |= (ph->p_flags & PF_X) ? VM_PROT_EXECUTE : 0;

	/* Pull the file offset and sizes back to page boundaries. */
	msize = ph->p_memsz + diff;
	offset = ph->p_offset - bdiff;
	lsize = ph->p_filesz + bdiff;
	psize = round_page(lsize);

	/*
	 * Because the pagedvn pager can't handle zero fill of the last
	 * data page if it's not page aligned we map the last page readvn.
	 */
	if (ph->p_flags & PF_W) {
		psize = trunc_page(lsize);
		if (psize > 0)
			NEW_VMCMD2(vcset, vmcmd_map_pagedvn, psize, base,
			    vp, offset, *prot, flags);
		if (psize != lsize) {
			NEW_VMCMD2(vcset, vmcmd_map_readvn, lsize - psize,
			    base + psize, vp, offset + psize, *prot, flags);
		}
	} else {
		NEW_VMCMD2(vcset, vmcmd_map_pagedvn, psize, base, vp,
		    offset, *prot, flags);
	}

	/*
	 * Check if we need to extend the size of the segment (bss /
	 * zero-fill beyond the end of the file data).
	 */
	rm = round_page(*addr + ph->p_memsz + diff);
	rf = round_page(*addr + ph->p_filesz + diff);

	if (rm != rf) {
		NEW_VMCMD2(vcset, vmcmd_map_zero, rm - rf, rf, NULLVP,
		    0, *prot, flags);
	}
	*size = msize;
}