/*
 * We don't use the ordinary setup_arg_pages() because SVR3 has a
 * different stack start address.
 */
static unsigned long svr3_setup_arg_pages (unsigned long p, struct linux_binprm * bprm)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	int i;

	stack_base = SVR3_STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
	p += stack_base;
	mpnt = (struct vm_area_struct *) kmalloc (sizeof (*mpnt), GFP_KERNEL);
	if (mpnt) {
		mpnt->vm_mm = current->mm;
		mpnt->vm_start = PAGE_MASK & (unsigned long) p;
		mpnt->vm_end = SVR3_STACK_TOP;
		mpnt->vm_page_prot = PAGE_COPY;
		mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_ops = NULL;
		mpnt->vm_offset = 0;
		mpnt->vm_inode = NULL;
		mpnt->vm_pte = 0;
		insert_vm_struct (current->mm, mpnt);
		current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
	}
	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		if (bprm->page[i]) {
			current->mm->rss++;
			put_dirty_page(current, bprm->page[i], stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	return p;
}
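The address arithmetic in svr3_setup_arg_pages is the same trick used by every variant below, so a worked example may help. This sketch assumes the usual values MAX_ARG_PAGES = 32 and PAGE_SIZE = 4096; SVR3_STACK_TOP is left symbolic:

	/* Worked example of the relocation above (assumed values:
	 * MAX_ARG_PAGES = 32, PAGE_SIZE = 4096):
	 *
	 *   stack_base = SVR3_STACK_TOP - 32*4096;  // VA of the lowest arg page
	 *   p += stack_base;                        // offset into arg pages -> user VA
	 *
	 * The VMA then spans [p & PAGE_MASK, SVR3_STACK_TOP): the partially
	 * filled first page plus every page above it, up to the stack top.
	 */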
int ia32_setup_arg_pages(struct linux_binprm *bprm)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	int i, ret;

	stack_base = IA32_STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
	bprm->p += stack_base;
	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	down_write(&current->mm->mmap_sem);
	{
		mpnt->vm_mm = current->mm;
		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
		mpnt->vm_end = IA32_STACK_TOP;
		mpnt->vm_flags = vm_stack_flags32;
		mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC) ?
			PAGE_COPY_EXEC : PAGE_COPY;
		mpnt->vm_ops = NULL;
		mpnt->vm_pgoff = 0;
		mpnt->vm_file = NULL;
		mpnt->vm_private_data = (void *) 0;
		if ((ret = insert_vm_struct(current->mm, mpnt))) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, mpnt);
			return ret;
		}
		current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
	}

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			current->mm->rss++;
			put_dirty_page(current, page, stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&current->mm->mmap_sem);

	return 0;
}
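Note that this variant's contract differs from the classic one: it returns 0 or a negative errno and adjusts bprm->p in place instead of returning the relocated pointer. A sketch of the calling convention, in the style of an ELF compat loader; the surrounding context and the SIGKILL on failure are assumptions, not taken from the source:

	/* Call-site sketch (illustrative, not from the source): bprm->p is
	 * updated in place, so only the error code is checked. */
	int retval = ia32_setup_arg_pages(bprm);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);	/* past the point of no return */
		return retval;
	}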
unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	int i;

	stack_base = STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
	p += stack_base;
	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (mpnt) {
		mpnt->vm_mm = current->mm;
		mpnt->vm_start = PAGE_MASK & (unsigned long) p;
		mpnt->vm_end = STACK_TOP;
		mpnt->vm_page_prot = PAGE_COPY;
		mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_ops = NULL;
		mpnt->vm_offset = 0;
		mpnt->vm_inode = NULL;
		mpnt->vm_pte = 0;
		insert_vm_struct(current->mm, mpnt);
		current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
		for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
			if (bprm->page[i]) {
				current->mm->rss++;
				put_dirty_page(current, bprm->page[i], stack_base);
			}
			stack_base += PAGE_SIZE;
		}
	} else {
		/*
		 * This one is tricky. We are already in the new context, so we
		 * cannot return with -ENOMEM. So we _have_ to deallocate the
		 * argument pages here; if there is no VMA they won't be freed
		 * at exit_mmap() -> memory leak.
		 *
		 * User space then gets a SIGSEGV when it tries to access the
		 * argument pages.
		 */
		for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
			if (bprm->page[i]) {
				free_page(bprm->page[i]);
				bprm->page[i] = 0;
			}
		}
	}
	return p;
}
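The classic contract threads p through the call: the caller passes the offset into the argument pages and gets back an absolute user address. A minimal call-site sketch, consistent with how binfmt loaders of this era used it (the surrounding loader is assumed):

	/* Minimal call-site sketch (assumed context, not from the source). */
	p = setup_arg_pages(p, bprm);
	current->mm->start_stack = p;	/* args now sit just below STACK_TOP */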
int ia32_setup_arg_pages(struct linux_binprm *bprm)
{
	unsigned long stack_base;
	struct _rde *mpnt;
	int i, ret;

/* Only one definition may be active; the wider value is used here. */
//#define IA32_STACK_TOP1 STACK_TOP
#define IA32_STACK_TOP1 0x7ffffffffffe0000

	stack_base = IA32_STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
	bprm->p += stack_base;
	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	down_write(&current->mm->mmap_sem);
	{
		mpnt->rde$pq_start_va = PAGE_MASK & (unsigned long) bprm->p;
		mpnt->rde$q_region_size = IA32_STACK_TOP1 - (unsigned long) mpnt->rde$pq_start_va;
		mpnt->rde$r_regprot.regprt$l_region_prot =
			_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED; /* was: PAGE_COPY */
		mpnt->rde$l_flags = vm_stack_flags32;
		insrde(mpnt, &current->pcb$l_phd->phd$ps_p0_va_list_flink);
		//flush_tlb_range(current->mm, mpnt->rde$pq_start_va, mpnt->rde$pq_start_va + PAGE_SIZE);
	}

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			current->mm->rss++;
			put_dirty_page(current, page, stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&current->mm->mmap_sem);

	return 0;
}
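This port swaps the generic vm_area_struct for a VMS-style region descriptor entry (struct _rde). The correspondence is not spelled out in the source, but it can be read off the assignments; treat the following comment block as an inference, not an authoritative mapping:

	/* Inferred field correspondence (assumption, read off the code above):
	 *
	 *   rde$pq_start_va                     <->  vm_start
	 *   rde$q_region_size                   <->  vm_end - vm_start
	 *   rde$r_regprot.regprt$l_region_prot  <->  vm_page_prot
	 *   rde$l_flags                         <->  vm_flags
	 *   insrde(mpnt, &current->pcb$l_phd->phd$ps_p0_va_list_flink)
	 *                                       <->  insert_vm_struct(current->mm, mpnt)
	 */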