/* * merge the list of memory segments if possible. Redundant vm_area_structs are freed. This assumes that the list is ordered by address. * we don't need to traverse the entire list, only those segments which intersect or are adjacent to a given interval. */ void merge_segments (struct task_struct * task, unsigned long start_addr, unsigned long end_addr) { struct vm_area_struct *prev, *mpnt, *next; mpnt = find_vma(task, start_addr); if(!mpnt) return; avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next); if(!prev) { prev = mpnt; mpnt = next; } for(; mpnt && prev->vm_start < end_addr; prev = mpnt, mpnt = next) { next = mpnt->vm_next; if(mpnt->vm_inode != prev->vm_inode) continue; if(mpnt->vm_pte != prev->vm_pte) continue; if(mpnt->vm_ops != prev->vm_ops) continue; if(mpnt->vm_flags != prev->vm_flags) continue; if(mpnt->vm_start != prev->vm_end) continue; if((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM)) { if(prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset) continue; } avl_remove(mpnt, &task->mm->mmap_avl); prev->vm_end = mpnt->vm_end; prev->vm_next = mpnt->vm_next; if(mpnt->vm_ops && mpnt->vm_ops->close) { mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start; mpnt->vm_start = mpnt->vm_end; mpnt->vm_ops->close(mpnt); } remove_shared_vm_struct(mpnt); if(mpnt->vm_inode) mpnt->vm_inode->i_count++; kfree_s(mpnt,sizeof(*mpnt)); mpnt = prev; } return; }
unsigned long do_munmap(unsigned long addr, int len) { struct vm_area_struct *mpnt, *prev, *next, **npp, *free; if((addr & ~PAGE_MASK) || (addr > PAGE_OFFSET) || (addr + len) > PAGE_OFFSET) return -EINVAL; if((len = PAGE_ALIGN(len)) == 0) return 0; mpnt = find_vma(current, addr); if(!mpnt) return 0; avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next); npp = (prev? &prev->vm_next: ¤t->mm->mmap); free = NULL; for(; mpnt && mpnt->vm_start < addr + len; mpnt = *npp) { *npp = mpnt->vm_next; mpnt->vm_next = free; free = mpnt; avl_remove(mpnt, ¤t->mm->mmap_avl); } if(free == NULL) return 0; while(free) { unsigned long st, end; mpnt = free; free = free->vm_next; remove_shared_vm_struct(mpnt); st = addr < mpnt->vm_start?mpnt->vm_start:addr; end = addr + len; end = end > mpnt->vm_end? mpnt->vm_end:end; if(mpnt->vm_ops && mpnt->vm_ops->unmap) mpnt->vm_ops->unmap(mpnt, st, end-st); unmap_fixup(mpnt, st, end-st); kfree(mpnt); } unmap_page_range(addr, len); return 0; }
/*
 * Split any vm_area_structs that straddle the boundaries of the range
 * [addr, addr+len), so that afterwards the range is covered by whole
 * vmas only.  A vma crossing the lower boundary is split into
 * [vm_start, addr) + [addr, ...); one crossing the upper boundary into
 * [..., addr+len) + [addr+len, vm_end).
 *
 * Returns 0 on success, -ENOMEM if a new vm_area_struct cannot be
 * allocated.
 */
int osfmach3_split_vm_struct(
	unsigned long	addr,
	size_t		len)
{
	struct mm_struct	*mm;
	struct vm_area_struct	*vmp, *prev, *next, *new_vmp;
	unsigned long		start, end;

	mm = current->mm;
	start = addr;
	end = addr + len;
	for (vmp = find_vma(mm, addr);
	     vmp && vmp->vm_start < end;
	     vmp = vmp->vm_next) {
		avl_neighbours(vmp, mm->mmap_avl, &prev, &next);
#ifdef	VMA_DEBUG
		if (vma_debug) {
			printk("VMA: split(addr=%lx,len=%lx) "
			       "vmp=(s=%lx,e=%lx,o=%lx)\n",
			       addr, (unsigned long) len,
			       vmp->vm_start, vmp->vm_end, vmp->vm_offset);
		}
#endif	/* VMA_DEBUG */
		if (vmp->vm_start < start) {
			/*
			 * vmp crosses the lower boundary: carve off a
			 * head vma covering [vm_start, start).
			 */
			new_vmp = (struct vm_area_struct *)
				kmalloc(sizeof *new_vmp, GFP_KERNEL);
			if (!new_vmp) {
				printk("osfmach3_split_vm_struct: no memory\n");
				return -ENOMEM;
			}
			*new_vmp = *vmp;
			if (new_vmp->vm_inode) {
				struct vm_area_struct *share;

				/* The new vma holds its own inode ref. */
				new_vmp->vm_inode->i_count++;
				/* Link new_vmp into the inode's share ring. */
				share = vmp->vm_inode->i_mmap;
				ASSERT(share);
				new_vmp->vm_next_share = share->vm_next_share;
				new_vmp->vm_next_share->vm_prev_share = new_vmp;
				share->vm_next_share = new_vmp;
				new_vmp->vm_prev_share = share;
				/* take an extra reference on the mem_obj */
				inode_pager_setup(new_vmp->vm_inode);
			}
			if (new_vmp->vm_ops && new_vmp->vm_ops->open) {
				/* open() on an empty region. XXX ? */
				new_vmp->vm_end = new_vmp->vm_start;
				new_vmp->vm_ops->open(new_vmp);
			}
			new_vmp->vm_end = start;
			new_vmp->vm_next = vmp;
			/* vmp now starts at `start'; advance its offset. */
			vmp->vm_offset += start - vmp->vm_start;
			vmp->vm_start = start;
			if (prev)
				prev->vm_next = new_vmp;
			else
				mm->mmap = new_vmp;
			avl_insert(new_vmp, &mm->mmap_avl);
#ifdef	VMA_DEBUG
			if (vma_debug) {
				printk("VMA: split: "
				       "new_vmp(s=%lx,e=%lx,o=%lx) -> "
				       "vmp(s=%lx,e=%lx,o=%lx)\n",
				       new_vmp->vm_start,
				       new_vmp->vm_end,
				       new_vmp->vm_offset,
				       vmp->vm_start,
				       vmp->vm_end,
				       vmp->vm_offset);
			}
#endif	/* VMA_DEBUG */
		}
		if (vmp->vm_end > end) {
			/*
			 * vmp crosses the upper boundary: carve off a
			 * tail vma covering [end, vm_end).
			 */
			new_vmp = (struct vm_area_struct *)
				kmalloc(sizeof *new_vmp, GFP_KERNEL);
			if (!new_vmp) {
				printk("osfmach3_split_vm_struct: no memory\n");
				return -ENOMEM;
			}
			*new_vmp = *vmp;
			if (new_vmp->vm_inode) {
				struct vm_area_struct *share;

				/* The new vma holds its own inode ref. */
				new_vmp->vm_inode->i_count++;
				/* Link new_vmp into the inode's share ring. */
				share = vmp->vm_inode->i_mmap;
				ASSERT(share);
				new_vmp->vm_next_share = share->vm_next_share;
				new_vmp->vm_next_share->vm_prev_share = new_vmp;
				share->vm_next_share = new_vmp;
				new_vmp->vm_prev_share = share;
				/* take an extra reference on the mem_obj */
				inode_pager_setup(new_vmp->vm_inode);
			}
			if (new_vmp->vm_ops && new_vmp->vm_ops->open) {
				/* open() on an empty region at vm_end. */
				new_vmp->vm_start = new_vmp->vm_end;
				new_vmp->vm_offset = vmp->vm_offset +
					(vmp->vm_end - vmp->vm_start);
				new_vmp->vm_ops->open(new_vmp);
			}
			new_vmp->vm_start = end;
			/*
			 * BUGFIX: the tail maps the backing object at
			 * `end', so advance by (end - vm_start); the old
			 * code used (start - vm_start), which aliased the
			 * head's data (zero delta once vm_start == start).
			 */
			new_vmp->vm_offset = vmp->vm_offset +
				(end - vmp->vm_start);
			vmp->vm_end = end;
			vmp->vm_next = new_vmp;
			avl_insert(new_vmp, &mm->mmap_avl);
#ifdef	VMA_DEBUG
			if (vma_debug) {
				printk("VMA: split: "
				       "vmp(s=%lx,e=%lx,o=%lx) -> "
				       "new_vmp(s=%lx,e=%lx,o=%lx)\n",
				       vmp->vm_start,
				       vmp->vm_end,
				       vmp->vm_offset,
				       new_vmp->vm_start,
				       new_vmp->vm_end,
				       new_vmp->vm_offset);
			}
#endif	/* VMA_DEBUG */
			vmp = new_vmp;
		}
	}
	return 0;
}