/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);

	/*
	 * Single-threaded tasks need not iterate the entire
	 * list of processes. We can also skip the flushing,
	 * since the mm's seqnum was increased and we don't
	 * have to worry about other threads' seqnums. Current's
	 * flush will occur upon the next lookup.
	 */
	if (atomic_read(&mm->mm_users) == 1)
		return;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the
		 * mm seqnum is already set and curr's will
		 * be set upon invalidation when the next
		 * lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}
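For comparison, the same function without the single-user early return, so every thread sharing the mm is always walked: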
/*
 * Flush vma caches for threads that share a given mm.
 *
 * The operation is safe because the caller holds the mmap_sem
 * exclusively and other threads accessing the vma cache will
 * have mmap_sem held at least for read, so no extra locking
 * is required to maintain the vma cache.
 */
void vmacache_flush_all(struct mm_struct *mm)
{
	struct task_struct *g, *p;

	rcu_read_lock();
	for_each_process_thread(g, p) {
		/*
		 * Only flush the vmacache pointers as the
		 * mm seqnum is already set and curr's will
		 * be set upon invalidation when the next
		 * lookup is done.
		 */
		if (mm == p->mm)
			vmacache_flush(p);
	}
	rcu_read_unlock();
}
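Both variants lean on the same invalidation scheme: bumping the mm's sequence number invalidates every task's per-thread cache at once, and each task resynchronizes and flushes lazily on its next lookup. Below is a minimal userspace sketch of that pattern; all names here (shared_state, local_cache, invalidate_all, cache_valid) are illustrative stand-ins, not kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define CACHE_SLOTS 4

/* Illustrative stand-ins for the shared and per-task state. */
struct shared_state { uint32_t seqnum; };	/* like mm->vmacache_seqnum */
struct local_cache  { uint32_t seqnum; void *slot[CACHE_SLOTS]; };

/* Invalidate every local cache at once by bumping the shared counter. */
static void invalidate_all(struct shared_state *s)
{
	s->seqnum++;	/* stale caches flush themselves on their next lookup */
}

/* Run at lookup time: flush lazily when the counters disagree. */
static bool cache_valid(struct local_cache *c, struct shared_state *s)
{
	if (c->seqnum != s->seqnum) {
		c->seqnum = s->seqnum;			/* resynchronize */
		memset(c->slot, 0, sizeof(c->slot));	/* like vmacache_flush() */
		return false;				/* one forced miss */
	}
	return true;
}

In this sketch an invalidation is O(1); each cache pays for it with a single forced miss the next time it is consulted.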
static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that.
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	/* initialize the new vmacache entries */
	vmacache_flush(tsk);

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}
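The function reduces to two outcomes: CLONE_VM callers (new threads) pin and share the parent's mm, while plain forks get an independent duplicate from dup_mm(). A compact model of that branch, under simplified assumptions (struct mm, copy_mm_model, and the users field are hypothetical stand-ins for mm_struct, copy_mm(), and mm_users):

#include <stdlib.h>

struct mm { int users; /* address-space details elided */ };

/* Two outcomes, as in copy_mm(): share the mm on CLONE_VM, else duplicate. */
static struct mm *copy_mm_model(struct mm *oldmm, int clone_vm)
{
	if (!oldmm)		/* kernel thread: no userspace mm to copy */
		return NULL;

	if (clone_vm) {		/* new thread: pin and share the existing mm */
		oldmm->users++;	/* stands in for atomic_inc(&oldmm->mm_users) */
		return oldmm;
	}

	/* fork: the child gets its own copy, as dup_mm() would construct */
	struct mm *mm = malloc(sizeof(*mm));
	if (mm)
		mm->users = 1;
	return mm;		/* NULL maps to the -ENOMEM failure path */
}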
static bool vmacache_valid(struct mm_struct *mm)
{
	struct task_struct *curr;

	if (!vmacache_valid_mm(mm))
		return false;

	curr = current;
	if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
		/*
		 * The first attempt will always be invalid;
		 * initialize the new cache for this task here.
		 */
		curr->vmacache_seqnum = mm->vmacache_seqnum;
		vmacache_flush(curr);
		return false;
	}
	return true;
}
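In the kernel, vmacache_valid() is consumed by the lookup path (vmacache_find() and friends): a task whose sequence number lags takes one forced miss, during which its cache is resynchronized, and later lookups hit again. A rough sketch of such a consumer, reusing the illustrative types from the earlier sketch; cache_find() only approximates the shape of the real lookup, not its API.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define CACHE_SLOTS 4
struct shared_state { uint32_t seqnum; };
struct local_cache  { uint32_t seqnum; void *slot[CACHE_SLOTS]; };

/* Mirrors vmacache_valid(): resync and flush on a counter mismatch. */
static bool cache_valid(struct local_cache *c, struct shared_state *s)
{
	if (c->seqnum != s->seqnum) {
		c->seqnum = s->seqnum;
		memset(c->slot, 0, sizeof(c->slot));
		return false;
	}
	return true;
}

/* Lookup shape: validity gate first, then a small linear scan. */
static void *cache_find(struct local_cache *c, struct shared_state *s, void *key)
{
	if (!cache_valid(c, s))
		return NULL;			/* stale: miss once, now resynced */

	for (size_t i = 0; i < CACHE_SLOTS; i++)
		if (c->slot[i] == key)
			return c->slot[i];	/* hit */
	return NULL;				/* miss: caller repopulates a slot */
}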