mm: get rid of vmacache_flush_all() entirely
author	Linus Torvalds <torvalds@linux-foundation.org>
Thu, 13 Sep 2018 09:57:48 +0000 (23:57 -1000)
committer	Ben Hutchings <ben@decadent.org.uk>
Tue, 25 Sep 2018 22:47:35 +0000 (23:47 +0100)
commit 7a9cdebdcc17e426fb5287e4a82db1dfe86339b2 upstream.

Jann Horn points out that the vmacache_flush_all() function is not only
potentially expensive, it's buggy too.  It also happens to be entirely
unnecessary, because the sequence number overflow case can be avoided by
simply making the sequence number be 64-bit.  That doesn't even grow the
data structures in question, because the other adjacent fields are
already 64-bit.

So simplify the whole thing by just making the sequence number overflow
case go away entirely, which gets rid of all the complications and makes
the code faster too.  Win-win.

[ Oleg Nesterov points out that the VMACACHE_FULL_FLUSHES statistic
  also just goes away entirely with this ]
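
[ A quick sanity check of why 64 bits puts the wrap case out of
  reach -- a minimal sketch, assuming a deliberately absurd worst
  case of one invalidation per nanosecond:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Assumed worst case: one vmacache_invalidate() per
		 * nanosecond, i.e. 1e9 increments per second. */
		const double rate = 1e9;
		const double secs_per_year = 60.0 * 60.0 * 24.0 * 365.0;

		/* Time until a u64 sequence number wraps at that rate. */
		double years = (double)UINT64_MAX / rate / secs_per_year;

		printf("u64 seqnum wraps after ~%.0f years\n", years);
		return 0;	/* prints ~585 years */
	}

  At the same rate a u32 wraps in roughly four seconds, which is why
  the overflow path existed in the first place ]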

Reported-by: Jann Horn <jannh@google.com>
Suggested-by: Will Deacon <will.deacon@arm.com>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
[bwh: Backported to 3.16: drop changes to mm debug code]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
include/linux/mm_types.h
include/linux/sched.h
include/linux/vmacache.h
mm/vmacache.c

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 489c1307f81ee43e8a70cb47311f950b251c9540..f9466c152c5d4a6fa3bfdc5b7f61d43852c1df3c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -345,7 +345,7 @@ struct kioctx_table;
 struct mm_struct {
        struct vm_area_struct *mmap;            /* list of VMAs */
        struct rb_root mm_rb;
-       u32 vmacache_seqnum;                   /* per-thread vmacache */
+       u64 vmacache_seqnum;                   /* per-thread vmacache */
 #ifdef CONFIG_MMU
        unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 34af72b8b296eec9cd964f61c53b1b685870ac6e..982f752bff30d778e94c269dbf93e8c3dce5ee8a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1291,7 +1291,7 @@ struct task_struct {
        unsigned brk_randomized:1;
 #endif
        /* per-thread vma caching */
-       u32 vmacache_seqnum;
+       u64 vmacache_seqnum;
        struct vm_area_struct *vmacache[VMACACHE_SIZE];
 #if defined(SPLIT_RSS_COUNTING)
        struct task_rss_stat    rss_stat;
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index c3fa0fd43949952957603b35e28b26ddc53fb0d3..4f58ff2dacd6985e441e61180e52984f059f4f92 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -15,7 +15,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
        memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
 }
 
-extern void vmacache_flush_all(struct mm_struct *mm);
 extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
 extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
                                                    unsigned long addr);
@@ -29,10 +28,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
 static inline void vmacache_invalidate(struct mm_struct *mm)
 {
        mm->vmacache_seqnum++;
-
-       /* deal with overflows */
-       if (unlikely(mm->vmacache_seqnum == 0))
-               vmacache_flush_all(mm);
 }
 
 #endif /* __LINUX_VMACACHE_H */
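
For context, the consumer side of the sequence number lives in
mm/vmacache.c and is untouched by this hunk: when a thread's cached
seqnum no longer matches the mm's, the thread flushes and
resynchronizes its own cache on its next lookup, which is what makes
a global flush of all threads unnecessary.  A paraphrased sketch of
that check (names per 3.16, condensed; verify against the tree):

	static bool vmacache_valid(struct mm_struct *mm)
	{
		struct task_struct *curr = current;

		/* Only the owning task may use its task-local cache. */
		if (mm != curr->mm)
			return false;

		if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
			/* First lookup after an invalidation: drop the
			 * stale entries and adopt the new seqnum. */
			curr->vmacache_seqnum = mm->vmacache_seqnum;
			vmacache_flush(curr);
			return false;
		}
		return true;
	}
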
diff --git a/mm/vmacache.c b/mm/vmacache.c
index 9f25af825dec6d929348e16db85a6c17ab00f31d..e6e6e92d0d72291d7ae3a50420757497a723c9b8 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -5,42 +5,6 @@
 #include <linux/mm.h>
 #include <linux/vmacache.h>
 
-/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
-       struct task_struct *g, *p;
-
-       /*
-        * Single threaded tasks need not iterate the entire
-        * list of process. We can avoid the flushing as well
-        * since the mm's seqnum was increased and don't have
-        * to worry about other threads' seqnum. Current's
-        * flush will occur upon the next lookup.
-        */
-       if (atomic_read(&mm->mm_users) == 1)
-               return;
-
-       rcu_read_lock();
-       for_each_process_thread(g, p) {
-               /*
-                * Only flush the vmacache pointers as the
-                * mm seqnum is already set and curr's will
-                * be set upon invalidation when the next
-                * lookup is done.
-                */
-               if (mm == p->mm)
-                       vmacache_flush(p);
-       }
-       rcu_read_unlock();
-}
-
 /*
  * This task may be accessing a foreign mm via (for example)
  * get_user_pages()->find_vma().  The vmacache is task-local and this