hugetlbfs: flush TLBs correctly after huge_pmd_unshare
authorNadav Amit <namit@vmware.com>
Sun, 21 Nov 2021 20:40:07 +0000 (12:40 -0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 8 Dec 2021 07:45:03 +0000 (08:45 +0100)
commit a4a118f2eead1d6c49e00765de89878288d4b890 upstream.

When a call from __unmap_hugepage_range() to huge_pmd_unshare()
succeeds, a TLB flush is missing.  This TLB flush must be performed
before releasing the i_mmap_rwsem, in order to prevent an unshared
PMDs page from being released and reused before the TLB flush takes
place.
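
To make the window concrete, the race looks roughly as follows (a
simplified timeline, not a literal trace; "CPU 1" stands for any other
process that shares the same PMDs page):

  CPU 0 (__unmap_hugepage_range)      CPU 1 (another sharer)
  ------------------------------      ----------------------
  huge_pmd_unshare()
    pud_clear(); put_page()
  <no TLB flush recorded>
  i_mmap_rwsem released
                                      huge_pmd_unshare() drops the
                                      last reference; the PMDs page
                                      is freed and may be reused
  stale TLB entries still translate
  through the old PMDs page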

Arguably, a comprehensive solution would use the mmu_gather interface
to batch the TLB flushes and the release of the PMDs page; however,
that is not an easy solution: (1) try_to_unmap_one() and
try_to_migrate_one() also call huge_pmd_unshare() and they cannot use
the mmu_gather interface; and (2) deferring the release of the page
reference for the PMDs page until after i_mmap_rwsem is dropped can
confuse huge_pmd_unshare() into thinking PMDs are shared when they are
not.
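
For reference, huge_pmd_unshare() detects sharing through the
reference count of the PMDs page, roughly as follows (a sketch of the
logic at the time; details differ across kernel versions):

  int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
  {
          pud_t *pud = pud_offset(pgd_offset(mm, *addr), *addr);

          /* A PMDs page with a single remaining reference is exclusive. */
          if (page_count(virt_to_page(ptep)) == 1)
                  return 0;

          pud_clear(pud);
          put_page(virt_to_page(ptep));   /* drop this mm's reference */
          *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
          return 1;
  }

A deferred extra reference on the PMDs page would keep page_count()
above 1 and make an exclusive page look shared.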

Fix __unmap_hugepage_range() by adding the missing TLB flush and
forcing a flush when unsharing succeeds.
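
The resulting pattern is roughly the following (a condensed sketch of
the mm/hugetlb.c hunks below, with locking and mmu notifier details
elided):

  bool force_flush = false;

  for (address = start; address < end; address += sz) {
          ...
          if (huge_pmd_unshare(mm, &address, ptep)) {
                  /*
                   * The unshared PMDs page covered a whole PUD_SIZE-
                   * aligned region; the recorded flush range must
                   * cover all of it.
                   */
                  tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
                  force_flush = true;
                  continue;
          }
          ...
  }

  /* Flush before the caller can drop i_mmap_rwsem. */
  if (force_flush)
          tlb_flush_mmu(tlb);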

Fixes: 24669e58477e ("hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages") # 3.6
Signed-off-by: Nadav Amit <namit@vmware.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/arm/include/asm/tlb.h
arch/ia64/include/asm/tlb.h
arch/s390/include/asm/tlb.h
arch/sh/include/asm/tlb.h
arch/um/include/asm/tlb.h
include/asm-generic/tlb.h
mm/hugetlb.c
mm/memory.c

diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 1e25cd80589e7370eee54dedb18d355d3bf66e48..1cee2d54095663f5fb3aacc3abf6cb120b620375 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -278,6 +278,14 @@ tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr
        tlb_add_flush(tlb, addr);
 }
 
+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+                   unsigned long size)
+{
+       tlb_add_flush(tlb, address);
+       tlb_add_flush(tlb, address + size - PMD_SIZE);
+}
+
 #define pte_free_tlb(tlb, ptep, addr)  __pte_free_tlb(tlb, ptep, addr)
 #define pmd_free_tlb(tlb, pmdp, addr)  __pmd_free_tlb(tlb, pmdp, addr)
 #define pud_free_tlb(tlb, pudp, addr)  pud_free((tlb)->mm, pudp)
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 77e541cf0e5d560b4325511f2c18d050d2ca5ddf..34f4a53595619e5995f3bd21db1ebcca32cd4f58 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -272,6 +272,16 @@ __tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long addre
        tlb->end_addr = address + PAGE_SIZE;
 }
 
+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+                   unsigned long size)
+{
+       if (tlb->start_addr > address)
+               tlb->start_addr = address;
+       if (tlb->end_addr < address + size)
+               tlb->end_addr = address + size;
+}
+
 #define tlb_migrate_finish(mm) platform_tlb_migrate_finish(mm)
 
 #define tlb_start_vma(tlb, vma)                        do { } while (0)
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 15711de1040358098fd864caad1d1f3b472b2301..d2681d5a3d5a08730b7b741e291bd10308d71304 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -116,6 +116,20 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
        return tlb_remove_page(tlb, page);
 }
 
+static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
+                               unsigned long address, unsigned long size)
+{
+       /*
+        * The range might exceed the original range that was provided to
+        * tlb_gather_mmu(), so we need to update it despite the fact it is
+        * usually not updated.
+        */
+       if (tlb->start > address)
+               tlb->start = address;
+       if (tlb->end < address + size)
+               tlb->end = address + size;
+}
+
 /*
  * pte_free_tlb frees a pte table and clears the CRSTE for the
  * page table from the tlb.
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 025cdb1032f6f9db7f30cfa54ccf10047772ee83..9f6ab2cd10fc8d566e439071922ad39704bc5649 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -115,6 +115,16 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
        return __tlb_remove_page(tlb, page);
 }
 
+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+                   unsigned long size)
+{
+       if (tlb->start > address)
+               tlb->start = address;
+       if (tlb->end < address + size)
+               tlb->end = address + size;
+}
+
 static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
                                         struct page *page)
 {
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 821ff0acfe17f7f9e4264be04d7d346731bd6b4e..6fb47b17179fff13847bf81448d8da0efec10c99 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -128,6 +128,18 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
        return tlb_remove_page(tlb, page);
 }
 
+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+                   unsigned long size)
+{
+       tlb->need_flush = 1;
+
+       if (tlb->start > address)
+               tlb->start = address;
+       if (tlb->end < address + size)
+               tlb->end = address + size;
+}
+
 /**
  * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
  *
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index c6d6671876080e9bdc48b820bb5b5d8d362a470b..e9851100c0f7e0493ae5087d0553bae0ad57fef2 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -123,6 +123,8 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
                                                        unsigned long end);
 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
                                   int page_size);
+void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+                        unsigned long size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
                                      unsigned long address)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index de89e9295f6c55b63997c71d405f1696b08f1d7b..7d51211995b942eb4ec463a96227af73259e772e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3395,6 +3395,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
        unsigned long sz = huge_page_size(h);
        const unsigned long mmun_start = start; /* For mmu_notifiers */
        const unsigned long mmun_end   = end;   /* For mmu_notifiers */
+       bool force_flush = false;
 
        WARN_ON(!is_vm_hugetlb_page(vma));
        BUG_ON(start & ~huge_page_mask(h));
@@ -3411,6 +3412,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                ptl = huge_pte_lock(h, mm, ptep);
                if (huge_pmd_unshare(mm, &address, ptep)) {
                        spin_unlock(ptl);
+                       tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
+                       force_flush = true;
                        continue;
                }
 
@@ -3467,6 +3470,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
        }
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        tlb_end_vma(tlb, vma);
+
+       /*
+        * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
+        * could defer the flush until now, since by holding i_mmap_rwsem we
+        * guaranteed that the last reference would not be dropped. But we must
+        * do the flushing before we return, as otherwise i_mmap_rwsem will be
+        * dropped and the last reference to the shared PMDs page might be
+        * dropped as well.
+        *
+        * In theory we could defer the freeing of the PMD pages as well, but
+        * huge_pmd_unshare() relies on the exact page_count for the PMD page to
+        * detect sharing, so we cannot defer the release of the page either.
+        * Instead, do the flush now.
+        */
+       if (force_flush)
+               tlb_flush_mmu(tlb);
 }
 
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
diff --git a/mm/memory.c b/mm/memory.c
index be592d434ad89183ac39e20b2fbc2715397cf22b..c2890dc104d9e925f486f8a4156f3764099ca0a7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -320,6 +320,22 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
        return false;
 }
 
+void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+                        unsigned long size)
+{
+       if (tlb->page_size != 0 && tlb->page_size != PMD_SIZE)
+               tlb_flush_mmu(tlb);
+
+       tlb->page_size = PMD_SIZE;
+       tlb->start = min(tlb->start, address);
+       tlb->end = max(tlb->end, address + size);
+       /*
+        * Track the last address with which we adjusted the range. This
+        * will be used later to adjust the range again after an mmu_flush
+        * due to a failed __tlb_remove_page().
+        */
+       tlb->addr = address + size - PMD_SIZE;
+}
 #endif /* HAVE_GENERIC_MMU_GATHER */
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE