hugetlbfs: remove call to huge_pte_alloc without i_mmap_rwsem
author Mike Kravetz <mike.kravetz@oracle.com>
Wed, 12 Aug 2020 01:31:38 +0000 (18:31 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 21 Aug 2020 11:07:27 +0000 (13:07 +0200)
commit 34ae204f18519f0920bd50a644abd6fefc8dbfcf upstream.

Commit c0d0381ade79 ("hugetlbfs: use i_mmap_rwsem for more pmd sharing
synchronization") requires callers of huge_pte_alloc to hold i_mmap_rwsem
in at least read mode.  This is because the explicit locking in
huge_pmd_share (called by huge_pte_alloc) was removed.  When restructuring
the code, the call to huge_pte_alloc in the else block at the beginning of
hugetlb_fault was missed.
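
Callers are expected to wrap the allocation in the i_mmap_rwsem helpers
from include/linux/fs.h, roughly as follows.  This is a minimal sketch of
the convention only; variable names such as haddr and h are illustrative
and not lifted verbatim from any one caller:

	mapping = vma->vm_file->f_mapping;
	i_mmap_lock_read(mapping);
	ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
	if (!ptep) {
		i_mmap_unlock_read(mapping);
		return VM_FAULT_OOM;
	}
	/* ... fault handling that uses ptep ... */
	i_mmap_unlock_read(mapping);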

Unfortunately, that else clause is exercised when there is no page table
entry.  This will likely lead to a call to huge_pmd_share.  If
huge_pmd_share thinks pmd sharing is possible, it will traverse the
mapping tree (i_mmap) without holding i_mmap_rwsem.  If someone else is
concurrently modifying the tree, bad things such as addressing exceptions,
or worse, could happen.
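
The traversal in question is the interval tree walk in huge_pmd_share(),
abridged here for illustration; nothing in this loop is safe against a
concurrent writer unless i_mmap_rwsem is held:

	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;
		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr)
			spte = huge_pte_offset(svma->vm_mm, saddr,
					       vma_mmu_pagesize(svma));
		/* ... take a reference on the shared page table ... */
	}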

Simply remove the else clause; it should have been removed when the code
was restructured.  The code following the else already takes i_mmap_rwsem
in read mode before calling huge_pte_alloc, as sketched above, so it
provides the appropriate locking.

To prevent this type of issue in the future, add routines to assert that
i_mmap_rwsem is held, and call these routines in huge pmd sharing
routines.
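
Note that the new asserts are lockdep based: lockdep_assert_held() and
lockdep_assert_held_write() compile to no-ops when CONFIG_LOCKDEP is not
set, so the checks only fire on lockdep-enabled debug kernels and add no
overhead to production builds.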

Fixes: c0d0381ade79 ("hugetlbfs: use i_mmap_rwsem for more pmd sharing synchronization")
Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Kirill A.Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Prakash Sangappa <prakash.sangappa@oracle.com>
Cc: <stable@vger.kernel.org>
Link: http://lkml.kernel.org/r/e670f327-5cf9-1959-96e4-6dc7cc30d3d5@oracle.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/linux/fs.h
include/linux/hugetlb.h
mm/hugetlb.c
mm/rmap.c

index 45cc10cdf6ddd760aeadc92d255f65b132ed67cc..db58786c660bfc1295e4501baeedd071a0bdc8e2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -546,6 +546,16 @@ static inline void i_mmap_unlock_read(struct address_space *mapping)
        up_read(&mapping->i_mmap_rwsem);
 }
 
+static inline void i_mmap_assert_locked(struct address_space *mapping)
+{
+       lockdep_assert_held(&mapping->i_mmap_rwsem);
+}
+
+static inline void i_mmap_assert_write_locked(struct address_space *mapping)
+{
+       lockdep_assert_held_write(&mapping->i_mmap_rwsem);
+}
+
 /*
  * Might pages of this file be mapped into userspace?
  */
index 43a1cef8f0f163b0102badf965098c38129e1f23..214f509bcb88fa9e1ce496794677d4bb7bef5eb3 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -165,7 +165,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz);
 pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+                               unsigned long *addr, pte_t *ptep);
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -204,8 +205,9 @@ static inline struct address_space *hugetlb_page_mapping_lock_write(
        return NULL;
 }
 
-static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
-                                       pte_t *ptep)
+static inline int huge_pmd_unshare(struct mm_struct *mm,
+                                       struct vm_area_struct *vma,
+                                       unsigned long *addr, pte_t *ptep)
 {
        return 0;
 }
index 7a09ed981e7f7ee472efff8ef2b4b3d20bb42aa4..e4599bc61e71876ea666cbbb63fbaae260e93bf6 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3840,7 +3840,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                        continue;
 
                ptl = huge_pte_lock(h, mm, ptep);
-               if (huge_pmd_unshare(mm, &address, ptep)) {
+               if (huge_pmd_unshare(mm, vma, &address, ptep)) {
                        spin_unlock(ptl);
                        /*
                         * We just unmapped a page of PMDs by clearing a PUD.
@@ -4427,10 +4427,6 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
                        return VM_FAULT_HWPOISON_LARGE |
                                VM_FAULT_SET_HINDEX(hstate_index(h));
-       } else {
-               ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
-               if (!ptep)
-                       return VM_FAULT_OOM;
        }
 
        /*
@@ -4907,7 +4903,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                if (!ptep)
                        continue;
                ptl = huge_pte_lock(h, mm, ptep);
-               if (huge_pmd_unshare(mm, &address, ptep)) {
+               if (huge_pmd_unshare(mm, vma, &address, ptep)) {
                        pages++;
                        spin_unlock(ptl);
                        shared_pmd = true;
@@ -5288,12 +5284,14 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
  * returns: 1 successfully unmapped a shared pte page
  *         0 the underlying pte page is not shared, or it is the last user
  */
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+                                       unsigned long *addr, pte_t *ptep)
 {
        pgd_t *pgd = pgd_offset(mm, *addr);
        p4d_t *p4d = p4d_offset(pgd, *addr);
        pud_t *pud = pud_offset(p4d, *addr);
 
+       i_mmap_assert_write_locked(vma->vm_file->f_mapping);
        BUG_ON(page_count(virt_to_page(ptep)) == 0);
        if (page_count(virt_to_page(ptep)) == 1)
                return 0;
@@ -5311,7 +5309,8 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
        return NULL;
 }
 
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+                               unsigned long *addr, pte_t *ptep)
 {
        return 0;
 }
index f79a206b271a66e7a7b63491c893c8ca9d37d3c5..f3c5562bc5f407580102396e67c98886150b29e6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1458,7 +1458,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                         * do this outside rmap routines.
                         */
                        VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
-                       if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
+                       if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
                                /*
                                 * huge_pmd_unshare unmapped an entire PMD
                                 * page.  There is no way of knowing exactly