author     Philippe Schenker <philippe.schenker@toradex.com>   2022-05-19 16:29:13 +0200
committer  Philippe Schenker <philippe.schenker@toradex.com>   2022-05-19 16:29:13 +0200
commit     e6fb5c32f78f99682821f91b3959e222c93e4cb9
tree       3f6c281a7d83a6e3c0a153f93718f7cd9a39b6f7 /mm/hugetlb.c
parent     585dc27c53b6c1a32a7e11b724e91d7297388a13
parent     b7007e1d615b8d4b71c28ebf790c5164fc4491d5
Merge remote-tracking branch 'gh-fslc/5.4-2.3.x-imx' into toradex_5.4-2.3.x-imx
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c | 23
1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 95a32749af4d..20da6ede7704 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3589,6 +3589,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	struct hstate *h = hstate_vma(vma);
 	unsigned long sz = huge_page_size(h);
 	struct mmu_notifier_range range;
+	bool force_flush = false;
 
 	WARN_ON(!is_vm_hugetlb_page(vma));
 	BUG_ON(start & ~huge_page_mask(h));
@@ -3617,10 +3618,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		ptl = huge_pte_lock(h, mm, ptep);
 		if (huge_pmd_unshare(mm, &address, ptep)) {
 			spin_unlock(ptl);
-			/*
-			 * We just unmapped a page of PMDs by clearing a PUD.
-			 * The caller's TLB flush range should cover this area.
-			 */
+			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
+			force_flush = true;
 			continue;
 		}
 
@@ -3677,6 +3676,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	}
 	mmu_notifier_invalidate_range_end(&range);
 	tlb_end_vma(tlb, vma);
+
+	/*
+	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
+	 * could defer the flush until now, since by holding i_mmap_rwsem we
+	 * guaranteed that the last reference would not be dropped. But we must
+	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
+	 * dropped and the last reference to the shared PMDs page might be
+	 * dropped as well.
+	 *
+	 * In theory we could defer the freeing of the PMD pages as well, but
+	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
+	 * detect sharing, so we cannot defer the release of the page either.
+	 * Instead, do flush now.
+	 */
+	if (force_flush)
+		tlb_flush_mmu_tlbonly(tlb);
 }
 
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
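
To make the mechanics of the fix easier to follow, below is a small, self-contained C model of the mmu_gather bookkeeping involved. The names tlb_flush_pmd_range(), tlb_flush_mmu_tlbonly() and the start/end/cleared_pmds fields mirror include/asm-generic/tlb.h and mm/mmu_gather.c, but the bodies here are a simplified illustration for this page, not the kernel implementation. The point it demonstrates: before the patch, unsharing a PMD page left no record in the gather, so nothing guaranteed the PUD-sized range was flushed; with the patch, the range is recorded and force_flush flushes it before __unmap_hugepage_range() returns and i_mmap_rwsem can be dropped.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of struct mmu_gather range tracking (illustration only; the real
 * structure and helpers live in include/asm-generic/tlb.h and mm/mmu_gather.c).
 */
struct mmu_gather {
	unsigned long start, end;   /* accumulated virtual range to flush */
	bool cleared_pmds;          /* a PMD-level entry was torn down */
};

static void __tlb_adjust_range(struct mmu_gather *tlb,
			       unsigned long address, unsigned long size)
{
	/* Grow the pending flush window to cover [address, address + size). */
	if (address < tlb->start)
		tlb->start = address;
	if (address + size > tlb->end)
		tlb->end = address + size;
}

static void tlb_flush_pmd_range(struct mmu_gather *tlb,
				unsigned long address, unsigned long size)
{
	/*
	 * Record that a PMD-level entry covering this range was cleared, so a
	 * later flush covers it even if no individual PTEs were touched.
	 */
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = true;
}

static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	if (!tlb->cleared_pmds)
		return;
	/* Stand-in for the architecture TLB flush of [start, end). */
	printf("flush TLB range 0x%lx-0x%lx\n", tlb->start, tlb->end);
	tlb->start = ~0UL;
	tlb->end = 0;
	tlb->cleared_pmds = false;
}

int main(void)
{
	struct mmu_gather tlb = { .start = ~0UL, .end = 0 };
	bool force_flush = false;

	/* Mirror of the patched loop: an unshared PMD records a PUD-sized range. */
	tlb_flush_pmd_range(&tlb, 0x40000000UL, 0x40000000UL /* 1 GiB, PUD_SIZE on x86-64 */);
	force_flush = true;

	/* The flush happens before the function returns and the lock is released. */
	if (force_flush)
		tlb_flush_mmu_tlbonly(&tlb);
	return 0;
}

As the comment added by the patch notes, the flush cannot simply be deferred to the caller: huge_pmd_unshare() relies on the exact page_count of the PMD page to detect sharing, so the page cannot be held back, and once i_mmap_rwsem is dropped the last reference to the shared PMD page may go away while stale TLB entries still point at it.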