vma_hugecache_offset() and applying huge_page_mask() to align the
address properly.

To make vma_hugecache_offset() available outside of mm/hugetlb.c, move
it to include/linux/hugetlb.h as a static inline function.

Fixes: 60d4d2d2b40e ("userfaultfd: hugetlbfs: add __mcopy_atomic_hugetlb for huge page UFFDIO_COPY")
Reported-by: syzbot+f525fd79634858f478e7@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=f525fd79634858f478e7
Cc: stable@vger.kernel.org
Signed-off-by: Jianhui Zhou
---
 include/linux/hugetlb.h | 17 +++++++++++++++++
 mm/hugetlb.c            | 11 -----------
 mm/userfaultfd.c        |  5 ++++-
 3 files changed, 21 insertions(+), 12 deletions(-)

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 65910437be1c..3f994f3e839c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -796,6 +796,17 @@ static inline unsigned huge_page_shift(struct hstate *h)
 	return h->order + PAGE_SHIFT;
 }
 
+/*
+ * Convert the address within this vma to the page offset within
+ * the mapping, in huge page units here.
+ */
+static inline pgoff_t vma_hugecache_offset(struct hstate *h,
+			struct vm_area_struct *vma, unsigned long address)
+{
+	return ((address - vma->vm_start) >> huge_page_shift(h)) +
+			(vma->vm_pgoff >> huge_page_order(h));
+}
+
 static inline bool order_is_gigantic(unsigned int order)
 {
 	return order > MAX_PAGE_ORDER;
@@ -1197,6 +1208,12 @@ static inline unsigned int huge_page_shift(struct hstate *h)
 	return PAGE_SHIFT;
 }
 
+static inline pgoff_t vma_hugecache_offset(struct hstate *h,
+			struct vm_area_struct *vma, unsigned long address)
+{
+	return linear_page_index(vma, address);
+}
+
 static inline bool hstate_is_gigantic(struct hstate *h)
 {
 	return false;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0beb6e22bc26..b87ed652c748 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1006,17 +1006,6 @@ static long region_count(struct resv_map *resv, long f, long t)
 	return chg;
 }
 
-/*
- * Convert the address within this vma to the page offset within
- * the mapping, in huge page units here.
- */
-static pgoff_t vma_hugecache_offset(struct hstate *h,
-			struct vm_area_struct *vma, unsigned long address)
-{
-	return ((address - vma->vm_start) >> huge_page_shift(h)) +
-			(vma->vm_pgoff >> huge_page_order(h));
-}
-
 /**
  * vma_kernel_pagesize - Page size granularity for this VMA.
  * @vma: The user mapping.
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 927086bb4a3c..8efebc47a410 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -507,6 +507,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 	pgoff_t idx;
 	u32 hash;
 	struct address_space *mapping;
+	struct hstate *h;
 
 	/*
 	 * There is no default zero huge page for all huge page sizes as
@@ -564,6 +565,8 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
 		goto out_unlock;
 	}
 
+	h = hstate_vma(dst_vma);
+
 	while (src_addr < src_start + len) {
 		VM_WARN_ON_ONCE(dst_addr >= dst_start + len);
 
@@ -573,7 +576,7 @@
 		 * in the case of shared pmds. fault mutex prevents
 		 * races with other faulting threads.
 		 */
-		idx = linear_page_index(dst_vma, dst_addr);
+		idx = vma_hugecache_offset(h, dst_vma, dst_addr & huge_page_mask(h));
 		mapping = dst_vma->vm_file->f_mapping;
 		hash = hugetlb_fault_mutex_hash(mapping, idx);
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
-- 
2.43.0
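For context, below is a minimal userspace sketch (not kernel code) of the index
mismatch the patch addresses. The shifts, the mapping layout, and the helper
names small_page_index()/huge_page_index() are illustrative assumptions; they
only mirror the arithmetic of linear_page_index() and vma_hugecache_offset()
to show that a PAGE_SIZE-granular index and a huge-page-granular index disagree,
so hugetlb_fault_mutex_hash() keyed on the former in mfill_atomic_hugetlb() need
not select the same mutex as the hugetlb fault path, which keys on the latter.

/*
 * Minimal userspace sketch (not kernel code) of the index mismatch.
 * PAGE_SHIFT/HPAGE_SHIFT, the mapping layout and the two helpers below
 * are illustrative assumptions mirroring linear_page_index() and
 * vma_hugecache_offset().
 */
#include <stdio.h>

#define PAGE_SHIFT	12				/* assume 4 KB base pages */
#define HPAGE_SHIFT	21				/* assume 2 MB huge pages */
#define HPAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

/* PAGE_SIZE-granular index, the way linear_page_index() computes it. */
static unsigned long small_page_index(unsigned long vm_start,
				      unsigned long vm_pgoff,
				      unsigned long addr)
{
	return ((addr - vm_start) >> PAGE_SHIFT) + vm_pgoff;
}

/* Huge-page-granular index, the way vma_hugecache_offset() computes it. */
static unsigned long huge_page_index(unsigned long vm_start,
				     unsigned long vm_pgoff,
				     unsigned long addr)
{
	return ((addr - vm_start) >> HPAGE_SHIFT) + (vm_pgoff >> HPAGE_ORDER);
}

int main(void)
{
	unsigned long vm_start = 0x7f0000000000UL;	/* hypothetical hugetlbfs mapping */
	unsigned long vm_pgoff = 0;			/* mapped from file offset 0 */
	unsigned long dst_addr = vm_start + (3UL << HPAGE_SHIFT);

	/* Prints 1536 vs 3: different keys, hence different mutex hash buckets. */
	printf("linear_page_index-style index   : %lu\n",
	       small_page_index(vm_start, vm_pgoff, dst_addr));
	printf("vma_hugecache_offset-style index: %lu\n",
	       huge_page_index(vm_start, vm_pgoff, dst_addr));
	return 0;
}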