| 2 +- fs/proc/task_mmu.c | 2 +- include/linux/swapops.h | 21 ++++++++++++++++----- mm/filemap.c | 2 +- mm/hmm.c | 2 +- mm/madvise.c | 4 ++-- mm/memory.c | 8 ++++---- mm/mincore.c | 2 +- mm/userfaultfd.c | 2 +- 10 files changed, 29 insertions(+), 18 deletions(-) diff --git a/arch/s390/mm/gmap_helpers.c b/arch/s390/mm/gmap_helpers.c index d4c3c36855e2..2c41276a34c5 100644 --- a/arch/s390/mm/gmap_helpers.c +++ b/arch/s390/mm/gmap_helpers.c @@ -28,7 +28,7 @@ */ static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry) { - if (!non_swap_entry(entry)) + if (!is_non_present_entry(entry)) dec_mm_counter(mm, MM_SWAPENTS); else if (is_migration_entry(entry)) dec_mm_counter(mm, mm_counter(pfn_swap_entry_folio(entry))); diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 0fde20bbc50b..0c795f3c324f 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -685,7 +685,7 @@ void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep) static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry) { - if (!non_swap_entry(entry)) + if (!is_non_present_entry(entry)) dec_mm_counter(mm, MM_SWAPENTS); else if (is_migration_entry(entry)) { struct folio *folio = pfn_swap_entry_folio(entry); diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 1c32a0e2b965..28f30e01e504 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1022,7 +1022,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, } else { swp_entry_t swpent = pte_to_swp_entry(ptent); - if (!non_swap_entry(swpent)) { + if (!is_non_present_entry(swpent)) { int mapcount; mss->swap += PAGE_SIZE; diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 8642e590504a..fb463d75fa90 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -645,7 +645,18 @@ static inline int is_pmd_device_private_entry(pmd_t pmd) #endif /* CONFIG_ZONE_DEVICE && CONFIG_ARCH_ENABLE_THP_MIGRATION */ -static inline int non_swap_entry(swp_entry_t 
entry) +/** + * is_non_present_entry() - Determine if this is a miscellaneous + * non-present entry. + * @entry: The entry to examine. + * + * This function determines whether data encoded in non-present leaf page + * tables is a migration entry, device private entry, marker entry, etc. - + * that is a non-present entry that is not a swap entry. + * + * Returns: true if is a non-present entry, otherwise false. + */ +static inline bool is_non_present_entry(swp_entry_t entry) { return swp_type(entry) >= MAX_SWAPFILES; } @@ -661,9 +672,9 @@ static inline int is_pmd_non_present_folio_entry(pmd_t pmd) * @entryp: Output pointer to a swap entry that will be populated upon * success. * - * Determines if the PTE describes an entry in swap or swap cache (i.e. is a - * swap entry and not a non-swap entry), if so it sets @entryp to the swap - * entry. + * Determines if the PTE describes an entry in swap or swap cache (i.e. is + * a swap entry and not a non-present entry), if so it sets @entryp to the + * swap entry. * * This should only be used if we do not have any prior knowledge of this * PTE's state. 
@@ -678,7 +689,7 @@ static inline bool get_pte_swap_entry(pte_t pte, swp_entry_t *entryp) return false; *entryp = pte_to_swp_entry(pte); - if (non_swap_entry(*entryp)) + if (is_non_present_entry(*entryp)) return false; return true; diff --git a/mm/filemap.c b/mm/filemap.c index 893ba49808b7..1440e176e124 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -4553,7 +4553,7 @@ static void filemap_cachestat(struct address_space *mapping, swp_entry_t swp = radix_to_swp_entry(folio); /* swapin error results in poisoned entry */ - if (non_swap_entry(swp)) + if (is_non_present_entry(swp)) goto resched; /* diff --git a/mm/hmm.c b/mm/hmm.c index a56081d67ad6..66e18b28a21d 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -274,7 +274,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, if (!required_fault) goto out; - if (!non_swap_entry(entry)) + if (!is_non_present_entry(entry)) goto fault; if (is_device_private_entry(entry)) diff --git a/mm/madvise.c b/mm/madvise.c index 578036ef6675..a259dae2b899 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -248,7 +248,7 @@ static void shmem_swapin_range(struct vm_area_struct *vma, continue; entry = radix_to_swp_entry(folio); /* There might be swapin error entries in shmem mapping. 
*/ - if (non_swap_entry(entry)) + if (is_non_present_entry(entry)) continue; addr = vma->vm_start + @@ -690,7 +690,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, swp_entry_t entry; entry = pte_to_swp_entry(ptent); - if (!non_swap_entry(entry)) { + if (!is_non_present_entry(entry)) { max_nr = (end - addr) / PAGE_SIZE; nr = swap_pte_batch(pte, max_nr, ptent); nr_swap -= nr; diff --git a/mm/memory.c b/mm/memory.c index cc163060933f..8968ba0b076f 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -847,7 +847,7 @@ struct page *vm_normal_page_pud(struct vm_area_struct *vma, * @ptep: pte pointer into the locked page table mapping the folio page * @orig_pte: pte value at @ptep * - * Restore a device-exclusive non-swap entry to an ordinary present pte. + * Restore a device-exclusive non-present entry to an ordinary present pte. * * The folio and the page table must be locked, and MMU notifiers must have * been called to invalidate any (exclusive) device mappings. @@ -931,7 +931,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, struct page *page; swp_entry_t entry = pte_to_swp_entry(orig_pte); - if (likely(!non_swap_entry(entry))) { + if (likely(!is_non_present_entry(entry))) { if (swap_duplicate(entry) < 0) return -EIO; @@ -1739,7 +1739,7 @@ static inline int zap_nonpresent_ptes(struct mmu_gather *tlb, rss[mm_counter(folio)]--; folio_remove_rmap_pte(folio, page, vma); folio_put(folio); - } else if (!non_swap_entry(entry)) { + } else if (!is_non_present_entry(entry)) { /* Genuine swap entries, hence a private anon pages */ if (!should_zap_cows(details)) return 1; @@ -4646,7 +4646,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) goto out; entry = pte_to_swp_entry(vmf->orig_pte); - if (unlikely(non_swap_entry(entry))) { + if (unlikely(is_non_present_entry(entry))) { if (is_migration_entry(entry)) { migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address); diff --git a/mm/mincore.c b/mm/mincore.c index 8ec4719370e1..61531a7cd8b0 
100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -63,7 +63,7 @@ static unsigned char mincore_swap(swp_entry_t entry, bool shmem) * absent. Page table may contain migration or hwpoison * entries which are always uptodate. */ - if (non_swap_entry(entry)) + if (is_non_present_entry(entry)) return !shmem; /* diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 00122f42718c..04fab82a1119 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -1427,7 +1427,7 @@ static long move_pages_ptes(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd struct folio *folio = NULL; entry = pte_to_swp_entry(orig_src_pte); - if (non_swap_entry(entry)) { + if (is_non_present_entry(entry)) { if (is_migration_entry(entry)) { pte_unmap(src_pte); pte_unmap(dst_pte); -- 2.51.0

Subject: [RFC PATCH 11/12] mm: rename non_swap_entry() to is_non_present_entry()
From: Lorenzo Stoakes
To: Andrew Morton