FROMGIT: mm: handle swap and NUMA PTE faults under the VMA lock

Move the FAULT_FLAG_VMA_LOCK check down in handle_pte_fault().  This is
probably not a huge win in its own right, but is a nicely separable bit
from the next patch.

Link: https://lkml.kernel.org/r/20230724185410.1124082-10-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Arjun Roy <arjunroy@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

(cherry picked from commit 51c4fdc72be2287960ab5c1f5beae84f3039fd01
https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable)

Bug: 293665307
Change-Id: I6cf9cb1d40c23287ce179a8c435427c3d88d2528
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
This commit is contained in:
Matthew Wilcox (Oracle) 2023-07-24 19:54:09 +01:00 committed by Suren Baghdasaryan
parent ffcebdef16
commit 83ab986324

View file

@@ -5040,18 +5040,18 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 		return do_fault(vmf);
 	}
 
-	if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) {
-		pte_unmap(vmf->pte);
-		vma_end_read(vmf->vma);
-		return VM_FAULT_RETRY;
-	}
-
 	if (!pte_present(vmf->orig_pte))
 		return do_swap_page(vmf);
 
 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
 		return do_numa_page(vmf);
 
+	if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) {
+		pte_unmap(vmf->pte);
+		vma_end_read(vmf->vma);
+		return VM_FAULT_RETRY;
+	}
+
 	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
 	spin_lock(vmf->ptl);
 	entry = vmf->orig_pte;