FROMLIST: BACKPORT: mm: Change do_vmi_align_munmap() side tree index
The majority of the calls to munmap a VMA are for a single vma. The maple
tree is able to store a single entry at 0, with a size of 1, as a pointer
and avoid any allocations. Change do_vmi_align_munmap() to store the VMAs
being munmap()'ed into a tree indexed by the count. This will leverage the
ability to store the first entry without a node allocation.

Storing the entries into a tree by the count and not the vma start and end
means changing the functions which iterate over the entries. Update
unmap_vmas() and free_pgtables() to take a maple state and a tree end
address to support this functionality.

Passing through the same maple state to unmap_vmas() and free_pgtables()
means the state needs to be reset between calls. This happens in the
static unmap_region() and exit_mmap().

Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Link: https://lore.kernel.org/lkml/20230601021605.2823123-5-Liam.Howlett@oracle.com/
[surenb: skip changes passing maple state to unmap_vmas() and free_pgtables()]
Bug: 274059236
Change-Id: If38cfecd51da884bcfdbdfdfbf955a0b338d3d60
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
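[Editor's note: a minimal sketch of the indexing change, not part of the
patch. Identifiers mirror munmap_sidetree() in the diff below; tree setup,
locking, and the surrounding loop are elided.]

	/* Before: index the side tree by the VMA's address range. Even a
	 * single munmap()'ed VMA costs a maple node allocation to hold
	 * the [vm_start, vm_end - 1] range.
	 */
	mas_set_range(&mas_detach, vma->vm_start, vma->vm_end - 1);
	if (mas_store_gfp(&mas_detach, vma, GFP_KERNEL))
		return -ENOMEM;

	/* After: index by a running count of detached VMAs. The common
	 * single-VMA case stores at index 0, which the maple tree keeps
	 * as a root pointer with no node allocation.
	 */
	mas_set(&mas_detach, count);
	if (mas_store_gfp(&mas_detach, vma, GFP_KERNEL))
		return -ENOMEM;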
commit e9fdabfc2a
parent 25bed2fdbc

4 changed files with 32 additions and 24 deletions
include/linux/mm.h

@@ -1904,7 +1904,8 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
 			   unsigned long size, struct zap_details *details);
 void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
 		struct vm_area_struct *start_vma, unsigned long start,
-		unsigned long end);
+		unsigned long end, unsigned long start_t,
+		unsigned long end_t);
 
 struct mmu_notifier_range;
mm/internal.h

@@ -87,7 +87,7 @@ void folio_activate(struct folio *folio);
 
 void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
 		struct vm_area_struct *start_vma, unsigned long floor,
-		unsigned long ceiling);
+		unsigned long ceiling, unsigned long start_t);
 void pmd_install(struct mm_struct *mm, pmd_t *pmd, pgtable_t *pte);
 
 struct zap_details;
mm/memory.c
@@ -397,9 +397,9 @@ void free_pgd_range(struct mmu_gather *tlb,
 
 void free_pgtables(struct mmu_gather *tlb, struct maple_tree *mt,
 		   struct vm_area_struct *vma, unsigned long floor,
-		   unsigned long ceiling)
+		   unsigned long ceiling, unsigned long start_t)
 {
-	MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
+	MA_STATE(mas, mt, start_t, start_t);
 
 	do {
 		unsigned long addr = vma->vm_start;
@@ -1704,7 +1704,8 @@ static void unmap_single_vma(struct mmu_gather *tlb,
  */
 void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
 		struct vm_area_struct *vma, unsigned long start_addr,
-		unsigned long end_addr)
+		unsigned long end_addr, unsigned long start_t,
+		unsigned long end_t)
 {
 	struct mmu_notifier_range range;
 	struct zap_details details = {
@@ -1712,14 +1713,14 @@ void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
 		/* Careful - we need to zap private pages too! */
 		.even_cows = true,
 	};
-	MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
+	MA_STATE(mas, mt, start_t, start_t);
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
 				start_addr, end_addr);
 	mmu_notifier_invalidate_range_start(&range);
 	do {
 		unmap_single_vma(tlb, vma, start_addr, end_addr, &details);
-	} while ((vma = mas_find(&mas, end_addr - 1)) != NULL);
+	} while ((vma = mas_find(&mas, end_t - 1)) != NULL);
 	mmu_notifier_invalidate_range_end(&range);
 }
mm/mmap.c
@@ -80,7 +80,7 @@ core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
 static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		struct vm_area_struct *next, unsigned long start,
-		unsigned long end);
+		unsigned long end, unsigned long start_t, unsigned long end_t);
 
 static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
 {
@@ -2180,17 +2180,17 @@ static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
  */
 static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
-		struct vm_area_struct *next,
-		unsigned long start, unsigned long end)
+		struct vm_area_struct *next, unsigned long start,
+		unsigned long end, unsigned long start_t, unsigned long end_t)
 {
 	struct mmu_gather tlb;
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, mt, vma, start, end);
+	unmap_vmas(&tlb, mt, vma, start, end, start_t, end_t);
 	free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
-		      next ? next->vm_start : USER_PGTABLES_CEILING);
+		      next ? next->vm_start : USER_PGTABLES_CEILING, start_t);
 	tlb_finish_mmu(&tlb);
 }
@@ -2276,10 +2276,10 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	return __split_vma(mm, vma, addr, new_below);
 }
 
-static inline int munmap_sidetree(struct vm_area_struct *vma,
+static inline int munmap_sidetree(struct vm_area_struct *vma, int count,
 				  struct ma_state *mas_detach)
 {
-	mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1);
+	mas_set(mas_detach, count);
 	if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
 		return -ENOMEM;
@@ -2365,7 +2365,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 
 			mas_set(mas, end);
 			split = mas_prev(mas, 0);
-			error = munmap_sidetree(split, &mas_detach);
+			error = munmap_sidetree(split, count, &mas_detach);
 			if (error)
 				goto munmap_sidetree_failed;
@@ -2374,7 +2374,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 			vma = split;
 			break;
 		}
-		error = munmap_sidetree(next, &mas_detach);
+		error = munmap_sidetree(next, count, &mas_detach);
 		if (error)
 			goto munmap_sidetree_failed;
@@ -2407,17 +2407,22 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
 	/* Make sure no VMAs are about to be lost. */
 	{
-		MA_STATE(test, &mt_detach, start, end - 1);
+		MA_STATE(test, &mt_detach, 0, 0);
 		struct vm_area_struct *vma_mas, *vma_test;
 		int test_count = 0;
+		unsigned long s, e;
 
+		mas_set_range(mas, start, end - 1);
 		rcu_read_lock();
-		vma_test = mas_find(&test, end - 1);
+		vma_test = mas_find(&test, count - 1);
 		mas_for_each(mas, vma_mas, end - 1) {
+			if (!test_count)
+				s = vma_mas->vm_start;
 			BUG_ON(vma_mas != vma_test);
 			test_count++;
-			vma_test = mas_next(&test, end - 1);
+			if (test_count == count)
+				e = vma_mas->vm_end;
+			vma_test = mas_next(&test, count - 1);
 		}
 		rcu_read_unlock();
 		BUG_ON(count != test_count);
@@ -2443,9 +2448,9 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		mmap_write_downgrade(mm);
 	}
 
-	unmap_region(mm, &mt_detach, vma, prev, next, start, end);
+	unmap_region(mm, &mt_detach, vma, prev, next, start, end, 1, count);
 	/* Statistics and freeing VMAs */
-	mas_set(&mas_detach, start);
+	mas_set(&mas_detach, 0);
 	remove_mt(mm, &mas_detach);
 	__mt_destroy(&mt_detach);
@@ -2757,7 +2762,8 @@ unmap_and_free_vma:
 	vma->vm_file = NULL;
 
 	/* Undo any partial mapping done by a device driver. */
-	unmap_region(mm, mas.tree, vma, prev, next, vma->vm_start, vma->vm_end);
+	unmap_region(mm, mas.tree, vma, prev, next, vma->vm_start, vma->vm_end,
+		     vma->vm_end, vma->vm_end);
 	if (file && (vm_flags & VM_SHARED))
 		mapping_unmap_writable(file->f_mapping);
 free_vma:
@@ -3090,7 +3096,7 @@ void exit_mmap(struct mm_struct *mm)
 	tlb_gather_mmu_fullmm(&tlb, mm);
 	/* update_hiwater_rss(mm) here? but nobody should be looking */
 	/* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
-	unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX);
+	unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX, vma->vm_end, ULONG_MAX);
 	mmap_read_unlock(mm);
 
 	/*
@@ -3101,7 +3107,7 @@ void exit_mmap(struct mm_struct *mm)
 	mmap_write_lock(mm);
 	mt_clear_in_rcu(&mm->mm_mt);
 	free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS,
-		      USER_PGTABLES_CEILING);
+		      USER_PGTABLES_CEILING, vma->vm_end);
 	tlb_finish_mmu(&tlb);
 
 	/*