diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c
index a7d07619a094..9a8d317b83f2 100644
--- a/drivers/android/vendor_hooks.c
+++ b/drivers/android/vendor_hooks.c
@@ -171,6 +171,11 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_balance_anon_file_reclaim);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_show_max_freq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dm_bufio_shrink_scan_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cleanup_old_buffers_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_unref_page_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kvmalloc_node_use_vmalloc);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_should_alloc_pages_retry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_unreserve_highatomic_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue_bulk_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_insert);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_node_delete);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_node_replace);
diff --git a/include/trace/hooks/mm.h b/include/trace/hooks/mm.h
index 9221cf9e8ca3..8af085c167d5 100644
--- a/include/trace/hooks/mm.h
+++ b/include/trace/hooks/mm.h
@@ -123,6 +123,24 @@ DECLARE_HOOK(android_vh_mm_compaction_begin,
 DECLARE_HOOK(android_vh_mm_compaction_end,
 	TP_PROTO(struct compact_control *cc, long vendor_ret),
 	TP_ARGS(cc, vendor_ret));
+DECLARE_HOOK(android_vh_free_unref_page_bypass,
+	TP_PROTO(struct page *page, int order, int migratetype, bool *bypass),
+	TP_ARGS(page, order, migratetype, bypass));
+DECLARE_HOOK(android_vh_kvmalloc_node_use_vmalloc,
+	TP_PROTO(size_t size, gfp_t *kmalloc_flags, bool *use_vmalloc),
+	TP_ARGS(size, kmalloc_flags, use_vmalloc));
+DECLARE_HOOK(android_vh_should_alloc_pages_retry,
+	TP_PROTO(gfp_t gfp_mask, int order, int *alloc_flags,
+		int migratetype, struct zone *preferred_zone, struct page **page, bool *should_alloc_retry),
+	TP_ARGS(gfp_mask, order, alloc_flags,
+		migratetype, preferred_zone, page, should_alloc_retry));
+DECLARE_HOOK(android_vh_unreserve_highatomic_bypass,
+	TP_PROTO(bool force, struct zone *zone, bool *skip_unreserve_highatomic),
+	TP_ARGS(force, zone, skip_unreserve_highatomic));
+DECLARE_HOOK(android_vh_rmqueue_bulk_bypass,
+	TP_PROTO(unsigned int order, struct per_cpu_pages *pcp, int migratetype,
+		struct list_head *list),
+	TP_ARGS(order, pcp, migratetype, list));
 struct mem_cgroup;
 DECLARE_HOOK(android_vh_mem_cgroup_alloc,
 	TP_PROTO(struct mem_cgroup *memcg),
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b246d214ce31..ed6c52045448 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1747,11 +1747,15 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 	int migratetype;
 	unsigned long pfn = page_to_pfn(page);
 	struct zone *zone = page_zone(page);
+	bool skip_free_unref_page = false;
 
 	if (!free_pages_prepare(page, order, true, fpi_flags))
 		return;
 
 	migratetype = get_pfnblock_migratetype(page, pfn);
+	trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
+	if (skip_free_unref_page)
+		return;
 
 	spin_lock_irqsave(&zone->lock, flags);
 	if (unlikely(has_isolate_pageblock(zone) ||
@@ -2980,6 +2984,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 	struct page *page;
 	int order;
 	bool ret;
+	bool skip_unreserve_highatomic = false;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
 								ac->nodemask) {
@@ -2991,6 +2996,11 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 					pageblock_nr_pages)
 			continue;
 
+		trace_android_vh_unreserve_highatomic_bypass(force, zone,
+				&skip_unreserve_highatomic);
+		if (skip_unreserve_highatomic)
+			continue;
+
 		spin_lock_irqsave(&zone->lock, flags);
 		for (order = 0; order < MAX_ORDER; order++) {
 			struct free_area *area = &(zone->free_area[order]);
@@ -3247,6 +3257,10 @@ static struct list_head *get_populated_pcp_list(struct zone *zone,
 		int batch = READ_ONCE(pcp->batch);
 		int alloced;
 
+		trace_android_vh_rmqueue_bulk_bypass(order, pcp, migratetype, list);
+		if (!list_empty(list))
+			return list;
+
 		/*
 		 * Scale batch relative to order if batch implies
 		 * free pages can be stored on the PCP. Batch can
@@ -3578,10 +3592,16 @@ void free_unref_page(struct page *page, unsigned int order)
 	struct zone *zone;
 	unsigned long pfn = page_to_pfn(page);
 	int migratetype;
+	bool skip_free_unref_page = false;
 
 	if (!free_unref_page_prepare(page, pfn, order))
 		return;
 
+	migratetype = get_pcppage_migratetype(page);
+	trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
+	if (skip_free_unref_page)
+		return;
+
 	/*
 	 * We only track unmovable, reclaimable, movable, and CMA on pcp lists.
 	 * Place ISOLATE pages on the isolated list because they are being
@@ -5140,6 +5160,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned int zonelist_iter_cookie;
 	int reserve_flags;
 	unsigned long alloc_start = jiffies;
+	bool should_alloc_retry = false;
 	/*
 	 * We also sanity check to catch abuse of atomic reserves being used by
 	 * callers that are not in atomic context.
@@ -5292,6 +5313,11 @@ retry:
 	if (page)
 		goto got_pg;
 
+	trace_android_vh_should_alloc_pages_retry(gfp_mask, order, &alloc_flags,
+			ac->migratetype, ac->preferred_zoneref->zone, &page, &should_alloc_retry);
+	if (should_alloc_retry)
+		goto retry;
+
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
 							&did_some_progress);
diff --git a/mm/util.c b/mm/util.c
index 29c9470aeeef..c96c359196eb 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -29,8 +29,9 @@
 #include "internal.h"
 #include "swap.h"
 
-#ifndef __GENSYMS__
+#ifndef __GENKSYMS__
 #include
+#include <trace/hooks/mm.h>
 #endif
 
 /**
@@ -565,7 +566,11 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 {
 	gfp_t kmalloc_flags = flags;
 	void *ret;
+	bool use_vmalloc = false;
 
+	trace_android_vh_kvmalloc_node_use_vmalloc(size, &kmalloc_flags, &use_vmalloc);
+	if (use_vmalloc)
+		goto use_vmalloc_node;
 	/*
 	 * We want to attempt a large physically contiguous block first because
 	 * it is less likely to fragment multiple larger blocks and therefore
@@ -609,6 +614,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 	 * about the resulting pointer, and cannot play
 	 * protection games.
 	 */
+use_vmalloc_node:
 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
 			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, node,
 			__builtin_return_address(0));
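
For reference, below is a minimal sketch of how a vendor module might attach to one of the hooks exported above. The module name, the demo_kvmalloc_use_vmalloc handler, and the SZ_64K threshold are illustrative assumptions and are not part of this patch; the register/unregister_trace_android_vh_* helpers are the ones generated by DECLARE_HOOK() via the tracepoint machinery.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative vendor-module sketch -- not part of this patch. */
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <trace/hooks/mm.h>

/*
 * Hypothetical policy: route large kvmalloc_node() requests straight to
 * vmalloc instead of first attempting a physically contiguous kmalloc.
 * The 64 KiB threshold is an arbitrary example value.
 */
static void demo_kvmalloc_use_vmalloc(void *unused, size_t size,
				      gfp_t *kmalloc_flags, bool *use_vmalloc)
{
	if (size > SZ_64K)
		*use_vmalloc = true;
}

static int __init demo_mm_hooks_init(void)
{
	/* register_trace_android_vh_* takes the probe and a private data pointer */
	return register_trace_android_vh_kvmalloc_node_use_vmalloc(
			demo_kvmalloc_use_vmalloc, NULL);
}

static void __exit demo_mm_hooks_exit(void)
{
	unregister_trace_android_vh_kvmalloc_node_use_vmalloc(
			demo_kvmalloc_use_vmalloc, NULL);
}

module_init(demo_mm_hooks_init);
module_exit(demo_mm_hooks_exit);
MODULE_LICENSE("GPL");

Since these are regular android_vh hooks (not restricted android_rvh hooks), a probe attached this way can also be unregistered when the vendor module is unloaded, as the exit handler above assumes.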