ANDROID: mm: add cma pcp list

Add a PCP list for __GFP_CMA allocations so as not to deprive
MIGRATE_MOVABLE allocations of quick access to pages on their PCP
lists.

Bug: 158645321
Change-Id: I9831eed113ec9e851b4f651755205ac9cf23b9be
Signed-off-by: Liam Mark <lmark@codeaurora.org>
Signed-off-by: Chris Goldsworthy <cgoldswo@codeaurora.org>
[isaacm@codeaurora.org: Resolve merge conflicts related to new mm
features]
Signed-off-by: Isaac J. Manjarres <isaacm@quicinc.com>
[quic_sukadev@quicinc.com: Resolve merge conflicts due to earlier patch
dropping gfp_flags; drop BUILD_BUG_ON related to MIGRATE_HIGHATOMIC
since its value changed]
Signed-off-by: Sukadev Bhattiprolu <quic_sukadev@quicinc.com>
Commit 433445e9a1 (parent f60c5572d2), authored by Chris Goldsworthy on
2020-11-16 18:38:13 -08:00 and committed by Carlos Llamas.
3 changed files with 62 additions and 47 deletions
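
Note: the reworked __rmqueue_pcplist() below calls get_cma_migrate_type(), a
helper that is not defined in this diff. As a rough sketch only (an assumption
about an Android-tree helper, not code from this patch), it could simply pick
the CMA migratetype when CMA is compiled in and fall back to MIGRATE_MOVABLE
otherwise:

/* Hypothetical sketch, not part of this patch: choose the PCP list that
 * CMA-eligible allocations should try first. */
static inline int get_cma_migrate_type(void)
{
#ifdef CONFIG_CMA
	return MIGRATE_CMA;
#else
	return MIGRATE_MOVABLE;
#endif
}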

include/linux/gfp.h

@@ -19,9 +19,6 @@ static inline int gfp_migratetype(const gfp_t gfp_flags)
BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);
if (unlikely(page_group_by_mobility_disabled))
return MIGRATE_UNMOVABLE;

include/linux/mmzone.h

@@ -46,8 +46,6 @@ enum migratetype {
MIGRATE_UNMOVABLE,
MIGRATE_MOVABLE,
MIGRATE_RECLAIMABLE,
MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
/*
* MIGRATE_CMA migration type is designed to mimic the way
@@ -61,6 +59,8 @@ enum migratetype {
*/
MIGRATE_CMA,
#endif
MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
#endif
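
The point of the reordering above is that MIGRATE_CMA now sits below
MIGRATE_PCPTYPES, so everything sized or bounded by MIGRATE_PCPTYPES
(notably the per-CPU lists indexed via order_to_pindex()) automatically gains
a CMA entry; it is also why __zone_watermark_ok() below must now skip
MIGRATE_CMA in its PCP-types loop. A standalone illustration of the resulting
enum values (the CONFIG_CMA define exists only for this example; in the
kernel it comes from Kconfig):

#include <stdio.h>

#define CONFIG_CMA 1	/* for illustration only */

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
#ifdef CONFIG_CMA
	MIGRATE_CMA,
#endif
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
};

int main(void)
{
	/* Prints "MIGRATE_CMA = 3, MIGRATE_PCPTYPES = 4": CMA gets its own
	 * slot among the PCP types instead of sharing MIGRATE_MOVABLE's. */
	printf("MIGRATE_CMA = %d, MIGRATE_PCPTYPES = %d\n",
	       MIGRATE_CMA, MIGRATE_PCPTYPES);
	return 0;
}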

mm/page_alloc.c

@@ -399,10 +399,10 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
"Unmovable",
"Movable",
"Reclaimable",
"HighAtomic",
#ifdef CONFIG_CMA
"CMA",
#endif
"HighAtomic",
#ifdef CONFIG_MEMORY_ISOLATION
"Isolate",
#endif
@@ -3212,6 +3212,39 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
return allocated;
}
/*
* Return the pcp list that corresponds to the migrate type if that list isn't
* empty.
* If the list is empty return NULL.
*/
static struct list_head *get_populated_pcp_list(struct zone *zone,
unsigned int order, struct per_cpu_pages *pcp,
int migratetype, unsigned int alloc_flags)
{
struct list_head *list = &pcp->lists[order_to_pindex(migratetype, order)];
if (list_empty(list)) {
int batch = READ_ONCE(pcp->batch);
int alloced;
/*
* Scale batch relative to order if batch implies
* free pages can be stored on the PCP. Batch can
* be 1 for small zones or for boot pagesets which
* should never store free pages as the pages may
* belong to arbitrary zones.
*/
if (batch > 1)
batch = max(batch >> order, 2);
alloced = rmqueue_bulk(zone, order, batch, list, migratetype, alloc_flags);
pcp->count += alloced << order;
if (list_empty(list))
list = NULL;
}
return list;
}
#ifdef CONFIG_NUMA
/*
* Called from the vmstat counter updater to drain pagesets of this
@@ -3530,7 +3563,7 @@ void free_unref_page(struct page *page, unsigned int order)
return;
/*
* We only track unmovable, reclaimable and movable on pcp lists.
* We only track unmovable, reclaimable, movable, and CMA on pcp lists.
* Place ISOLATE pages on the isolated list because they are being
* offlined but treat HIGHATOMIC as movable pages so we can get those
* areas back if necessary. Otherwise, we may have to free
@@ -3786,34 +3819,23 @@ static inline
struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
int migratetype,
unsigned int alloc_flags,
struct per_cpu_pages *pcp,
struct list_head *list)
struct per_cpu_pages *pcp)
{
struct page *page;
struct page *page = NULL;
struct list_head *list = NULL;
do {
if (list_empty(list)) {
int batch = READ_ONCE(pcp->batch);
int alloced;
/* First try to get CMA pages */
if (migratetype == MIGRATE_MOVABLE && alloc_flags & ALLOC_CMA)
list = get_populated_pcp_list(zone, order, pcp, get_cma_migrate_type(),
alloc_flags);
if (list == NULL) {
/*
* Scale batch relative to order if batch implies
* free pages can be stored on the PCP. Batch can
* be 1 for small zones or for boot pagesets which
* should never store free pages as the pages may
* belong to arbitrary zones.
* Either CMA is not suitable or there are no
* free CMA pages.
*/
if (batch > 1)
batch = max(batch >> order, 2);
if (migratetype == MIGRATE_MOVABLE && alloc_flags & ALLOC_CMA)
alloced = rmqueue_bulk(zone, order, batch, list,
get_cma_migrate_type(), alloc_flags);
if (unlikely(list_empty(list)))
alloced = rmqueue_bulk(zone, order, batch, list, migratetype,
alloc_flags);
pcp->count += alloced << order;
if (unlikely(list_empty(list)))
list = get_populated_pcp_list(zone, order, pcp, migratetype, alloc_flags);
if (unlikely(list == NULL) || unlikely(list_empty(list)))
return NULL;
}
@@ -3831,7 +3853,6 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
int migratetype, unsigned int alloc_flags)
{
struct per_cpu_pages *pcp;
struct list_head *list;
struct page *page;
unsigned long flags;
unsigned long __maybe_unused UP_flags;
@@ -3853,8 +3874,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
* frees.
*/
pcp->free_factor >>= 1;
list = &pcp->lists[order_to_pindex(migratetype, order)];
page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp);
pcp_spin_unlock_irqrestore(pcp, flags);
pcp_trylock_finish(UP_flags);
if (page) {
@@ -3891,17 +3911,9 @@ struct page *rmqueue(struct zone *preferred_zone,
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
if (likely(pcp_allowed_order(order))) {
/*
* MIGRATE_MOVABLE pcplist could have the pages on CMA area and
* we need to skip it when CMA area isn't allowed.
*/
if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
migratetype != MIGRATE_MOVABLE) {
page = rmqueue_pcplist(preferred_zone, zone, order,
migratetype, alloc_flags);
if (likely(page))
goto out;
}
page = rmqueue_pcplist(preferred_zone, zone, order,
migratetype, alloc_flags);
goto out;
}
page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
@@ -4075,6 +4087,14 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
continue;
for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
#ifdef CONFIG_CMA
/*
* Note that this check is needed only
* when MIGRATE_CMA < MIGRATE_PCPTYPES.
*/
if (mt == MIGRATE_CMA)
continue;
#endif
if (!free_area_empty(area, mt))
return true;
}
@@ -5431,7 +5451,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
struct zone *zone;
struct zoneref *z;
struct per_cpu_pages *pcp;
struct list_head *pcp_list;
struct alloc_context ac;
gfp_t alloc_gfp;
unsigned int alloc_flags = ALLOC_WMARK_LOW;
@@ -5515,7 +5534,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
goto failed_irq;
/* Attempt the batch allocation */
pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)];
while (nr_populated < nr_pages) {
/* Skip existing pages */
@@ -5525,7 +5543,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
}
page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
pcp, pcp_list);
pcp);
if (unlikely(!page)) {
/* Try and allocate at least one page */
if (!nr_account) {