mm: do not fail non-costly allocs if there are reclaimable pages
should_reclaim_retry can fail for order > 0 allocs even if there are reclaimable pages. The decision to retry is then left to compaction progress in should_compact_retry. Shrinkers like lowmemorykiller, which do reclaim based on page cache levels, can take time to free up memory. By this time, compaction can complete scanning (COMPACT_COMPLETE) without success. This results in an early failure (compaction_failed) in should_compact_retry, even when there are enough tasks available for lowmemorykiller to kill. This results in premature OOMs. It can be argued how this condition differs from a case where any reclaimable slab is consuming a major part of the system's memory. The difference is that for other reclaimable slabs, a pressure equivalent to that put on the LRU is put on them, resulting in faster reclaim. But that is not the case with lowmemorykiller, which doesn't even trigger until certain page cache levels are reached, and there can be cases where no tasks are available in the lower oom_score_adj ranges, resulting in no kills. Thus, when lowmemorykiller is enabled, retry non-costly allocs until there are enough reclaimable pages and we are making progress in reclaim. For costly orders, leave the decision to the existing compaction progress logic. Change-Id: I177cdd11e3f45d7f8fcf02435f7ef5834408fb6f Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
This commit is contained in:
parent
4b7c557841
commit
24601112ff
1 changed file with 43 additions and 0 deletions
|
@ -3602,6 +3602,46 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HAVE_LOW_MEMORY_KILLER
/*
 * should_compact_lmk_retry() - decide whether a non-costly allocation
 * should keep retrying so that slow shrinkers (lowmemorykiller) get a
 * chance to free memory before compaction failure aborts the retry loop.
 *
 * Returns true when some eligible zone could satisfy an order-0 request
 * at its min watermark if all of its reclaimable pages were freed, i.e.
 * retrying reclaim is still worthwhile.  Returns false for costly orders
 * (the caller falls back to compaction-progress heuristics) and when no
 * zone has enough free + reclaimable pages.
 *
 * NOTE(review): callers are expected to have already filtered order == 0
 * (should_compact_retry bails out early for !order) — confirm at call site.
 */
static inline bool
should_compact_lmk_retry(struct alloc_context *ac, int order, int alloc_flags)
{
	struct zone *zone;
	struct zoneref *z;

	/* Let costly order requests check for compaction progress */
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		return false;

	/*
	 * For (0 < order <= PAGE_ALLOC_COSTLY_ORDER) allow the shrinkers
	 * to run and free up memory. Do not let these allocations fail
	 * if shrinkers can free up memory. This is similar to
	 * should_compact_retry implementation for !CONFIG_COMPACTION.
	 */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
				ac->high_zoneidx, ac->nodemask) {
		unsigned long available;

		/*
		 * Optimistic estimate of what could become usable:
		 * everything currently reclaimable plus already-free pages.
		 */
		available = zone_reclaimable_pages(zone);
		available +=
			zone_page_state_snapshot(zone, NR_FREE_PAGES);

		/*
		 * Order-0 watermark check: if this zone would pass its min
		 * watermark after full reclaim, it is worth retrying.
		 */
		if (__zone_watermark_ok(zone, 0, min_wmark_pages(zone),
			ac_classzone_idx(ac), alloc_flags, available))
			return true;
	}

	return false;
}
|
||||
#else
/*
 * Stub for kernels without CONFIG_HAVE_LOW_MEMORY_KILLER: there is no
 * LMK shrinker to wait for, so never request an LMK-driven retry and
 * let the regular compaction-progress logic decide.
 */
static inline bool
should_compact_lmk_retry(struct alloc_context *ac, int order, int alloc_flags)
{
	return false;
}
#endif
|
||||
|
||||
static inline bool
|
||||
should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
|
||||
enum compact_result compact_result,
|
||||
|
@ -3617,6 +3657,9 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
|
|||
if (!order)
|
||||
return false;
|
||||
|
||||
if (should_compact_lmk_retry(ac, order, alloc_flags))
|
||||
return true;
|
||||
|
||||
if (compaction_made_progress(compact_result))
|
||||
(*compaction_retries)++;
|
||||
|
||||
|
|
Loading…
Reference in a new issue