author     Zlatko Calusic <zlatko.calusic@iskon.hr>       2013-02-22 16:34:06 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org> 2013-02-23 17:50:15 -0800
commit     258401a60c4df39332f30ef57afbc6dbf29a7e84
tree       a397beebf23182f77ca6e688681d79eb3181308a /mm
parent     4db0e950c5b78586bea9e1b027be849631f89a17
mm: don't wait on congested zones in balance_pgdat()
From: Zlatko Calusic <zlatko.calusic@iskon.hr>
Commit 92df3a723f84 ("mm: vmscan: throttle reclaim if encountering too
many dirty pages under writeback") introduced waiting on congested zones
based on a sane algorithm in shrink_inactive_list().
This means there is no longer any need for the throttling and additional
heuristics in balance_pgdat(). So, let's remove them and tidy up the code.
Signed-off-by: Zlatko Calusic <zlatko.calusic@iskon.hr>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
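For context, the mechanism the message above leans on — the throttle that
commit 92df3a723f84 added to shrink_inactive_list() — can be sketched roughly
as follows. This is a standalone userspace sketch, not kernel code:
shrink_batch() and the fixed one-half threshold are illustrative stand-ins
(the kernel scales the writeback threshold by reclaim priority and sleeps via
wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10)).

    /* Standalone sketch, not kernel code: models the 92df3a723f84 throttle. */
    #include <stdio.h>
    #include <unistd.h>

    #define SWAP_CLUSTER_MAX 32  /* reclaim batch size, as in the kernel */

    /* Rough model of one shrink_inactive_list() pass over a batch of pages. */
    static void shrink_batch(unsigned long nr_taken, unsigned long nr_writeback)
    {
            /*
             * If most pages in the batch were already under writeback, the
             * backing device is congested: sleep briefly instead of burning
             * CPU rescanning the same pages. The kernel scales this threshold
             * by reclaim priority; one half is used here for illustration.
             */
            if (nr_writeback && nr_writeback >= (nr_taken >> 1)) {
                    printf("congested: throttle ~100ms (wait_iff_congested stand-in)\n");
                    usleep(100 * 1000);
            } else {
                    printf("clean batch: no throttle\n");
            }
    }

    int main(void)
    {
            shrink_batch(SWAP_CLUSTER_MAX, 30); /* mostly writeback -> throttle */
            shrink_batch(SWAP_CLUSTER_MAX, 2);  /* mostly clean -> no wait */
            return 0;
    }

Because this throttle sits where reclaim actually encounters the pages under
writeback, it makes the cruder per-pass congestion wait in balance_pgdat()
redundant, which is what the patch below removes.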
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c  29
-rw-r--r--  mm/vmstat.c   1
2 files changed, 1 insertion(+), 29 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8fde2fc223d9..b93968b71dc6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2617,7 +2617,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 					int *classzone_idx)
 {
 	bool pgdat_is_balanced = false;
-	struct zone *unbalanced_zone;
 	int i;
 	int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
 	unsigned long total_scanned;
@@ -2648,9 +2647,6 @@ loop_again:
 
 	do {
 		unsigned long lru_pages = 0;
-		int has_under_min_watermark_zone = 0;
-
-		unbalanced_zone = NULL;
 
 		/*
 		 * Scan in the highmem->dma direction for the highest
@@ -2790,17 +2786,7 @@ loop_again:
 				continue;
 			}
 
-			if (!zone_balanced(zone, testorder, 0, end_zone)) {
-				unbalanced_zone = zone;
-				/*
-				 * We are still under min water mark. This
-				 * means that we have a GFP_ATOMIC allocation
-				 * failure risk. Hurry up!
-				 */
-				if (!zone_watermark_ok_safe(zone, order,
-					    min_wmark_pages(zone), end_zone, 0))
-					has_under_min_watermark_zone = 1;
-			} else {
+			if (zone_balanced(zone, testorder, 0, end_zone))
 				/*
 				 * If a zone reaches its high watermark,
 				 * consider it to be no longer congested. It's
@@ -2809,8 +2795,6 @@ loop_again:
 				 * speculatively avoid congestion waits
 				 */
 				zone_clear_flag(zone, ZONE_CONGESTED);
-			}
-		}
 
 		/*
@@ -2828,17 +2812,6 @@ loop_again:
 		}
 
 		/*
-		 * OK, kswapd is getting into trouble. Take a nap, then take
-		 * another pass across the zones.
-		 */
-		if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
-			if (has_under_min_watermark_zone)
-				count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
-			else if (unbalanced_zone)
-				wait_iff_congested(unbalanced_zone, BLK_RW_ASYNC, HZ/10);
-		}
-
-		/*
 		 * We do this so kswapd doesn't build up large priorities for
 		 * example when it is freeing in parallel with allocators. It
 		 * matches the direct reclaim path behaviour in terms of impact
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c9d1f68120cd..57f02fd1768b 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -770,7 +770,6 @@ const char * const vmstat_text[] = {
 	"kswapd_inodesteal",
 	"kswapd_low_wmark_hit_quickly",
 	"kswapd_high_wmark_hit_quickly",
-	"kswapd_skip_congestion_wait",
 	"pageoutrun",
 	"allocstall",
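Reading the removed hunks back as straight-line logic may help: once the
reclaim priority had dropped a couple of steps below DEF_PRIORITY, kswapd
would either skip the sleep when some zone was below its min watermark
(counting the now-dead kswapd_skip_congestion_wait event, hence the
mm/vmstat.c hunk) or nap ~100ms on the unbalanced zone. A standalone sketch
of that deleted heuristic, with maybe_nap() and the bool flags as
illustrative stand-ins for the kernel's state:

    /* Standalone sketch of the deleted balance_pgdat() heuristic; illustrative only. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define DEF_PRIORITY 12

    static unsigned long kswapd_skip_congestion_wait; /* the vm_event removed in mm/vmstat.c */

    static void maybe_nap(unsigned long total_scanned, int priority,
                          bool unbalanced_zone, bool under_min_watermark)
    {
            /* Only nap once reclaim pressure has ramped up a couple of steps. */
            if (!total_scanned || priority >= DEF_PRIORITY - 2)
                    return;

            if (under_min_watermark) {
                    /* GFP_ATOMIC allocation failure risk: hurry, don't sleep. */
                    kswapd_skip_congestion_wait++;
            } else if (unbalanced_zone) {
                    /* Stand-in for wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10). */
                    usleep(100 * 1000);
            }
    }

    int main(void)
    {
            maybe_nap(1024, DEF_PRIORITY - 3, true, false); /* would have napped */
            maybe_nap(1024, DEF_PRIORITY - 3, true, true);  /* would have hurried */
            printf("skipped waits: %lu\n", kswapd_skip_congestion_wait);
            return 0;
    }

With the patch applied, the only congestion wait left on this path is the one
in shrink_inactive_list(), which sees the writeback state directly instead of
guessing from per-zone watermarks.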