mm, mem-hotplug: recalculate lowmem_reserve when memory hotplug occurs

Currently, memory hotplug calls setup_per_zone_wmarks() and
calculate_zone_inactive_ratio(), but doesn't call
setup_per_zone_lowmem_reserve().

This means the number of reserved pages isn't updated even when memory
hotplug occurs.  This patch fixes it.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: KOSAKI Motohiro, 2011-05-24 17:11:32 -07:00
Committed by: Linus Torvalds
commit 1b79acc911
parent 839a4fcc8a
3 changed files with 8 additions and 7 deletions
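
Why routing the hotplug paths through init_per_zone_wmark_min() fixes the
problem: that function already recomputes min_free_kbytes and then calls the
per-zone setup helpers, including setup_per_zone_lowmem_reserve(), so both the
online and offline paths pick up the reserve recalculation without calling each
helper individually. A rough sketch of the function as it looked around this
release (the clamp values and exact call order are recalled, not quoted from
the tree):

	int __meminit init_per_zone_wmark_min(void)
	{
		unsigned long lowmem_kbytes;

		/* scale min_free_kbytes with the square root of lowmem */
		lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
		min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
		if (min_free_kbytes < 128)
			min_free_kbytes = 128;
		if (min_free_kbytes > 65536)
			min_free_kbytes = 65536;

		setup_per_zone_wmarks();		/* zone min/low/high watermarks */
		setup_per_zone_lowmem_reserve();	/* the call hotplug was missing */
		setup_per_zone_inactive_ratio();	/* replaces the per-zone call removed below */
		return 0;
	}

A side effect of using the common entry point is that min_free_kbytes itself is
rescaled to the new amount of memory, not just the per-zone values derived from
it.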

include/linux/mm.h

@@ -1381,7 +1381,7 @@ extern void set_dma_reserve(unsigned long new_dma_reserve);
 extern void memmap_init_zone(unsigned long, int, unsigned long,
 				unsigned long, enum memmap_context);
 extern void setup_per_zone_wmarks(void);
-extern void calculate_zone_inactive_ratio(struct zone *zone);
+extern int __meminit init_per_zone_wmark_min(void);
 extern void mem_init(void);
 extern void __init mmap_init(void);
 extern void show_mem(unsigned int flags);

mm/memory_hotplug.c

@@ -459,8 +459,9 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
 		zone_pcp_update(zone);
 
 	mutex_unlock(&zonelists_mutex);
-	setup_per_zone_wmarks();
-	calculate_zone_inactive_ratio(zone);
+
+	init_per_zone_wmark_min();
+
 	if (onlined_pages) {
 		kswapd_run(zone_to_nid(zone));
 		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);

@@ -893,8 +894,8 @@ repeat:
 	zone->zone_pgdat->node_present_pages -= offlined_pages;
 	totalram_pages -= offlined_pages;
 
-	setup_per_zone_wmarks();
-	calculate_zone_inactive_ratio(zone);
+	init_per_zone_wmark_min();
+
 	if (!node_present_pages(node)) {
 		node_clear_state(node, N_HIGH_MEMORY);
 		kswapd_stop(node);

mm/page_alloc.c

@@ -5094,7 +5094,7 @@ void setup_per_zone_wmarks(void)
  *    1TB     101        10GB
  *   10TB     320        32GB
  */
-void __meminit calculate_zone_inactive_ratio(struct zone *zone)
+static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
 {
 	unsigned int gb, ratio;
 

@@ -5140,7 +5140,7 @@ static void __meminit setup_per_zone_inactive_ratio(void)
  *  8192MB:	11584k
  * 16384MB:	16384k
  */
-static int __init init_per_zone_wmark_min(void)
+int __meminit init_per_zone_wmark_min(void)
 {
 	unsigned long lowmem_kbytes;
 
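
The two documentation tables visible in the context lines match simple
square-root formulas: the inactive ratio works out to int_sqrt(10 * zone size
in GB), and min_free_kbytes to int_sqrt(lowmem_kbytes * 16) clamped to the
128k-64M range (recalled from the page_alloc.c of this era, so treat the exact
formulas as an assumption). A quick userspace arithmetic check, not kernel
code:

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		/* inactive_ratio = int_sqrt(10 * size_in_GB) */
		printf("1TB  -> %d\n", (int)sqrt(10.0 * 1024));	/* 101 */
		printf("10TB -> %d\n", (int)sqrt(10.0 * 10240));	/* 320 */

		/* min_free_kbytes = int_sqrt(lowmem_kbytes * 16) */
		printf("8192MB  -> %dk\n", (int)sqrt(8388608.0 * 16));		/* 11585; the table lists 11584k */
		printf("16384MB -> %dk\n", (int)sqrt(16777216.0 * 16));	/* 16384k */
		return 0;
	}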