memcg: revert gfp mask fix

My earlier patch, memcg-fix-gfp_mask-of-callers-of-charge.patch, changed the
gfp_mask of the charge callers to GFP_HIGHUSER_MOVABLE to make explicit what
will happen at memory reclaim.

But in a recent discussion it was NACKed for being ugly.

This patch reverts that change and cleans up the gfp_mask passed by the
callers of charge.  There is no behavior change, but the patch needs review
before hunks deeper in the patch queue are regenerated on top of it.

This patch also adds an explanation of the meaning of the gfp_mask passed to
the charge functions in memcontrol.h.
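
As a minimal sketch of the convention this patch adopts (the caller below is
hypothetical and for illustration only; GFP_RECLAIM_MASK comes from
mm/internal.h): callers with no gfp_mask of their own pass plain GFP_KERNEL,
and callers that receive a gfp_mask strip it down to its reclaim bits before
charging.

#include <linux/gfp.h>
#include <linux/memcontrol.h>
#include "internal.h"	/* GFP_RECLAIM_MASK */

/* Hypothetical caller illustrating the convention; not part of this patch. */
static int example_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask)
{
	/*
	 * memcg does not allocate memory at charge time, it only reclaims,
	 * so placement bits such as __GFP_HIGHMEM carry no information for
	 * it; pass only the reclaim-behaviour bits.
	 */
	return mem_cgroup_cache_charge(page, mm,
				       gfp_mask & GFP_RECLAIM_MASK);
}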

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 2c26fdd70c (parent 887007561a)
Author:    KAMEZAWA Hiroyuki, 2009-01-07 18:08:10 -08:00
Committer: Linus Torvalds
6 changed files with 25 additions and 18 deletions

--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h

@@ -26,6 +26,16 @@ struct page;
 struct mm_struct;

 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
+/*
+ * All "charge" functions with gfp_mask should use GFP_KERNEL or
+ * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg doesn't
+ * allocate memory but reclaims memory from all available zones. So, the
+ * "where I want memory from" bits of gfp_mask have no meaning; any bits of
+ * that field would work, but having a rule is better. A charge function's
+ * gfp_mask should be set to GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK to
+ * avoid ambiguous code.
+ * (Of course, if memcg does memory allocation in the future, GFP_KERNEL is sane.)
+ */

 extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
 					gfp_t gfp_mask);
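
For reference, the sketch below only illustrates the idea behind
GFP_RECLAIM_MASK; the real definition lives in mm/internal.h and its exact
bit list differs, so treat the mask and helper here as assumptions:

#include <linux/gfp.h>

/*
 * Illustrative only -- see mm/internal.h for the real GFP_RECLAIM_MASK.
 * The idea: keep the behaviour bits (may the charge path sleep, do I/O,
 * touch the filesystem?) and drop the placement bits, which memcg,
 * reclaiming from all available zones, cannot honour anyway.
 */
#define EXAMPLE_RECLAIM_BITS	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
				 __GFP_NOWARN | __GFP_NORETRY)

static inline gfp_t memcg_charge_gfp(gfp_t gfp_mask)	/* hypothetical */
{
	return gfp_mask & EXAMPLE_RECLAIM_BITS;
}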

--- a/mm/filemap.c
+++ b/mm/filemap.c

@@ -460,7 +460,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 	VM_BUG_ON(!PageLocked(page));

 	error = mem_cgroup_cache_charge(page, current->mm,
-					gfp_mask & ~__GFP_HIGHMEM);
+					gfp_mask & GFP_RECLAIM_MASK);
 	if (error)
 		goto out;
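
Note that the change above is more than a rename: gfp_mask & ~__GFP_HIGHMEM
blacklisted a single placement bit, while gfp_mask & GFP_RECLAIM_MASK
whitelists the reclaim bits, dropping the other placement modifiers as well.
A hypothetical side-by-side, for illustration only:

#include <linux/gfp.h>
#include "internal.h"	/* GFP_RECLAIM_MASK */

/* Neither helper exists in the kernel; they only contrast the two masks. */
static gfp_t charge_gfp_old(gfp_t gfp_mask)
{
	/* Drops only __GFP_HIGHMEM; other placement bits leak through. */
	return gfp_mask & ~__GFP_HIGHMEM;
}

static gfp_t charge_gfp_new(gfp_t gfp_mask)
{
	/* Keeps only the reclaim-behaviour bits. */
	return gfp_mask & GFP_RECLAIM_MASK;
}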

--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c

@@ -1248,7 +1248,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 	unlock_page_cgroup(pc);

 	if (mem) {
-		ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
+		ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
 		css_put(&mem->css);
 	}
 	*ptr = mem;
@@ -1378,7 +1378,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
 			break;

 		progress = try_to_free_mem_cgroup_pages(memcg,
-							GFP_HIGHUSER_MOVABLE, false);
+							GFP_KERNEL, false);
 		if (!progress) retry_count--;
 	}
 	return ret;
@@ -1418,7 +1418,7 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 			break;

 		oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
-		try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true);
+		try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
 		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
 		if (curusage >= oldusage)
 			retry_count--;
@@ -1464,7 +1464,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
 		}
 		spin_unlock_irqrestore(&zone->lru_lock, flags);

-		ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE);
+		ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
 		if (ret == -ENOMEM)
 			break;
@@ -1550,7 +1550,7 @@ try_to_free:
 			goto out;
 		}
 		progress = try_to_free_mem_cgroup_pages(mem,
-						GFP_HIGHUSER_MOVABLE, false);
+						GFP_KERNEL, false);
 		if (!progress) {
 			nr_retries--;
 			/* maybe some writeback is necessary */

--- a/mm/memory.c
+++ b/mm/memory.c

@@ -2000,7 +2000,7 @@ gotten:
 	cow_user_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);

-	if (mem_cgroup_newpage_charge(new_page, mm, GFP_HIGHUSER_MOVABLE))
+	if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
 		goto oom_free_new;

 	/*
@@ -2431,8 +2431,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);

-	if (mem_cgroup_try_charge_swapin(mm, page,
-					GFP_HIGHUSER_MOVABLE, &ptr) == -ENOMEM) {
+	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
 		ret = VM_FAULT_OOM;
 		unlock_page(page);
 		goto out;
@@ -2524,7 +2523,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto oom;
 	__SetPageUptodate(page);

-	if (mem_cgroup_newpage_charge(page, mm, GFP_HIGHUSER_MOVABLE))
+	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 		goto oom_free_page;

 	entry = mk_pte(page, vma->vm_page_prot);
@@ -2615,8 +2614,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			ret = VM_FAULT_OOM;
 			goto out;
 		}
-		if (mem_cgroup_newpage_charge(page,
-						mm, GFP_HIGHUSER_MOVABLE)) {
+		if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
 			ret = VM_FAULT_OOM;
 			page_cache_release(page);
 			goto out;

--- a/mm/shmem.c
+++ b/mm/shmem.c

@@ -932,8 +932,8 @@ found:
 	 * Charge page using GFP_HIGHUSER_MOVABLE while we can wait.
 	 * charged back to the user(not to caller) when swap account is used.
 	 */
-	error = mem_cgroup_cache_charge_swapin(page,
-			current->mm, GFP_HIGHUSER_MOVABLE, true);
+	error = mem_cgroup_cache_charge_swapin(page, current->mm, GFP_KERNEL,
+					true);
 	if (error)
 		goto out;
 	error = radix_tree_preload(GFP_KERNEL);
@@ -1275,7 +1275,7 @@ repeat:
 			 * charge against this swap cache here.
 			 */
 			if (mem_cgroup_cache_charge_swapin(swappage,
-						current->mm, gfp, false)) {
+					current->mm, gfp & GFP_RECLAIM_MASK, false)) {
 				page_cache_release(swappage);
 				error = -ENOMEM;
 				goto failed;
@@ -1393,7 +1393,7 @@ repeat:
 			/* Precharge page while we can wait, compensate after */
 			error = mem_cgroup_cache_charge(filepage, current->mm,
-							GFP_HIGHUSER_MOVABLE);
+							GFP_KERNEL);
 			if (error) {
 				page_cache_release(filepage);
 				shmem_unacct_blocks(info->flags, 1);

--- a/mm/swapfile.c
+++ b/mm/swapfile.c

@@ -698,8 +698,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
 	pte_t *pte;
 	int ret = 1;

-	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page,
-					GFP_HIGHUSER_MOVABLE, &ptr))
+	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
 		ret = -ENOMEM;

 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);