android_kernel_motorola_sm6225/include/linux/oom.h
Michal Hocko 98748bd722 oom: consider multi-threaded tasks in task_will_free_mem
task_will_free_mem is a misnomer for what is really a more complex
PF_EXITING test used to break out of the oom killer early: such a task
is believed to release its memory shortly, so we do not have to select
an oom victim and perform a disruptive action.

Currently we make sure that the given task is not participating in
core dumping, because a coredumping task might be blocked for a long
time - see commit d003f371b2 ("oom: don't assume that a coredumping
thread will exit soon").

The check can still do better though.  We shouldn't consider the task
unless the whole thread group is going down.  This is rather unlikely
but not impossible: a single exiting thread leaves the whole address
space behind, because the remaining threads still use it.  If we are
really unlucky, that thread might get stuck on the exit path, keep its
TIF_MEMDIE, and so block the oom killer.

Link: http://lkml.kernel.org/r/1460452756-15491-1-git-send-email-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-05-20 17:58:30 -07:00


#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;

/*
 * Details of the page allocation that triggered the oom killer that are used to
 * determine what should be killed.
 */
struct oom_control {
        /* Used to determine cpuset */
        struct zonelist *zonelist;

        /* Used to determine mempolicy */
        nodemask_t *nodemask;

        /* Used to determine cpuset and node locality requirement */
        const gfp_t gfp_mask;

        /*
         * order == -1 means the oom kill is required by sysrq, otherwise only
         * for display purposes.
         */
        const int order;
};
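
/*
 * Usage sketch (not part of this header): allocation-path callers such as
 * __alloc_pages_may_oom() in mm/page_alloc.c build an on-stack oom_control
 * from the failed allocation ('ac' below is their struct alloc_context) and
 * hand it to out_of_memory():
 *
 *      struct oom_control oc = {
 *              .zonelist = ac->zonelist,
 *              .nodemask = ac->nodemask,
 *              .gfp_mask = gfp_mask,
 *              .order = order,
 *      };
 *
 *      if (out_of_memory(&oc))
 *              ...
 */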

/*
 * Types of limitations to the nodes from which allocations may occur
 */
enum oom_constraint {
        CONSTRAINT_NONE,
        CONSTRAINT_CPUSET,
        CONSTRAINT_MEMORY_POLICY,
        CONSTRAINT_MEMCG,
};

enum oom_scan_t {
        OOM_SCAN_OK,            /* scan thread and find its badness */
        OOM_SCAN_CONTINUE,      /* do not consider thread for oom kill */
        OOM_SCAN_ABORT,         /* abort the iteration and return */
        OOM_SCAN_SELECT,        /* always select this thread first */
};
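
/*
 * Usage sketch (modeled on select_bad_process() in mm/oom_kill.c; 'chosen',
 * 'chosen_points' and 'p' are that function's locals): the victim-selection
 * loop dispatches on the value returned for each candidate:
 *
 *      switch (oom_scan_process_thread(oc, p, totalpages)) {
 *      case OOM_SCAN_SELECT:
 *              chosen = p;
 *              chosen_points = ULONG_MAX;
 *              continue;
 *      case OOM_SCAN_CONTINUE:
 *              continue;
 *      case OOM_SCAN_ABORT:
 *              return (struct task_struct *)(-1UL);
 *      case OOM_SCAN_OK:
 *              break;
 *      }
 *      points = oom_badness(p, NULL, oc->nodemask, totalpages);
 */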

/* Thread is the potential origin of an oom condition; kill first on oom */
#define OOM_FLAG_ORIGIN         ((__force oom_flags_t)0x1)

extern struct mutex oom_lock;

static inline void set_current_oom_origin(void)
{
        current->signal->oom_flags |= OOM_FLAG_ORIGIN;
}

static inline void clear_current_oom_origin(void)
{
        current->signal->oom_flags &= ~OOM_FLAG_ORIGIN;
}

static inline bool oom_task_origin(const struct task_struct *p)
{
        return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
}
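
/*
 * Usage sketch (modeled on swapoff in mm/swapfile.c): a task that is about
 * to pull a large amount of memory back into RAM marks itself as the
 * preferred oom victim for the duration of the risky operation:
 *
 *      set_current_oom_origin();
 *      err = try_to_unuse(type, false, 0);
 *      clear_current_oom_origin();
 */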

extern void mark_oom_victim(struct task_struct *tsk);

#ifdef CONFIG_MMU
extern void try_oom_reaper(struct task_struct *tsk);
#else
static inline void try_oom_reaper(struct task_struct *tsk)
{
}
#endif

extern unsigned long oom_badness(struct task_struct *p,
                struct mem_cgroup *memcg, const nodemask_t *nodemask,
                unsigned long totalpages);

extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
                             unsigned int points, unsigned long totalpages,
                             struct mem_cgroup *memcg, const char *message);

extern void check_panic_on_oom(struct oom_control *oc,
                               enum oom_constraint constraint,
                               struct mem_cgroup *memcg);

extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
                struct task_struct *task, unsigned long totalpages);

extern bool out_of_memory(struct oom_control *oc);
extern void exit_oom_victim(struct task_struct *tsk);
extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
extern bool oom_killer_disabled;
extern bool oom_killer_disable(void);
extern void oom_killer_enable(void);
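
/*
 * Usage sketch (modeled on the suspend path in kernel/power/process.c):
 * freezing disables the oom killer so that no task is killed while
 * userspace is frozen, and resume re-enables it:
 *
 *      if (!oom_killer_disable())
 *              return -EBUSY;
 *      ...
 *      oom_killer_enable();
 */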
extern struct task_struct *find_lock_task_mm(struct task_struct *p);

static inline bool task_will_free_mem(struct task_struct *task)
{
        struct signal_struct *sig = task->signal;

        /*
         * A coredumping process may sleep for an extended period in exit_mm(),
         * so the oom killer cannot assume that the process will promptly exit
         * and release memory.
         */
        if (sig->flags & SIGNAL_GROUP_COREDUMP)
                return false;

        if (!(task->flags & PF_EXITING))
                return false;

        /* Make sure that the whole thread group is going down */
        if (!thread_group_empty(task) && !(sig->flags & SIGNAL_GROUP_EXIT))
                return false;

        return true;
}
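
/*
 * Usage sketch (modeled on out_of_memory() in mm/oom_kill.c): when the
 * allocating task itself is already on its way out, it is marked as the
 * victim instead of killing anything else:
 *
 *      if (current->mm && task_will_free_mem(current)) {
 *              mark_oom_victim(current);
 *              try_oom_reaper(current);
 *              return true;
 *      }
 */
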
/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;
#endif /* __INCLUDE_LINUX_OOM_H */