ANDROID: power: wakeup_reason: wake reason enhancements

These changes build upon the existing Android kernel wakeup reason code
to:
* improve the positioning of suspend abort logging calls in the suspend flow
* add logging of abnormal wakeup reasons, such as unexpected HW IRQs and
  IRQs configured as both wake-enabled and no-suspend
* add support for capturing deferred-processing threaded nested IRQs as
  wakeup reasons, rather than their synchronously-processed parent IRQs
  (a hedged usage sketch follows this list)
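
None of the eight files below actually calls the new
log_threaded_irq_wakeup_reason() helper yet; the API is introduced here for
nested irqchip drivers to adopt. As a hedged sketch only - struct
gpio_expander, its fields, and expander_read_pending() are invented names,
not part of this patch - such a driver's threaded demux handler might credit
the specific child IRQ rather than its parent like this:

    #include <linux/bitops.h>
    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/irqdomain.h>
    #include <linux/wakeup_reason.h>

    static irqreturn_t expander_irq_thread(int parent_irq, void *data)
    {
            struct gpio_expander *chip = data;  /* hypothetical driver state */
            unsigned long pending = expander_read_pending(chip);
            int bit;

            for_each_set_bit(bit, &pending, chip->ngpio) {
                    int child_irq = irq_find_mapping(chip->domain, bit);

                    /* Attribute the wake to the nested child, not parent_irq. */
                    log_threaded_irq_wakeup_reason(child_irq, parent_irq);
                    handle_nested_irq(child_irq);
            }

            return IRQ_HANDLED;
    }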

Bug: 150970830
Bug: 140217217

Signed-off-by: Kelly Rossmoyer <krossmo@google.com>
Change-Id: I903b811a0fe11a605a25815c3a341668a23de700
Kelly Rossmoyer, 2020-04-07 12:25:33 -07:00 (committed by Alistair Delva)
parent dba8725f4c
commit e7b509cf04
8 changed files with 368 additions and 133 deletions

drivers/base/power/main.c

@@ -1363,6 +1363,8 @@ Run:
 	error = dpm_run_callback(callback, dev, state, info);
 	if (error) {
 		async_error = error;
+		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
+				dev_name(dev), callback, error);
 		goto Complete;
 	}
@@ -1577,6 +1579,8 @@ Run:
 	error = dpm_run_callback(callback, dev, state, info);
 	if (error) {
 		async_error = error;
+		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
+				dev_name(dev), callback, error);
 		goto Complete;
 	}
 	dpm_propagate_wakeup_to_parent(dev);
@@ -1746,7 +1750,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	pm_callback_t callback = NULL;
 	const char *info = NULL;
 	int error = 0;
-	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 
 	TRACE_DEVICE(dev);
@@ -1770,9 +1773,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	if (pm_wakeup_pending()) {
 		dev->power.direct_complete = false;
-		pm_get_active_wakeup_sources(suspend_abort,
-			MAX_SUSPEND_ABORT_LEN);
-		log_suspend_abort_reason(suspend_abort);
 		async_error = -EBUSY;
 		goto Complete;
 	}
@@ -1847,6 +1847,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 		dpm_propagate_wakeup_to_parent(dev);
 		dpm_clear_superiors_direct_complete(dev);
+	} else {
+		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
+				dev_name(dev), callback, error);
 	}
 
 	device_unlock(dev);
@@ -2061,6 +2064,8 @@ int dpm_prepare(pm_message_t state)
 			printk(KERN_INFO "PM: Device %s not prepared "
 				"for power transition: code %d\n",
 				dev_name(dev), error);
+			log_suspend_abort_reason("Device %s not prepared for power transition: code %d",
+					dev_name(dev), error);
 			dpm_save_failed_dev(dev_name(dev));
 			put_device(dev);
 			break;

drivers/base/power/wakeup.c

@@ -15,7 +15,9 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/pm_wakeirq.h>
 #include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
 #include <linux/wakeup_reason.h>
 #include <trace/events/power.h>
 
 #include "power.h"
@@ -883,6 +885,7 @@ bool pm_wakeup_pending(void)
 {
 	unsigned long flags;
 	bool ret = false;
+	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
 
 	raw_spin_lock_irqsave(&events_lock, flags);
 	if (events_check_enabled) {
@@ -895,8 +898,10 @@ bool pm_wakeup_pending(void)
 	raw_spin_unlock_irqrestore(&events_lock, flags);
 
 	if (ret) {
 		pr_debug("PM: Wakeup pending, aborting suspend\n");
-		pm_print_active_wakeup_sources();
+		pm_get_active_wakeup_sources(suspend_abort,
+			MAX_SUSPEND_ABORT_LEN);
+		log_suspend_abort_reason(suspend_abort);
+		pr_info("PM: %s\n", suspend_abort);
 	}
 
 	return ret || atomic_read(&pm_abort_suspend) > 0;
@@ -924,6 +929,18 @@ void pm_wakeup_clear(bool reset)
 void pm_system_irq_wakeup(unsigned int irq_number)
 {
 	if (pm_wakeup_irq == 0) {
+		struct irq_desc *desc;
+		const char *name = "null";
+
+		desc = irq_to_desc(irq_number);
+		if (desc == NULL)
+			name = "stray irq";
+		else if (desc->action && desc->action->name)
+			name = desc->action->name;
+
+		log_irq_wakeup_reason(irq_number);
+		pr_warn("%s: %d triggered %s\n", __func__, irq_number, name);
+
 		pm_wakeup_irq = irq_number;
 		pm_system_wakeup();
 	}

drivers/irqchip/irq-gic-v3.c

@@ -28,6 +28,8 @@
 #include <linux/of_irq.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
+#include <linux/wakeup_reason.h>
 
 #include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic-common.h>
@@ -362,6 +364,8 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 		err = handle_domain_irq(gic_data.domain, irqnr, regs);
 		if (err) {
 			WARN_ONCE(true, "Unexpected interrupt received!\n");
+			log_abnormal_wakeup_reason(
+					"unexpected HW IRQ %u", irqnr);
 			if (static_branch_likely(&supports_deactivate_key)) {
 				if (irqnr < 8192)
 					gic_write_dir(irqnr);

include/linux/wakeup_reason.h

@@ -20,11 +20,18 @@
 #define MAX_SUSPEND_ABORT_LEN 256
 
-void log_wakeup_reason(int irq);
 #ifdef CONFIG_SUSPEND
+void log_irq_wakeup_reason(int irq);
+void log_threaded_irq_wakeup_reason(int irq, int parent_irq);
 void log_suspend_abort_reason(const char *fmt, ...);
+void log_abnormal_wakeup_reason(const char *fmt, ...);
+void clear_wakeup_reasons(void);
 #else
+static inline void log_irq_wakeup_reason(int irq) { }
+static inline void log_threaded_irq_wakeup_reason(int irq, int parent_irq) { }
 static inline void log_suspend_abort_reason(const char *fmt, ...) { }
+static inline void log_abnormal_wakeup_reason(const char *fmt, ...) { }
+static inline void clear_wakeup_reasons(void) { }
 #endif
 
 #endif /* _LINUX_WAKEUP_REASON_H */

kernel/irq/chip.c

@@ -14,6 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/irqdomain.h>
+#include <linux/wakeup_reason.h>
 
 #include <trace/events/irq.h>
@@ -507,8 +508,22 @@ static bool irq_may_run(struct irq_desc *desc)
 	 * If the interrupt is not in progress and is not an armed
 	 * wakeup interrupt, proceed.
 	 */
-	if (!irqd_has_set(&desc->irq_data, mask))
+	if (!irqd_has_set(&desc->irq_data, mask)) {
+#ifdef CONFIG_PM_SLEEP
+		if (unlikely(desc->no_suspend_depth &&
+			     irqd_is_wakeup_set(&desc->irq_data))) {
+			unsigned int irq = irq_desc_get_irq(desc);
+			const char *name = "(unnamed)";
+
+			if (desc->action && desc->action->name)
+				name = desc->action->name;
+
+			log_abnormal_wakeup_reason("misconfigured IRQ %u %s",
+						   irq, name);
+		}
+#endif
 		return true;
+	}
 
 	/*
	 * If the interrupt is an armed wakeup source, mark it pending

kernel/power/process.c

@@ -22,7 +22,6 @@
 #include <linux/kmod.h>
 #include <trace/events/power.h>
 #include <linux/cpuset.h>
-#include <linux/wakeup_reason.h>
 
 /*
  * Timeout for stopping processes
@@ -39,9 +38,6 @@ static int try_to_freeze_tasks(bool user_only)
 	unsigned int elapsed_msecs;
 	bool wakeup = false;
 	int sleep_usecs = USEC_PER_MSEC;
-#ifdef CONFIG_PM_SLEEP
-	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
-#endif
 
 	start = ktime_get_boottime();
@@ -71,11 +67,6 @@ static int try_to_freeze_tasks(bool user_only)
 			break;
 
 		if (pm_wakeup_pending()) {
-#ifdef CONFIG_PM_SLEEP
-			pm_get_active_wakeup_sources(suspend_abort,
-				MAX_SUSPEND_ABORT_LEN);
-			log_suspend_abort_reason(suspend_abort);
-#endif
 			wakeup = true;
 			break;
 		}

kernel/power/suspend.c

@@ -155,6 +155,7 @@ static void s2idle_loop(void)
 			break;
 
 		pm_wakeup_clear(false);
+		clear_wakeup_reasons();
 	}
 
 	pm_pr_dbg("resume from suspend-to-idle\n");
@@ -368,6 +369,7 @@ static int suspend_prepare(suspend_state_t state)
 	if (!error)
 		return 0;
 
+	log_suspend_abort_reason("One or more tasks refusing to freeze");
 	suspend_stats.failed_freeze++;
 	dpm_save_failed_step(SUSPEND_FREEZE);
  Finish:
@@ -397,7 +399,6 @@ void __weak arch_suspend_enable_irqs(void)
  */
 static int suspend_enter(suspend_state_t state, bool *wakeup)
 {
-	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
 	int error, last_dev;
 
 	error = platform_suspend_prepare(state);
@@ -409,8 +410,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
 		last_dev %= REC_FAILED_NUM;
 		pr_err("late suspend of devices failed\n");
-		log_suspend_abort_reason("%s device failed to power down",
-			suspend_stats.failed_devs[last_dev]);
+		log_suspend_abort_reason("late suspend of %s device failed",
+			suspend_stats.failed_devs[last_dev]);
 		goto Platform_finish;
 	}
 
 	error = platform_suspend_prepare_late(state);
@@ -428,7 +429,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 		last_dev %= REC_FAILED_NUM;
 		pr_err("noirq suspend of devices failed\n");
 		log_suspend_abort_reason("noirq suspend of %s device failed",
-			suspend_stats.failed_devs[last_dev]);
+				suspend_stats.failed_devs[last_dev]);
 		goto Platform_early_resume;
 	}
 
 	error = platform_suspend_prepare_noirq(state);
@@ -459,9 +460,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 			trace_suspend_resume(TPS("machine_suspend"),
 				state, false);
 		} else if (*wakeup) {
-			pm_get_active_wakeup_sources(suspend_abort,
-				MAX_SUSPEND_ABORT_LEN);
-			log_suspend_abort_reason(suspend_abort);
 			error = -EBUSY;
 		}
 		syscore_resume();
@@ -496,7 +494,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
  */
 int suspend_devices_and_enter(suspend_state_t state)
 {
-	int error, last_dev;
+	int error;
 	bool wakeup = false;
 
 	if (!sleep_state_supported(state))
@@ -512,11 +510,9 @@ int suspend_devices_and_enter(suspend_state_t state)
 	suspend_test_start();
 	error = dpm_suspend_start(PMSG_SUSPEND);
 	if (error) {
-		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
-		last_dev %= REC_FAILED_NUM;
 		pr_err("Some devices failed to suspend, or early wake event detected\n");
-		log_suspend_abort_reason("%s device failed to suspend, or early wake event detected",
-			suspend_stats.failed_devs[last_dev]);
+		log_suspend_abort_reason(
+				"Some devices failed to suspend, or early wake event detected");
 		goto Recover_platform;
 	}
 	suspend_test_finish("suspend devices");

kernel/power/wakeup_reason.c

@@ -4,7 +4,7 @@
  * Logs the reasons which caused the kernel to resume from
  * the suspend mode.
  *
- * Copyright (C) 2014 Google, Inc.
+ * Copyright (C) 2020 Google, Inc.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
  * may be copied, distributed, and modified under those terms.
@@ -26,71 +26,312 @@
 #include <linux/spinlock.h>
 #include <linux/notifier.h>
 #include <linux/suspend.h>
+#include <linux/slab.h>
+
+/*
+ * struct wakeup_irq_node - stores data and relationships for IRQs logged as
+ * either base or nested wakeup reasons during suspend/resume flow.
+ * @siblings - for membership on leaf or parent IRQ lists
+ * @irq - the IRQ number
+ * @irq_name - the name associated with the IRQ, or a default if none
+ */
+struct wakeup_irq_node {
+	struct list_head siblings;
+	int irq;
+	const char *irq_name;
+};
 
-#define MAX_WAKEUP_REASON_IRQS 32
-static int irq_list[MAX_WAKEUP_REASON_IRQS];
-static int irqcount;
+static DEFINE_SPINLOCK(wakeup_reason_lock);
+
+static LIST_HEAD(leaf_irqs);   /* kept in ascending IRQ sorted order */
+static LIST_HEAD(parent_irqs); /* unordered */
+
+static struct kmem_cache *wakeup_irq_nodes_cache;
+static const char *default_irq_name = "(unnamed)";
+static struct kobject *kobj;
+static bool capture_reasons;
 static bool suspend_abort;
-static char abort_reason[MAX_SUSPEND_ABORT_LEN];
-static struct kobject *wakeup_reason;
-static spinlock_t resume_reason_lock;
+static bool abnormal_wake;
+static char non_irq_wake_reason[MAX_SUSPEND_ABORT_LEN];
 
 static ktime_t last_monotime; /* monotonic time before last suspend */
 static ktime_t curr_monotime; /* monotonic time after last suspend */
 static ktime_t last_stime; /* monotonic boottime offset before last suspend */
 static ktime_t curr_stime; /* monotonic boottime offset after last suspend */
 
-static ssize_t last_resume_reason_show(struct kobject *kobj, struct kobj_attribute *attr,
-		char *buf)
+static void init_node(struct wakeup_irq_node *p, int irq)
 {
-	int irq_no, buf_offset = 0;
 	struct irq_desc *desc;
-	unsigned long flags;
-	spin_lock_irqsave(&resume_reason_lock, flags);
-	if (suspend_abort) {
-		buf_offset = sprintf(buf, "Abort: %s", abort_reason);
-	} else {
-		for (irq_no = 0; irq_no < irqcount; irq_no++) {
-			desc = irq_to_desc(irq_list[irq_no]);
-			if (desc && desc->action && desc->action->name)
-				buf_offset += sprintf(buf + buf_offset, "%d %s\n",
-						irq_list[irq_no], desc->action->name);
-			else
-				buf_offset += sprintf(buf + buf_offset, "%d\n",
-						irq_list[irq_no]);
-		}
-	}
-	spin_unlock_irqrestore(&resume_reason_lock, flags);
-	return buf_offset;
+
+	INIT_LIST_HEAD(&p->siblings);
+
+	p->irq = irq;
+	desc = irq_to_desc(irq);
+	if (desc && desc->action && desc->action->name)
+		p->irq_name = desc->action->name;
+	else
+		p->irq_name = default_irq_name;
 }
+
+static struct wakeup_irq_node *create_node(int irq)
+{
+	struct wakeup_irq_node *result;
+
+	result = kmem_cache_alloc(wakeup_irq_nodes_cache, GFP_ATOMIC);
+	if (unlikely(!result))
+		pr_warn("Failed to log wakeup IRQ %d\n", irq);
+	else
+		init_node(result, irq);
+
+	return result;
+}
+
+static void delete_list(struct list_head *head)
+{
+	struct wakeup_irq_node *n;
+
+	while (!list_empty(head)) {
+		n = list_first_entry(head, struct wakeup_irq_node, siblings);
+		list_del(&n->siblings);
+		kmem_cache_free(wakeup_irq_nodes_cache, n);
+	}
+}
+
+static bool add_sibling_node_sorted(struct list_head *head, int irq)
+{
+	struct wakeup_irq_node *n;
+	struct list_head *predecessor = head;
+
+	if (unlikely(WARN_ON(!head)))
+		return NULL;
+
+	if (!list_empty(head))
+		list_for_each_entry(n, head, siblings) {
+			if (n->irq < irq)
+				predecessor = &n->siblings;
+			else if (n->irq == irq)
+				return true;
+			else
+				break;
+		}
+
+	n = create_node(irq);
+	if (n) {
+		list_add(&n->siblings, predecessor);
+		return true;
+	}
+
+	return false;
+}
+
+static struct wakeup_irq_node *find_node_in_list(struct list_head *head,
+						 int irq)
+{
+	struct wakeup_irq_node *n;
+
+	if (unlikely(WARN_ON(!head)))
+		return NULL;
+
+	list_for_each_entry(n, head, siblings)
+		if (n->irq == irq)
+			return n;
+
+	return NULL;
+}
+
+void log_irq_wakeup_reason(int irq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+	if (!capture_reasons) {
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	if (find_node_in_list(&parent_irqs, irq) == NULL)
+		add_sibling_node_sorted(&leaf_irqs, irq);
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+void log_threaded_irq_wakeup_reason(int irq, int parent_irq)
+{
+	struct wakeup_irq_node *parent;
+	unsigned long flags;
+
+	/*
+	 * Intentionally unsynchronized. Calls that come in after we have
+	 * resumed should have a fast exit path since there's no work to be
+	 * done, and any coherence issue that could cause a wrong value here
+	 * is both highly improbable - given the set/clear timing - and very
+	 * low impact (parent IRQ gets logged instead of the specific child).
+	 */
+	if (!capture_reasons)
+		return;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	if (!capture_reasons || (find_node_in_list(&leaf_irqs, irq) != NULL)) {
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	parent = find_node_in_list(&parent_irqs, parent_irq);
+	if (parent != NULL)
+		add_sibling_node_sorted(&leaf_irqs, irq);
+	else {
+		parent = find_node_in_list(&leaf_irqs, parent_irq);
+		if (parent != NULL) {
+			list_del_init(&parent->siblings);
+			list_add_tail(&parent->siblings, &parent_irqs);
+			add_sibling_node_sorted(&leaf_irqs, irq);
+		}
+	}
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+void __log_abort_or_abnormal_wake(bool abort, const char *fmt, va_list args)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	/* Suspend abort or abnormal wake reason has already been logged. */
+	if (suspend_abort || abnormal_wake) {
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	suspend_abort = abort;
+	abnormal_wake = !abort;
+	vsnprintf(non_irq_wake_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+void log_suspend_abort_reason(const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	__log_abort_or_abnormal_wake(true, fmt, args);
+	va_end(args);
+}
+
+void log_abnormal_wakeup_reason(const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	__log_abort_or_abnormal_wake(false, fmt, args);
+	va_end(args);
+}
+
+void clear_wakeup_reasons(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	delete_list(&leaf_irqs);
+	delete_list(&parent_irqs);
+	suspend_abort = false;
+	abnormal_wake = false;
+	capture_reasons = true;
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+static void print_wakeup_sources(void)
+{
+	struct wakeup_irq_node *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	capture_reasons = false;
+
+	if (suspend_abort) {
+		pr_info("Abort: %s\n", non_irq_wake_reason);
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	if (!list_empty(&leaf_irqs))
+		list_for_each_entry(n, &leaf_irqs, siblings)
+			pr_info("Resume caused by IRQ %d, %s\n", n->irq,
+				n->irq_name);
+	else if (abnormal_wake)
+		pr_info("Resume caused by %s\n", non_irq_wake_reason);
+	else
+		pr_info("Resume cause unknown\n");
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+static ssize_t last_resume_reason_show(struct kobject *kobj,
+				       struct kobj_attribute *attr, char *buf)
+{
+	ssize_t buf_offset = 0;
+	struct wakeup_irq_node *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	if (suspend_abort) {
+		buf_offset = scnprintf(buf, PAGE_SIZE, "Abort: %s",
+				       non_irq_wake_reason);
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return buf_offset;
+	}
+
+	if (!list_empty(&leaf_irqs))
+		list_for_each_entry(n, &leaf_irqs, siblings)
+			buf_offset += scnprintf(buf + buf_offset,
+						PAGE_SIZE - buf_offset,
+						"%d %s\n", n->irq, n->irq_name);
+	else if (abnormal_wake)
+		buf_offset = scnprintf(buf, PAGE_SIZE, "-1 %s",
+				       non_irq_wake_reason);
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+
+	return buf_offset;
+}
 
 static ssize_t last_suspend_time_show(struct kobject *kobj,
 			struct kobj_attribute *attr, char *buf)
 {
-	struct timespec sleep_time;
-	struct timespec total_time;
-	struct timespec suspend_resume_time;
+	struct timespec64 sleep_time;
+	struct timespec64 total_time;
+	struct timespec64 suspend_resume_time;
 
 	/*
	 * total_time is calculated from monotonic bootoffsets because
	 * unlike CLOCK_MONOTONIC it includes the time spent in suspend state.
	 */
-	total_time = ktime_to_timespec(ktime_sub(curr_stime, last_stime));
+	total_time = ktime_to_timespec64(ktime_sub(curr_stime, last_stime));
 
 	/*
	 * suspend_resume_time is calculated as monotonic (CLOCK_MONOTONIC)
	 * time interval before entering suspend and post suspend.
	 */
-	suspend_resume_time = ktime_to_timespec(ktime_sub(curr_monotime, last_monotime));
+	suspend_resume_time =
+		ktime_to_timespec64(ktime_sub(curr_monotime, last_monotime));
 
 	/* sleep_time = total_time - suspend_resume_time */
-	sleep_time = timespec_sub(total_time, suspend_resume_time);
+	sleep_time = timespec64_sub(total_time, suspend_resume_time);
 
 	/* Export suspend_resume_time and sleep_time in pair here. */
-	return sprintf(buf, "%lu.%09lu %lu.%09lu\n",
-		suspend_resume_time.tv_sec, suspend_resume_time.tv_nsec,
-		sleep_time.tv_sec, sleep_time.tv_nsec);
+	return sprintf(buf, "%llu.%09lu %llu.%09lu\n",
+		       suspend_resume_time.tv_sec, suspend_resume_time.tv_nsec,
		       sleep_time.tv_sec, sleep_time.tv_nsec);
 }
 
 static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
@@ -105,74 +346,24 @@ static struct attribute_group attr_group = {
 	.attrs = attrs,
 };
 
-/*
- * logs all the wake up reasons to the kernel
- * stores the irqs to expose them to the userspace via sysfs
- */
-void log_wakeup_reason(int irq)
-{
-	struct irq_desc *desc;
-	unsigned long flags;
-
-	desc = irq_to_desc(irq);
-	if (desc && desc->action && desc->action->name)
-		printk(KERN_INFO "Resume caused by IRQ %d, %s\n", irq,
-				desc->action->name);
-	else
-		printk(KERN_INFO "Resume caused by IRQ %d\n", irq);
-
-	spin_lock_irqsave(&resume_reason_lock, flags);
-	if (irqcount == MAX_WAKEUP_REASON_IRQS) {
-		spin_unlock_irqrestore(&resume_reason_lock, flags);
-		printk(KERN_WARNING "Resume caused by more than %d IRQs\n",
-				MAX_WAKEUP_REASON_IRQS);
-		return;
-	}
-
-	irq_list[irqcount++] = irq;
-	spin_unlock_irqrestore(&resume_reason_lock, flags);
-}
-
-void log_suspend_abort_reason(const char *fmt, ...)
-{
-	unsigned long flags;
-	va_list args;
-
-	spin_lock_irqsave(&resume_reason_lock, flags);
-
-	//Suspend abort reason has already been logged.
-	if (suspend_abort) {
-		spin_unlock_irqrestore(&resume_reason_lock, flags);
-		return;
-	}
-
-	suspend_abort = true;
-	va_start(args, fmt);
-	vsnprintf(abort_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
-	va_end(args);
-	spin_unlock_irqrestore(&resume_reason_lock, flags);
-}
-
 /* Detects a suspend and clears all the previous wake up reasons*/
 static int wakeup_reason_pm_event(struct notifier_block *notifier,
 		unsigned long pm_event, void *unused)
 {
-	unsigned long flags;
-
 	switch (pm_event) {
 	case PM_SUSPEND_PREPARE:
-		spin_lock_irqsave(&resume_reason_lock, flags);
-		irqcount = 0;
-		suspend_abort = false;
-		spin_unlock_irqrestore(&resume_reason_lock, flags);
 		/* monotonic time since boot */
 		last_monotime = ktime_get();
 		/* monotonic time since boot including the time spent in suspend */
 		last_stime = ktime_get_boottime();
+		clear_wakeup_reasons();
 		break;
 	case PM_POST_SUSPEND:
 		/* monotonic time since boot */
 		curr_monotime = ktime_get();
 		/* monotonic time since boot including the time spent in suspend */
 		curr_stime = ktime_get_boottime();
+		print_wakeup_sources();
 		break;
 	default:
 		break;
@@ -184,31 +375,40 @@ static struct notifier_block wakeup_reason_pm_notifier_block = {
 	.notifier_call = wakeup_reason_pm_event,
 };
 
 /* Initializes the sysfs parameter
  * registers the pm_event notifier
  */
 int __init wakeup_reason_init(void)
 {
-	int retval;
-
-	spin_lock_init(&resume_reason_lock);
-	retval = register_pm_notifier(&wakeup_reason_pm_notifier_block);
-	if (retval)
-		printk(KERN_WARNING "[%s] failed to register PM notifier %d\n",
-				__func__, retval);
+	if (register_pm_notifier(&wakeup_reason_pm_notifier_block)) {
+		pr_warn("[%s] failed to register PM notifier\n", __func__);
+		goto fail;
+	}
 
-	wakeup_reason = kobject_create_and_add("wakeup_reasons", kernel_kobj);
-	if (!wakeup_reason) {
-		printk(KERN_WARNING "[%s] failed to create a sysfs kobject\n",
-				__func__);
-		return 1;
+	kobj = kobject_create_and_add("wakeup_reasons", kernel_kobj);
+	if (!kobj) {
+		pr_warn("[%s] failed to create a sysfs kobject\n", __func__);
+		goto fail_unregister_pm_notifier;
 	}
 
-	retval = sysfs_create_group(wakeup_reason, &attr_group);
-	if (retval) {
-		kobject_put(wakeup_reason);
-		printk(KERN_WARNING "[%s] failed to create a sysfs group %d\n",
-				__func__, retval);
+	if (sysfs_create_group(kobj, &attr_group)) {
+		pr_warn("[%s] failed to create a sysfs group\n", __func__);
+		goto fail_kobject_put;
 	}
+
+	wakeup_irq_nodes_cache =
+		kmem_cache_create("wakeup_irq_node_cache",
+				  sizeof(struct wakeup_irq_node), 0, 0, NULL);
+	if (!wakeup_irq_nodes_cache)
+		goto fail_remove_group;
 
 	return 0;
+
+fail_remove_group:
+	sysfs_remove_group(kobj, &attr_group);
+fail_kobject_put:
+	kobject_put(kobj);
+fail_unregister_pm_notifier:
+	unregister_pm_notifier(&wakeup_reason_pm_notifier_block);
+fail:
+	return 1;
 }
 
 late_initcall(wakeup_reason_init);
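
For reference, the reasons collected above surface to userspace via
/sys/kernel/wakeup_reasons/ (the kobject created against kernel_kobj in
wakeup_reason_init() above). A minimal userspace sketch - not part of this
patch - that dumps both attributes:

    #include <stdio.h>

    /* Print every line of a sysfs attribute file. */
    static void dump(const char *path)
    {
            char line[256];
            FILE *f = fopen(path, "r");

            if (!f)
                    return;
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
    }

    int main(void)
    {
            /* "Abort: <reason>" marks a failed suspend; "-1 <reason>" an
             * abnormal wake; otherwise one "<irq> <name>" line per leaf IRQ. */
            dump("/sys/kernel/wakeup_reasons/last_resume_reason");
            /* Two "<sec>.<nsec>" values: suspend/resume overhead, then time asleep. */
            dump("/sys/kernel/wakeup_reasons/last_suspend_time");
            return 0;
    }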