android_kernel_motorola_sm6225/arch/x86/kernel/reboot.c


#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/efi.h>
#include <acpi/reboot.h>
#include <asm/io.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/hpet.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/reboot_fixups.h>
#include <asm/reboot.h>
#include <asm/pci_x86.h>
#include <asm/virtext.h>
#include <asm/cpu.h>
#ifdef CONFIG_X86_32
# include <linux/dmi.h>
# include <linux/ctype.h>
# include <linux/mc146818rtc.h>
#else
# include <asm/iommu.h>
#endif
/*
* Power off function, if any
*/
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
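/* An all-zero descriptor table pointer: loading it leaves the CPU with no
usable IDT, so the next interrupt or exception (the int3 in BOOT_TRIPLE
below) escalates to a triple fault and resets the machine. */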
static const struct desc_ptr no_idt = {};
static int reboot_mode;
enum reboot_type reboot_type = BOOT_KBD;
int reboot_force;
#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
static int reboot_cpu = -1;
#endif
/* This is set if we need to go through the 'emergency' path.
* When machine_emergency_restart() is called, we may be in
* an inconsistent state and won't be able to do a clean cleanup.
*/
static int reboot_emergency;
/* This is set by the PCI code if either type 1 or type 2 PCI is detected */
bool port_cf9_safe = false;
/* reboot=b[ios] | s[mp] | t[riple] | k[bd] | a[cpi] | e[fi] | p[ci] [, [w]arm | [c]old]
warm Don't set the cold reboot flag
cold Set the cold reboot flag
bios Reboot by jumping through the BIOS (only for X86_32)
smp Reboot by executing reset on BSP or other CPU (only for X86_32)
triple Force a triple fault (init)
kbd Use the keyboard controller. cold reset (default)
acpi Use the RESET_REG in the FADT
efi Use efi reset_system runtime service
pci Use the so-called "PCI reset register", CF9
force Avoid anything that could hang.
*/
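/* For example, "reboot=acpi,warm" selects the ACPI RESET_REG method and a
warm reboot, while "reboot=kbd,force" uses the keyboard controller and skips
the normal shutdown path (see native_machine_restart below). */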
static int __init reboot_setup(char *str)
{
for (;;) {
switch (*str) {
case 'w':
reboot_mode = 0x1234;
break;
case 'c':
reboot_mode = 0;
break;
#ifdef CONFIG_X86_32
#ifdef CONFIG_SMP
case 's':
if (isdigit(*(str+1))) {
reboot_cpu = (int) (*(str+1) - '0');
if (isdigit(*(str+2)))
reboot_cpu = reboot_cpu*10 + (int)(*(str+2) - '0');
}
/* we will leave sorting out the final value
when we are ready to reboot, since we might not
have set up boot_cpu_id or smp_num_cpu */
break;
#endif /* CONFIG_SMP */
case 'b':
#endif
case 'a':
case 'k':
case 't':
case 'e':
case 'p':
reboot_type = *str;
break;
case 'f':
reboot_force = 1;
break;
}
str = strchr(str, ',');
if (str)
str++;
else
break;
}
return 1;
}
__setup("reboot=", reboot_setup);
#ifdef CONFIG_X86_32
/*
* Reboot options and system auto-detection code provided by
* Dell Inc. so their systems "just work". :-)
*/
/*
* Some machines require the "reboot=b" commandline option,
* this quirk makes that automatic.
*/
static int __init set_bios_reboot(const struct dmi_system_id *d)
{
if (reboot_type != BOOT_BIOS) {
reboot_type = BOOT_BIOS;
printk(KERN_INFO "%s series board detected. Selecting BIOS-method for reboots.\n", d->ident);
}
return 0;
}
static struct dmi_system_id __initdata reboot_dmi_table[] = {
{ /* Handle problems with rebooting on Dell E520's */
.callback = set_bios_reboot,
.ident = "Dell E520",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Dell DM061"),
},
},
{ /* Handle problems with rebooting on Dell 1300's */
.callback = set_bios_reboot,
.ident = "Dell PowerEdge 1300",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1300/"),
},
},
{ /* Handle problems with rebooting on Dell 300's */
.callback = set_bios_reboot,
.ident = "Dell PowerEdge 300",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 300/"),
},
},
{ /* Handle problems with rebooting on Dell Optiplex 745's SFF*/
.callback = set_bios_reboot,
.ident = "Dell OptiPlex 745",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
},
},
{ /* Handle problems with rebooting on Dell Optiplex 745's DFF*/
.callback = set_bios_reboot,
.ident = "Dell OptiPlex 745",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
DMI_MATCH(DMI_BOARD_NAME, "0MM599"),
},
},
{ /* Handle problems with rebooting on Dell Optiplex 745 with 0KW626 */
.callback = set_bios_reboot,
.ident = "Dell OptiPlex 745",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 745"),
DMI_MATCH(DMI_BOARD_NAME, "0KW626"),
},
},
{ /* Handle problems with rebooting on Dell Optiplex 330 with 0KP561 */
.callback = set_bios_reboot,
.ident = "Dell OptiPlex 330",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 330"),
DMI_MATCH(DMI_BOARD_NAME, "0KP561"),
},
},
{ /* Handle problems with rebooting on Dell Optiplex 360 with 0T656F */
.callback = set_bios_reboot,
.ident = "Dell OptiPlex 360",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 360"),
DMI_MATCH(DMI_BOARD_NAME, "0T656F"),
},
},
{ /* Handle problems with rebooting on Dell 2400's */
.callback = set_bios_reboot,
.ident = "Dell PowerEdge 2400",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2400"),
},
},
{ /* Handle problems with rebooting on Dell T5400's */
.callback = set_bios_reboot,
.ident = "Dell Precision T5400",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision WorkStation T5400"),
},
},
{ /* Handle problems with rebooting on HP laptops */
.callback = set_bios_reboot,
.ident = "HP Compaq Laptop",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"),
},
},
{ /* Handle problems with rebooting on Dell XPS710 */
.callback = set_bios_reboot,
.ident = "Dell XPS710",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
},
},
{ /* Handle problems with rebooting on Dell DXP061 */
.callback = set_bios_reboot,
.ident = "Dell DXP061",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Dell DXP061"),
},
},
{ /* Handle problems with rebooting on Sony VGN-Z540N */
.callback = set_bios_reboot,
.ident = "Sony VGN-Z540N",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
},
},
{ }
};
static int __init reboot_init(void)
{
dmi_check_system(reboot_dmi_table);
return 0;
}
core_initcall(reboot_init);
/* The following code and data reboots the machine by switching to real
mode and jumping to the BIOS reset entry point, as if the CPU has
really been reset. The previous version asked the keyboard
controller to pulse the CPU reset line, which is more thorough, but
doesn't work with at least one type of 486 motherboard. It is easy
to stop this code working; hence the copious comments. */
static const unsigned long long
real_mode_gdt_entries [3] =
{
0x0000000000000000ULL, /* Null descriptor */
0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
};
static const struct desc_ptr
real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, (long)real_mode_gdt_entries },
real_mode_idt = { 0x3ff, 0 };
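/* real_mode_idt points at the real-mode interrupt vector table at physical
address 0: 256 vectors of 4 bytes each, hence the 0x3ff limit. */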
/* This is 16-bit protected mode code to disable paging and the cache,
switch to real mode and jump to the BIOS reset code.
The instruction that switches to real mode by writing to CR0 must be
followed immediately by a far jump instruction, which sets CS to a
valid value for real mode, and flushes the prefetch queue to avoid
running instructions that have already been decoded in protected
mode.
Clears all the flags except ET, especially PG (paging), PE
(protected-mode enable) and TS (task switch for coprocessor state
save). Flushes the TLB after paging has been disabled. Sets CD and
NW, to disable the cache on a 486, and invalidates the cache. This
is more like the state of a 486 after reset. I don't know if
something else should be done for other chips.
More could be done here to set up the registers as if a CPU reset had
occurred; hopefully real BIOSs don't assume much. */
static const unsigned char real_mode_switch [] =
{
0x66, 0x0f, 0x20, 0xc0, /* movl %cr0,%eax */
0x66, 0x83, 0xe0, 0x11, /* andl $0x00000011,%eax */
0x66, 0x0d, 0x00, 0x00, 0x00, 0x60, /* orl $0x60000000,%eax */
0x66, 0x0f, 0x22, 0xc0, /* movl %eax,%cr0 */
0x66, 0x0f, 0x22, 0xd8, /* movl %eax,%cr3 */
0x66, 0x0f, 0x20, 0xc3, /* movl %cr0,%ebx */
0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60, /* andl $0x60000000,%ebx */
0x74, 0x02, /* jz f */
0x0f, 0x09, /* wbinvd */
0x24, 0x10, /* f: andb $0x10,al */
0x66, 0x0f, 0x22, 0xc0 /* movl %eax,%cr0 */
};
static const unsigned char jump_to_bios [] =
{
0xea, 0x00, 0x00, 0xff, 0xff /* ljmp $0xffff,$0x0000 */
};
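/* In real mode the ljmp above resolves to physical address 0xffff0
(0xffff << 4), the legacy BIOS reset entry point. */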
/*
* Switch to real mode and then execute the code
* specified by the code and length parameters.
* We assume that length will always be less than 100!
*/
void machine_real_restart(const unsigned char *code, int length)
{
local_irq_disable();
/* Write zero to CMOS register number 0x0f, which the BIOS POST
routine will recognize as telling it to do a proper reboot. (Well
that's what this book in front of me says -- it may only apply to
the Phoenix BIOS though, it's not clear). At the same time,
disable NMIs by setting the top bit in the CMOS address register,
as we're about to do peculiar things to the CPU. I'm not sure if
`outb_p' is needed instead of just `outb'. Use it to be on the
safe side. (Yes, CMOS_WRITE does outb_p's. - Paul G.)
*/
spin_lock(&rtc_lock);
CMOS_WRITE(0x00, 0x8f);
spin_unlock(&rtc_lock);
/* Remap the kernel at virtual address zero, as well as offset zero
from the kernel segment. This assumes the kernel segment starts at
virtual address PAGE_OFFSET. */
memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
/*
* Use `swapper_pg_dir' as our page directory.
*/
load_cr3(swapper_pg_dir);
/* Write 0x1234 to absolute memory location 0x472. The BIOS reads
this on booting to tell it to "Bypass memory test (also warm
boot)". This seems like a fairly standard thing that gets set by
REBOOT.COM programs, and the previous reset routine did this
too. */
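/* reboot_mode is 0x1234 for a warm boot or 0 for a cold boot, as parsed
from the "reboot=" option above. */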
*((unsigned short *)0x472) = reboot_mode;
/* For the switch to real mode, copy some code to low memory. It has
to be in the first 64k because it is running in 16-bit mode, and it
has to have the same physical and virtual address, because it turns
off paging. Copy it near the end of the first page, out of the way
of BIOS variables. */
memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
real_mode_switch, sizeof (real_mode_switch));
memcpy((void *)(0x1000 - 100), code, length);
/* Set up the IDT for real mode. */
load_idt(&real_mode_idt);
/* Set up a GDT from which we can load segment descriptors for real
mode. The GDT is not used in real mode; it is just needed here to
prepare the descriptors. */
load_gdt(&real_mode_gdt);
/* Load the data segment registers, and thus the descriptors ready for
real mode. The base address of each segment is 0x100, 16 times the
selector value being loaded here. This is so that the segment
registers don't have to be reloaded after switching to real mode:
the values are consistent for real mode operation already. */
__asm__ __volatile__ ("movl $0x0010,%%eax\n"
"\tmovl %%eax,%%ds\n"
"\tmovl %%eax,%%es\n"
"\tmovl %%eax,%%fs\n"
"\tmovl %%eax,%%gs\n"
"\tmovl %%eax,%%ss" : : : "eax");
/* Jump to the 16-bit code that we copied earlier. It disables paging
and the cache, switches to real mode, and jumps to the BIOS reset
entry point. */
__asm__ __volatile__ ("ljmp $0x0008,%0"
:
: "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(machine_real_restart);
#endif
#endif /* CONFIG_X86_32 */
static inline void kb_wait(void)
{
int i;
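/* Poll the i8042 status port (0x64): while bit 1 is set, the controller's
input buffer is still full and it is not ready for another command. */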
for (i = 0; i < 0x10000; i++) {
if ((inb(0x64) & 0x02) == 0)
break;
udelay(2);
}
}
static void vmxoff_nmi(int cpu, struct die_args *args)
{
cpu_emergency_vmxoff();
}
/* Use NMIs as IPIs to tell all CPUs to disable virtualization
*/
static void emergency_vmx_disable_all(void)
{
/* Just make sure we won't change CPUs while doing this */
local_irq_disable();
/* We need to disable VMX on all CPUs before rebooting, otherwise
* we risk hanging up the machine, because the CPU ignores INIT
* signals when VMX is enabled.
*
* We can't take any locks and we may be in an inconsistent
* state, so we use NMIs as IPIs to tell the other CPUs to disable
* VMX and halt.
*
* For safety, we will avoid running the nmi_shootdown_cpus()
* stuff unnecessarily, but we don't have a way to check
* if other CPUs have VMX enabled. So we will call it only if the
* CPU we are running on has VMX enabled.
*
* We will miss cases where VMX is not enabled on all CPUs. This
* shouldn't do much harm because KVM always enables VMX on all
* CPUs anyway. But we can miss it during the small window where KVM
* is still enabling VMX.
*/
if (cpu_has_vmx() && cpu_vmx_enabled()) {
/* Disable VMX on this CPU.
*/
cpu_vmxoff();
/* Halt and disable VMX on the other CPUs */
nmi_shootdown_cpus(vmxoff_nmi);
}
}
void __attribute__((weak)) mach_reboot_fixups(void)
{
}
static void native_machine_emergency_restart(void)
{
int i;
if (reboot_emergency)
emergency_vmx_disable_all();
/* Tell the BIOS if we want cold or warm reboot */
*((unsigned short *)__va(0x472)) = reboot_mode;
for (;;) {
/* Could also try the reset bit in the Hammer NB */
switch (reboot_type) {
case BOOT_KBD:
mach_reboot_fixups(); /* for board specific fixups */
for (i = 0; i < 10; i++) {
kb_wait();
udelay(50);
outb(0xfe, 0x64); /* pulse reset low */
udelay(50);
}
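/* If the keyboard-controller reset did not take effect, fall
through and try a triple fault next. */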
case BOOT_TRIPLE:
load_idt(&no_idt);
__asm__ __volatile__("int3");
reboot_type = BOOT_KBD;
break;
#ifdef CONFIG_X86_32
case BOOT_BIOS:
machine_real_restart(jump_to_bios, sizeof(jump_to_bios));
reboot_type = BOOT_KBD;
break;
#endif
case BOOT_ACPI:
acpi_reboot();
reboot_type = BOOT_KBD;
break;
case BOOT_EFI:
if (efi_enabled)
efi.reset_system(reboot_mode ?
EFI_RESET_WARM :
EFI_RESET_COLD,
EFI_SUCCESS, 0, NULL);
reboot_type = BOOT_KBD;
break;
case BOOT_CF9:
port_cf9_safe = true;
/* fall through */
case BOOT_CF9_COND:
if (port_cf9_safe) {
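/* In the standard Reset Control Register at 0xcf9, bit 1 selects a
hard (rather than soft) reset and a 0->1 transition of bit 2
triggers it. */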
u8 cf9 = inb(0xcf9) & ~6;
outb(cf9|2, 0xcf9); /* Request hard reset */
udelay(50);
outb(cf9|6, 0xcf9); /* Actually do the reset */
udelay(50);
}
reboot_type = BOOT_KBD;
break;
}
}
}
void native_machine_shutdown(void)
{
/* Stop the cpus and apics */
#ifdef CONFIG_SMP
/* The boot cpu is always logical cpu 0 */
int reboot_cpu_id = 0;
#ifdef CONFIG_X86_32
/* See if a command line override has been given */
if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) &&
cpu_online(reboot_cpu))
reboot_cpu_id = reboot_cpu;
#endif
/* Make certain the cpu I'm about to reboot on is online */
if (!cpu_online(reboot_cpu_id))
reboot_cpu_id = smp_processor_id();
/* Make certain I only run on the appropriate processor */
set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
/* O.K Now that I'm on the appropriate processor,
* stop all of the others.
*/
smp_send_stop();
#endif
lapic_shutdown();
#ifdef CONFIG_X86_IO_APIC
disable_IO_APIC();
#endif
#ifdef CONFIG_HPET_TIMER
hpet_disable();
#endif
#ifdef CONFIG_X86_64
pci_iommu_shutdown();
#endif
}
static void __machine_emergency_restart(int emergency)
{
reboot_emergency = emergency;
machine_ops.emergency_restart();
}
static void native_machine_restart(char *__unused)
{
printk("machine restart\n");
if (!reboot_force)
machine_shutdown();
__machine_emergency_restart(0);
}
static void native_machine_halt(void)
{
/* stop other cpus and apics */
machine_shutdown();
/* stop this cpu */
stop_this_cpu(NULL);
}
static void native_machine_power_off(void)
{
if (pm_power_off) {
if (!reboot_force)
machine_shutdown();
pm_power_off();
}
}
struct machine_ops machine_ops = {
.power_off = native_machine_power_off,
.shutdown = native_machine_shutdown,
.emergency_restart = native_machine_emergency_restart,
.restart = native_machine_restart,
.halt = native_machine_halt,
#ifdef CONFIG_KEXEC
.crash_shutdown = native_machine_crash_shutdown,
#endif
};
void machine_power_off(void)
{
machine_ops.power_off();
}
void machine_shutdown(void)
{
machine_ops.shutdown();
}
void machine_emergency_restart(void)
{
__machine_emergency_restart(1);
}
void machine_restart(char *cmd)
{
machine_ops.restart(cmd);
}
void machine_halt(void)
{
machine_ops.halt();
}
#ifdef CONFIG_KEXEC
void machine_crash_shutdown(struct pt_regs *regs)
{
machine_ops.crash_shutdown(regs);
}
#endif
#if defined(CONFIG_SMP)
/* This keeps a track of which one is crashing cpu. */
static int crashing_cpu;
static nmi_shootdown_cb shootdown_callback;
static atomic_t waiting_for_crash_ipi;
static int crash_nmi_callback(struct notifier_block *self,
unsigned long val, void *data)
{
int cpu;
if (val != DIE_NMI_IPI)
return NOTIFY_OK;
cpu = raw_smp_processor_id();
/* Don't do anything if this handler is invoked on crashing cpu.
* Otherwise, system will completely hang. Crashing cpu can get
* an NMI if system was initially booted with nmi_watchdog parameter.
*/
if (cpu == crashing_cpu)
return NOTIFY_STOP;
local_irq_disable();
shootdown_callback(cpu, (struct die_args *)data);
atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */
halt();
for (;;)
cpu_relax();
return 1;
}
static void smp_send_nmi_allbutself(void)
{
apic->send_IPI_allbutself(NMI_VECTOR);
}
static struct notifier_block crash_nmi_nb = {
.notifier_call = crash_nmi_callback,
};
/* Halt all other CPUs, calling the specified function on each of them
*
* This function can be used to halt all other CPUs at crash
* or emergency reboot time. The function passed as a parameter
* will be called inside an NMI handler on all CPUs.
*/
void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{
unsigned long msecs;
local_irq_disable();
/* Make a note of crashing cpu. Will be used in NMI callback.*/
crashing_cpu = safe_smp_processor_id();
shootdown_callback = callback;
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
/* Would it be better to replace the trap vector here? */
if (register_die_notifier(&crash_nmi_nb))
return; /* return what? */
/* Ensure the new callback function is set before sending
* out the NMI
*/
wmb();
smp_send_nmi_allbutself();
msecs = 1000; /* Wait at most a second for the other cpus to stop */
while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
mdelay(1);
msecs--;
}
/* Leave the nmi callback set */
}
#else /* !CONFIG_SMP */
void nmi_shootdown_cpus(nmi_shootdown_cb callback)
{
/* No other CPUs to shoot down */
}
#endif