62be90012c
Use cpu_relax() in the busy loops, as atomic_read() doesn't automatically imply volatility for i386 and x86_64. x86_64 doesn't have this issue because it open-codes the while loop in smpboot.c:smp_callin() itself that already uses cpu_relax(). For i386, however, smpboot.c:smp_callin() calls wait_for_init_deassert() which is buggy for mach-default and mach-es7000 cases. [ I test-built a kernel -- smp_callin() itself got inlined in its only callsite, smpboot.c:start_secondary() -- and the relevant piece of code disassembles to the following: 0xc1019704 <start_secondary+12>: mov 0xc144c4c8,%eax 0xc1019709 <start_secondary+17>: test %eax,%eax 0xc101970b <start_secondary+19>: je 0xc1019709 <start_secondary+17> init_deasserted (at 0xc144c4c8) gets fetched into %eax only once and then we loop over the test of the stale value in the register only, so these look like real bugs to me. With the fix below, this becomes: 0xc1019706 <start_secondary+14>: pause 0xc1019708 <start_secondary+16>: cmpl $0x0,0xc144c4c8 0xc101970f <start_secondary+23>: je 0xc1019706 <start_secondary+14> which looks nice and healthy. ] Thanks to Heiko Carstens for noticing this. Signed-off-by: Satyam Sharma <satyam@infradead.org> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
42 lines
932 B
C
42 lines
932 B
C
#ifndef __ASM_MACH_WAKECPU_H
#define __ASM_MACH_WAKECPU_H

/*
 * This file copes with machines that wakeup secondary CPUs by the
 * INIT, INIT, STARTUP sequence.
 */

/* Selects the INIT/INIT/STARTUP wakeup protocol in the generic SMP boot code. */
#define WAKE_SECONDARY_VIA_INIT

/*
 * Low-memory trampoline vector: 0x467 is the BIOS warm-reset vector
 * (real-mode 40:67) — presumably used to point the freshly started AP
 * at the boot trampoline; 0x469 is its high word.  TODO(review):
 * confirm against the callers in smpboot.c.
 */
#define TRAMPOLINE_LOW phys_to_virt(0x467)
#define TRAMPOLINE_HIGH phys_to_virt(0x469)

/* For this wakeup method the boot CPU is identified by its physical APIC id. */
#define boot_cpu_apicid boot_cpu_physical_apicid
/*
 * Spin until the boot processor has deasserted INIT on this AP.
 *
 * atomic_read() does not by itself imply a volatile access on i386,
 * so the cpu_relax() in the loop body is what forces @deassert to be
 * re-read on every iteration (it is also polite to an HT sibling).
 */
static inline void wait_for_init_deassert(atomic_t *deassert)
{
	for (;;) {
		if (atomic_read(deassert))
			break;
		cpu_relax();
	}
}
|
|
/*
 * Nothing to do here for most platforms: the INIT cycle already
 * leaves the local APIC in the state smp_callin() expects.
 */
static inline void smp_callin_clear_local_apic(void)
{
}
|
|
|
|
/* Intentional no-op: the INIT/STARTUP wakeup path has no NMI vector to save. */
static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
{
}
|
|
|
|
/* Intentional no-op: nothing was saved by store_NMI_vector(), so nothing to restore. */
static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
{
}
|
|
|
|
/*
 * inquire_remote_apic(apicid) - dump remote APIC state for debugging.
 *
 * Only does real work when APIC_DEBUG is set; otherwise it must still
 * expand to a single well-formed statement.  The empty expansion is
 * therefore "do { } while (0)" rather than "{}", so that
 * "if (x) inquire_remote_apic(id); else ..." stays valid C.
 */
#if APIC_DEBUG
#define inquire_remote_apic(apicid) __inquire_remote_apic(apicid)
#else
#define inquire_remote_apic(apicid) do { } while (0)
#endif
|
|
|
|
#endif /* __ASM_MACH_WAKECPU_H */
|