Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
  [MIPS] Fix wrong checksum for split TCP packets on 64-bit MIPS
  [MIPS] Fix BUG(), BUG_ON() handling
  [MIPS] Retry {save,restore}_fp_context if failed in atomic context.
  [MIPS] Disallow CpU exception in kernel again.
  [MIPS] Add missing silicon revisions for BCM112x
Linus Torvalds 2007-04-20 22:57:51 -07:00
commit ea8df8c5e6
12 changed files with 131 additions and 71 deletions

View file

@@ -49,7 +49,8 @@ LEAF(resume)
#ifndef CONFIG_CPU_HAS_LLSC
sw zero, ll_bit
#endif
mfc0 t2, CP0_STATUS
mfc0 t1, CP0_STATUS
sw t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0
sw ra, THREAD_REG31(a0)
@@ -59,8 +60,8 @@ LEAF(resume)
lw t3, TASK_THREAD_INFO(a0)
lw t0, TI_FLAGS(t3)
li t1, _TIF_USEDFPU
and t1, t0
beqz t1, 1f
and t2, t0, t1
beqz t2, 1f
nor t1, zero, t1
and t0, t0, t1
@@ -73,13 +74,10 @@ LEAF(resume)
li t1, ~ST0_CU1
and t0, t0, t1
sw t0, ST_OFF(t3)
/* clear thread_struct CU1 bit */
and t2, t1
fpu_save_single a0, t0 # clobbers t0
1:
sw t2, THREAD_STATUS(a0)
/*
* The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints.

View file

@@ -48,7 +48,8 @@
#ifndef CONFIG_CPU_HAS_LLSC
sw zero, ll_bit
#endif
mfc0 t2, CP0_STATUS
mfc0 t1, CP0_STATUS
LONG_S t1, THREAD_STATUS(a0)
cpu_save_nonscratch a0
LONG_S ra, THREAD_REG31(a0)
@@ -58,8 +59,8 @@
PTR_L t3, TASK_THREAD_INFO(a0)
LONG_L t0, TI_FLAGS(t3)
li t1, _TIF_USEDFPU
and t1, t0
beqz t1, 1f
and t2, t0, t1
beqz t2, 1f
nor t1, zero, t1
and t0, t0, t1
@@ -72,13 +73,10 @@
li t1, ~ST0_CU1
and t0, t0, t1
LONG_S t0, ST_OFF(t3)
/* clear thread_struct CU1 bit */
and t2, t1
fpu_save_double a0 t0 t1 # c0_status passed in t0
# clobbers t1
1:
LONG_S t2, THREAD_STATUS(a0)
/*
* The order of restoring the registers takes care of the race

View file

@@ -34,4 +34,13 @@ extern int install_sigtramp(unsigned int __user *tramp, unsigned int syscall);
/* Check and clear pending FPU exceptions in saved CSR */
extern int fpcsr_pending(unsigned int __user *fpcsr);
/* Make sure we will not lose FPU ownership */
#ifdef CONFIG_PREEMPT
#define lock_fpu_owner() preempt_disable()
#define unlock_fpu_owner() preempt_enable()
#else
#define lock_fpu_owner() pagefault_disable()
#define unlock_fpu_owner() pagefault_enable()
#endif
#endif /* __SIGNAL_COMMON_H */

View file

@@ -20,6 +20,7 @@
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <asm/abi.h>
#include <asm/asm.h>
@@ -27,7 +28,6 @@
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#include <asm/sim.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
@@ -78,6 +78,46 @@ struct rt_sigframe {
/*
* Helper routines
*/
static int protected_save_fp_context(struct sigcontext __user *sc)
{
int err;
while (1) {
lock_fpu_owner();
own_fpu_inatomic(1);
err = save_fp_context(sc); /* this might fail */
unlock_fpu_owner();
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __put_user(0, &sc->sc_fpregs[0]) |
__put_user(0, &sc->sc_fpregs[31]) |
__put_user(0, &sc->sc_fpc_csr);
if (err)
break; /* really bad sigcontext */
}
return err;
}
static int protected_restore_fp_context(struct sigcontext __user *sc)
{
int err, tmp;
while (1) {
lock_fpu_owner();
own_fpu_inatomic(0);
err = restore_fp_context(sc); /* this might fail */
unlock_fpu_owner();
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __get_user(tmp, &sc->sc_fpregs[0]) |
__get_user(tmp, &sc->sc_fpregs[31]) |
__get_user(tmp, &sc->sc_fpc_csr);
if (err)
break; /* really bad sigcontext */
}
return err;
}
int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
@@ -113,10 +153,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
own_fpu(1);
enable_fp_in_kernel();
err |= save_fp_context(sc);
disable_fp_in_kernel();
err |= protected_save_fp_context(sc);
}
return err;
}
@@ -148,7 +185,7 @@ check_and_restore_fp_context(struct sigcontext __user *sc)
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
err |= restore_fp_context(sc);
err |= protected_restore_fp_context(sc);
return err ?: sig;
}
@@ -187,11 +224,8 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
if (used_math) {
/* restore fpu context if we have used it before */
own_fpu(0);
enable_fp_in_kernel();
if (!err)
err = check_and_restore_fp_context(sc);
disable_fp_in_kernel();
} else {
/* signal handler may have used FPU. Give it up. */
lose_fpu(0);

View file

@@ -22,6 +22,7 @@
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <asm/abi.h>
#include <asm/asm.h>
@@ -29,7 +30,6 @@
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/sim.h>
#include <asm/uaccess.h>
#include <asm/ucontext.h>
#include <asm/system.h>
#include <asm/fpu.h>
@@ -176,6 +176,46 @@ struct rt_sigframe32 {
/*
* sigcontext handlers
*/
static int protected_save_fp_context32(struct sigcontext32 __user *sc)
{
int err;
while (1) {
lock_fpu_owner();
own_fpu_inatomic(1);
err = save_fp_context32(sc); /* this might fail */
unlock_fpu_owner();
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __put_user(0, &sc->sc_fpregs[0]) |
__put_user(0, &sc->sc_fpregs[31]) |
__put_user(0, &sc->sc_fpc_csr);
if (err)
break; /* really bad sigcontext */
}
return err;
}
static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
{
int err, tmp;
while (1) {
lock_fpu_owner();
own_fpu_inatomic(0);
err = restore_fp_context32(sc); /* this might fail */
unlock_fpu_owner();
if (likely(!err))
break;
/* touch the sigcontext and try again */
err = __get_user(tmp, &sc->sc_fpregs[0]) |
__get_user(tmp, &sc->sc_fpregs[31]) |
__get_user(tmp, &sc->sc_fpc_csr);
if (err)
break; /* really bad sigcontext */
}
return err;
}
static int setup_sigcontext32(struct pt_regs *regs,
struct sigcontext32 __user *sc)
{
@@ -209,10 +249,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
* Save FPU state to signal context. Signal handler
* will "inherit" current FPU state.
*/
own_fpu(1);
enable_fp_in_kernel();
err |= save_fp_context32(sc);
disable_fp_in_kernel();
err |= protected_save_fp_context32(sc);
}
return err;
}
@@ -225,7 +262,7 @@ check_and_restore_fp_context32(struct sigcontext32 __user *sc)
err = sig = fpcsr_pending(&sc->sc_fpc_csr);
if (err > 0)
err = 0;
err |= restore_fp_context32(sc);
err |= protected_restore_fp_context32(sc);
return err ?: sig;
}
@@ -261,11 +298,8 @@ static int restore_sigcontext32(struct pt_regs *regs,
if (used_math) {
/* restore fpu context if we have used it before */
own_fpu(0);
enable_fp_in_kernel();
if (!err)
err = check_and_restore_fp_context32(sc);
disable_fp_in_kernel();
} else {
/* signal handler may have used FPU. Give it up. */
lose_fpu(0);

View file

@@ -650,7 +650,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
unsigned int opcode, bcode;
siginfo_t info;
if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/*
@@ -700,7 +700,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
unsigned int opcode, tcode = 0;
siginfo_t info;
if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/* Immediate versions don't provide a code. */
@@ -757,11 +757,12 @@ asmlinkage void do_cpu(struct pt_regs *regs)
{
unsigned int cpid;
die_if_kernel("do_cpu invoked from kernel context!", regs);
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
switch (cpid) {
case 0:
die_if_kernel("do_cpu invoked from kernel context!", regs);
if (!cpu_has_llsc)
if (!simulate_llsc(regs))
return;
@@ -772,9 +773,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)
break;
case 1:
if (!test_thread_flag(TIF_ALLOW_FP_IN_KERNEL))
die_if_kernel("do_cpu invoked from kernel context!",
regs);
if (used_math()) /* Using the FPU again. */
own_fpu(1);
else { /* First time FPU user. */
@@ -782,19 +780,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
set_used_math();
}
if (raw_cpu_has_fpu) {
if (test_thread_flag(TIF_ALLOW_FP_IN_KERNEL)) {
local_irq_disable();
if (cpu_has_fpu)
regs->cp0_status |= ST0_CU1;
/*
* We must return without enabling
* interrupts to ensure keep FPU
* ownership until resume.
*/
return;
}
} else {
if (!raw_cpu_has_fpu) {
int sig;
sig = fpu_emulator_cop1Handler(regs,
&current->thread.fpu, 0);
@@ -836,7 +822,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)
case 2:
case 3:
die_if_kernel("do_cpu invoked from kernel context!", regs);
break;
}

View file

@@ -141,6 +141,18 @@ static int __init setup_bcm112x(void)
periph_rev = 3;
pass_str = "A2";
break;
case K_SYS_REVISION_BCM112x_A3:
periph_rev = 3;
pass_str = "A3";
break;
case K_SYS_REVISION_BCM112x_A4:
periph_rev = 3;
pass_str = "A4";
break;
case K_SYS_REVISION_BCM112x_B0:
periph_rev = 3;
pass_str = "B0";
break;
default:
printk("Unknown %s rev %x\n", soc_str, soc_pass);
ret = 1;

View file

@@ -18,7 +18,8 @@ do { \
#define BUG_ON(condition) \
do { \
__asm__ __volatile__("tne $0, %0" : : "r" (condition)); \
__asm__ __volatile__("tne $0, %0, %1" \
: : "r" (condition), "i" (BRK_BUG)); \
} while (0)
#define HAVE_ARCH_BUG_ON
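
Passing BRK_BUG as the second operand encodes it in the 10-bit code field of the tne instruction; the old single-operand form left that field at zero, so a BUG_ON() trap could not be told apart from any other use of tne. Below is a minimal sketch of how a handler can recover that code from the faulting instruction word; it assumes the register-form trap layout (code in bits 6..15) and uses a stand-in value for BRK_BUG, whose real definition lives in the kernel headers, not in this diff.

/* Illustrative sketch, not kernel code: recover the code field that
 * "tne $0, %0, %1" with "i" (BRK_BUG) embeds in the instruction word. */
#include <stdint.h>
#include <stdio.h>

#define BRK_BUG_DEMO 512u	/* stand-in; the real value comes from <asm/break.h> */

static unsigned int trap_code(uint32_t insn)
{
	return (insn >> 6) & ((1u << 10) - 1);	/* bits 6..15 of register-form traps */
}

int main(void)
{
	/* SPECIAL opcode | rs=$0 | rt=$2 (condition) | code | TNE function (0x36) */
	uint32_t insn = (0u << 26) | (0u << 21) | (2u << 16) | (BRK_BUG_DEMO << 6) | 0x36u;

	if (trap_code(insn) == BRK_BUG_DEMO)
		printf("trap carries BUG code %u\n", trap_code(insn));
	return 0;
}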

View file

@@ -166,7 +166,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr,
#else
"r" (proto + len),
#endif
"r" (sum));
"r" ((__force unsigned long)sum));
return sum;
}
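
The cast is the whole fix: sum is a 32-bit __wsum, and on 64-bit MIPS a plain 32-bit value is normally held sign-extended in its 64-bit register, so a partial sum with bit 31 set enters the 64-bit add-and-fold sequence as 0xffffffff_xxxxxxxx and the fold silently drops the resulting end-around carry. Forcing it through unsigned long zero-extends the value first. A user-space sketch of the effect follows; mips64_fold() only mimics the shift-and-add fold done by the inline asm and is illustrative, not the kernel code.

#include <stdint.h>
#include <stdio.h>

/* Mimics the asm's final fold: add the low 32-bit half into the high half and
 * keep the high half; any carry out of bit 63 is lost, as in the real code. */
static uint32_t mips64_fold(uint64_t reg)
{
	reg += reg << 32;
	return (uint32_t)(reg >> 32);
}

int main(void)
{
	uint32_t sum = 0x80000001u;				/* partial checksum, bit 31 set */
	uint64_t zero_ext = (uint64_t)sum;			/* what (__force unsigned long)sum yields */
	uint64_t sign_ext = (uint64_t)(int64_t)(int32_t)sum;	/* uncast 32-bit value in a 64-bit reg */

	/* With all other addends zero, the folded result should equal sum. */
	printf("zero-extended: %08x\n", mips64_fold(zero_ext));	/* 80000001 */
	printf("sign-extended: %08x\n", mips64_fold(sign_ext));	/* 80000000: off by the lost carry */
	return 0;
}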

View file

@@ -68,8 +68,6 @@ do { \
/* We don't care about the c0 hazard here */ \
} while (0)
#define __fpu_enabled() (read_c0_status() & ST0_CU1)
#define enable_fpu() \
do { \
if (cpu_has_fpu) \
@@ -102,14 +100,19 @@ static inline void __own_fpu(void)
set_thread_flag(TIF_USEDFPU);
}
static inline void own_fpu(int restore)
static inline void own_fpu_inatomic(int restore)
{
preempt_disable();
if (cpu_has_fpu && !__is_fpu_owner()) {
__own_fpu();
if (restore)
_restore_fp(current);
}
}
static inline void own_fpu(int restore)
{
preempt_disable();
own_fpu_inatomic(restore);
preempt_enable();
}
@@ -162,18 +165,4 @@ static inline fpureg_t *get_fpu_regs(struct task_struct *tsk)
return tsk->thread.fpu.fpr;
}
static inline void enable_fp_in_kernel(void)
{
set_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
/* make sure CU1 and FPU ownership are consistent */
if (!__is_fpu_owner() && __fpu_enabled())
__disable_fpu();
}
static inline void disable_fp_in_kernel(void)
{
BUG_ON(!__is_fpu_owner() && __fpu_enabled());
clear_thread_flag(TIF_ALLOW_FP_IN_KERNEL);
}
#endif /* _ASM_FPU_H */

View file

@@ -84,6 +84,7 @@
#define K_SYS_REVISION_BCM112x_A2 0x21
#define K_SYS_REVISION_BCM112x_A3 0x22
#define K_SYS_REVISION_BCM112x_A4 0x23
#define K_SYS_REVISION_BCM112x_B0 0x30
#define K_SYS_REVISION_BCM1480_S0 0x01
#define K_SYS_REVISION_BCM1480_A1 0x02

View file

@@ -119,7 +119,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18
#define TIF_FREEZE 19
#define TIF_ALLOW_FP_IN_KERNEL 20
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)