/*
 * Code for the vsyscall page. This version uses the sysenter instruction.
 *
 * NOTE:
 * 1) __kernel_vsyscall _must_ be first in this page.
 * 2) there are alignment constraints on this stub, see vsyscall-sigreturn_32.S
 *    for details.
 */

/*
 * The caller puts arg2 in %ecx, which gets pushed. The kernel will use
 * %ecx itself for arg2. The pushing is because the sysexit instruction
 * (found in entry.S) requires that we clobber %ecx with the desired %esp.
 * User code might expect that %ecx is unclobbered though, as it would be
 * for returning via the iret instruction, so we must push and pop.
 *
 * The caller puts arg3 in %edx, which the sysexit instruction requires
 * for %eip. Thus, exactly as for arg2, we must push and pop.
 *
 * Arg6 is different. The caller puts arg6 in %ebp. Since the sysenter
 * instruction clobbers %esp, the user's %esp won't even survive entry
 * into the kernel. We store %esp in %ebp. Code in entry.S must fetch
 * arg6 from the stack.
 *
 * You cannot use this vsyscall for the clone() syscall because the
 * three dwords this stub pushes on the parent's stack do not get
 * copied to the child.
 */
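
/*
 * For illustration: user code normally reaches this stub through the
 * AT_SYSINFO entry of the ELF auxiliary vector rather than a fixed
 * address; glibc caches that pointer and calls it with the syscall
 * number in %eax and arguments in %ebx, %ecx, %edx, %esi, %edi and
 * %ebp, exactly as for int $0x80. A minimal sketch of a direct call,
 * assuming a 32-bit x86 build (gcc -m32) and glibc's getauxval():
 *
 *	#include <elf.h>        // AT_SYSINFO
 *	#include <stdio.h>
 *	#include <sys/auxv.h>   // getauxval()
 *
 *	int main(void)
 *	{
 *		void *vsyscall = (void *)getauxval(AT_SYSINFO);
 *		long ret;
 *
 *		// __NR_getpid is 20 on i386; getpid() takes no arguments.
 *		__asm__ volatile("call *%1"
 *				 : "=a" (ret)
 *				 : "r" (vsyscall), "0" (20)
 *				 : "memory");
 *		printf("pid = %ld\n", ret);
 *		return 0;
 *	}
 */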
	.text
	.globl __kernel_vsyscall
	.type __kernel_vsyscall,@function
__kernel_vsyscall:
.LSTART_vsyscall:
	push %ecx
.Lpush_ecx:
	push %edx
.Lpush_edx:
	push %ebp
.Lenter_kernel:
	movl %esp,%ebp
	sysenter

	/* 7: align return point with nops to make disassembly easier */
	.space 7,0x90
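
	/*
	 * Layout note: the three 1-byte pushes plus the 2-byte
	 * "movl %esp,%ebp" and the 2-byte "sysenter" occupy offsets 0-6,
	 * and the 7-byte pad covers offsets 7-13. The 2-byte jmp below
	 * lands at offsets 14-15, so the restart point is
	 * SYSENTER_RETURN-2 and the normal return point SYSENTER_RETURN
	 * falls at offset 16.
	 */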
	/* 14: System call restart point is here! (SYSENTER_RETURN-2) */
	jmp .Lenter_kernel
	/* 16: System call normal return point is here! */
	.globl SYSENTER_RETURN	/* Symbol used by sysenter.c */
SYSENTER_RETURN:
	pop %ebp
.Lpop_ebp:
	pop %edx
.Lpop_edx:
	pop %ecx
.Lpop_ecx:
	ret
.LEND_vsyscall:
	.size __kernel_vsyscall,.-.LSTART_vsyscall
	.previous

	.section .eh_frame,"a",@progbits
.LSTARTFRAMEDLSI:
	.long .LENDCIEDLSI-.LSTARTCIEDLSI
.LSTARTCIEDLSI:
	.long 0			/* CIE ID */
	.byte 1			/* Version number */
	.string "zR"		/* NUL-terminated augmentation string */
	.uleb128 1		/* Code alignment factor */
	.sleb128 -4		/* Data alignment factor */
	.byte 8			/* Return address register column */
	.uleb128 1		/* Augmentation value length */
	.byte 0x1b		/* DW_EH_PE_pcrel|DW_EH_PE_sdata4 */
	.byte 0x0c		/* DW_CFA_def_cfa */
	.uleb128 4		/* register 4 (%esp) */
	.uleb128 4		/* offset 4: CFA = %esp + 4 */
	.byte 0x88		/* DW_CFA_offset, column 0x8 (the RA) */
	.uleb128 1		/* factored offset 1: RA at CFA-4 */
	.align 4
.LENDCIEDLSI:
	.long .LENDFDEDLSI-.LSTARTFDEDLSI /* FDE length */
.LSTARTFDEDLSI:
	.long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
	.long .LSTART_vsyscall-.	/* PC-relative start address */
	.long .LEND_vsyscall-.LSTART_vsyscall /* Address range length */
	.uleb128 0			/* Augmentation data length */
	/* What follows are the instructions for the table generation.
	   We have to record all changes of the stack pointer. */
	.byte 0x04		/* DW_CFA_advance_loc4 */
	.long .Lpush_ecx-.LSTART_vsyscall
	.byte 0x0e		/* DW_CFA_def_cfa_offset */
	.byte 0x08		/* RA at offset 8 now */
	.byte 0x04		/* DW_CFA_advance_loc4 */
	.long .Lpush_edx-.Lpush_ecx
	.byte 0x0e		/* DW_CFA_def_cfa_offset */
	.byte 0x0c		/* RA at offset 12 now */
	.byte 0x04		/* DW_CFA_advance_loc4 */
	.long .Lenter_kernel-.Lpush_edx
	.byte 0x0e		/* DW_CFA_def_cfa_offset */
	.byte 0x10		/* RA at offset 16 now */
	.byte 0x85, 0x04	/* DW_CFA_offset %ebp -16 */
	/* Finally the epilogue. */
	.byte 0x04		/* DW_CFA_advance_loc4 */
	.long .Lpop_ebp-.Lenter_kernel
	.byte 0x0e		/* DW_CFA_def_cfa_offset */
	.byte 0x0c		/* RA at offset 12 now */
	.byte 0xc5		/* DW_CFA_restore %ebp */
	.byte 0x04		/* DW_CFA_advance_loc4 */
	.long .Lpop_edx-.Lpop_ebp
	.byte 0x0e		/* DW_CFA_def_cfa_offset */
	.byte 0x08		/* RA at offset 8 now */
	.byte 0x04		/* DW_CFA_advance_loc4 */
	.long .Lpop_ecx-.Lpop_edx
	.byte 0x0e		/* DW_CFA_def_cfa_offset */
	.byte 0x04		/* RA at offset 4 now */
	.align 4
.LENDFDEDLSI:
	.previous
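
/*
 * The CIE/FDE above can be cross-checked in the built object with
 * "readelf --debug-dump=frames", which decodes these hand-encoded
 * DWARF opcodes back into CFA and register rules.
 */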

/*
 * Get the common code for the sigreturn entry points.
 */
#include "vsyscall-sigreturn_32.S"