[PATCH] uml: reuse i386 cpu-specific tuning

Make UML share the underlying cpu-specific tuning done on i386. For now, many of
the config options aren't used much, but that can be addressed later. Also, UML
relies on GCC optimization for things like memcpy more than i386 does, so
specifying the correct -march and -mtune should be enough. Later, we may want to
correct other things: for instance, since FPU context switching is, for us, done
(at least partially, i.e. between our kernelspace and userspace) by the host, we
could allow GCC to use FPU operations. This doesn't hold for kernelspace vs.
kernelspace switches, but we don't support preemption.

Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

#ifndef __UM_CACHE_H
#define __UM_CACHE_H

#include <linux/config.h>
#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
# define L1_CACHE_SHIFT		(CONFIG_X86_L1_CACHE_SHIFT)
#elif defined(CONFIG_UML_X86) /* 64-bit */
# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
#else
/* XXX: this was taken from x86, now it's completely random. Luckily only
 * affects SMP padding. */
# define L1_CACHE_SHIFT 5
#endif

#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

#endif
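
What the XXX comment calls "SMP padding": L1_CACHE_SHIFT values of 5, 6, and 7 give
32-, 64-, and 128-byte cache lines, and L1_CACHE_BYTES is consumed almost entirely by
alignment/padding helpers (such as ____cacheline_aligned in linux/cache.h) that keep
data written by different CPUs on separate cache lines, avoiding false sharing. The
fragment below is a standalone, hypothetical sketch, not part of the patch: the struct
and field names are made up, and it hard-codes the header's fallback value just to
show how the constant is typically used.

/* Hypothetical userspace illustration of cache-line padding. Two counters
 * updated by different CPUs are each aligned to L1_CACHE_BYTES, so an update
 * to one never invalidates the cache line holding the other. */
#include <stdio.h>

#define L1_CACHE_SHIFT 5                     /* fallback value from the header */
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) /* 1 << 5 = 32-byte cache lines */

struct per_cpu_stats {
	unsigned long cpu0_count __attribute__((aligned(L1_CACHE_BYTES)));
	unsigned long cpu1_count __attribute__((aligned(L1_CACHE_BYTES)));
};

int main(void)
{
	/* With 32-byte alignment each counter occupies its own line, so the
	 * struct is two cache lines (64 bytes) instead of 16. */
	printf("cache line: %d bytes, struct size: %zu bytes\n",
	       L1_CACHE_BYTES, sizeof(struct per_cpu_stats));
	return 0;
}

This is also why the header's exact value is not critical on configurations it calls
"completely random": a too-small L1_CACHE_SHIFT only risks false sharing under SMP,
not incorrect behavior.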