x86, mm: rename TASK_SIZE64 => TASK_SIZE_MAX
Impact: cleanup. Rename TASK_SIZE64 to TASK_SIZE_MAX, and provide the define on 32-bit too (mapped to TASK_SIZE). This allows 32-bit code to make use of the (former) TASK_SIZE64 symbol as well, in a clean way. Cc: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
c3731c6866
commit
d951734654
4 changed files with 9 additions and 8 deletions
|
@ -861,6 +861,7 @@ static inline void spin_lock_prefetch(const void *x)
|
||||||
* User space process size: 3GB (default).
|
* User space process size: 3GB (default).
|
||||||
*/
|
*/
|
||||||
#define TASK_SIZE PAGE_OFFSET
|
#define TASK_SIZE PAGE_OFFSET
|
||||||
|
#define TASK_SIZE_MAX TASK_SIZE
|
||||||
#define STACK_TOP TASK_SIZE
|
#define STACK_TOP TASK_SIZE
|
||||||
#define STACK_TOP_MAX STACK_TOP
|
#define STACK_TOP_MAX STACK_TOP
|
||||||
|
|
||||||
|
@ -920,7 +921,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
|
||||||
/*
|
/*
|
||||||
* User space process size. 47bits minus one guard page.
|
* User space process size. 47bits minus one guard page.
|
||||||
*/
|
*/
|
||||||
#define TASK_SIZE64 ((1UL << 47) - PAGE_SIZE)
|
#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
|
||||||
|
|
||||||
/* This decides where the kernel will search for a free chunk of vm
|
/* This decides where the kernel will search for a free chunk of vm
|
||||||
* space during mmap's.
|
* space during mmap's.
|
||||||
|
@ -929,12 +930,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
|
||||||
0xc0000000 : 0xFFFFe000)
|
0xc0000000 : 0xFFFFe000)
|
||||||
|
|
||||||
#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
|
#define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
|
||||||
IA32_PAGE_OFFSET : TASK_SIZE64)
|
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
|
||||||
#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \
|
#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? \
|
||||||
IA32_PAGE_OFFSET : TASK_SIZE64)
|
IA32_PAGE_OFFSET : TASK_SIZE_MAX)
|
||||||
|
|
||||||
#define STACK_TOP TASK_SIZE
|
#define STACK_TOP TASK_SIZE
|
||||||
#define STACK_TOP_MAX TASK_SIZE64
|
#define STACK_TOP_MAX TASK_SIZE_MAX
|
||||||
|
|
||||||
#define INIT_THREAD { \
|
#define INIT_THREAD { \
|
||||||
.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
|
.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
|
||||||
|
|
|
@ -268,7 +268,7 @@ static unsigned long debugreg_addr_limit(struct task_struct *task)
|
||||||
if (test_tsk_thread_flag(task, TIF_IA32))
|
if (test_tsk_thread_flag(task, TIF_IA32))
|
||||||
return IA32_PAGE_OFFSET - 3;
|
return IA32_PAGE_OFFSET - 3;
|
||||||
#endif
|
#endif
|
||||||
return TASK_SIZE64 - 7;
|
return TASK_SIZE_MAX - 7;
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif /* CONFIG_X86_32 */
|
#endif /* CONFIG_X86_32 */
|
||||||
|
|
|
@ -963,7 +963,7 @@ static int fault_in_kernel_space(unsigned long address)
|
||||||
#ifdef CONFIG_X86_32
|
#ifdef CONFIG_X86_32
|
||||||
return address >= TASK_SIZE;
|
return address >= TASK_SIZE;
|
||||||
#else
|
#else
|
||||||
return address >= TASK_SIZE64;
|
return address >= TASK_SIZE_MAX;
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -85,8 +85,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
|
||||||
unsigned long addr, end;
|
unsigned long addr, end;
|
||||||
unsigned offset;
|
unsigned offset;
|
||||||
end = (start + PMD_SIZE - 1) & PMD_MASK;
|
end = (start + PMD_SIZE - 1) & PMD_MASK;
|
||||||
if (end >= TASK_SIZE64)
|
if (end >= TASK_SIZE_MAX)
|
||||||
end = TASK_SIZE64;
|
end = TASK_SIZE_MAX;
|
||||||
end -= len;
|
end -= len;
|
||||||
/* This loses some more bits than a modulo, but is cheaper */
|
/* This loses some more bits than a modulo, but is cheaper */
|
||||||
offset = get_random_int() & (PTRS_PER_PTE - 1);
|
offset = get_random_int() & (PTRS_PER_PTE - 1);
|
||||||
|
|
Loading…
Reference in a new issue