/*
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#ifndef _ASM_IA64_THREAD_INFO_H
#define _ASM_IA64_THREAD_INFO_H

#ifndef ASM_OFFSETS_C
#include <asm/asm-offsets.h>
#endif
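/*
 * Note: <asm/asm-offsets.h> is generated from asm-offsets.c itself, so it
 * cannot be included while asm-offsets.c is being compiled; the ASM_OFFSETS_C
 * guard above breaks that circular dependency, and the offset-based macros
 * further down are stubbed out for the same reason.
 */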
#include <asm/processor.h>
#include <asm/ptrace.h>

#define PREEMPT_ACTIVE_BIT 30
#define PREEMPT_ACTIVE	(1 << PREEMPT_ACTIVE_BIT)

#ifndef __ASSEMBLY__

/*
 * On IA-64, we want to keep the task structure and kernel stack together, so they can be
 * mapped by a single TLB entry and so they can be addressed by the "current" pointer
 * without having to do pointer masking.
 */
struct thread_info {
	struct task_struct *task;	/* XXX not really needed, except for dup_task_struct() */
	struct exec_domain *exec_domain;/* execution domain */
	__u32 flags;			/* thread_info flags (see TIF_*) */
	__u32 cpu;			/* current CPU */
	__u32 last_cpu;			/* Last CPU thread ran on */
	__u32 status;			/* Thread synchronous flags */
	mm_segment_t addr_limit;	/* user-level address space limit */
	int preempt_count;		/* 0=preemptible, <0=BUG; will also serve as bh-counter */
	struct restart_block restart_block;
};

#define THREAD_SIZE			KERNEL_STACK_SIZE

#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.exec_domain	= &default_exec_domain,	\
	.flags		= 0,			\
	.cpu		= 0,			\
	.addr_limit	= KERNEL_DS,		\
	.preempt_count	= 0,			\
	.restart_block	= {			\
		.fn = do_no_restart_syscall,	\
	},					\
}

#ifndef ASM_OFFSETS_C
/* how to get the thread information struct from C */
#define current_thread_info()	((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
#define alloc_thread_info(tsk)	((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#define task_thread_info(tsk)	((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#else
#define current_thread_info()	((struct thread_info *) 0)
#define alloc_thread_info(tsk)	((struct thread_info *) 0)
#define task_thread_info(tsk)	((struct thread_info *) 0)
#endif
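/*
 * Illustrative only: generic code reaches the flags through the accessors
 * above, so a C-level reschedule check amounts to roughly
 *
 *	if (test_ti_thread_flag(current_thread_info(), TIF_NEED_RESCHED))
 *		schedule();
 *
 * (test_ti_thread_flag() comes from <linux/thread_info.h>; the hot paths do
 * the equivalent test in assembly.)
 */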
#define free_thread_info(ti)	/* nothing */
#define task_stack_page(tsk)	((void *)(tsk))

#define __HAVE_THREAD_FUNCTIONS
#define setup_thread_stack(p, org) \
	*task_thread_info(p) = *task_thread_info(org); \
	task_thread_info(p)->task = (p);
#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
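/*
 * Rough sketch of the combined per-task allocation the macros above rely on
 * (one contiguous block of KERNEL_STACK_SIZE bytes; the IA64_* offsets are
 * the generated constants, shown here only for orientation):
 *
 *	+-------------------------------+  (tsk) + KERNEL_STACK_SIZE
 *	|  memory stack (grows down)    |
 *	|              ...              |
 *	|  RSE backing store (grows up) |
 *	+-------------------------------+  (tsk) + IA64_RBS_OFFSET
 *	|  struct thread_info           |
 *	+-------------------------------+  (tsk) + IA64_TASK_SIZE
 *	|  struct task_struct           |
 *	+-------------------------------+  (tsk) == task_stack_page(tsk)
 */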

#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#define alloc_task_struct()	((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))
#define free_task_struct(tsk)	free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)
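/*
 * alloc_task_struct() hands back the whole task_struct + thread_info + stack
 * block in a single order-KERNEL_STACK_SIZE_ORDER allocation; __GFP_COMP
 * makes it a compound page (one head page plus tails), so the multi-page
 * block can be treated as a unit.
 */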

#endif /* !__ASSEMBLY__ */

/*
 * thread information flags
 * - these are process state flags that various assembly files may need to access
 * - pending work-to-be-done flags are in least-significant 16 bits, other flags
 *   in top 16 bits
 */
#define TIF_NOTIFY_RESUME	0	/* resumption notification requested */
#define TIF_SIGPENDING		1	/* signal pending */
#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
#define TIF_SYSCALL_TRACE	3	/* syscall trace active */
#define TIF_SYSCALL_AUDIT	4	/* syscall auditing active */
#define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE		17	/* picked by the OOM killer; may use memory reserves */
#define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
#define TIF_DB_DISABLED		19	/* debug trap disabled for fsyscall */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEAUDIT	(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
#define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
#define _TIF_DB_DISABLED	(1 << TIF_DB_DISABLED)

/* "work to do on user-return" bits */
#define TIF_ALLWORK_MASK	(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
/* like TIF_ALLWORK_MASK but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
#define TIF_WORK_MASK		(TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
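/*
 * Illustrative only: on the user-return path the pending-work bits are
 * tested roughly as
 *
 *	if (current_thread_info()->flags & TIF_WORK_MASK)
 *		handle signals, rescheduling, notify-resume work;
 *
 * while the trace/audit bits are checked separately around syscall entry and
 * exit.  The real tests are done in the assembly exit path.
 */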

#define TS_POLLING		1	/* true if in idle loop and not sleeping */

#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)

#endif /* _ASM_IA64_THREAD_INFO_H */