thread_info.h

#ifndef __ASM_SH_THREAD_INFO_H
#define __ASM_SH_THREAD_INFO_H

/* SuperH version
 * Copyright (C) 2002 Niibe Yutaka
 *
 * The copyright of original i386 version is:
 *
 * Copyright (C) 2002 David Howells (dhowells@redhat.com)
 * - Incorporating suggestions made by Linus Torvalds and Dave Miller
 */

#ifdef __KERNEL__

#include <asm/page.h>

/*
 * Page fault error code bits
 */
#define FAULT_CODE_WRITE	(1 << 0)	/* write access */
#define FAULT_CODE_INITIAL	(1 << 1)	/* initial page write */
#define FAULT_CODE_ITLB		(1 << 2)	/* ITLB miss */
#define FAULT_CODE_PROT		(1 << 3)	/* protection fault */
#define FAULT_CODE_USER		(1 << 4)	/* user-mode access */
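
/*
 * Example (an illustrative sketch, not from the original header): the bits
 * above combine to describe a single fault, so a write fault taken from
 * user mode would be encoded as
 *
 *	FAULT_CODE_WRITE | FAULT_CODE_USER == 0x11
 *
 * The combined value is stashed in the upper byte of thread_info->flags
 * via set_thread_fault_code() further down in this file.
 */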
#ifndef __ASSEMBLY__
#include <asm/processor.h>

struct thread_info {
	struct task_struct	*task;		/* main task structure */
	struct exec_domain	*exec_domain;	/* execution domain */
	unsigned long		flags;		/* low level flags */
	__u32			status;		/* thread synchronous flags */
	__u32			cpu;
	int			preempt_count;	/* 0 => preemptable, <0 => BUG */
	mm_segment_t		addr_limit;	/* thread address space */
	struct restart_block	restart_block;
	unsigned long		previous_sp;	/* sp of previous stack in case
						   of nested IRQ stacks */
	__u8			supervisor_stack[0];
};

#endif

#define PREEMPT_ACTIVE		0x10000000

#if defined(CONFIG_4KSTACKS)
#define THREAD_SHIFT	12
#else
#define THREAD_SHIFT	13
#endif

#define THREAD_SIZE	(1 << THREAD_SHIFT)
#define STACK_WARN	(THREAD_SIZE >> 3)
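
/*
 * Worked numbers (illustrative): with CONFIG_4KSTACKS, THREAD_SIZE is
 * 1 << 12 = 4 KiB, otherwise 1 << 13 = 8 KiB. STACK_WARN is one eighth
 * of that (512 or 1024 bytes), the low-water mark at which the stack
 * usage check starts warning.
 */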
/*
 * macros/functions for gaining access to the thread information structure
 */
#ifndef __ASSEMBLY__
#define INIT_THREAD_INFO(tsk)			\
{						\
	.task		= &tsk,			\
	.exec_domain	= &default_exec_domain,	\
	.flags		= 0,			\
	.status		= 0,			\
	.cpu		= 0,			\
	.preempt_count	= INIT_PREEMPT_COUNT,	\
	.addr_limit	= KERNEL_DS,		\
	.restart_block	= {			\
		.fn = do_no_restart_syscall,	\
	},					\
}

#define init_thread_info	(init_thread_union.thread_info)
#define init_stack		(init_thread_union.stack)
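
/*
 * For reference (a sketch of how this is consumed elsewhere, not part of
 * this header): generic code overlays the thread_info with the kernel
 * stack in a union, and the boot task is initialized with the macro above:
 *
 *	union thread_union {
 *		struct thread_info thread_info;
 *		unsigned long stack[THREAD_SIZE/sizeof(long)];
 *	};
 *
 *	union thread_union init_thread_union =
 *		{ INIT_THREAD_INFO(init_task) };
 */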
/* how to get the current stack pointer from C */
register unsigned long current_stack_pointer asm("r15") __used;

/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
	struct thread_info *ti;
#if defined(CONFIG_SUPERH64)
	__asm__ __volatile__ ("getcon	cr17, %0" : "=r" (ti));
#elif defined(CONFIG_CPU_HAS_SR_RB)
	__asm__ __volatile__ ("stc	r7_bank, %0" : "=r" (ti));
#else
	unsigned long __dummy;

	__asm__ __volatile__ (
		"mov	r15, %0\n\t"
		"and	%1, %0\n\t"
		: "=&r" (ti), "=r" (__dummy)
		: "1" (~(THREAD_SIZE - 1))
		: "memory");
#endif

	return ti;
}
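
/*
 * Worked example of the fallback path above (illustrative): thread_info
 * sits at the base of the THREAD_SIZE-aligned kernel stack, so masking
 * the low bits off the stack pointer finds it. With THREAD_SIZE = 8 KiB
 * and r15 = 0x8c0139e4:
 *
 *	0x8c0139e4 & ~(0x2000 - 1) == 0x8c012000
 *
 * which is the base of the stack and hence the thread_info address.
 */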
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
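
/*
 * For instance (illustrative, assuming the usual PAGE_SHIFT of 12): with
 * THREAD_SHIFT of 13, THREAD_SIZE_ORDER is 1, i.e. each kernel stack is
 * an order-1 (two-page) allocation; with CONFIG_4KSTACKS it is order 0.
 */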
extern void arch_task_cache_init(void);
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
extern void arch_release_task_struct(struct task_struct *tsk);
extern void init_thread_xstate(void);

#endif /* __ASSEMBLY__ */

/*
 * Thread information flags
 *
 * - Limited to 24 bits, upper byte used for fault code encoding.
 *
 * - _TIF_ALLWORK_MASK and _TIF_WORK_MASK need to fit within 2 bytes, or
 *   we blow the tst immediate size constraints and need to fix up
 *   arch/sh/kernel/entry-common.S.
 */
#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
#define TIF_SIGPENDING		1	/* signal pending */
#define TIF_NEED_RESCHED	2	/* rescheduling necessary */
#define TIF_SINGLESTEP		4	/* singlestepping active */
#define TIF_SYSCALL_AUDIT	5	/* syscall auditing active */
#define TIF_SECCOMP		6	/* secure computing */
#define TIF_NOTIFY_RESUME	7	/* callback before returning to user */
#define TIF_SYSCALL_TRACEPOINT	8	/* for ftrace syscall instrumentation */
#define TIF_POLLING_NRFLAG	17	/* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE		18	/* is terminating due to OOM killer */

#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)

/* work to do in syscall trace */
#define _TIF_WORK_SYSCALL_MASK	(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
				 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP    | \
				 _TIF_SYSCALL_TRACEPOINT)

/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK	(_TIF_SYSCALL_TRACE | _TIF_SIGPENDING    | \
				 _TIF_NEED_RESCHED  | _TIF_SYSCALL_AUDIT | \
				 _TIF_SINGLESTEP    | _TIF_NOTIFY_RESUME | \
				 _TIF_SYSCALL_TRACEPOINT)

/* work to do on interrupt/exception return */
#define _TIF_WORK_MASK		(_TIF_ALLWORK_MASK & ~(_TIF_SYSCALL_TRACE | \
				 _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP))
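
/*
 * Sanity check by hand (illustrative): _TIF_ALLWORK_MASK collects bits
 * 0, 1, 2, 4, 5, 7 and 8, i.e. 0x01b7, and _TIF_WORK_MASK strips bits
 * 0, 4 and 5 from it, leaving 0x0186. Both values fit in 16 bits, which
 * satisfies the tst immediate constraint described above.
 */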
/*
 * Thread-synchronous status.
 *
 * This is different from the flags in that nobody else
 * ever touches our thread-synchronous status, so we don't
 * have to worry about atomic accesses.
 */
#define TS_RESTORE_SIGMASK	0x0001	/* restore signal mask in do_signal() */
#define TS_USEDFPU		0x0002	/* FPU used by this task this quantum */

#ifndef __ASSEMBLY__

#define HAVE_SET_RESTORE_SIGMASK	1
static inline void set_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();

	ti->status |= TS_RESTORE_SIGMASK;
	WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
}

#define TI_FLAG_FAULT_CODE_SHIFT	24

/*
 * Additional thread flag encoding
 */
static inline void set_thread_fault_code(unsigned int val)
{
	struct thread_info *ti = current_thread_info();

	/* compute the mask as unsigned: a signed ~0 >> 8 stays all-ones
	   and would never clear the old fault code in the top byte */
	ti->flags = (ti->flags & (~0U >> (32 - TI_FLAG_FAULT_CODE_SHIFT)))
		    | (val << TI_FLAG_FAULT_CODE_SHIFT);
}
static inline unsigned int get_thread_fault_code(void)
{
	struct thread_info *ti = current_thread_info();

	return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
}
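
/*
 * Worked encoding (illustrative): with TI_FLAG_FAULT_CODE_SHIFT = 24,
 * ~0U >> 8 == 0x00ffffff keeps the 24 TIF bits and clears the old fault
 * code, and e.g. set_thread_fault_code(FAULT_CODE_WRITE | FAULT_CODE_USER)
 * then ORs 0x11 << 24 == 0x11000000 into the upper byte, which
 * get_thread_fault_code() recovers as 0x11.
 */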
static inline void clear_restore_sigmask(void)
{
	current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
}

static inline bool test_restore_sigmask(void)
{
	return current_thread_info()->status & TS_RESTORE_SIGMASK;
}

static inline bool test_and_clear_restore_sigmask(void)
{
	struct thread_info *ti = current_thread_info();

	if (!(ti->status & TS_RESTORE_SIGMASK))
		return false;
	ti->status &= ~TS_RESTORE_SIGMASK;
	return true;
}
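
/*
 * Typical flow (a sketch, not from this header): a sigsuspend-style
 * syscall parks the old mask and raises the flag, and the signal
 * delivery path restores it on the way back to userspace:
 *
 *	current->saved_sigmask = current->blocked;
 *	set_restore_sigmask();
 *	...
 *	if (test_and_clear_restore_sigmask())
 *		set_current_blocked(&current->saved_sigmask);
 */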
#endif /* !__ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* __ASM_SH_THREAD_INFO_H */