/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_PROCESSOR_H
#define _ASM_TILE_PROCESSOR_H

#include <arch/chip.h>

#ifndef __ASSEMBLY__

/*
 * NOTE: we don't include <linux/ptrace.h> or <linux/percpu.h> as one
 * normally would, due to #include dependencies.
 */
#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/percpu.h>

#include <arch/spr_def.h>

struct task_struct;
struct thread_struct;

typedef struct {
        unsigned long seg;
} mm_segment_t;

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
void *current_text_addr(void);

#if CHIP_HAS_TILE_DMA()

/* Capture the state of a suspended DMA. */
struct tile_dma_state {
        int enabled;
        unsigned long src;
        unsigned long dest;
        unsigned long strides;
        unsigned long chunk_size;
        unsigned long src_chunk;
        unsigned long dest_chunk;
        unsigned long byte;
        unsigned long status;
};

/*
 * A mask of the DMA status register for selecting only the 'running'
 * and 'done' bits.
 */
#define DMA_STATUS_MASK \
        (SPR_DMA_STATUS__RUNNING_MASK | SPR_DMA_STATUS__DONE_MASK)

#endif
/*
 * Track asynchronous TLB events (faults and access violations)
 * that occur while we are in kernel mode from DMA or the SN processor.
 */
struct async_tlb {
        short fault_num;        /* original fault number; 0 if none */
        char is_fault;          /* was it a fault (vs an access violation) */
        char is_write;          /* for fault: was it caused by a write? */
        unsigned long address;  /* what address faulted? */
};

#ifdef CONFIG_HARDWALL
struct hardwall_info;
struct hardwall_task {
        /* Which hardwall is this task tied to? (or NULL if none) */
        struct hardwall_info *info;
        /* Chains this task into the list at info->task_head. */
        struct list_head list;
};
#ifdef __tilepro__
#define HARDWALL_TYPES 1   /* udn */
#else
#define HARDWALL_TYPES 3   /* udn, idn, and ipi */
#endif
#endif
struct thread_struct {
        /* kernel stack pointer */
        unsigned long ksp;
        /* kernel PC */
        unsigned long pc;
        /* starting user stack pointer (for page migration) */
        unsigned long usp0;
        /* pid of process that created this one */
        pid_t creator_pid;
#if CHIP_HAS_TILE_DMA()
        /* DMA info for suspended threads (byte == 0 means no DMA state) */
        struct tile_dma_state tile_dma_state;
#endif
        /* User EX_CONTEXT registers */
        unsigned long ex_context[2];
        /* User SYSTEM_SAVE registers */
        unsigned long system_save[4];
        /* User interrupt mask */
        unsigned long long interrupt_mask;
        /* User interrupt-control 0 state */
        unsigned long intctrl_0;
        /* Is this task currently doing a backtrace? */
        bool in_backtrace;
        /* Any other miscellaneous processor state bits */
        unsigned long proc_status;
#if !CHIP_HAS_FIXED_INTVEC_BASE()
        /* Interrupt base for PL0 interrupts */
        unsigned long interrupt_vector_base;
#endif
        /* Tile cache retry fifo high-water mark */
        unsigned long tile_rtf_hwm;
#if CHIP_HAS_DSTREAM_PF()
        /* Data stream prefetch control */
        unsigned long dstream_pf;
#endif
#ifdef CONFIG_HARDWALL
        /* Hardwall information for various resources. */
        struct hardwall_task hardwall[HARDWALL_TYPES];
#endif
#if CHIP_HAS_TILE_DMA()
        /* Async DMA TLB fault information */
        struct async_tlb dma_async_tlb;
#endif
};

#endif /* !__ASSEMBLY__ */
/*
 * Start with "sp" this many bytes below the top of the kernel stack.
 * This allows us to be cache-aware when handling the initial save
 * of the pt_regs value to the stack.
 */
#define STACK_TOP_DELTA 64

/*
 * When entering the kernel via a fault, start with the top of the
 * pt_regs structure this many bytes below the top of the page.
 * This aligns the pt_regs structure optimally for cache-line access.
 */
#ifdef __tilegx__
#define KSTK_PTREGS_GAP 48
#else
#define KSTK_PTREGS_GAP 56
#endif
#ifndef __ASSEMBLY__

#ifdef __tilegx__
#define TASK_SIZE_MAX (_AC(1, UL) << (MAX_VA_WIDTH - 1))
#else
#define TASK_SIZE_MAX PAGE_OFFSET
#endif

/* TASK_SIZE and related variables are always checked in "current" context. */
#ifdef CONFIG_COMPAT
#define COMPAT_TASK_SIZE (1UL << 31)
#define TASK_SIZE ((current_thread_info()->status & TS_COMPAT) ? \
                   COMPAT_TASK_SIZE : TASK_SIZE_MAX)
#else
#define TASK_SIZE TASK_SIZE_MAX
#endif

#define VDSO_BASE ((unsigned long)current->active_mm->context.vdso_base)
#define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x))

#define STACK_TOP TASK_SIZE

/* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */
#define STACK_TOP_MAX TASK_SIZE_MAX

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's, if it is using bottom-up mapping.
 */
#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#define INIT_THREAD { \
        .ksp = (unsigned long)init_stack + THREAD_SIZE - STACK_TOP_DELTA, \
        .interrupt_mask = -1ULL \
}
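/*
 * Note: the .ksp initializer above deliberately mirrors the task_ksp0()
 * macro defined below, so the boot task's kernel stack pointer is computed
 * the same way as every other task's.
 */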
/* Kernel stack top for the task that first boots on this cpu. */
DECLARE_PER_CPU(unsigned long, boot_sp);

/* PC to boot from on this cpu. */
DECLARE_PER_CPU(unsigned long, boot_pc);

/* Do necessary setup to start up a newly executed thread. */
static inline void start_thread(struct pt_regs *regs,
                                unsigned long pc, unsigned long usp)
{
        regs->pc = pc;
        regs->sp = usp;
        single_step_execve();
}

/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
        /* Nothing for now */
}
extern int do_work_pending(struct pt_regs *regs, u32 flags);

/*
 * Return saved (kernel) PC of a blocked thread.
 * Only used in a printk() in kernel/sched/core.c, so don't work too hard.
 */
#define thread_saved_pc(t) ((t)->thread.pc)

unsigned long get_wchan(struct task_struct *p);

/* Return initial ksp value for given task. */
#define task_ksp0(task) \
        ((unsigned long)(task)->stack + THREAD_SIZE - STACK_TOP_DELTA)

/* Return some info about the user process TASK. */
#define task_pt_regs(task) \
        ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
#define current_pt_regs() \
        ((struct pt_regs *)((stack_pointer | (THREAD_SIZE - 1)) - \
                            STACK_TOP_DELTA - (KSTK_PTREGS_GAP - 1)) - 1)
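/*
 * In current_pt_regs() above, "stack_pointer | (THREAD_SIZE - 1)" yields the
 * address of the last byte of the current kernel stack (assuming, as the
 * rest of this header does, that kernel stacks are THREAD_SIZE-aligned).
 * Subtracting STACK_TOP_DELTA and (KSTK_PTREGS_GAP - 1) then lands on the
 * same ksp0-minus-gap address that task_pt_regs() computes, and the final
 * "- 1" steps back by one struct pt_regs to point at the saved registers.
 */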
#define task_sp(task)   (task_pt_regs(task)->sp)
#define task_pc(task)   (task_pt_regs(task)->pc)

/* Aliases for pc and sp (used in fs/proc/array.c) */
#define KSTK_EIP(task)  task_pc(task)
#define KSTK_ESP(task)  task_sp(task)
/* Fine-grained unaligned JIT support */
#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))

extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);

/* Standard format for printing registers and other word-size data. */
#ifdef __tilegx__
# define REGFMT "0x%016lx"
#else
# define REGFMT "0x%08lx"
#endif

/*
 * Do some slow action (e.g. read a slow SPR).
 * Note that this must also have compiler-barrier semantics since
 * it may be used in a busy loop reading memory.
 */
static inline void cpu_relax(void)
{
        __insn_mfspr(SPR_PASS);
        barrier();
}
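/*
 * Typical use is a spin-wait loop (illustrative only):
 *
 *      while (!flag)
 *              cpu_relax();
 *
 * The barrier() above is what forces the compiler to re-read "flag" on
 * each iteration instead of hoisting the load out of the loop.
 */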
/* Info on this processor (see fs/proc/cpuinfo.c) */
struct seq_operations;
extern const struct seq_operations cpuinfo_op;

/* Provide information about the chip model. */
extern char chip_model[64];

/* Data on which physical memory controller corresponds to which NUMA node. */
extern int node_controller[];

/* Does the heap allocator return hash-for-home pages by default? */
extern int hash_default;

/* Should kernel stack pages be hash-for-home? */
extern int kstack_hash;

/* Does MAP_ANONYMOUS return hash-for-home pages by default? */
#define uheap_hash hash_default

/* Are we using huge pages in the TLB for kernel data? */
extern int kdata_huge;

/* Support standard Linux prefetching. */
#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch(x)
#define PREFETCH_STRIDE CHIP_L2_LINE_SIZE()

/* Bring a value into the L1D, faulting the TLB if necessary. */
#ifdef __tilegx__
#define prefetch_L1(x) __insn_prefetch_l1_fault((void *)(x))
#else
#define prefetch_L1(x) __insn_prefetch_L1((void *)(x))
#endif
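/*
 * Per the comment above, prefetch_L1() will take a TLB fault if needed,
 * so it can be used on addresses that may not yet be resident in the TLB.
 * Illustrative use (hypothetical fields), priming data a loop is about to
 * touch:
 *
 *      prefetch_L1(&p->next->payload);
 */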
#else /* __ASSEMBLY__ */

/* Do some slow action (e.g. read a slow SPR). */
#define CPU_RELAX       mfspr zero, SPR_PASS

#endif /* !__ASSEMBLY__ */

/* Assembly code assumes that the PL is in the low bits. */
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != 0
# error Fix assembly assumptions about PL
#endif

/* We sometimes use these macros for EX_CONTEXT_0_1 as well. */
#if SPR_EX_CONTEXT_1_1__PL_SHIFT != SPR_EX_CONTEXT_0_1__PL_SHIFT || \
    SPR_EX_CONTEXT_1_1__PL_RMASK != SPR_EX_CONTEXT_0_1__PL_RMASK || \
    SPR_EX_CONTEXT_1_1__ICS_SHIFT != SPR_EX_CONTEXT_0_1__ICS_SHIFT || \
    SPR_EX_CONTEXT_1_1__ICS_RMASK != SPR_EX_CONTEXT_0_1__ICS_RMASK
# error Fix assumptions that EX1 macros work for both PL0 and PL1
#endif
/* Allow pulling apart and recombining the PL and ICS bits in EX_CONTEXT. */
#define EX1_PL(ex1) \
        (((ex1) >> SPR_EX_CONTEXT_1_1__PL_SHIFT) & SPR_EX_CONTEXT_1_1__PL_RMASK)
#define EX1_ICS(ex1) \
        (((ex1) >> SPR_EX_CONTEXT_1_1__ICS_SHIFT) & SPR_EX_CONTEXT_1_1__ICS_RMASK)
#define PL_ICS_EX1(pl, ics) \
        (((pl) << SPR_EX_CONTEXT_1_1__PL_SHIFT) | \
         ((ics) << SPR_EX_CONTEXT_1_1__ICS_SHIFT))
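/*
 * For example (illustrative only): PL_ICS_EX1(KERNEL_PL, 1) builds an
 * EX_CONTEXT "1" value for a kernel-PL context with the interrupt critical
 * section bit set, and EX1_PL() applied to a saved ex1 word recovers the
 * protection level, so comparing it against USER_PL tests for a user-mode
 * context.
 */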
/*
 * Provide symbolic constants for PLs.
 */
#define USER_PL 0
#if CONFIG_KERNEL_PL == 2
#define GUEST_PL 1
#endif
#define KERNEL_PL CONFIG_KERNEL_PL
/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
#ifdef __tilegx__
#define CPU_SHIFT 48
#if CHIP_VA_WIDTH() > CPU_SHIFT
# error Too many VA bits!
#endif
#define MAX_CPU_ID ((1 << (64 - CPU_SHIFT)) - 1)
#define raw_smp_processor_id() \
        ((int)(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) >> CPU_SHIFT))
#define get_current_ksp0() \
        ((unsigned long)(((long)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) << \
                          (64 - CPU_SHIFT)) >> (64 - CPU_SHIFT)))
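/*
 * Note: the shift-left/shift-right pair above discards the cpu number held
 * in the top (64 - CPU_SHIFT) bits and, because the intermediate value is
 * signed, sign-extends from bit (CPU_SHIFT - 1), restoring the high bits of
 * the kernel stack address.
 */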
#define next_current_ksp0(task) ({ \
        unsigned long __ksp0 = task_ksp0(task) & ((1UL << CPU_SHIFT) - 1); \
        unsigned long __cpu = (long)raw_smp_processor_id() << CPU_SHIFT; \
        __ksp0 | __cpu; \
})
#else
#define LOG2_NR_CPU_IDS 6
#define MAX_CPU_ID ((1 << LOG2_NR_CPU_IDS) - 1)
#define raw_smp_processor_id() \
        ((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & MAX_CPU_ID)
#define get_current_ksp0() \
        (__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~MAX_CPU_ID)
#define next_current_ksp0(task) ({ \
        unsigned long __ksp0 = task_ksp0(task); \
        int __cpu = raw_smp_processor_id(); \
        BUG_ON(__ksp0 & MAX_CPU_ID); \
        __ksp0 | __cpu; \
})
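/*
 * The BUG_ON() above relies on task_ksp0() being STACK_TOP_DELTA (64-byte)
 * aligned, which leaves the low LOG2_NR_CPU_IDS bits free to hold the cpu
 * number (assuming THREAD_SIZE is itself a multiple of 64).
 */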
#endif

#if CONFIG_NR_CPUS > (MAX_CPU_ID + 1)
# error Too many cpus!
#endif

#endif /* _ASM_TILE_PROCESSOR_H */