processor.h
#ifndef __ASM_PPC64_PROCESSOR_H
#define __ASM_PPC64_PROCESSOR_H
/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/stringify.h>
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <asm/atomic.h>
#include <asm/ppcdebug.h>
#include <asm/a.out.h>
#endif
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/systemcfg.h>
#include <asm/cputable.h>
/* Machine State Register (MSR) Fields */
#define MSR_SF_LG 63 /* Enable 64 bit mode */
#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
#define MSR_HV_LG 60 /* Hypervisor state */
#define MSR_VEC_LG 25 /* Enable AltiVec */
#define MSR_POW_LG 18 /* Enable Power Management */
#define MSR_WE_LG 18 /* Wait State Enable */
#define MSR_TGPR_LG 17 /* TLB Update registers in use */
#define MSR_CE_LG 17 /* Critical Interrupt Enable */
#define MSR_ILE_LG 16 /* Interrupt Little Endian */
#define MSR_EE_LG 15 /* External Interrupt Enable */
#define MSR_PR_LG 14 /* Problem State / Privilege Level */
#define MSR_FP_LG 13 /* Floating Point enable */
#define MSR_ME_LG 12 /* Machine Check Enable */
#define MSR_FE0_LG 11 /* Floating Exception mode 0 */
#define MSR_SE_LG 10 /* Single Step */
#define MSR_BE_LG 9 /* Branch Trace */
#define MSR_DE_LG 9 /* Debug Exception Enable */
#define MSR_FE1_LG 8 /* Floating Exception mode 1 */
#define MSR_IP_LG 6 /* Exception prefix 0x000/0xFFF */
#define MSR_IR_LG 5 /* Instruction Relocate */
#define MSR_DR_LG 4 /* Data Relocate */
#define MSR_PE_LG 3 /* Protection Enable */
#define MSR_PX_LG 2 /* Protection Exclusive Mode */
#define MSR_PMM_LG 2 /* Performance monitor */
#define MSR_RI_LG 1 /* Recoverable Exception */
#define MSR_LE_LG 0 /* Little Endian */
#ifdef __ASSEMBLY__
#define __MASK(X) (1<<(X))
#else
#define __MASK(X) (1UL<<(X))
#endif
#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
#define MSR_WE __MASK(MSR_WE_LG) /* Wait State Enable */
#define MSR_TGPR __MASK(MSR_TGPR_LG) /* TLB Update registers in use */
#define MSR_CE __MASK(MSR_CE_LG) /* Critical Interrupt Enable */
#define MSR_ILE __MASK(MSR_ILE_LG) /* Interrupt Little Endian */
#define MSR_EE __MASK(MSR_EE_LG) /* External Interrupt Enable */
#define MSR_PR __MASK(MSR_PR_LG) /* Problem State / Privilege Level */
#define MSR_FP __MASK(MSR_FP_LG) /* Floating Point enable */
#define MSR_ME __MASK(MSR_ME_LG) /* Machine Check Enable */
#define MSR_FE0 __MASK(MSR_FE0_LG) /* Floating Exception mode 0 */
#define MSR_SE __MASK(MSR_SE_LG) /* Single Step */
#define MSR_BE __MASK(MSR_BE_LG) /* Branch Trace */
#define MSR_DE __MASK(MSR_DE_LG) /* Debug Exception Enable */
#define MSR_FE1 __MASK(MSR_FE1_LG) /* Floating Exception mode 1 */
#define MSR_IP __MASK(MSR_IP_LG) /* Exception prefix 0x000/0xFFF */
#define MSR_IR __MASK(MSR_IR_LG) /* Instruction Relocate */
#define MSR_DR __MASK(MSR_DR_LG) /* Data Relocate */
#define MSR_PE __MASK(MSR_PE_LG) /* Protection Enable */
#define MSR_PX __MASK(MSR_PX_LG) /* Protection Exclusive Mode */
#define MSR_PMM __MASK(MSR_PMM_LG) /* Performance monitor */
#define MSR_RI __MASK(MSR_RI_LG) /* Recoverable Exception */
#define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */
#define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF
#define MSR_KERNEL MSR_ | MSR_SF | MSR_HV
#define MSR_USER32 MSR_ | MSR_PR | MSR_EE
#define MSR_USER64 MSR_USER32 | MSR_SF
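/*
 * For illustration: after expansion MSR_KERNEL is
 *   MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF | MSR_SF | MSR_HV.
 * Note that MSR_ and the MSR_KERNEL/MSR_USER* macros expand without
 * surrounding parentheses, so they are meant to be used standalone or
 * OR-ed with further bits, e.g. (sketch):
 *
 *	unsigned long msr = MSR_KERNEL | MSR_EE;
 */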
/* Floating Point Status and Control Register (FPSCR) Fields */
#define FPSCR_FX 0x80000000 /* FPU exception summary */
#define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */
#define FPSCR_VX 0x20000000 /* Invalid operation summary */
#define FPSCR_OX 0x10000000 /* Overflow exception summary */
#define FPSCR_UX 0x08000000 /* Underflow exception summary */
#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
#define FPSCR_XX 0x02000000 /* Inexact exception summary */
#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
#define FPSCR_VXIDI 0x00400000 /* Invalid op for Inv / Inv */
#define FPSCR_VXZDZ 0x00200000 /* Invalid op for Zero / Zero */
#define FPSCR_VXIMZ 0x00100000 /* Invalid op for Inv * Zero */
#define FPSCR_VXVC 0x00080000 /* Invalid op for Compare */
#define FPSCR_FR 0x00040000 /* Fraction rounded */
#define FPSCR_FI 0x00020000 /* Fraction inexact */
#define FPSCR_FPRF 0x0001f000 /* FPU Result Flags */
#define FPSCR_FPCC 0x0000f000 /* FPU Condition Codes */
#define FPSCR_VXSOFT 0x00000400 /* Invalid op for software request */
#define FPSCR_VXSQRT 0x00000200 /* Invalid op for square root */
#define FPSCR_VXCVI 0x00000100 /* Invalid op for integer convert */
#define FPSCR_VE 0x00000080 /* Invalid op exception enable */
#define FPSCR_OE 0x00000040 /* IEEE overflow exception enable */
#define FPSCR_UE 0x00000020 /* IEEE underflow exception enable */
#define FPSCR_ZE 0x00000010 /* IEEE zero divide exception enable */
#define FPSCR_XE 0x00000008 /* FP inexact exception enable */
#define FPSCR_NI 0x00000004 /* FPU non IEEE-Mode */
#define FPSCR_RN 0x00000003 /* FPU rounding control */
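/*
 * For reference (architected FPSCR[RN] encoding): the two FPSCR_RN bits
 * select the rounding mode -- 0 = round to nearest, 1 = round toward zero,
 * 2 = round toward +infinity, 3 = round toward -infinity.
 */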
/* Special Purpose Registers (SPRNs) */
#define SPRN_CTR 0x009 /* Count Register */
#define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */
#define DABR_TRANSLATION (1UL << 2)
#define SPRN_DAR 0x013 /* Data Address Register */
#define SPRN_DEC 0x016 /* Decrement Register */
#define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
#define DSISR_NOHPTE 0x40000000 /* no translation found */
#define DSISR_PROTFAULT 0x08000000 /* protection fault */
#define DSISR_ISSTORE 0x02000000 /* access was a store */
#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
#define SPRN_HID0 0x3F0 /* Hardware Implementation Register 0 */
#define SPRN_MSRDORM 0x3F1 /* Hardware Implementation Register 1 */
#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */
#define SPRN_NIADORM 0x3F3 /* Hardware Implementation Register 2 */
#define SPRN_HID4 0x3F4 /* 970 HID4 */
#define SPRN_HID5 0x3F6 /* 970 HID5 */
#define SPRN_HID6 0x3F9 /* BE HID 6 */
#define HID6_LB (0x0F<<12) /* Concurrent Large Page Modes */
#define HID6_DLP (1<<20) /* Disable all large page modes (4K only) */
#define SPRN_TSCR 0x399 /* Thread switch control on BE */
#define SPRN_TTR 0x39A /* Thread switch timeout on BE */
#define TSCR_DEC_ENABLE 0x200000 /* Decrementer Interrupt */
#define TSCR_EE_ENABLE 0x100000 /* External Interrupt */
#define TSCR_EE_BOOST 0x080000 /* External Interrupt Boost */
#define SPRN_TSC 0x3FD /* Thread switch control on others */
#define SPRN_TST 0x3FC /* Thread switch timeout on others */
#define SPRN_L2CR 0x3F9 /* Level 2 Cache Control Register */
#define SPRN_LR 0x008 /* Link Register */
#define SPRN_PIR 0x3FF /* Processor Identification Register */
#define SPRN_PIT 0x3DB /* Programmable Interval Timer */
#define SPRN_PURR 0x135 /* Processor Utilization of Resources Register */
#define SPRN_PVR 0x11F /* Processor Version Register */
#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
#define SPRN_SDR1 0x019 /* MMU Hash Base Register */
#define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */
#define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */
#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, W/O) */
#define SPRN_TBWU 0x11D /* Time Base Write Upper Register (super, W/O) */
#define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */
#define SPRN_USIA 0x3AB /* User Sampled Instruction Address Register */
#define SPRN_XER 0x001 /* Fixed Point Exception Register */
#define SPRN_VRSAVE 0x100 /* Vector save */
#define SPRN_CTRLF 0x088
#define SPRN_CTRLT 0x098
#define CTRL_RUNLATCH 0x1
/* Performance monitor SPRs */
#define SPRN_SIAR 780
#define SPRN_SDAR 781
#define SPRN_MMCRA 786
#define MMCRA_SIHV 0x10000000UL /* state of MSR HV when SIAR set */
#define MMCRA_SIPR 0x08000000UL /* state of MSR PR when SIAR set */
#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
#define SPRN_PMC1 787
#define SPRN_PMC2 788
#define SPRN_PMC3 789
#define SPRN_PMC4 790
#define SPRN_PMC5 791
#define SPRN_PMC6 792
#define SPRN_PMC7 793
#define SPRN_PMC8 794
#define SPRN_MMCR0 795
#define MMCR0_FC 0x80000000UL /* freeze counters. set to 1 on a perfmon exception */
#define MMCR0_FCS 0x40000000UL /* freeze in supervisor state */
#define MMCR0_KERNEL_DISABLE MMCR0_FCS
#define MMCR0_FCP 0x20000000UL /* freeze in problem state */
#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
#define MMCR0_FCECE 0x02000000UL /* freeze counters on enabled condition or event */
#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable */
#define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable */
#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
#define MMCR0_SHRFC 0x00000040UL /* share freeze conditions between threads */
#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
#define MMCR0_FCTA 0x00000004UL /* freeze counters in tags active mode */
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
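/*
 * Sketch of how the MMCR0 bits above compose when arming the performance
 * monitor (exact programming depends on the processor and on the perfmon
 * code using them):
 *
 *	mtspr(SPRN_MMCR0, MMCR0_KERNEL_DISABLE | MMCR0_PMC1CE | MMCR0_PMXE);
 *
 * i.e. freeze the counters while in supervisor state, enable PMC1, and
 * enable the performance monitor exception.
 */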
/* Short-hand versions for a number of the above SPRNs */
#define CTR SPRN_CTR /* Counter Register */
#define DAR SPRN_DAR /* Data Address Register */
#define DABR SPRN_DABR /* Data Address Breakpoint Register */
#define DEC SPRN_DEC /* Decrement Register */
#define DSISR SPRN_DSISR /* Data Storage Interrupt Status Register */
#define HID0 SPRN_HID0 /* Hardware Implementation Register 0 */
#define MSRDORM SPRN_MSRDORM /* MSR Dormant Register */
#define NIADORM SPRN_NIADORM /* NIA Dormant Register */
#define TSC SPRN_TSC /* Thread switch control */
#define TST SPRN_TST /* Thread switch timeout */
#define IABR SPRN_IABR /* Instruction Address Breakpoint Register */
#define L2CR SPRN_L2CR /* PPC 750 L2 control register */
#define __LR SPRN_LR
#define PVR SPRN_PVR /* Processor Version */
#define PIR SPRN_PIR /* Processor ID */
#define PURR SPRN_PURR /* Processor Utilization of Resource Register */
#define SDR1 SPRN_SDR1 /* MMU hash base register */
#define SPR0 SPRN_SPRG0 /* Supervisor Private Registers */
#define SPR1 SPRN_SPRG1
#define SPR2 SPRN_SPRG2
#define SPR3 SPRN_SPRG3
#define SPRG0 SPRN_SPRG0
#define SPRG1 SPRN_SPRG1
#define SPRG2 SPRN_SPRG2
#define SPRG3 SPRN_SPRG3
#define SRR0 SPRN_SRR0 /* Save and Restore Register 0 */
#define SRR1 SPRN_SRR1 /* Save and Restore Register 1 */
#define TBRL SPRN_TBRL /* Time Base Read Lower Register */
#define TBRU SPRN_TBRU /* Time Base Read Upper Register */
#define TBWL SPRN_TBWL /* Time Base Write Lower Register */
#define TBWU SPRN_TBWU /* Time Base Write Upper Register */
#define XER SPRN_XER
/* Processor Version Register (PVR) field extraction */
#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */
/* Processor Version Numbers */
#define PV_NORTHSTAR 0x0033
#define PV_PULSAR 0x0034
#define PV_POWER4 0x0035
#define PV_ICESTAR 0x0036
#define PV_SSTAR 0x0037
#define PV_POWER4p 0x0038
#define PV_970 0x0039
#define PV_POWER5 0x003A
#define PV_POWER5p 0x003B
#define PV_970FX 0x003C
#define PV_630 0x0040
#define PV_630p 0x0041
#define PV_970MP 0x0044
#define PV_BE 0x0070
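/*
 * For illustration: PVR_VER() extracts one of the PV_* values from the
 * Processor Version Register, e.g. (sketch):
 *
 *	unsigned long pvr = mfspr(SPRN_PVR);
 *	int is_970fx = (PVR_VER(pvr) == PV_970FX);
 */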
/* Platforms supported by PPC64 */
#define PLATFORM_PSERIES 0x0100
#define PLATFORM_PSERIES_LPAR 0x0101
#define PLATFORM_ISERIES_LPAR 0x0201
#define PLATFORM_LPAR 0x0001
#define PLATFORM_POWERMAC 0x0400
#define PLATFORM_MAPLE 0x0500
#define PLATFORM_BPA 0x1000
/* Compatibility with drivers coming from PPC32 world */
#define _machine (systemcfg->platform)
#define _MACH_Pmac PLATFORM_POWERMAC
/*
 * List of interrupt controllers.
 */
#define IC_INVALID 0
#define IC_OPEN_PIC 1
#define IC_PPC_XIC 2
#define IC_BPA_IIC 3
#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)
#ifdef __ASSEMBLY__
#define _GLOBAL(name) \
	.section ".text"; \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):
#define _KPROBE(name) \
	.section ".kprobes.text","a"; \
	.align 2 ; \
	.globl name; \
	.globl GLUE(.,name); \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):
#define _STATIC(name) \
	.section ".text"; \
	.align 2 ; \
	.section ".opd","aw"; \
name: \
	.quad GLUE(.,name); \
	.quad .TOC.@tocbase; \
	.quad 0; \
	.previous; \
	.type GLUE(.,name),@function; \
GLUE(.,name):
#else /* __ASSEMBLY__ */
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
/* Macros for setting and retrieving special purpose registers */
#define mfmsr() ({unsigned long rval; \
	asm volatile("mfmsr %0" : "=r" (rval)); rval;})
#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
	: : "r" (v))
#define mtmsrd(v) __mtmsrd((v), 0)
#define mfspr(rn) ({unsigned long rval; \
	asm volatile("mfspr %0," __stringify(rn) \
	: "=r" (rval)); rval;})
#define mtspr(rn, v) asm volatile("mtspr " __stringify(rn) ",%0" : : "r" (v))
#define mftb() ({unsigned long rval; \
	asm volatile("mftb %0" : "=r" (rval)); rval;})
#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
#define mfasr() ({unsigned long rval; \
	asm volatile("mfasr %0" : "=r" (rval)); rval;})
static inline void set_tb(unsigned int upper, unsigned int lower)
{
	mttbl(0);
	mttbu(upper);
	mttbl(lower);
}
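/*
 * mfspr()/mtspr() stringify the SPR number directly into the instruction,
 * so the register must be a compile-time constant (one of the SPRN_*
 * values above).  For illustration (sketch):
 *
 *	unsigned long pvr = mfspr(SPRN_PVR);	read the processor version
 *	mtspr(SPRN_DEC, 0x7fffffff);		rearm the decrementer
 */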
#define __get_SP() ({unsigned long sp; \
	asm volatile("mr %0,1": "=r" (sp)); sp;})
#ifdef __KERNEL__
extern int have_of;
extern u64 ppc64_interrupt_controller;
struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);
/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);
/* Create a new kernel thread. */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
/* Lazy FPU handling on uni-processor */
extern struct task_struct *last_task_used_math;
extern struct task_struct *last_task_used_altivec;
/* 64-bit user address space is 44-bits (16TB user VM) */
#define TASK_SIZE_USER64 (0x0000100000000000UL)
/*
 * 32-bit user address space is 4GB - 1 page
 * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT)
 */
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
	TASK_SIZE_USER32 : TASK_SIZE_USER64)
/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))
#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)||(ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? \
	TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
typedef struct {
	unsigned long seg;
} mm_segment_t;
struct thread_struct {
	unsigned long	ksp;		/* Kernel stack pointer */
	unsigned long	ksp_vsid;
	struct pt_regs	*regs;		/* Pointer to saved register state */
	mm_segment_t	fs;		/* for get_fs() validation */
	double		fpr[32];	/* Complete floating point set */
	unsigned long	fpscr;		/* Floating point status (plus pad) */
	unsigned long	fpexc_mode;	/* Floating-point exception mode */
	unsigned long	start_tb;	/* Start purr when proc switched in */
	unsigned long	accum_tb;	/* Total accumulated purr for process */
	unsigned long	vdso_base;	/* base of the vDSO library */
#ifdef CONFIG_ALTIVEC
	/* Complete AltiVec register set */
	vector128	vr[32] __attribute((aligned(16)));
	/* AltiVec status */
	vector128	vscr __attribute((aligned(16)));
	unsigned long	vrsave;
	int		used_vr;	/* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
};
#define ARCH_MIN_TASKALIGN 16
#define INIT_SP (sizeof(init_stack) + (unsigned long) &init_stack)
#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.regs = (struct pt_regs *)INIT_SP - 1, \
	.fs = KERNEL_DS, \
	.fpr = {0}, \
	.fpscr = 0, \
	.fpexc_mode = MSR_FE0|MSR_FE1, \
}
/*
 * Return saved PC of a blocked thread. For now, this is the "user" PC
 */
#define thread_saved_pc(tsk) \
	((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
/* Get/set floating-point exception mode */
#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))
extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);
static inline unsigned int __unpack_fe01(unsigned long msr_bits)
{
	return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8);
}
static inline unsigned long __pack_fe01(unsigned int fpmode)
{
	return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1);
}
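/*
 * Worked example: MSR_FE0 is bit 11 (0x800) and MSR_FE1 is bit 8 (0x100),
 * so the 2-bit fpmode maps bit 1 -> FE0 and bit 0 -> FE1.  __pack_fe01(3)
 * therefore yields MSR_FE0|MSR_FE1 (precise FP exceptions) and
 * __pack_fe01(0) yields 0 (FP exceptions disabled); __unpack_fe01() is the
 * inverse mapping on those two MSR bits.
 */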
#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
/*
 * Prefetch macros.
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void prefetch(const void *x)
{
	if (unlikely(!x))
		return;
	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}
static inline void prefetchw(const void *x)
{
	if (unlikely(!x))
		return;
	__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}
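/*
 * Typical use (sketch, hypothetical names): touch the next element ahead
 * of time while walking a linked structure, e.g.
 *
 *	for (p = head; p != NULL; p = p->next) {
 *		prefetch(p->next);
 *		do_something(p);
 *	}
 *
 * dcbt/dcbtst are cache hints and do not fault, so prefetching a pointer
 * that is never dereferenced is safe.
 */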
#define spin_lock_prefetch(x) prefetchw(x)
#define HAVE_ARCH_PICK_MMAP_LAYOUT
static inline void ppc64_runlatch_on(void)
{
	unsigned long ctrl;
	if (cpu_has_feature(CPU_FTR_CTRL)) {
		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
static inline void ppc64_runlatch_off(void)
{
	unsigned long ctrl;
	if (cpu_has_feature(CPU_FTR_CTRL)) {
		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
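/*
 * The run latch in the CTRL register is used to indicate whether the
 * thread is doing useful work.  Typical use (sketch) is around an idle
 * loop:
 *
 *	ppc64_runlatch_off();
 *	while (!need_resched())
 *		cpu_relax();
 *	ppc64_runlatch_on();
 */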
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#ifdef __KERNEL__
#define RUNLATCH_ON(REG) \
BEGIN_FTR_SECTION \
	mfspr	(REG),SPRN_CTRLF; \
	ori	(REG),(REG),CTRL_RUNLATCH; \
	mtspr	SPRN_CTRLT,(REG); \
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
#endif
/*
 * Number of entries in the SLB. If this ever changes we should handle
 * it with a cpu feature fixup.
 */
#define SLB_NUM_ENTRIES 64
#endif /* __ASM_PPC64_PROCESSOR_H */