#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */

#include <linux/config.h>

#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define IA64_NUM_DBG_REGS	8
/*
 * Limits for PMC and PMD are set to less than maximum architected values
 * but should be sufficient for a while
 */
#define IA64_NUM_PMC_REGS	64
#define IA64_NUM_PMD_REGS	64

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)

/*
 * TASK_SIZE really is a misnomer.  It really is the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE		(current->thread.task_size)
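/*
 * Illustrative sketch (not part of the original header): the top three bits
 * of an IA-64 virtual address select one of eight regions, which is why user
 * space ends at the base of region 5 (DEFAULT_TASK_SIZE above).  The helper
 * name below is made up for the example.
 */
#if 0	/* example only */
static inline unsigned long example_region_number (unsigned long addr)
{
	return addr >> 61;	/* e.g. DEFAULT_MAP_BASE lies in region 1 */
}
#endif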
  35. /*
  36. * This decides where the kernel will search for a free chunk of vm
  37. * space during mmap's.
  38. */
  39. #define TASK_UNMAPPED_BASE (current->thread.map_base)
  40. #define IA64_THREAD_FPH_VALID (__IA64_UL(1) << 0) /* floating-point high state valid? */
  41. #define IA64_THREAD_DBG_VALID (__IA64_UL(1) << 1) /* debug registers valid? */
  42. #define IA64_THREAD_PM_VALID (__IA64_UL(1) << 2) /* performance registers valid? */
  43. #define IA64_THREAD_UAC_NOPRINT (__IA64_UL(1) << 3) /* don't log unaligned accesses */
  44. #define IA64_THREAD_UAC_SIGBUS (__IA64_UL(1) << 4) /* generate SIGBUS on unaligned acc. */
  45. #define IA64_THREAD_MIGRATION (__IA64_UL(1) << 5) /* require migration
  46. sync at ctx sw */
  47. #define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6) /* don't log any fpswa faults */
  48. #define IA64_THREAD_FPEMU_SIGFPE (__IA64_UL(1) << 7) /* send a SIGFPE for fpswa faults */
  49. #define IA64_THREAD_UAC_SHIFT 3
  50. #define IA64_THREAD_UAC_MASK (IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
  51. #define IA64_THREAD_FPEMU_SHIFT 6
  52. #define IA64_THREAD_FPEMU_MASK (IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
  53. /*
  54. * This shift should be large enough to be able to represent 1000000000/itc_freq with good
  55. * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
  56. * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
  57. */
  58. #define IA64_NSEC_PER_CYC_SHIFT 30
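/*
 * Illustrative sketch (not part of the original header): given
 * nsec_per_cyc == (1000000000 << IA64_NSEC_PER_CYC_SHIFT) / itc_freq (see
 * struct cpuinfo_ia64 below), an ITC cycle delta converts to nanoseconds
 * with one multiply and one shift.  The helper name is made up.
 */
#if 0	/* example only */
static inline unsigned long example_cycles_to_ns (unsigned long cycles,
						  unsigned long nsec_per_cyc)
{
	return (cycles * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
}
#endif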
#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <asm/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};
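/*
 * Illustrative sketch (not part of the original header): struct ia64_psr is
 * normally overlaid on a saved PSR image, e.g. via the ia64_psr() macro from
 * <asm/ptrace.h> (used by KSTK_EIP() below), so individual bits can be tested.
 */
#if 0	/* example only */
static inline int example_user_mode (struct pt_regs *regs)
{
	return ia64_psr(regs)->cpl != 0;	/* non-zero privilege level => user */
}
#endif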
/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	__u32 softirq_pending;
	__u64 itm_delta;	/* # of clock cycles between clock ticks */
	__u64 itm_next;		/* interval timer mask value to use for next clock tick */
	__u64 nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	__u64 unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	__u64 unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	__u64 itc_freq;		/* frequency of ITC counter */
	__u64 proc_freq;	/* frequency of processor */
	__u64 cyc_per_usec;	/* itc_freq/1000000 */
	__u64 ptce_base;
	__u32 ptce_count[2];
	__u32 ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	__u64 loops_per_jiffy;
	int cpu;
	__u32 socket_id;	/* physical processor socket id */
	__u16 core_id;		/* core id */
	__u16 thread_id;	/* thread id */
	__u16 num_log;		/* Total number of logical processors on
				 * this socket that were successfully booted */
	__u8 cores_per_socket;	/* Cores per processor socket */
	__u8 threads_per_core;	/* Threads per core */
#endif

	/* CPUID-derived information: */
	__u64 ppn;
	__u64 features;
	__u8 number;
	__u8 revision;
	__u8 model;
	__u8 family;
	__u8 archrev;
	char vendor[16];

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};

DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data		(&__ia64_per_cpu_var(cpu_info))
#define cpu_data(cpu)		(&per_cpu(cpu_info, cpu))
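/*
 * Illustrative sketch (not part of the original header): reading a field of
 * the executing CPU's cpuinfo via local_cpu_data; the caller must have
 * preemption disabled so the CPU cannot change underneath it.
 */
#if 0	/* example only */
static inline unsigned long example_local_itc_freq (void)
{
	return local_cpu_data->itc_freq;	/* ITC ticks per second */
}
#endif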
extern void identify_cpu (struct cpuinfo_ia64 *);
extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define SET_UNALIGN_CTL(task,value)							\
({											\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)		\
			| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;										\
})
#define GET_UNALIGN_CTL(task,addr)							\
({											\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT, \
		 (int __user *) (addr));						\
})

#define SET_FPEMU_CTL(task,value)							\
({											\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)	\
		| (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
	0;										\
})
#define GET_FPEMU_CTL(task,addr)							\
({											\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT, \
		 (int __user *) (addr));						\
})
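/*
 * Illustrative note (not part of the original header): the four macros above
 * back the generic prctl(2) unaligned-access and fp-emulation controls, e.g.
 * from user space (flag names from <linux/prctl.h>):
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);	// -> SET_UNALIGN_CTL()
 *	prctl(PR_SET_FPEMU, PR_FPEMU_SIGFPE);		// -> SET_FPEMU_CTL()
 */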
#ifdef CONFIG_IA32_SUPPORT
struct desc_struct {
	unsigned int a, b;
};

#define desc_empty(desc)		(!((desc)->a + (desc)->b))
#define desc_equal(desc1, desc2)	(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

#define GDT_ENTRY_TLS_ENTRIES	3
#define GDT_ENTRY_TLS_MIN	6
#define GDT_ENTRY_TLS_MAX	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)

#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)

struct partial_page_list;
#endif

struct thread_struct {
	__u32 flags;		/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;		/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;		/* kernel stack pointer */
	__u64 map_base;		/* base address for get_unmapped_area() */
	__u64 task_size;	/* limit for task size */
	__u64 rbs_bot;		/* the base address for the RBS */
	int last_fph_cpu;	/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_IA32_SUPPORT
	__u64 eflag;		/* IA32 EFLAGS reg */
	__u64 fsr;		/* IA32 floating pt status reg */
	__u64 fcr;		/* IA32 floating pt control reg */
	__u64 fir;		/* IA32 fp except. instr. reg */
	__u64 fdr;		/* IA32 fp except. data reg */
	__u64 old_k1;		/* old value of ar.k1 */
	__u64 old_iob;		/* old IOBase value */
	struct partial_page_list *ppl;	/* partial page list for 4K page size issue */
	/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];

# define INIT_THREAD_IA32	.eflag =	0,			\
				.fsr =		0,			\
				.fcr =		0x17800000037fULL,	\
				.fir =		0,			\
				.fdr =		0,			\
				.old_k1 =	0,			\
				.old_iob =	0,			\
				.ppl =		NULL,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */

#ifdef CONFIG_PERFMON
	__u64 pmcs[IA64_NUM_PMC_REGS];
	__u64 pmds[IA64_NUM_PMD_REGS];
	void *pfm_context;		   /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking;  /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pmcs =	{0UL, },	\
				.pmds =	{0UL, },	\
				.pfm_context = NULL,	\
				.pfm_needs_checking = 0UL,
#else
# define INIT_THREAD_PM
#endif
	__u64 dbr[IA64_NUM_DBG_REGS];
	__u64 ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};

#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.task_size =	DEFAULT_TASK_SIZE,			\
	.last_fph_cpu = -1,					\
	INIT_THREAD_IA32					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}

#define start_thread(regs,new_ip,new_sp) do {							\
	set_fs(USER_DS);									\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = current->thread.rbs_bot;						\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;		/* allocate 16 byte scratch area */		\
	if (unlikely(!current->mm->dumpable)) {							\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
		regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
	}											\
} while (0)

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (ie the swapper or direct
 * descendants who haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be free'd until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
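/*
 * Illustrative sketch (not part of the original header) of typical
 * kernel_thread() usage; the thread function and the clone flags shown are
 * made up for the example.
 */
#if 0	/* example only */
static int example_thread_fn (void *unused)
{
	daemonize("example");		/* detach from the spawning context */
	for (;;)
		schedule();
	return 0;
}

/* ... kernel_thread(example_thread_fn, NULL, CLONE_FS | CLONE_FILES); ... */
#endif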
/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
({							\
	struct pt_regs *_regs = task_pt_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
})

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)	((tsk)->thread.ksp)

extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	    default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})

#define ia64_set_kr(regnum, r)					\
({								\
	switch (regnum) {					\
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
	    default: ia64_setreg_unknown_kr(); break;		\
	}							\
})
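/*
 * Illustrative note (not part of the original header): regnum is expected to
 * be a compile-time constant 0-7; for anything else the reference to the
 * undefined ia64_getreg_unknown_kr()/ia64_setreg_unknown_kr() survives into
 * the object file and the link fails.  For instance, the fph-owner macros
 * below cache a task pointer in the register indexed by IA64_KR_FPU_OWNER:
 */
#if 0	/* example only */
	struct task_struct *owner =
		(struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER);
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) current);
#endif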
/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/*
 * Return TRUE if task T owns the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})

/*
 * Mark task T as owning the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_set_local_fpu_owner(t) do {					\
	struct task_struct *__ia64_slfo_task = (t);				\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();		\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);	\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)
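/*
 * Illustrative sketch (not part of the original header) of the lazy-fph
 * protocol these macros support (preemption disabled throughout): reload
 * f32-f127 only if the task no longer owns this CPU's fph partition.
 */
#if 0	/* example only */
	if (!ia64_is_local_fpu_owner(task)) {
		ia64_load_fpu(task->thread.fph);	/* declared below */
		ia64_set_local_fpu_owner(task);
	}
#endif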
extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif

#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}

static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;
	psr = ia64_getreg(_IA64_REG_PSR);
	ia64_stop();
	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
	ia64_srlz_i();
	return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	ia64_stop();
	ia64_setreg(_IA64_REG_PSR_L, psr);
	ia64_srlz_d();
}
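/*
 * Illustrative sketch (not part of the original header): ia64_clear_ic() and
 * ia64_set_psr() are meant to bracket code that must run with interrupts and
 * interruption collection disabled, such as the translation inserts below.
 */
#if 0	/* example only */
	__u64 psr = ia64_clear_ic();
	/* ... ia64_itr()/ia64_itc() ... */
	ia64_set_psr(psr);
	ia64_srlz_i();
#endif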
/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	if (target_mask & 0x1)
		ia64_itri(tr_num, pte);
	if (target_mask & 0x2)
		ia64_itrd(tr_num, pte);
}
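/*
 * Illustrative sketch (not part of the original header): pinning a kernel
 * data mapping with a translation register.  The TR slot number and the
 * vmaddr/pte values are placeholders; real callers also run with psr.ic
 * cleared, as sketched above.
 */
#if 0	/* example only */
	psr = ia64_clear_ic();
	ia64_itr(0x2 /* data TR */, 2 /* slot */, vmaddr, pte, PAGE_SHIFT);
	ia64_set_psr(psr);
	ia64_srlz_i();
#endif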
/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		ia64_itci(pte);
	if (target_mask & 0x2)
		ia64_itcd(pte);
}

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		ia64_ptri(vmaddr, (log_size << 2));
	if (target_mask & 0x2)
		ia64_ptrd(vmaddr, (log_size << 2));
}

/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
	ia64_srlz_i();
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	ia64_setreg(_IA64_REG_CR_PTA, pta);
	ia64_srlz_i();
}

static inline void
ia64_eoi (void)
{
	ia64_setreg(_IA64_REG_CR_EOI, 0);
	ia64_srlz_d();
}

#define cpu_relax()	ia64_hint(ia64_hint_pause)

static inline int
ia64_get_irr(unsigned int vector)
{
	unsigned int reg = vector / 64;
	unsigned int bit = vector % 64;
	u64 irr;

	switch (reg) {
	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
	case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
	case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
	case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
	}

	return test_bit(bit, &irr);
}

static inline void
ia64_set_lrr0 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR0, val);
	ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR1, val);
	ia64_srlz_d();
}

/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}
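/*
 * Illustrative example (not part of the original header): a register spilled
 * at address 0xa078 occupies 8-byte slot (0xa078 >> 3) & 0x3f == 15, so
 * ia64_set_unat(&unat, (void *) 0xa078, 1) sets bit 15 of the UNAT mask.
 */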
/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}

/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;
	ia64_srlz_d();
	r = ia64_getreg(_IA64_REG_CR_IVR);
	ia64_srlz_d();
	return r;
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	__ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
	return retval;
}

static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
	return (w >> n) | (w << (64 - n));
}

#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))
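/*
 * Illustrative example (not part of the original header):
 * ia64_rotr(0x00000000000000ffUL, 8) == 0xff00000000000000UL, and
 * ia64_rotl(w, n) is simply a right rotation by 64 - n.  Both expect
 * 0 < n < 64; n == 0 would shift by 64, which C leaves undefined.
 */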
/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
	void *result;
	result = (void *) ia64_tpa(addr);
	return __va(result);
}

#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE		L1_CACHE_BYTES

static inline void
prefetch (const void *x)
{
	ia64_lfetch(ia64_lfhint_none, x);
}

static inline void
prefetchw (const void *x)
{
	ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x)	prefetchw(x)
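/*
 * Illustrative sketch (not part of the original header): a typical list walk
 * that uses prefetch() to start fetching the next node early.  The node type
 * is made up; lfetch is the non-faulting variant, so prefetching a NULL
 * next pointer is harmless.
 */
#if 0	/* example only */
	for (p = head; p != NULL; p = p->next) {
		prefetch(p->next);
		process(p);
	}
#endif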
extern unsigned long boot_option_idle_override;

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */