processor.h

#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */

#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define IA64_NUM_PHYS_STACK_REG	96
#define IA64_NUM_DBG_REGS	8

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)
/*
 * TASK_SIZE is really a misnomer: it is actually the maximum user
 * space address (plus one).  On IA-64, there are five user regions of 2TB
 * each (assuming 8KB page size), for a total of 10TB of user virtual
 * address space.
 */
#define TASK_SIZE_OF(tsk)	((tsk)->thread.task_size)
#define TASK_SIZE		TASK_SIZE_OF(current)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)
#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration sync at ctx sw */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
/*
 * This shift should be large enough to be able to represent 1000000000/itc_freq with good
 * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
 * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT	30
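
/*
 * Illustrative sketch (not part of the original header): with nsec_per_cyc
 * precomputed as (1000000000 << IA64_NSEC_PER_CYC_SHIFT) / itc_freq, an ITC
 * cycle count converts to nanoseconds with one multiply and one shift:
 *
 *	static inline __u64
 *	example_cyc_to_ns (__u64 cyc, __u64 nsec_per_cyc)	-- hypothetical helper
 *	{
 *		return (cyc * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
 *	}
 *
 * The 10-second bound described above keeps cyc * nsec_per_cyc from
 * overflowing 64 bits for the intervals the timer code converts.
 */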
#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <asm/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif
/*
 * The processor status register (psr): the IA64_PSR_* masks from
 * <asm/kregs.h>, expressed as bitfields for more efficient access.
 */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};
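
/*
 * Illustrative sketch (not part of the original header): the saved psr in a
 * struct pt_regs can be viewed through this layout via the ia64_psr() cast
 * helper from <asm/ptrace.h>:
 *
 *	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 *	if (psr->ac)
 *		;	-- task traps on unaligned accesses
 */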
/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	__u32 softirq_pending;
	__u64 itm_delta;	/* # of clock cycles between clock ticks */
	__u64 itm_next;		/* interval timer match value to use for next clock tick */
	__u64 nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	__u64 unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	__u64 unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	__u64 itc_freq;		/* frequency of ITC counter */
	__u64 proc_freq;	/* frequency of processor */
	__u64 cyc_per_usec;	/* itc_freq/1000000 */
	__u64 ptce_base;
	__u32 ptce_count[2];
	__u32 ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	__u64 loops_per_jiffy;
	int cpu;
	__u32 socket_id;	/* physical processor socket id */
	__u16 core_id;		/* core id */
	__u16 thread_id;	/* thread id */
	__u16 num_log;		/* Total number of logical processors on
				 * this socket that were successfully booted */
	__u8 cores_per_socket;	/* Cores per processor socket */
	__u8 threads_per_core;	/* Threads per core */
#endif

	/* CPUID-derived information: */
	__u64 ppn;
	__u64 features;
	__u8 number;
	__u8 revision;
	__u8 model;
	__u8 family;
	__u8 archrev;
	char vendor[16];
	char *model_name;

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};
DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data	(&__ia64_per_cpu_var(cpu_info))
#define cpu_data(cpu)	(&per_cpu(cpu_info, cpu))

extern void print_cpu_info (struct cpuinfo_ia64 *);
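
/*
 * Illustrative sketch (not part of the original header): reading per-CPU
 * state through the two accessors.  local_cpu_data requires preemption to
 * be disabled so the task cannot migrate mid-access:
 *
 *	preempt_disable();
 *	__u64 freq = local_cpu_data->itc_freq;		-- current CPU
 *	preempt_enable();
 *
 *	__u64 freq0 = cpu_data(0)->itc_freq;		-- a specific CPU
 */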
typedef struct {
	unsigned long seg;
} mm_segment_t;

#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})
#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int __user *) (addr));							\
})

#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
				| (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK)); \
	0;											\
})
#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int __user *) (addr));							\
})
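
/*
 * Illustrative sketch (not part of the original header): these four macros
 * back the generic prctl() interface; kernel/sys.c invokes them with the
 * prctl argument, e.g. from userspace:
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);	-- SIGBUS instead of fixup
 *	prctl(PR_SET_FPEMU, PR_FPEMU_NOPRINT);		-- emulate fpswa faults silently
 */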
#ifdef CONFIG_IA32_SUPPORT
struct desc_struct {
	unsigned int a, b;
};

#define desc_empty(desc)		(!((desc)->a | (desc)->b))
#define desc_equal(desc1, desc2)	(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))

#define GDT_ENTRY_TLS_ENTRIES	3
#define GDT_ENTRY_TLS_MIN	6
#define GDT_ENTRY_TLS_MAX	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)

#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)

struct ia64_partial_page_list;
#endif
struct thread_struct {
	__u32 flags;		/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;		/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;		/* kernel stack pointer */
	__u64 map_base;		/* base address for get_unmapped_area() */
	__u64 task_size;	/* limit for task size */
	__u64 rbs_bot;		/* the base address for the RBS */
	int last_fph_cpu;	/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_IA32_SUPPORT
	__u64 eflag;		/* IA32 EFLAGS reg */
	__u64 fsr;		/* IA32 floating pt status reg */
	__u64 fcr;		/* IA32 floating pt control reg */
	__u64 fir;		/* IA32 fp except. instr. reg */
	__u64 fdr;		/* IA32 fp except. data reg */
	__u64 old_k1;		/* old value of ar.k1 */
	__u64 old_iob;		/* old IOBase value */
	struct ia64_partial_page_list *ppl; /* partial page list for 4K page size issue */
	/* cached TLS descriptors. */
	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];

# define INIT_THREAD_IA32	.eflag =	0,			\
				.fsr =		0,			\
				.fcr =		0x17800000037fULL,	\
				.fir =		0,			\
				.fdr =		0,			\
				.old_k1 =	0,			\
				.old_iob =	0,			\
				.ppl =		NULL,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
	void *pfm_context;		     /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pfm_context =		NULL,	\
				.pfm_needs_checking =	0UL,
#else
# define INIT_THREAD_PM
#endif
	__u64 dbr[IA64_NUM_DBG_REGS];
	__u64 ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};
#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.task_size =	DEFAULT_TASK_SIZE,			\
	.last_fph_cpu =	-1,					\
	INIT_THREAD_IA32					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}
#define start_thread(regs,new_ip,new_sp) do {							\
	set_fs(USER_DS);									\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = current->thread.rbs_bot;						\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
	if (unlikely(!get_dumpable(current->mm))) {						\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
		regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
	}											\
} while (0)
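
/*
 * Illustrative sketch (not part of the original header): the ELF loader
 * (fs/binfmt_elf.c) hands control to a freshly exec'd image with
 *
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * leaving cr_iip at the entry point and sp 16 bytes below the top of the
 * initial user stack (the scratch area reserved above).
 */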
/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)
/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (i.e., the swapper or direct
 * descendants which haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be freed until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
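
/*
 * Illustrative sketch (not part of the original header): a typical caller
 * spawns a kernel thread running fn(arg), sharing fs/files with its parent:
 *
 *	static int my_worker (void *arg)	-- hypothetical function
 *	{
 *		return 0;
 *	}
 *
 *	pid_t pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
 */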
/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
({							\
	struct pt_regs *_regs = task_pt_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
})

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)	((tsk)->thread.ksp)
extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	    default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})

#define ia64_set_kr(regnum, r)					\
({								\
	switch (regnum) {					\
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
	    default: ia64_setreg_unknown_kr(); break;		\
	}							\
})
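
/*
 * Illustrative sketch (not part of the original header): regnum is expected
 * to be a compile-time constant, so the switch collapses to a single
 * ia64_getreg()/ia64_setreg(); a non-constant or out-of-range regnum leaves
 * a call to the deliberately undefined ia64_*_unknown_kr() externs and fails
 * at link time:
 *
 *	unsigned long owner = ia64_get_kr(IA64_KR_FPU_OWNER);
 *	ia64_set_kr(IA64_KR_FPU_OWNER, owner);
 */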
/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/*
 * Return TRUE if task T owns the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})

/*
 * Mark task T as owning the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_set_local_fpu_owner(t) do {						\
	struct task_struct *__ia64_slfo_task = (t);					\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();			\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);		\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)
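
/*
 * Illustrative sketch (not part of the original header): the lazy-fph
 * ownership protocol as a fault/switch path might use it -- f32-f127 are
 * only reloaded when a task that lost ownership touches them again:
 *
 *	preempt_disable();
 *	if (!ia64_is_local_fpu_owner(task)) {
 *		ia64_load_fpu(task->thread.fph);	-- restore f32-f127
 *		ia64_set_local_fpu_owner(task);
 *	}
 *	preempt_enable();
 */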
extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif

#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}
static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;

	psr = ia64_getreg(_IA64_REG_PSR);
	ia64_stop();
	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
	ia64_srlz_i();
	return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	ia64_stop();
	ia64_setreg(_IA64_REG_PSR_L, psr);
	ia64_srlz_i();
}
/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	if (target_mask & 0x1)
		ia64_itri(tr_num, pte);
	if (target_mask & 0x2)
		ia64_itrd(tr_num, pte);
}

/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		ia64_itci(pte);
	if (target_mask & 0x2)
		ia64_itcd(pte);
}
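
/*
 * Illustrative sketch (not part of the original header): translation
 * register inserts are done with interruption collection disabled, since
 * ia64_itr() clobbers cr.itir and cr.ifa on the way in.  A pinned data
 * mapping might look like (tr number and sizes hypothetical):
 *
 *	__u64 psr = ia64_clear_ic();
 *	ia64_itr(0x2, 2, vaddr, pte, log_page_size);	-- 0x2 = data TR
 *	ia64_set_psr(psr);
 *	ia64_srlz_i();
 */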
/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		ia64_ptri(vmaddr, (log_size << 2));
	if (target_mask & 0x2)
		ia64_ptrd(vmaddr, (log_size << 2));
}

/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
	ia64_srlz_i();
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	ia64_setreg(_IA64_REG_CR_PTA, pta);
	ia64_srlz_i();
}

static inline void
ia64_eoi (void)
{
	ia64_setreg(_IA64_REG_CR_EOI, 0);
	ia64_srlz_d();
}
#define cpu_relax()	ia64_hint(ia64_hint_pause)

static inline int
ia64_get_irr(unsigned int vector)
{
	unsigned int reg = vector / 64;
	unsigned int bit = vector % 64;
	u64 irr = 0;	/* initialized so an out-of-range vector reads as "not pending" */

	switch (reg) {
	case 0: irr = ia64_getreg(_IA64_REG_CR_IRR0); break;
	case 1: irr = ia64_getreg(_IA64_REG_CR_IRR1); break;
	case 2: irr = ia64_getreg(_IA64_REG_CR_IRR2); break;
	case 3: irr = ia64_getreg(_IA64_REG_CR_IRR3); break;
	}

	return test_bit(bit, &irr);
}
static inline void
ia64_set_lrr0 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR0, val);
	ia64_srlz_d();
}

static inline void
ia64_set_lrr1 (unsigned long val)
{
	ia64_setreg(_IA64_REG_CR_LRR1, val);
	ia64_srlz_d();
}
/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}
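
/*
 * Illustrative sketch (not part of the original header): the unat bit is
 * simply bits 8:3 of the spill address.  For a (hypothetical) spill to
 * 0xe00000000000e0f8, (0xe0f8 >> 3) & 0x3f == 31:
 *
 *	__u64 unat = 0;
 *	ia64_set_unat(&unat, (void *) 0xe00000000000e0f8UL, 1);
 *	-- unat now has bit 31 set
 */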
/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}
/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; _pc = (void *)ia64_getreg(_IA64_REG_IP); _pc; })

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;

	ia64_srlz_d();
	r = ia64_getreg(_IA64_REG_CR_IVR);
	ia64_srlz_d();
	return r;
}
static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	__ia64_set_dbr(regnum, value);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	retval = __ia64_get_dbr(regnum);
#ifdef CONFIG_ITANIUM
	ia64_srlz_d();
#endif
	return retval;
}
static inline __u64
ia64_rotr (__u64 w, __u64 n)
{
	return (w >> n) | (w << (64 - n));
}

#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))
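
/*
 * Illustrative worked example (not part of the original header):
 *
 *	ia64_rotr(0x123456789abcdef0UL, 8) == 0xf0123456789abcdeUL
 *	ia64_rotl(0x123456789abcdef0UL, 8) == 0x3456789abcdef012UL
 */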
/*
 * Take a mapped kernel address and return the equivalent address
 * in the region 7 identity mapped virtual area.
 */
static inline void *
ia64_imva (void *addr)
{
	void *result;

	result = (void *) ia64_tpa(addr);
	return __va(result);
}
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE	L1_CACHE_BYTES

static inline void
prefetch (const void *x)
{
	ia64_lfetch(ia64_lfhint_none, x);
}

static inline void
prefetchw (const void *x)
{
	ia64_lfetch_excl(ia64_lfhint_none, x);
}

#define spin_lock_prefetch(x)	prefetchw(x)
extern unsigned long boot_option_idle_override;

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */