#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
 *
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */

#include <linux/compiler.h>

/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier()  asm volatile ("":::"memory")

#define ia64_stop()     asm volatile (";;"::)

#define ia64_invala_gr(regnum)  asm volatile ("invala.e r%0" :: "i"(regnum))

#define ia64_invala_fr(regnum)  asm volatile ("invala.e f%0" :: "i"(regnum))

#define ia64_flushrs()  asm volatile ("flushrs;;":::"memory")

#define ia64_loadrs()   asm volatile ("loadrs;;":::"memory")

extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);

#ifdef __KERNEL__
register unsigned long ia64_r13 asm ("r13") __used;
#endif

#define ia64_native_setreg(regnum, val) \
({ \
    switch (regnum) { \
    case _IA64_REG_PSR_L: \
        asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
        break; \
    case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
        asm volatile ("mov ar%0=%1" :: \
                      "i" (regnum - _IA64_REG_AR_KR0), \
                      "r"(val): "memory"); \
        break; \
    case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
        asm volatile ("mov cr%0=%1" :: \
                      "i" (regnum - _IA64_REG_CR_DCR), \
                      "r"(val): "memory" ); \
        break; \
    case _IA64_REG_SP: \
        asm volatile ("mov r12=%0" :: \
                      "r"(val): "memory"); \
        break; \
    case _IA64_REG_GP: \
        asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \
        break; \
    default: \
        ia64_bad_param_for_setreg(); \
        break; \
    } \
})

#define ia64_native_getreg(regnum) \
({ \
    __u64 ia64_intri_res; \
\
    switch (regnum) { \
    case _IA64_REG_GP: \
        asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
        break; \
    case _IA64_REG_IP: \
        asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
        break; \
    case _IA64_REG_PSR: \
        asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
        break; \
    case _IA64_REG_TP: /* for current() */ \
        ia64_intri_res = ia64_r13; \
        break; \
    case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
        asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
                      : "i"(regnum - _IA64_REG_AR_KR0)); \
        break; \
    case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
        asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
                      : "i" (regnum - _IA64_REG_CR_DCR)); \
        break; \
    case _IA64_REG_SP: \
        asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
        break; \
    default: \
        ia64_bad_param_for_getreg(); \
        break; \
    } \
    ia64_intri_res; \
})
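
/*
 * Illustrative usage (not part of the original header): the _IA64_REG_*
 * register numbers are defined elsewhere (asm/ia64regs.h) and must be
 * compile-time constants so the switch above collapses to a single mov.
 *
 *     unsigned long sp = ia64_native_getreg(_IA64_REG_SP);
 *     ia64_native_setreg(_IA64_REG_AR_KR0, some_value);   // some_value: hypothetical
 *
 * Passing a non-constant or unsupported register number leaves a call to
 * ia64_bad_param_for_setreg()/getreg() in the object file, which is
 * expected to fail at link time.
 */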
#define ia64_hint_pause 0

#define ia64_hint(mode) \
({ \
    switch (mode) { \
    case ia64_hint_pause: \
        asm volatile ("hint @pause" ::: "memory"); \
        break; \
    } \
})
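
/*
 * Illustrative spin-wait loop (an assumption about typical use, not code
 * from this header): hint @pause tells the core that the thread is busy
 * waiting, so a hardware sibling thread can make progress.
 *
 *     while (!READ_ONCE(flag))            // flag: hypothetical shared int
 *         ia64_hint(ia64_hint_pause);
 */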
/* Integer values for mux1 instruction */
#define ia64_mux1_brcst 0
#define ia64_mux1_mix   8
#define ia64_mux1_shuf  9
#define ia64_mux1_alt   10
#define ia64_mux1_rev   11

#define ia64_mux1(x, mode) \
({ \
    __u64 ia64_intri_res; \
\
    switch (mode) { \
    case ia64_mux1_brcst: \
        asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
        break; \
    case ia64_mux1_mix: \
        asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
        break; \
    case ia64_mux1_shuf: \
        asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
        break; \
    case ia64_mux1_alt: \
        asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
        break; \
    case ia64_mux1_rev: \
        asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
        break; \
    } \
    ia64_intri_res; \
})
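
/*
 * Illustrative example (an assumption, not from the original source):
 * @brcst replicates the least-significant byte into all eight byte lanes,
 * and @rev reverses the byte order, so mux1 can serve as a 64-bit byte swap.
 *
 *     __u64 lanes = ia64_mux1(0x5AULL, ia64_mux1_brcst);  // 0x5a5a5a5a5a5a5a5a
 *     __u64 bswap = ia64_mux1(x, ia64_mux1_rev);          // byte-reversed x
 */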
#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
# define ia64_popcnt(x) __builtin_popcountl(x)
#else
# define ia64_popcnt(x) \
({ \
    __u64 ia64_intri_res; \
    asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
\
    ia64_intri_res; \
})
#endif

#define ia64_getf_exp(x) \
({ \
    long ia64_intri_res; \
\
    asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
\
    ia64_intri_res; \
})

#define ia64_shrp(a, b, count) \
({ \
    __u64 ia64_intri_res; \
    asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
    ia64_intri_res; \
})

#define ia64_ldfs(regnum, x) \
({ \
    register double __f__ asm ("f"#regnum); \
    asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x)); \
})

#define ia64_ldfd(regnum, x) \
({ \
    register double __f__ asm ("f"#regnum); \
    asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x)); \
})

#define ia64_ldfe(regnum, x) \
({ \
    register double __f__ asm ("f"#regnum); \
    asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x)); \
})

#define ia64_ldf8(regnum, x) \
({ \
    register double __f__ asm ("f"#regnum); \
    asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x)); \
})

#define ia64_ldf_fill(regnum, x) \
({ \
    register double __f__ asm ("f"#regnum); \
    asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x)); \
})

#define ia64_st4_rel_nta(m, val) \
({ \
    asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val)); \
})

#define ia64_stfs(x, regnum) \
({ \
    register double __f__ asm ("f"#regnum); \
    asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stfd(x, regnum) \
({ \
    register double __f__ asm ("f"#regnum); \
    asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stfe(x, regnum) \
({ \
    register double __f__ asm ("f"#regnum); \
    asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stf8(x, regnum) \
({ \
    register double __f__ asm ("f"#regnum); \
    asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stf_spill(x, regnum) \
({ \
    register double __f__ asm ("f"#regnum); \
    asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
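
/*
 * Illustrative note (an assumption, not from the original source): regnum
 * must be a literal floating-point register number, because "f"#regnum is
 * pasted into the register name at preprocessing time.  ldf.fill/stf.spill
 * move the full register image and are typically paired when saving and
 * restoring FP state:
 *
 *     ia64_stf_spill(save_area, 32);   // spill f32 to memory
 *     ia64_ldf_fill(32, save_area);    // later, fill f32 back
 *
 * save_area is a hypothetical, 16-byte-aligned buffer pointer.
 */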
#define ia64_fetchadd4_acq(p, inc) \
({ \
\
    __u64 ia64_intri_res; \
    asm volatile ("fetchadd4.acq %0=[%1],%2" \
                  : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
                  : "memory"); \
\
    ia64_intri_res; \
})

#define ia64_fetchadd4_rel(p, inc) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("fetchadd4.rel %0=[%1],%2" \
                  : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
                  : "memory"); \
\
    ia64_intri_res; \
})

#define ia64_fetchadd8_acq(p, inc) \
({ \
\
    __u64 ia64_intri_res; \
    asm volatile ("fetchadd8.acq %0=[%1],%2" \
                  : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
                  : "memory"); \
\
    ia64_intri_res; \
})

#define ia64_fetchadd8_rel(p, inc) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("fetchadd8.rel %0=[%1],%2" \
                  : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
                  : "memory"); \
\
    ia64_intri_res; \
})
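
/*
 * Illustrative usage (an assumption, not from the original source): the
 * "i" constraint means inc must be a compile-time constant, and fetchadd
 * only accepts the increments -16, -8, -4, -1, 1, 4, 8 and 16.  The macro
 * returns the value the word held before the add.
 *
 *     int old = ia64_fetchadd4_acq(&counter, 1);   // atomic counter++
 *
 * counter is a hypothetical int aligned on a 4-byte boundary.
 */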
#define ia64_xchg1(ptr,x) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("xchg1 %0=[%1],%2" \
                  : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \
    ia64_intri_res; \
})

#define ia64_xchg2(ptr,x) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
                  : "r" (ptr), "r" (x) : "memory"); \
    ia64_intri_res; \
})

#define ia64_xchg4(ptr,x) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
                  : "r" (ptr), "r" (x) : "memory"); \
    ia64_intri_res; \
})

#define ia64_xchg8(ptr,x) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
                  : "r" (ptr), "r" (x) : "memory"); \
    ia64_intri_res; \
})
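
/*
 * Illustrative usage (an assumption, not from the original source): xchg
 * atomically stores the new value and returns the old one, with acquire
 * semantics.
 *
 *     unsigned long prev = ia64_xchg8(&lock_word, 1UL);   // try-lock style
 *     if (prev == 0)
 *         ...we now own lock_word...
 *
 * lock_word is a hypothetical, naturally aligned unsigned long.
 */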
#define ia64_cmpxchg1_acq(ptr, new, old) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
    asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv": \
                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
    ia64_intri_res; \
})

#define ia64_cmpxchg1_rel(ptr, new, old) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
    asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv": \
                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
    ia64_intri_res; \
})

#define ia64_cmpxchg2_acq(ptr, new, old) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
    asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv": \
                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
    ia64_intri_res; \
})

#define ia64_cmpxchg2_rel(ptr, new, old) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
\
    asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv": \
                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
    ia64_intri_res; \
})

#define ia64_cmpxchg4_acq(ptr, new, old) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
    asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv": \
                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
    ia64_intri_res; \
})

#define ia64_cmpxchg4_rel(ptr, new, old) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
    asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv": \
                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
    ia64_intri_res; \
})

#define ia64_cmpxchg8_acq(ptr, new, old) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
    asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv": \
                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
    ia64_intri_res; \
})

#define ia64_cmpxchg8_rel(ptr, new, old) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
\
    asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv": \
                  "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
    ia64_intri_res; \
})
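
/*
 * Illustrative compare-and-swap loop (an assumption, not from the original
 * source): the expected value is first placed in ar.ccv, then cmpxchg
 * stores the new value only if the memory word still equals ar.ccv, and in
 * every case returns the value that was in memory.
 *
 *     __u64 old, seen;
 *     do {
 *         old  = READ_ONCE(*p);                     // p: hypothetical __u64 *
 *         seen = ia64_cmpxchg8_acq(p, old + 1, old);
 *     } while (seen != old);                        // retry if someone raced us
 */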
#define ia64_mf()       asm volatile ("mf" ::: "memory")
#define ia64_mfa()      asm volatile ("mf.a" ::: "memory")

#define ia64_invala()   asm volatile ("invala" ::: "memory")

#define ia64_native_thash(addr) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
    ia64_intri_res; \
})

#define ia64_srlz_i()   asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d()   asm volatile (";; srlz.d" ::: "memory");

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data()               asm volatile (".serialize.data");
# define ia64_dv_serialize_instruction()        asm volatile (".serialize.instruction");
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif

#define ia64_nop(x)     asm volatile ("nop %0"::"i"(x));

#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")

#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")

#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
                                             :: "r"(trnum), "r"(addr) : "memory")

#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
                                             :: "r"(trnum), "r"(addr) : "memory")

#define ia64_tpa(addr) \
({ \
    __u64 ia64_pa; \
    asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
    ia64_pa; \
})

#define __ia64_set_dbr(index, val) \
    asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_ibr(index, val) \
    asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pkr(index, val) \
    asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmc(index, val) \
    asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmd(index, val) \
    asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_native_set_rr(index, val) \
    asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory");

#define ia64_native_get_cpuid(index) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
    ia64_intri_res; \
})

#define __ia64_get_dbr(index) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
    ia64_intri_res; \
})

#define ia64_get_ibr(index) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
    ia64_intri_res; \
})

#define ia64_get_pkr(index) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
    ia64_intri_res; \
})

#define ia64_get_pmc(index) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
    ia64_intri_res; \
})

#define ia64_native_get_pmd(index) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
    ia64_intri_res; \
})

#define ia64_native_get_rr(index) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
    ia64_intri_res; \
})
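
/*
 * Illustrative note (an assumption, not from the original source): the
 * indexed forms above address the debug (dbr/ibr), protection-key (pkr),
 * performance-monitor (pmc/pmd) and region (rr) register files.  A
 * perfmon-style access to counter n might pair a config write with a data
 * read:
 *
 *     ia64_set_pmc(n, pmc_val);                // program the counter (pmc_val: hypothetical)
 *     ia64_srlz_d();                           // make the write visible
 *     __u64 count = ia64_native_get_pmd(n);    // read the counter value
 */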
#define ia64_native_fc(addr)   asm volatile ("fc %0" :: "r"(addr) : "memory")

#define ia64_sync_i()   asm volatile (";; sync.i" ::: "memory")

#define ia64_native_ssm(mask)   asm volatile ("ssm %0":: "i"((mask)) : "memory")
#define ia64_native_rsm(mask)   asm volatile ("rsm %0":: "i"((mask)) : "memory")
#define ia64_sum(mask)  asm volatile ("sum %0":: "i"((mask)) : "memory")
#define ia64_rum(mask)  asm volatile ("rum %0":: "i"((mask)) : "memory")

#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))

#define ia64_native_ptcga(addr, size) \
do { \
    asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
    ia64_dv_serialize_data(); \
} while (0)

#define ia64_ptcl(addr, size) \
do { \
    asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
    ia64_dv_serialize_data(); \
} while (0)

#define ia64_ptri(addr, size) \
    asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptrd(addr, size) \
    asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ttag(addr) \
({ \
    __u64 ia64_intri_res; \
    asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
    ia64_intri_res; \
})

/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */

#define ia64_lfhint_none  0
#define ia64_lfhint_nt1   1
#define ia64_lfhint_nt2   2
#define ia64_lfhint_nta   3

#define ia64_lfetch(lfhint, y) \
({ \
    switch (lfhint) { \
    case ia64_lfhint_none: \
        asm volatile ("lfetch [%0]" : : "r"(y)); \
        break; \
    case ia64_lfhint_nt1: \
        asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \
        break; \
    case ia64_lfhint_nt2: \
        asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \
        break; \
    case ia64_lfhint_nta: \
        asm volatile ("lfetch.nta [%0]" : : "r"(y)); \
        break; \
    } \
})

#define ia64_lfetch_excl(lfhint, y) \
({ \
    switch (lfhint) { \
    case ia64_lfhint_none: \
        asm volatile ("lfetch.excl [%0]" :: "r"(y)); \
        break; \
    case ia64_lfhint_nt1: \
        asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
        break; \
    case ia64_lfhint_nt2: \
        asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
        break; \
    case ia64_lfhint_nta: \
        asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \
        break; \
    } \
})

#define ia64_lfetch_fault(lfhint, y) \
({ \
    switch (lfhint) { \
    case ia64_lfhint_none: \
        asm volatile ("lfetch.fault [%0]" : : "r"(y)); \
        break; \
    case ia64_lfhint_nt1: \
        asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
        break; \
    case ia64_lfhint_nt2: \
        asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
        break; \
    case ia64_lfhint_nta: \
        asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \
        break; \
    } \
})

#define ia64_lfetch_fault_excl(lfhint, y) \
({ \
    switch (lfhint) { \
    case ia64_lfhint_none: \
        asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \
        break; \
    case ia64_lfhint_nt1: \
        asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
        break; \
    case ia64_lfhint_nt2: \
        asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
        break; \
    case ia64_lfhint_nta: \
        asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
        break; \
    } \
})
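
/*
 * Illustrative prefetch (an assumption, not from the original source): the
 * hint selects how temporally the line is cached (.nt1/.nt2/.nta are
 * progressively less temporal), .excl requests the line in an exclusive
 * state for an upcoming store, and the .fault forms may take a TLB/page
 * fault instead of being silently dropped.
 *
 *     ia64_lfetch(ia64_lfhint_nta, &buf[i + 8]);        // stream, don't pollute caches
 *     ia64_lfetch_excl(ia64_lfhint_none, &node->next);  // about to write it
 *
 * buf and node are hypothetical; the hint must be a compile-time constant
 * so the switch reduces to a single lfetch.
 */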
#define ia64_native_intrin_local_irq_restore(x) \
do { \
    asm volatile (";; cmp.ne p6,p7=%0,r0;;" \
                  "(p6) ssm psr.i;" \
                  "(p7) rsm psr.i;;" \
                  "(p6) srlz.d" \
                  :: "r"((x)) : "p6", "p7", "memory"); \
} while (0)
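
/*
 * Illustrative pairing (an assumption, not from the original source): x is
 * the saved state of the PSR.i bit, so a non-zero value re-enables
 * interrupts (ssm psr.i followed by srlz.d) and zero keeps them masked
 * (rsm psr.i).
 *
 *     unsigned long psr = ia64_native_getreg(_IA64_REG_PSR);
 *     ia64_native_rsm(IA64_PSR_I);                     // disable interrupts
 *     ...critical section...
 *     ia64_native_intrin_local_irq_restore(psr & IA64_PSR_I);
 *
 * IA64_PSR_I is the PSR.i bit mask defined elsewhere (asm/kregs.h).
 */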
#endif /* _ASM_IA64_GCC_INTRIN_H */