#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */

#include <linux/compiler.h>

/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier()	asm volatile ("":::"memory")

#define ia64_stop()	asm volatile (";;"::)

#define ia64_invala_gr(regnum)	asm volatile ("invala.e r%0" :: "i"(regnum))
#define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))

extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);

register unsigned long ia64_r13 asm ("r13") __attribute_used__;

#define ia64_setreg(regnum, val) \
({ \
	switch (regnum) { \
	case _IA64_REG_PSR_L: \
		asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
		break; \
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
		asm volatile ("mov ar%0=%1" :: \
			"i" (regnum - _IA64_REG_AR_KR0), \
			"r"(val) : "memory"); \
		break; \
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
		asm volatile ("mov cr%0=%1" :: \
			"i" (regnum - _IA64_REG_CR_DCR), \
			"r"(val) : "memory"); \
		break; \
	case _IA64_REG_SP: \
		asm volatile ("mov r12=%0" :: \
			"r"(val) : "memory"); \
		break; \
	case _IA64_REG_GP: \
		asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \
		break; \
	default: \
		ia64_bad_param_for_setreg(); \
		break; \
	} \
})

#define ia64_getreg(regnum) \
({ \
	__u64 ia64_intri_res; \
\
	switch (regnum) { \
	case _IA64_REG_GP: \
		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
		break; \
	case _IA64_REG_IP: \
		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
		break; \
	case _IA64_REG_PSR: \
		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
		break; \
	case _IA64_REG_TP:	/* for current() */ \
		ia64_intri_res = ia64_r13; \
		break; \
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
			: "i"(regnum - _IA64_REG_AR_KR0)); \
		break; \
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
			: "i" (regnum - _IA64_REG_CR_DCR)); \
		break; \
	case _IA64_REG_SP: \
		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
		break; \
	default: \
		ia64_bad_param_for_getreg(); \
		break; \
	} \
	ia64_intri_res; \
})
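
/*
 * Usage sketch (illustrative only, not part of this header): reading and
 * writing registers by symbolic name.  The _IA64_REG_* constants are
 * defined elsewhere in the kernel tree (asm/ia64regs.h); "regnum" must be
 * a compile-time constant so the switch collapses to a single mov.
 *
 *	unsigned long sp  = ia64_getreg(_IA64_REG_SP);
 *	unsigned long psr = ia64_getreg(_IA64_REG_PSR);
 *	ia64_setreg(_IA64_REG_AR_KR0, 0xdeadbeefUL);
 */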

#define ia64_hint_pause 0

#define ia64_hint(mode) \
({ \
	switch (mode) { \
	case ia64_hint_pause: \
		asm volatile ("hint @pause" ::: "memory"); \
		break; \
	} \
})
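
/*
 * Usage sketch (illustrative): the @pause hint tells the core that the
 * caller is in a spin-wait loop, so it can yield resources.  A hypothetical
 * polling loop on an int pointed to by "lock":
 *
 *	while (*(volatile int *)lock)
 *		ia64_hint(ia64_hint_pause);
 */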

/* Integer values for mux1 instruction */
#define ia64_mux1_brcst	 0
#define ia64_mux1_mix	 8
#define ia64_mux1_shuf	 9
#define ia64_mux1_alt	10
#define ia64_mux1_rev	11

#define ia64_mux1(x, mode) \
({ \
	__u64 ia64_intri_res; \
\
	switch (mode) { \
	case ia64_mux1_brcst: \
		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_mix: \
		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_shuf: \
		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_alt: \
		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	case ia64_mux1_rev: \
		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
		break; \
	} \
	ia64_intri_res; \
})
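
/*
 * Usage sketch (illustrative): mux1 permutes the eight bytes of a 64-bit
 * value; the @rev form reverses byte order, which is how a 64-bit byte
 * swap (swab64) can be done in one instruction on ia64:
 *
 *	__u64 le = ia64_mux1(be, ia64_mux1_rev);
 *
 * "mode" must be a compile-time constant so the switch reduces to a
 * single mux1.
 */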

#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
# define ia64_popcnt(x)	__builtin_popcountl(x)
#else
# define ia64_popcnt(x) \
({ \
	__u64 ia64_intri_res; \
	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
\
	ia64_intri_res; \
})
#endif
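
/*
 * Usage sketch (illustrative): count the set bits of a 64-bit value.
 * On gcc >= 3.4 the builtin is used instead of inline asm and is expected
 * to emit the same popcnt instruction.
 *
 *	int bits = ia64_popcnt(0xf0f0f0f0f0f0f0f0UL);	yields 32
 */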

#define ia64_getf_exp(x) \
({ \
	long ia64_intri_res; \
\
	asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
\
	ia64_intri_res; \
})

#define ia64_shrp(a, b, count) \
({ \
	__u64 ia64_intri_res; \
	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) \
		: "r"(a), "r"(b), "i"(count)); \
	ia64_intri_res; \
})
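
/*
 * Usage sketch (illustrative): shrp concatenates a (high half) and b (low
 * half) into a 128-bit value and extracts the 64 bits starting at bit
 * "count", which must be a compile-time constant 0..63.  With a == b it
 * is a 64-bit rotate right:
 *
 *	__u64 r = ia64_shrp(x, x, 13);	rotate x right by 13 bits
 */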

#define ia64_ldfs(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldfs %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldfd(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldfd %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldfe(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldfe %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldf8(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldf8 %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldf_fill(regnum, x) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("ldf.fill %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_stfs(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stfd(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stfe(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stf8(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stf_spill(x, regnum) \
({ \
	register double __f__ asm ("f"#regnum); \
	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
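
/*
 * Usage sketch (illustrative): stf.spill/ldf.fill save and restore the
 * full 82-bit register image (16 bytes, 16-byte aligned), unlike stfd,
 * which stores a rounded 64-bit double.  Note the asymmetric argument
 * order: stores take (address, regnum), loads take (regnum, address).
 * Assuming the kernel's 16-byte-aligned struct ia64_fpreg (asm/fpu.h),
 * a save/restore of f32 around code that may clobber it:
 *
 *	struct ia64_fpreg buf;
 *	ia64_stf_spill(&buf, 32);
 *	... code that may use f32 ...
 *	ia64_ldf_fill(32, &buf);
 */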

#define ia64_fetchadd4_acq(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd4.acq %0=[%1],%2" \
		: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		: "memory"); \
\
	ia64_intri_res; \
})

#define ia64_fetchadd4_rel(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd4.rel %0=[%1],%2" \
		: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		: "memory"); \
\
	ia64_intri_res; \
})

#define ia64_fetchadd8_acq(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd8.acq %0=[%1],%2" \
		: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		: "memory"); \
\
	ia64_intri_res; \
})

#define ia64_fetchadd8_rel(p, inc) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("fetchadd8.rel %0=[%1],%2" \
		: "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
		: "memory"); \
\
	ia64_intri_res; \
})
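
/*
 * Usage sketch (illustrative): fetchadd returns the value *before* the
 * add, and "inc" must be a compile-time constant from the set the
 * hardware accepts (-16, -8, -4, -1, 1, 4, 8, 16).  The .acq forms have
 * acquire semantics (later accesses cannot be reordered before them);
 * the .rel forms have release semantics (they cannot be reordered before
 * earlier accesses).  Assuming a __u32 counter:
 *
 *	__u32 old = ia64_fetchadd4_acq(&counter, 1);	counter is now old + 1
 */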

#define ia64_xchg1(ptr, x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg1 %0=[%1],%2" \
		: "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})

#define ia64_xchg2(ptr, x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
		: "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})

#define ia64_xchg4(ptr, x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
		: "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})

#define ia64_xchg8(ptr, x) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
		: "r" (ptr), "r" (x) : "memory"); \
	ia64_intri_res; \
})
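
/*
 * Usage sketch (illustrative): xchg has implicit acquire semantics on
 * ia64, so a minimal test-and-set lock needs no extra fence on the
 * acquire side.  Assuming a hypothetical __u32 lock_word (0 = free):
 *
 *	while (ia64_xchg4(&lock_word, 1) != 0)
 *		ia64_hint(ia64_hint_pause);	spin until we stored the 1
 *	... critical section ...
 *
 * The unlock side then needs an ordinary release store (e.g. st4.rel).
 */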

#define ia64_cmpxchg1_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv" \
		: "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg1_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv" \
		: "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg2_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv" \
		: "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg2_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
\
	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv" \
		: "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg4_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv" \
		: "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg4_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv" \
		: "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg8_acq(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv" \
		: "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})

#define ia64_cmpxchg8_rel(ptr, new, old) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
\
	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv" \
		: "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res; \
})
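
/*
 * Usage sketch (illustrative): the compare value travels through ar.ccv,
 * which the first asm of each macro loads; the store happens only if the
 * current memory value equals "old", and the previous memory value is
 * returned either way.  A classic CAS loop, assuming hypothetical
 * variables p (a __u64 pointer) and flag:
 *
 *	__u64 old, new;
 *	do {
 *		old = *p;		read the current value
 *		new = old | flag;	compute the update
 *	} while (ia64_cmpxchg8_acq(p, new, old) != old);
 */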

#define ia64_mf()	asm volatile ("mf" ::: "memory")
#define ia64_mfa()	asm volatile ("mf.a" ::: "memory")

#define ia64_invala()	asm volatile ("invala" ::: "memory")

#define ia64_thash(addr) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
	ia64_intri_res; \
})

#define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory")

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data()		asm volatile (".serialize.data")
# define ia64_dv_serialize_instruction()	asm volatile (".serialize.instruction")
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif

#define ia64_nop(x)	asm volatile ("nop %0" :: "i"(x))

#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")

#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")

#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
		:: "r"(trnum), "r"(addr) : "memory")

#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
		:: "r"(trnum), "r"(addr) : "memory")

#define ia64_tpa(addr) \
({ \
	__u64 ia64_pa; \
	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
	ia64_pa; \
})
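
/*
 * Usage sketch (illustrative): tpa translates a currently mapped virtual
 * address to its physical address through the TLB (and faults if no
 * translation is present), e.g.
 *
 *	__u64 phys = ia64_tpa(virt);	virt must be mapped at this point
 */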

#define __ia64_set_dbr(index, val) \
	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_ibr(index, val) \
	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pkr(index, val) \
	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmc(index, val) \
	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmd(index, val) \
	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_rr(index, val) \
	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_get_cpuid(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
	ia64_intri_res; \
})

#define __ia64_get_dbr(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_ibr(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_pkr(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_pmc(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_pmd(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res; \
})

#define ia64_get_rr(index) \
({ \
	__u64 ia64_intri_res; \
	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
	ia64_intri_res; \
})
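
/*
 * Usage sketch (illustrative): writes to indirect registers such as rr[]
 * need a data serialize before dependent memory accesses can rely on
 * them.  A hypothetical read-modify-write of region register 0, assuming
 * the rr.ps (preferred page size) field occupies bits 7:2 and a
 * hypothetical "page_shift" value:
 *
 *	__u64 rr = ia64_get_rr(0UL);
 *	ia64_set_rr(0UL, (rr & ~0xfcUL) | (page_shift << 2));
 *	ia64_srlz_d();
 */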

#define ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")

#define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")

#define ia64_ssm(mask)	asm volatile ("ssm %0" :: "i"((mask)) : "memory")
#define ia64_rsm(mask)	asm volatile ("rsm %0" :: "i"((mask)) : "memory")
#define ia64_sum(mask)	asm volatile ("sum %0" :: "i"((mask)) : "memory")
#define ia64_rum(mask)	asm volatile ("rum %0" :: "i"((mask)) : "memory")

#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))

#define ia64_ptcga(addr, size) \
do { \
	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
	ia64_dv_serialize_data(); \
} while (0)

#define ia64_ptcl(addr, size) \
do { \
	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
	ia64_dv_serialize_data(); \
} while (0)

#define ia64_ptri(addr, size) \
	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptrd(addr, size) \
	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
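
/*
 * Usage sketch (illustrative): the "size" operand of the ptc/ptr purges
 * carries the log2 page size in bits 7:2, so callers pass it shifted
 * left by 2.  A purge is then typically followed by the matching
 * serialize before the translation change can be relied upon:
 *
 *	ia64_ptrd(vaddr, log2_size << 2);	hypothetical vaddr/log2_size
 *	ia64_srlz_d();
 *
 * (Use ia64_srlz_i() after ia64_ptri() for instruction translations.)
 */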

/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
#define ia64_lfhint_none	0
#define ia64_lfhint_nt1		1
#define ia64_lfhint_nt2		2
#define ia64_lfhint_nta		3

#define ia64_lfetch(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.nta [%0]" : : "r"(y)); \
		break; \
	} \
})

#define ia64_lfetch_excl(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch.excl [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \
		break; \
	} \
})

#define ia64_lfetch_fault(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch.fault [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \
		break; \
	} \
})

#define ia64_lfetch_fault_excl(lfhint, y) \
({ \
	switch (lfhint) { \
	case ia64_lfhint_none: \
		asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt1: \
		asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nt2: \
		asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
		break; \
	case ia64_lfhint_nta: \
		asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
		break; \
	} \
})
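
/*
 * Usage sketch (illustrative): the plain forms are advisory and never
 * fault; the .fault forms raise TLB and protection faults like a real
 * load, and the .excl forms hint that the line will be written.  A
 * hypothetical streaming loop prefetching 16 elements ahead with the
 * non-temporal-all-levels hint:
 *
 *	for (i = 0; i < n; i++) {
 *		ia64_lfetch(ia64_lfhint_nta, &data[i + 16]);
 *		sum += data[i];
 *	}
 */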

#define ia64_intrin_local_irq_restore(x) \
do { \
	asm volatile (";;   cmp.ne p6,p7=%0,r0;;" \
		      "(p6) ssm psr.i;" \
		      "(p7) rsm psr.i;;" \
		      "(p6) srlz.d" \
		      :: "r"((x)) : "p6", "p7", "memory"); \
} while (0)
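
/*
 * Usage sketch (illustrative): the macro re-enables psr.i only if "x" is
 * nonzero, so the usual pairing saves psr, masks interrupts, and later
 * hands back just the saved psr.i bit (IA64_PSR_I from asm/kregs.h):
 *
 *	unsigned long flags = ia64_getreg(_IA64_REG_PSR);
 *	ia64_rsm(IA64_PSR_I);		mask interrupts
 *	... critical section ...
 *	ia64_intrin_local_irq_restore(flags & IA64_PSR_I);
 */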

#endif /* _ASM_IA64_GCC_INTRIN_H */