gcc_intrin.h

#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
 *
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */

#include <linux/types.h>
#include <linux/compiler.h>

/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED

/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier() asm volatile ("":::"memory")

#define ia64_stop() asm volatile (";;"::)

#define ia64_invala_gr(regnum) asm volatile ("invala.e r%0" :: "i"(regnum))

#define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))

#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")

#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")

extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);

#ifdef __KERNEL__
register unsigned long ia64_r13 asm ("r13") __used;
#endif
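
/*
 * Usage sketch (illustrative, not part of the original header):
 * ia64_barrier() emits no instruction at all; it only forbids the
 * compiler from moving memory accesses across it, while ia64_stop()
 * closes the current instruction group with ";;".  The flag and
 * function below are hypothetical.
 *
 *        data_ready = 1;
 *        ia64_barrier();
 *        notify_consumer();
 */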

#define ia64_native_setreg(regnum, val) \
({ \
        switch (regnum) { \
        case _IA64_REG_PSR_L: \
                asm volatile ("mov psr.l=%0" :: "r"(val) : "memory"); \
                break; \
        case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
                asm volatile ("mov ar%0=%1" :: \
                              "i" (regnum - _IA64_REG_AR_KR0), \
                              "r"(val) : "memory"); \
                break; \
        case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
                asm volatile ("mov cr%0=%1" :: \
                              "i" (regnum - _IA64_REG_CR_DCR), \
                              "r"(val) : "memory"); \
                break; \
        case _IA64_REG_SP: \
                asm volatile ("mov r12=%0" :: \
                              "r"(val) : "memory"); \
                break; \
        case _IA64_REG_GP: \
                asm volatile ("mov gp=%0" :: "r"(val) : "memory"); \
                break; \
        default: \
                ia64_bad_param_for_setreg(); \
                break; \
        } \
})

#define ia64_native_getreg(regnum) \
({ \
        __u64 ia64_intri_res; \
 \
        switch (regnum) { \
        case _IA64_REG_GP: \
                asm volatile ("mov %0=gp" : "=r"(ia64_intri_res)); \
                break; \
        case _IA64_REG_IP: \
                asm volatile ("mov %0=ip" : "=r"(ia64_intri_res)); \
                break; \
        case _IA64_REG_PSR: \
                asm volatile ("mov %0=psr" : "=r"(ia64_intri_res)); \
                break; \
        case _IA64_REG_TP: /* for current() */ \
                ia64_intri_res = ia64_r13; \
                break; \
        case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC: \
                asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res) \
                              : "i"(regnum - _IA64_REG_AR_KR0)); \
                break; \
        case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1: \
                asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res) \
                              : "i" (regnum - _IA64_REG_CR_DCR)); \
                break; \
        case _IA64_REG_SP: \
                asm volatile ("mov %0=sp" : "=r" (ia64_intri_res)); \
                break; \
        default: \
                ia64_bad_param_for_getreg(); \
                break; \
        } \
        ia64_intri_res; \
})
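
/*
 * Usage sketch (illustrative, not part of the original header): regnum
 * must be a compile-time constant (one of the _IA64_REG_* values),
 * since the asm operands use "i" constraints; an out-of-range constant
 * leaves a call to the deliberately undefined
 * ia64_bad_param_for_getreg() and fails at link time.
 *
 *        unsigned long sp = ia64_native_getreg(_IA64_REG_SP);
 *        unsigned long tp = ia64_native_getreg(_IA64_REG_TP);
 */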

#define ia64_hint_pause 0

#define ia64_hint(mode) \
({ \
        switch (mode) { \
        case ia64_hint_pause: \
                asm volatile ("hint @pause" ::: "memory"); \
                break; \
        } \
})

/* Integer values for mux1 instruction */
#define ia64_mux1_brcst 0
#define ia64_mux1_mix 8
#define ia64_mux1_shuf 9
#define ia64_mux1_alt 10
#define ia64_mux1_rev 11

#define ia64_mux1(x, mode) \
({ \
        __u64 ia64_intri_res; \
 \
        switch (mode) { \
        case ia64_mux1_brcst: \
                asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x)); \
                break; \
        case ia64_mux1_mix: \
                asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x)); \
                break; \
        case ia64_mux1_shuf: \
                asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x)); \
                break; \
        case ia64_mux1_alt: \
                asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x)); \
                break; \
        case ia64_mux1_rev: \
                asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x)); \
                break; \
        } \
        ia64_intri_res; \
})
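
/*
 * Usage sketch (illustrative, not part of the original header): mux1
 * permutes the eight bytes of a general register; the @rev form
 * reverses byte order, which is how a 64-bit byte swap (swab64) is
 * expressed on ia64.  be_value is a hypothetical __u64 variable.
 *
 *        __u64 le_value = ia64_mux1(be_value, ia64_mux1_rev);
 */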

#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
# define ia64_popcnt(x) __builtin_popcountl(x)
#else
# define ia64_popcnt(x) \
({ \
        __u64 ia64_intri_res; \
        asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x)); \
 \
        ia64_intri_res; \
})
#endif
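
/*
 * Usage sketch (illustrative, not part of the original header): both
 * spellings count the set bits of a 64-bit value, e.g. 0xf0f0 has 8;
 * with gcc >= 3.4 the builtin also folds constant arguments at compile
 * time instead of emitting the popcnt instruction.
 *
 *        unsigned long nbits = ia64_popcnt(0xf0f0UL);
 */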

#define ia64_getf_exp(x) \
({ \
        long ia64_intri_res; \
 \
        asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x)); \
 \
        ia64_intri_res; \
})

#define ia64_shrp(a, b, count) \
({ \
        __u64 ia64_intri_res; \
        asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
        ia64_intri_res; \
})
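
/*
 * Usage sketch (illustrative, not part of the original header): shrp
 * shifts the 128-bit concatenation a:b right by the constant count and
 * returns the low 64 bits, so passing the same value for both operands
 * gives a rotate right (x being any __u64):
 *
 *        __u64 ror13 = ia64_shrp(x, x, 13);
 */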

#define ia64_ldfs(regnum, x) \
({ \
        register double __f__ asm ("f"#regnum); \
        asm volatile ("ldfs %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldfd(regnum, x) \
({ \
        register double __f__ asm ("f"#regnum); \
        asm volatile ("ldfd %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldfe(regnum, x) \
({ \
        register double __f__ asm ("f"#regnum); \
        asm volatile ("ldfe %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldf8(regnum, x) \
({ \
        register double __f__ asm ("f"#regnum); \
        asm volatile ("ldf8 %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_ldf_fill(regnum, x) \
({ \
        register double __f__ asm ("f"#regnum); \
        asm volatile ("ldf.fill %0=[%1]" : "=f"(__f__) : "r"(x)); \
})

#define ia64_st4_rel_nta(m, val) \
({ \
        asm volatile ("st4.rel.nta [%0] = %1\n\t" :: "r"(m), "r"(val)); \
})

#define ia64_stfs(x, regnum) \
({ \
        register double __f__ asm ("f"#regnum); \
        asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stfd(x, regnum) \
({ \
        register double __f__ asm ("f"#regnum); \
        asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stfe(x, regnum) \
({ \
        register double __f__ asm ("f"#regnum); \
        asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stf8(x, regnum) \
({ \
        register double __f__ asm ("f"#regnum); \
        asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_stf_spill(x, regnum) \
({ \
        register double __f__ asm ("f"#regnum); \
        asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})

#define ia64_fetchadd4_acq(p, inc) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("fetchadd4.acq %0=[%1],%2" \
                      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
                      : "memory"); \
 \
        ia64_intri_res; \
})

#define ia64_fetchadd4_rel(p, inc) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("fetchadd4.rel %0=[%1],%2" \
                      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
                      : "memory"); \
 \
        ia64_intri_res; \
})

#define ia64_fetchadd8_acq(p, inc) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("fetchadd8.acq %0=[%1],%2" \
                      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
                      : "memory"); \
 \
        ia64_intri_res; \
})

#define ia64_fetchadd8_rel(p, inc) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("fetchadd8.rel %0=[%1],%2" \
                      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) \
                      : "memory"); \
 \
        ia64_intri_res; \
})
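
/*
 * Usage sketch (illustrative, not part of the original header): inc is
 * an "i" operand because fetchadd can only encode the increments -16,
 * -8, -4, -1, 1, 4, 8 and 16; the returned value is the old memory
 * contents, from before the add.  counter is a hypothetical variable.
 *
 *        __u64 old = ia64_fetchadd4_acq(&counter, 1);
 */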

#define ia64_xchg1(ptr,x) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("xchg1 %0=[%1],%2" \
                      : "=r" (ia64_intri_res) : "r" (ptr), "r" (x) : "memory"); \
        ia64_intri_res; \
})

#define ia64_xchg2(ptr,x) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res) \
                      : "r" (ptr), "r" (x) : "memory"); \
        ia64_intri_res; \
})

#define ia64_xchg4(ptr,x) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res) \
                      : "r" (ptr), "r" (x) : "memory"); \
        ia64_intri_res; \
})

#define ia64_xchg8(ptr,x) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res) \
                      : "r" (ptr), "r" (x) : "memory"); \
        ia64_intri_res; \
})

#define ia64_cmpxchg1_acq(ptr, new, old) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
        asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv" : \
                      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
        ia64_intri_res; \
})

#define ia64_cmpxchg1_rel(ptr, new, old) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
        asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv" : \
                      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
        ia64_intri_res; \
})

#define ia64_cmpxchg2_acq(ptr, new, old) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
        asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv" : \
                      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
        ia64_intri_res; \
})

#define ia64_cmpxchg2_rel(ptr, new, old) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
 \
        asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv" : \
                      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
        ia64_intri_res; \
})

#define ia64_cmpxchg4_acq(ptr, new, old) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
        asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv" : \
                      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
        ia64_intri_res; \
})

#define ia64_cmpxchg4_rel(ptr, new, old) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
        asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv" : \
                      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
        ia64_intri_res; \
})

#define ia64_cmpxchg8_acq(ptr, new, old) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
        asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv" : \
                      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
        ia64_intri_res; \
})

#define ia64_cmpxchg8_rel(ptr, new, old) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov ar.ccv=%0;;" :: "rO"(old)); \
 \
        asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv" : \
                      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
        ia64_intri_res; \
})
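
/*
 * Usage sketch (illustrative, not part of the original header): the
 * expected value is staged into ar.ccv, the store happens only if the
 * memory word still equals it, and the old contents are returned
 * either way, giving the usual compare-and-swap retry loop.  lock is a
 * hypothetical __u32 pointer.
 *
 *        __u32 seen, cur = *lock;
 *        do {
 *                seen = cur;
 *                cur = ia64_cmpxchg4_acq(lock, seen + 1, seen);
 *        } while (cur != seen);
 */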

#define ia64_mf() asm volatile ("mf" ::: "memory")
#define ia64_mfa() asm volatile ("mf.a" ::: "memory")

#define ia64_invala() asm volatile ("invala" ::: "memory")

#define ia64_native_thash(addr) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
        ia64_intri_res; \
})

#define ia64_srlz_i() asm volatile (";; srlz.i ;;" ::: "memory")
#define ia64_srlz_d() asm volatile (";; srlz.d" ::: "memory")

#ifdef HAVE_SERIALIZE_DIRECTIVE
# define ia64_dv_serialize_data() asm volatile (".serialize.data")
# define ia64_dv_serialize_instruction() asm volatile (".serialize.instruction")
#else
# define ia64_dv_serialize_data()
# define ia64_dv_serialize_instruction()
#endif

#define ia64_nop(x) asm volatile ("nop %0" :: "i"(x))

#define ia64_itci(addr) asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")

#define ia64_itcd(addr) asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")

#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1" \
                                             :: "r"(trnum), "r"(addr) : "memory")

#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1" \
                                             :: "r"(trnum), "r"(addr) : "memory")

#define ia64_tpa(addr) \
({ \
        __u64 ia64_pa; \
        asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
        ia64_pa; \
})

#define __ia64_set_dbr(index, val) \
        asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_ibr(index, val) \
        asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pkr(index, val) \
        asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmc(index, val) \
        asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmd(index, val) \
        asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_native_set_rr(index, val) \
        asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_native_get_cpuid(index) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
        ia64_intri_res; \
})

#define __ia64_get_dbr(index) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
        ia64_intri_res; \
})

#define ia64_get_ibr(index) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
        ia64_intri_res; \
})

#define ia64_get_pkr(index) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
        ia64_intri_res; \
})

#define ia64_get_pmc(index) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
        ia64_intri_res; \
})

#define ia64_native_get_pmd(index) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
        ia64_intri_res; \
})

#define ia64_native_get_rr(index) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
        ia64_intri_res; \
})
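
/*
 * Usage sketch (illustrative, not part of the original header; register
 * layout per the Itanium architecture manuals): CPUID registers 0 and 1
 * hold the 16-byte vendor string, so the first eight bytes can be read
 * with:
 *
 *        __u64 vendor_lo = ia64_native_get_cpuid(0);
 */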

#define ia64_native_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")

#define ia64_sync_i() asm volatile (";; sync.i" ::: "memory")

#define ia64_native_ssm(mask) asm volatile ("ssm %0" :: "i"((mask)) : "memory")
#define ia64_native_rsm(mask) asm volatile ("rsm %0" :: "i"((mask)) : "memory")
#define ia64_sum(mask) asm volatile ("sum %0" :: "i"((mask)) : "memory")
#define ia64_rum(mask) asm volatile ("rum %0" :: "i"((mask)) : "memory")

#define ia64_ptce(addr) asm volatile ("ptc.e %0" :: "r"(addr))

#define ia64_native_ptcga(addr, size) \
do { \
        asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory"); \
        ia64_dv_serialize_data(); \
} while (0)

#define ia64_ptcl(addr, size) \
do { \
        asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory"); \
        ia64_dv_serialize_data(); \
} while (0)

#define ia64_ptri(addr, size) \
        asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptrd(addr, size) \
        asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ttag(addr) \
({ \
        __u64 ia64_intri_res; \
        asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
        ia64_intri_res; \
})

/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
#define ia64_lfhint_none 0
#define ia64_lfhint_nt1 1
#define ia64_lfhint_nt2 2
#define ia64_lfhint_nta 3

#define ia64_lfetch(lfhint, y) \
({ \
        switch (lfhint) { \
        case ia64_lfhint_none: \
                asm volatile ("lfetch [%0]" : : "r"(y)); \
                break; \
        case ia64_lfhint_nt1: \
                asm volatile ("lfetch.nt1 [%0]" : : "r"(y)); \
                break; \
        case ia64_lfhint_nt2: \
                asm volatile ("lfetch.nt2 [%0]" : : "r"(y)); \
                break; \
        case ia64_lfhint_nta: \
                asm volatile ("lfetch.nta [%0]" : : "r"(y)); \
                break; \
        } \
})

#define ia64_lfetch_excl(lfhint, y) \
({ \
        switch (lfhint) { \
        case ia64_lfhint_none: \
                asm volatile ("lfetch.excl [%0]" :: "r"(y)); \
                break; \
        case ia64_lfhint_nt1: \
                asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y)); \
                break; \
        case ia64_lfhint_nt2: \
                asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y)); \
                break; \
        case ia64_lfhint_nta: \
                asm volatile ("lfetch.excl.nta [%0]" :: "r"(y)); \
                break; \
        } \
})

#define ia64_lfetch_fault(lfhint, y) \
({ \
        switch (lfhint) { \
        case ia64_lfhint_none: \
                asm volatile ("lfetch.fault [%0]" : : "r"(y)); \
                break; \
        case ia64_lfhint_nt1: \
                asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y)); \
                break; \
        case ia64_lfhint_nt2: \
                asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y)); \
                break; \
        case ia64_lfhint_nta: \
                asm volatile ("lfetch.fault.nta [%0]" : : "r"(y)); \
                break; \
        } \
})

#define ia64_lfetch_fault_excl(lfhint, y) \
({ \
        switch (lfhint) { \
        case ia64_lfhint_none: \
                asm volatile ("lfetch.fault.excl [%0]" :: "r"(y)); \
                break; \
        case ia64_lfhint_nt1: \
                asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y)); \
                break; \
        case ia64_lfhint_nt2: \
                asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y)); \
                break; \
        case ia64_lfhint_nta: \
                asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y)); \
                break; \
        } \
})
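
/*
 * Usage sketch (illustrative, not part of the original header): the
 * plain forms silently drop the prefetch if the address would fault,
 * the .fault forms take the fault, and .excl hints an intent to write.
 * Like the mode argument of ia64_hint(), lfhint must be a compile-time
 * constant.  node is a hypothetical list element.
 *
 *        ia64_lfetch(ia64_lfhint_nta, node->next);
 */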

#define ia64_native_intrin_local_irq_restore(x) \
do { \
        asm volatile (";; cmp.ne p6,p7=%0,r0;;" \
                      "(p6) ssm psr.i;" \
                      "(p7) rsm psr.i;;" \
                      "(p6) srlz.d" \
                      :: "r"((x)) : "p6", "p7", "memory"); \
} while (0)
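
/*
 * Usage sketch (illustrative, not part of the original header;
 * IA64_PSR_I comes from <asm/kregs.h>): x is the saved state of the
 * psr.i bit, so interrupts are re-enabled only if they were enabled
 * before, the usual save/restore pairing:
 *
 *        unsigned long psr = ia64_native_getreg(_IA64_REG_PSR);
 *        ia64_native_rsm(IA64_PSR_I);
 *        ...critical section...
 *        ia64_native_intrin_local_irq_restore(psr & IA64_PSR_I);
 */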

#endif /* _ASM_IA64_GCC_INTRIN_H */