/*
 * Architecture-specific unaligned trap handling.
 *
 * Copyright (C) 1999-2002, 2004 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 2002/12/09	Fix rotating register handling (off-by-1 error, missing fr-rotation).  Fix
 *		get_rse_reg() to not leak kernel bits to user-level (reading an out-of-frame
 *		stacked register returns an undefined value; it does NOT trigger a
 *		"rsvd register fault").
 * 2001/10/11	Fix unaligned access to rotating registers in s/w pipelined loops.
 * 2001/08/13	Correct size of extended floats (float_fsz) from 16 to 10 bytes.
 * 2001/01/17	Add support for emulation of unaligned kernel accesses.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/tty.h>

#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/rse.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
#undef DEBUG_UNALIGNED_TRAP

#ifdef DEBUG_UNALIGNED_TRAP
# define DPRINT(a...)	do { printk("%s %u: ", __FUNCTION__, __LINE__); printk (a); } while (0)
# define DDUMP(str,vp,len)	dump(str, vp, len)

static void
dump (const char *str, void *vp, size_t len)
{
	unsigned char *cp = vp;
	int i;

	printk("%s", str);
	for (i = 0; i < len; ++i)
		printk (" %02x", *cp++);
	printk("\n");
}
#else
# define DPRINT(a...)
# define DDUMP(str,vp,len)
#endif
#define IA64_FIRST_STACKED_GR	32
#define IA64_FIRST_ROTATING_FR	32
#define SIGN_EXT9		0xffffffffffffff00ul

/*
 * For M-unit:
 *
 *  opcode |   m  |   x6    |
 * --------|------|---------|
 * [40-37] | [36] | [35:30] |
 * --------|------|---------|
 *     4   |   1  |    6    | = 11 bits
 * --------------------------
 * However bits [31:30] are not directly useful to distinguish between
 * load/store so we can use [35:32] instead, which gives the following
 * mask ([40:32]) using 9 bits.  The 'e' comes from the fact that we defer
 * checking the m-bit until later in the load/store emulation.
 */
#define IA64_OPCODE_MASK	0x1ef
#define IA64_OPCODE_SHIFT	32
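
/*
 * Worked example (illustrative, not from the original source): for
 * "ld4 r1=[r3]" the 41-bit instruction slot has op=4, m=0 and x6_op=0,
 * so
 *
 *	(slot >> IA64_OPCODE_SHIFT) & IA64_OPCODE_MASK == 0x080 (LD_OP)
 *
 * while the access size is encoded separately in bits [31:30]
 * (x6_sz=2, i.e., 1 << 2 == 4 bytes).
 */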
/*
 * Table C-28 Integer Load/Store
 *
 * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
 *
 * ld8.fill, st8.fill MUST be aligned because the RNATs are based on
 * the address (bits [8:3]), so we must fail.
 */
#define LD_OP            0x080
#define LDS_OP           0x081
#define LDA_OP           0x082
#define LDSA_OP          0x083
#define LDBIAS_OP        0x084
#define LDACQ_OP         0x085
/* 0x086, 0x087 are not relevant */
#define LDCCLR_OP        0x088
#define LDCNC_OP         0x089
#define LDCCLRACQ_OP     0x08a
#define ST_OP            0x08c
#define STREL_OP         0x08d
/* 0x08e,0x8f are not relevant */

/*
 * Table C-29 Integer Load +Reg
 *
 * we use the ld->m (bit [36:36]) field to determine whether or not we have
 * a load/store of this form.
 */

/*
 * Table C-30 Integer Load/Store +Imm
 *
 * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
 *
 * ld8.fill, st8.fill must be aligned because the NaT bits are based on
 * the address, so we must fail and the program must be fixed.
 */
#define LD_IMM_OP        0x0a0
#define LDS_IMM_OP       0x0a1
#define LDA_IMM_OP       0x0a2
#define LDSA_IMM_OP      0x0a3
#define LDBIAS_IMM_OP    0x0a4
#define LDACQ_IMM_OP     0x0a5
/* 0x0a6, 0xa7 are not relevant */
#define LDCCLR_IMM_OP    0x0a8
#define LDCNC_IMM_OP     0x0a9
#define LDCCLRACQ_IMM_OP 0x0aa
#define ST_IMM_OP        0x0ac
#define STREL_IMM_OP     0x0ad
/* 0x0ae,0xaf are not relevant */
/*
 * Table C-32 Floating-point Load/Store
 */
#define LDF_OP           0x0c0
#define LDFS_OP          0x0c1
#define LDFA_OP          0x0c2
#define LDFSA_OP         0x0c3
/* 0x0c6 is irrelevant */
#define LDFCCLR_OP       0x0c8
#define LDFCNC_OP        0x0c9
/* 0x0cb is irrelevant */
#define STF_OP           0x0cc

/*
 * Table C-33 Floating-point Load +Reg
 *
 * we use the ld->m (bit [36:36]) field to determine whether or not we have
 * a load/store of this form.
 */

/*
 * Table C-34 Floating-point Load/Store +Imm
 */
#define LDF_IMM_OP       0x0e0
#define LDFS_IMM_OP      0x0e1
#define LDFA_IMM_OP      0x0e2
#define LDFSA_IMM_OP     0x0e3
/* 0x0e6 is irrelevant */
#define LDFCCLR_IMM_OP   0x0e8
#define LDFCNC_IMM_OP    0x0e9
#define STF_IMM_OP       0x0ec
typedef struct {
	unsigned long	qp:6;		/* [0:5]   */
	unsigned long	r1:7;		/* [6:12]  */
	unsigned long	imm:7;		/* [13:19] */
	unsigned long	r3:7;		/* [20:26] */
	unsigned long	x:1;		/* [27:27] */
	unsigned long	hint:2;		/* [28:29] */
	unsigned long	x6_sz:2;	/* [30:31] */
	unsigned long	x6_op:4;	/* [32:35], x6 = x6_sz|x6_op */
	unsigned long	m:1;		/* [36:36] */
	unsigned long	op:4;		/* [37:40] */
	unsigned long	pad:23;		/* [41:63] */
} load_store_t;
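
/*
 * Example decode (illustrative): for "st4 [r3]=r2" the fields above come
 * out as op=4, m=0, x6_op=0xc and x6_sz=2, i.e., opcode 0x08c (ST_OP)
 * with an access length of 1 << x6_sz == 4 bytes.  Note that for stores
 * the source register occupies bits [13:19], i.e., the field named "imm"
 * here, and the update immediate, if any, sits in the "r1" field (see
 * emulate_store_int() below).
 */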
typedef enum {
	UPD_IMMEDIATE,	/* ldXZ r1=[r3],imm(9) */
	UPD_REG		/* ldXZ r1=[r3],r2     */
} update_t;
/*
 * We use tables to keep track of the offsets of registers in the saved state.
 * This way we save having big switch/case statements.
 *
 * We use bit 0 to indicate switch_stack or pt_regs.
 * The offset is simply shifted by 1 bit.
 * A 2-byte value should be enough to hold any kind of offset.
 *
 * In case the calling convention changes (and thus pt_regs/switch_stack)
 * simply use RSW instead of RPT or vice-versa.
 */

#define RPO(x)		((size_t) &((struct pt_regs *)0)->x)
#define RSO(x)		((size_t) &((struct switch_stack *)0)->x)

#define RPT(x)		(RPO(x) << 1)
#define RSW(x)		(1| RSO(x)<<1)

#define GR_OFFS(x)	(gr_info[x]>>1)
#define GR_IN_SW(x)	(gr_info[x] & 0x1)

#define FR_OFFS(x)	(fr_info[x]>>1)
#define FR_IN_SW(x)	(fr_info[x] & 0x1)
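
/*
 * For example, with the tables below: r4 is a preserved register saved in
 * switch_stack, so GR_IN_SW(4) is 1 and GR_OFFS(4) is
 * offsetof(struct switch_stack, r4), whereas r8 is a scratch register
 * saved in pt_regs, so GR_IN_SW(8) is 0 and GR_OFFS(8) is
 * offsetof(struct pt_regs, r8).
 */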
static u16 gr_info[32]={
	0,			/* r0 is read-only : WE SHOULD NEVER GET THIS */

	RPT(r1), RPT(r2), RPT(r3),

	RSW(r4), RSW(r5), RSW(r6), RSW(r7),

	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
	RPT(r12), RPT(r13), RPT(r14), RPT(r15),

	RPT(r16), RPT(r17), RPT(r18), RPT(r19),
	RPT(r20), RPT(r21), RPT(r22), RPT(r23),
	RPT(r24), RPT(r25), RPT(r26), RPT(r27),
	RPT(r28), RPT(r29), RPT(r30), RPT(r31)
};

static u16 fr_info[32]={
	0,			/* constant : WE SHOULD NEVER GET THIS */
	0,			/* constant : WE SHOULD NEVER GET THIS */

	RSW(f2), RSW(f3), RSW(f4), RSW(f5),

	RPT(f6), RPT(f7), RPT(f8), RPT(f9),
	RPT(f10), RPT(f11),

	RSW(f12), RSW(f13), RSW(f14),
	RSW(f15), RSW(f16), RSW(f17), RSW(f18), RSW(f19),
	RSW(f20), RSW(f21), RSW(f22), RSW(f23), RSW(f24),
	RSW(f25), RSW(f26), RSW(f27), RSW(f28), RSW(f29),
	RSW(f30), RSW(f31)
};
/* Invalidate ALAT entry for integer register REGNO.  */
static void
invala_gr (int regno)
{
# define F(reg)	case reg: ia64_invala_gr(reg); break

	switch (regno) {
		F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);
		F(  8); F(  9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
		F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
		F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
		F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
		F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
		F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
		F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
		F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
		F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
		F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
		F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
		F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
		F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
		F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
		F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
	}
# undef F
}

/* Invalidate ALAT entry for floating-point register REGNO.  */
static void
invala_fr (int regno)
{
# define F(reg)	case reg: ia64_invala_fr(reg); break

	switch (regno) {
		F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);
		F(  8); F(  9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
		F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
		F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
		F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
		F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
		F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
		F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
		F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
		F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
		F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
		F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
		F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
		F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
		F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
		F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
	}
# undef F
}
static inline unsigned long
rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg)
{
	reg += rrb;
	if (reg >= sor)
		reg -= sor;
	return reg;
}
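
/*
 * Illustration: with a rotating partition of sor=16 registers and rrb=2,
 * virtual index 15 maps to physical index (15+2)-16 = 1, i.e., the rename
 * wraps around within the partition.  Callers only pass indices < sor, so
 * a single conditional subtraction suffices.
 */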
static void
set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
{
	struct switch_stack *sw = (struct switch_stack *) regs - 1;
	unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
	unsigned long rnats, nat_mask;
	unsigned long on_kbs;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx >= sof) {
		/* this should never happen, as the "rsvd register fault" has higher priority */
		DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
		return;
	}

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
	       r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);

	on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
	addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
	if (addr >= kbs) {
		/* the register is on the kernel backing store: easy... */
		rnat_addr = ia64_rse_rnat_addr(addr);
		if ((unsigned long) rnat_addr >= sw->ar_bspstore)
			rnat_addr = &sw->ar_rnat;
		nat_mask = 1UL << ia64_rse_slot_num(addr);

		*addr = val;
		if (nat)
			*rnat_addr |= nat_mask;
		else
			*rnat_addr &= ~nat_mask;
		return;
	}

	if (!user_stack(current, regs)) {
		DPRINT("ignoring kernel write to r%lu; register isn't on the kernel RBS!", r1);
		return;
	}

	bspstore = (unsigned long *)regs->ar_bspstore;
	ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
	bsp     = ia64_rse_skip_regs(ubs_end, -sof);
	addr    = ia64_rse_skip_regs(bsp, ridx);

	DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);

	ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);

	rnat_addr = ia64_rse_rnat_addr(addr);

	ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
	DPRINT("rnat @%p = 0x%lx nat=%d old nat=%ld\n",
	       (void *) rnat_addr, rnats, nat, (rnats >> ia64_rse_slot_num(addr)) & 1);

	nat_mask = 1UL << ia64_rse_slot_num(addr);
	if (nat)
		rnats |= nat_mask;
	else
		rnats &= ~nat_mask;
	ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, rnats);

	DPRINT("rnat changed to @%p = 0x%lx\n", (void *) rnat_addr, rnats);
}
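
/*
 * Note on the address arithmetic above (background, not from the original
 * comments): the backing store is not a plain array of registers -- the
 * RSE interleaves an RNaT collection word whenever bits [8:3] of the
 * address are all ones, i.e., after every 63 registers.  This is why the
 * code uses ia64_rse_skip_regs()/ia64_rse_rnat_addr() rather than raw
 * pointer arithmetic to locate a register and its NaT bit.
 */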
static void
get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *nat)
{
	struct switch_stack *sw = (struct switch_stack *) regs - 1;
	unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
	unsigned long rnats, nat_mask;
	unsigned long on_kbs;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx >= sof) {
		/* read of out-of-frame register returns an undefined value; 0 in our case. */
		DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
		goto fail;
	}

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
	       r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);

	on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
	addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
	if (addr >= kbs) {
		/* the register is on the kernel backing store: easy... */
		*val = *addr;
		if (nat) {
			rnat_addr = ia64_rse_rnat_addr(addr);
			if ((unsigned long) rnat_addr >= sw->ar_bspstore)
				rnat_addr = &sw->ar_rnat;
			nat_mask = 1UL << ia64_rse_slot_num(addr);
			*nat = (*rnat_addr & nat_mask) != 0;
		}
		return;
	}

	if (!user_stack(current, regs)) {
		DPRINT("ignoring kernel read of r%lu; register isn't on the RBS!", r1);
		goto fail;
	}

	bspstore = (unsigned long *)regs->ar_bspstore;
	ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
	bsp     = ia64_rse_skip_regs(ubs_end, -sof);
	addr    = ia64_rse_skip_regs(bsp, ridx);

	DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);

	ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);

	if (nat) {
		rnat_addr = ia64_rse_rnat_addr(addr);
		nat_mask = 1UL << ia64_rse_slot_num(addr);

		/* fetch the RNaT collection word before printing it */
		ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
		DPRINT("rnat @%p = 0x%lx\n", (void *) rnat_addr, rnats);

		*nat = (rnats & nat_mask) != 0;
	}
	return;

  fail:
	*val = 0;
	if (nat)
		*nat = 0;
	return;
}
static void
setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *) regs - 1;
	unsigned long addr;
	unsigned long bitmask;
	unsigned long *unat;

	/*
	 * First take care of stacked registers
	 */
	if (regnum >= IA64_FIRST_STACKED_GR) {
		set_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Using r0 as a target raises a General Exception fault which has higher priority
	 * than the Unaligned Reference fault.
	 */

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	if (GR_IN_SW(regnum)) {
		addr = (unsigned long)sw;
		unat = &sw->ar_unat;
	} else {
		addr = (unsigned long)regs;
		unat = &sw->caller_unat;
	}
	DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n",
	       addr, unat == &sw->ar_unat ? "yes" : "no", GR_OFFS(regnum));

	/*
	 * add offset from base of struct
	 * and do it !
	 */
	addr += GR_OFFS(regnum);

	*(unsigned long *)addr = val;

	/*
	 * We need to clear the corresponding UNAT bit to fully emulate the load
	 * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
	 */
	bitmask = 1UL << (addr >> 3 & 0x3f);
	DPRINT("*0x%lx=0x%lx NaT=%d prev_unat @%p=%lx\n", addr, val, nat, (void *) unat, *unat);
	if (nat) {
		*unat |= bitmask;
	} else {
		*unat &= ~bitmask;
	}
	DPRINT("*0x%lx=0x%lx NaT=%d new unat: %p=%lx\n", addr, val, nat, (void *) unat, *unat);
}
/*
 * Return the (rotated) index for floating point register REGNUM (REGNUM must be in the
 * range 32-127; the result is in the range 0-95).
 */
static inline unsigned long
fph_index (struct pt_regs *regs, long regnum)
{
	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
}
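
/*
 * For example (illustrative): with rrb.fr=1, an access to f32 has
 * regnum=32, so fph_index() returns rotate_reg(96, 1, 0) == 1 and the
 * value lives in current->thread.fph[1] once the high partition has been
 * synced to memory (see setfpreg()/getfpreg() below).
 */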
static void
setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *)regs - 1;
	unsigned long addr;

	/*
	 * From EAS-2.5: FPDisableFault has higher priority than Unaligned
	 * Fault.  Thus, when we get here, we know the partition is enabled.
	 * To update f32-f127, there are three choices:
	 *
	 *	(1) save f32-f127 to thread.fph and update the values there
	 *	(2) use a gigantic switch statement to directly access the registers
	 *	(3) generate code on the fly to update the desired register
	 *
	 * For now, we are using approach (1).
	 */
	if (regnum >= IA64_FIRST_ROTATING_FR) {
		ia64_sync_fph(current);
		current->thread.fph[fph_index(regs, regnum)] = *fpval;
	} else {
		/*
		 * pt_regs or switch_stack ?
		 */
		if (FR_IN_SW(regnum)) {
			addr = (unsigned long)sw;
		} else {
			addr = (unsigned long)regs;
		}

		DPRINT("tmp_base=%lx offset=%d\n", addr, FR_OFFS(regnum));

		addr += FR_OFFS(regnum);
		*(struct ia64_fpreg *)addr = *fpval;

		/*
		 * mark the low partition as being used now
		 *
		 * It is highly unlikely that this bit is not already set, but
		 * let's do it for safety.
		 */
		regs->cr_ipsr |= IA64_PSR_MFL;
	}
}
/*
 * Those 2 inline functions generate the spilled versions of the constant floating point
 * registers which can be used with stfX
 */
static inline void
float_spill_f0 (struct ia64_fpreg *final)
{
	ia64_stf_spill(final, 0);
}

static inline void
float_spill_f1 (struct ia64_fpreg *final)
{
	ia64_stf_spill(final, 1);
}
static void
getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *) regs - 1;
	unsigned long addr;

	/*
	 * From EAS-2.5: FPDisableFault has higher priority than
	 * Unaligned Fault.  Thus, when we get here, we know the partition is
	 * enabled.
	 *
	 * When regnum > 31, the register is still live and we need to force a save
	 * to current->thread.fph to get access to it.  See discussion in setfpreg()
	 * for reasons and other ways of doing this.
	 */
	if (regnum >= IA64_FIRST_ROTATING_FR) {
		ia64_flush_fph(current);
		*fpval = current->thread.fph[fph_index(regs, regnum)];
	} else {
		/*
		 * f0 = 0.0, f1 = 1.0.  Those registers are constant and are thus
		 * not saved, we must generate their spilled form on the fly
		 */
		switch(regnum) {
		case 0:
			float_spill_f0(fpval);
			break;
		case 1:
			float_spill_f1(fpval);
			break;
		default:
			/*
			 * pt_regs or switch_stack ?
			 */
			addr = FR_IN_SW(regnum) ? (unsigned long)sw
						: (unsigned long)regs;

			DPRINT("is_sw=%d tmp_base=%lx offset=0x%x\n",
			       FR_IN_SW(regnum), addr, FR_OFFS(regnum));

			addr += FR_OFFS(regnum);
			*fpval = *(struct ia64_fpreg *)addr;
		}
	}
}
static void
getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *) regs - 1;
	unsigned long addr, *unat;

	if (regnum >= IA64_FIRST_STACKED_GR) {
		get_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * take care of r0 (read-only, always evaluates to 0)
	 */
	if (regnum == 0) {
		*val = 0;
		if (nat)
			*nat = 0;
		return;
	}

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	if (GR_IN_SW(regnum)) {
		addr = (unsigned long)sw;
		unat = &sw->ar_unat;
	} else {
		addr = (unsigned long)regs;
		unat = &sw->caller_unat;
	}

	DPRINT("addr_base=%lx offset=0x%x\n", addr, GR_OFFS(regnum));

	addr += GR_OFFS(regnum);

	*val = *(unsigned long *)addr;

	/*
	 * do it only when requested
	 */
	if (nat)
		*nat = (*unat >> (addr >> 3 & 0x3f)) & 0x1UL;
}
static void
emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsigned long ifa)
{
	/*
	 * IMPORTANT:
	 * Given the way we handle unaligned speculative loads, we should
	 * not get to this point in the code but we keep this sanity check,
	 * just in case.
	 */
	if (ld.x6_op == 1 || ld.x6_op == 3) {
		printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__);
		die_if_kernel("unaligned reference on speculative load with register update\n",
			      regs, 30);
	}

	/*
	 * at this point, we know that the base register to update is valid i.e.,
	 * it's not r0
	 */
	if (type == UPD_IMMEDIATE) {
		unsigned long imm;

		/*
		 * Load +Imm: ldXZ r1=[r3],imm(9)
		 *
		 * form imm9: [13:19] contain the first 7 bits
		 */
		imm = ld.x << 7 | ld.imm;

		/*
		 * sign extend (1+8bits) if m set
		 */
		if (ld.m) imm |= SIGN_EXT9;
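		/*
		 * Worked example (illustrative): for "ld8 r1=[r3],-8" the
		 * imm9 encoding of -8 is 0x1f8, i.e., ld.m=1 (sign bit),
		 * ld.x=1 and ld.imm=0x78; the code above computes
		 * imm = 0x80|0x78 = 0xf8 and then ORs in SIGN_EXT9, giving
		 * 0xfffffffffffffff8 == -8 as expected.
		 */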
		/*
		 * ifa == r3 and we know that the NaT bit on r3 was clear so
		 * we can directly use ifa.
		 */
		ifa += imm;

		setreg(ld.r3, ifa, 0, regs);

		DPRINT("ld.x=%d ld.m=%d imm=%ld r3=0x%lx\n", ld.x, ld.m, imm, ifa);

	} else if (ld.m) {
		unsigned long r2;
		int nat_r2;

		/*
		 * Load +Reg Opcode: ldXZ r1=[r3],r2
		 *
		 * Note: that we update r3 even in the case of ldfX.a
		 * (where the load does not happen)
		 *
		 * The way the load algorithm works, we know that r3 does not
		 * have its NaT bit set (would have gotten NaT consumption
		 * before getting the unaligned fault).  So we can use ifa
		 * which equals r3 at this point.
		 *
		 * IMPORTANT:
		 * The above statement holds ONLY because we know that we
		 * never reach this code when trying to do a ldX.s.
		 * If we ever make it to here on an ldfX.s, that assumption
		 * would no longer hold.
		 */
		getreg(ld.imm, &r2, &nat_r2, regs);

		ifa += r2;

		/*
		 * propagate Nat r2 -> r3
		 */
		setreg(ld.r3, ifa, nat_r2, regs);

		DPRINT("imm=%d r2=%ld r3=0x%lx nat_r2=%d\n", ld.imm, r2, ifa, nat_r2);
	}
}
static int
emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
{
	unsigned int len = 1 << ld.x6_sz;
	unsigned long val = 0;

	/*
	 * r0, as target, doesn't need to be checked because Illegal Instruction
	 * faults have higher priority than unaligned faults.
	 *
	 * r0 cannot be found as the base as it would never generate an
	 * unaligned reference.
	 */

	/*
	 * ldX.a: we will emulate the load and also invalidate the ALAT entry.
	 * See comment below for explanation on how we handle ldX.a
	 */

	if (len != 2 && len != 4 && len != 8) {
		DPRINT("unknown size: x6=%d\n", ld.x6_sz);
		return -1;
	}
	/* this assumes little-endian byte-order: */
	if (copy_from_user(&val, (void __user *) ifa, len))
		return -1;

	setreg(ld.r1, val, 0, regs);

	/*
	 * check for updates on any kind of loads
	 */
	if (ld.op == 0x5 || ld.m)
		emulate_load_updates(ld.op == 0x5 ? UPD_IMMEDIATE : UPD_REG, ld, regs, ifa);

	/*
	 * handling of various loads (based on EAS2.4):
	 *
	 * ldX.acq (ordered load):
	 *	- acquire semantics would have been used, so force fence instead.
	 *
	 * ldX.c.clr (check load and clear):
	 *	- if we get to this handler, it's because the entry was not in the ALAT.
	 *	  Therefore the operation reverts to a normal load
	 *
	 * ldX.c.nc (check load no clear):
	 *	- same as previous one
	 *
	 * ldX.c.clr.acq (ordered check load and clear):
	 *	- same as above for the c.clr part.  The load needs to have acquire
	 *	  semantics, so we use the fence semantics which is stronger and thus
	 *	  ensures correctness.
	 *
	 * ldX.a (advanced load):
	 *	- suppose ldX.a r1=[r3].  If we get to the unaligned trap it's because the
	 *	  address doesn't match the requested size alignment.  This means that we
	 *	  would possibly need more than one load to get the result.
	 *
	 *	  The load part can be handled just like a normal load, however the
	 *	  difficult part is to get the right thing into the ALAT.  The critical
	 *	  pieces of information are the base address and the size of the load.
	 *	  To do that, a ld.a must be executed; clearly any address can be pushed
	 *	  into the table by using ld1.a r1=[r3].  Now if we use the same target
	 *	  register, we will be okay for the check.a instruction.  If we look at
	 *	  the store, basically a stX [r3]=r1 checks the ALAT for any entry which
	 *	  would overlap within [r3,r3+X] (the size of the load was stored in the
	 *	  ALAT).  If such an entry is found the entry is invalidated.  But this is
	 *	  not good enough; take the following example:
	 *		r3=3
	 *		ld4.a r1=[r3]
	 *
	 *	  Could be emulated by doing:
	 *		ld1.a r1=[r3],1
	 *		store to temporary;
	 *		ld1.a r1=[r3],1
	 *		store & shift to temporary;
	 *		ld1.a r1=[r3],1
	 *		store & shift to temporary;
	 *		ld1.a r1=[r3]
	 *		store & shift to temporary;
	 *		r1=temporary
	 *
	 *	  So in this case, you would get the right value in r1 but the wrong info
	 *	  in the ALAT.  Notice that you could do it in reverse to finish with
	 *	  address 3 but you would still get the size wrong.  To get the size
	 *	  right, one needs to execute exactly the same kind of load.  You could do
	 *	  it from an aligned temporary location, but you would get the address
	 *	  wrong.
	 *
	 *	  So no matter what, it is not possible to emulate an advanced load
	 *	  correctly.  But is that really critical?
	 *
	 *	  We will always convert ld.a into a normal load with the ALAT
	 *	  invalidated.  This will enable the compiler to do optimizations where
	 *	  certain code paths after ld.a are not required to have ld.c/chk.a, e.g.,
	 *	  code paths with no intervening stores.
	 *
	 *	  If there is a store after the advanced load, one must either do a
	 *	  ld.c.* or chk.a.* to reuse the value stored in the ALAT.  Both can
	 *	  "fail" (meaning no entry found in ALAT), and that's perfectly ok
	 *	  because:
	 *
	 *		- ld.c.*, if the entry is not present a normal load is executed
	 *		- chk.a.*, if the entry is not present, execution jumps to recovery code
	 *
	 *	  In either case, the load can be potentially retried in another form.
	 *
	 *	  The ALAT must be invalidated for the register (so that chk.a or ld.c
	 *	  don't pick up a stale entry later).  The register base update MUST also
	 *	  be performed.
	 */

	/*
	 * when the load has the .acq completer then
	 * use ordering fence.
	 */
	if (ld.x6_op == 0x5 || ld.x6_op == 0xa)
		mb();

	/*
	 * invalidate ALAT entry in case of advanced load
	 */
	if (ld.x6_op == 0x2)
		invala_gr(ld.r1);

	return 0;
}
static int
emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
{
	unsigned long r2;
	unsigned int len = 1 << ld.x6_sz;

	/*
	 * if we get to this handler, Nat bits on both r3 and r2 have already
	 * been checked, so we don't need to do it
	 *
	 * extract the value to be stored
	 */
	getreg(ld.imm, &r2, NULL, regs);

	/*
	 * we let copy_to_user() below figure out how to write the
	 * memory gracefully, one aligned piece at a time.
	 */
	DPRINT("st%d [%lx]=%lx\n", len, ifa, r2);

	if (len != 2 && len != 4 && len != 8) {
		DPRINT("unknown size: x6=%d\n", ld.x6_sz);
		return -1;
	}

	/* this assumes little-endian byte-order: */
	if (copy_to_user((void __user *) ifa, &r2, len))
		return -1;

	/*
	 * stX [r3]=r2,imm(9)
	 *
	 * NOTE:
	 * ld.r3 can never be r0, because r0 would not generate an
	 * unaligned access.
	 */
	if (ld.op == 0x5) {
		unsigned long imm;

		/*
		 * form imm9: [12:6] contain the first 7 bits
		 */
		imm = ld.x << 7 | ld.r1;
		/*
		 * sign extend (8bits) if m set
		 */
		if (ld.m) imm |= SIGN_EXT9;
		/*
		 * ifa == r3 (NaT is necessarily cleared)
		 */
		ifa += imm;

		DPRINT("imm=%lx r3=%lx\n", imm, ifa);

		setreg(ld.r3, ifa, 0, regs);
	}
	/*
	 * we don't have alat_invalidate_multiple() so we need
	 * to do the complete flush :-<<
	 */
	ia64_invala();

	/*
	 * stX.rel: use fence instead of release
	 */
	if (ld.x6_op == 0xd)
		mb();

	return 0;
}
/*
 * floating point operations sizes in bytes
 */
static const unsigned char float_fsz[4]={
	10, /* extended precision (e) */
	8,  /* integer (8)            */
	4,  /* single precision (s)   */
	8   /* double precision (d)   */
};
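
/*
 * E.g., an unaligned "ldfd" has ld.x6_sz=3 and thus moves float_fsz[3] ==
 * 8 bytes, whereas "ldfe" (x6_sz=0) moves 10 bytes -- the in-memory
 * footprint of the extended format, not the 16-byte register spill
 * format (see the 2001/08/13 changelog entry above).
 */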
static inline void
mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
	ia64_ldfe(6, init);
	ia64_stop();
	ia64_stf_spill(final, 6);
}

static inline void
mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
	ia64_ldf8(6, init);
	ia64_stop();
	ia64_stf_spill(final, 6);
}

static inline void
mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
	ia64_ldfs(6, init);
	ia64_stop();
	ia64_stf_spill(final, 6);
}

static inline void
mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
	ia64_ldfd(6, init);
	ia64_stop();
	ia64_stf_spill(final, 6);
}

static inline void
float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
	ia64_ldf_fill(6, init);
	ia64_stop();
	ia64_stfe(final, 6);
}

static inline void
float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
	ia64_ldf_fill(6, init);
	ia64_stop();
	ia64_stf8(final, 6);
}

static inline void
float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
	ia64_ldf_fill(6, init);
	ia64_stop();
	ia64_stfs(final, 6);
}

static inline void
float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
{
	ia64_ldf_fill(6, init);
	ia64_stop();
	ia64_stfd(final, 6);
}
static int
emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
{
	struct ia64_fpreg fpr_init[2];
	struct ia64_fpreg fpr_final[2];
	unsigned long len = float_fsz[ld.x6_sz];

	/*
	 * fr0 & fr1 don't need to be checked because Illegal Instruction faults have
	 * higher priority than unaligned faults.
	 *
	 * r0 cannot be found as the base as it would never generate an unaligned
	 * reference.
	 */

	/*
	 * make sure we get clean buffers
	 */
	memset(&fpr_init, 0, sizeof(fpr_init));
	memset(&fpr_final, 0, sizeof(fpr_final));

	/*
	 * ldfpX.a: we don't try to emulate anything but we must
	 * invalidate the ALAT entry and execute updates, if any.
	 */
	if (ld.x6_op != 0x2) {
		/*
		 * This assumes little-endian byte-order.  Note that there is no "ldfpe"
		 * instruction:
		 */
		if (copy_from_user(&fpr_init[0], (void __user *) ifa, len)
		    || copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len))
			return -1;

		DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
		DDUMP("fpr_init =", &fpr_init, 2*len);
		/*
		 * XXX fixme
		 * Could optimize inlines by using ldfpX & 2 spills
		 */
		switch( ld.x6_sz ) {
		case 0:
			mem2float_extended(&fpr_init[0], &fpr_final[0]);
			mem2float_extended(&fpr_init[1], &fpr_final[1]);
			break;
		case 1:
			mem2float_integer(&fpr_init[0], &fpr_final[0]);
			mem2float_integer(&fpr_init[1], &fpr_final[1]);
			break;
		case 2:
			mem2float_single(&fpr_init[0], &fpr_final[0]);
			mem2float_single(&fpr_init[1], &fpr_final[1]);
			break;
		case 3:
			mem2float_double(&fpr_init[0], &fpr_final[0]);
			mem2float_double(&fpr_init[1], &fpr_final[1]);
			break;
		}
		DDUMP("fpr_final =", &fpr_final, 2*len);
		/*
		 * XXX fixme
		 *
		 * A possible optimization would be to drop fpr_final and directly
		 * use the storage from the saved context i.e., the actual final
		 * destination (pt_regs, switch_stack or thread structure).
		 */
		setfpreg(ld.r1, &fpr_final[0], regs);
		setfpreg(ld.imm, &fpr_final[1], regs);
	}

	/*
	 * Check for updates: only immediate updates are available for this
	 * instruction.
	 */
	if (ld.m) {
		/*
		 * the immediate is implicit given the ldsz of the operation:
		 * single: 8 (2x4) and for all others it's 16 (2x8)
		 */
		ifa += len<<1;

		/*
		 * IMPORTANT:
		 * the fact that we force the NaT of r3 to zero is ONLY valid
		 * as long as we don't come here with a ldfpX.s.
		 * For this reason we keep this sanity check
		 */
		if (ld.x6_op == 1 || ld.x6_op == 3)
			printk(KERN_ERR "%s: register update on speculative load pair, error\n",
			       __FUNCTION__);

		setreg(ld.r3, ifa, 0, regs);
	}

	/*
	 * Invalidate ALAT entries, if any, for both registers.
	 */
	if (ld.x6_op == 0x2) {
		invala_fr(ld.r1);
		invala_fr(ld.imm);
	}
	return 0;
}
static int
emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
{
	struct ia64_fpreg fpr_init;
	struct ia64_fpreg fpr_final;
	unsigned long len = float_fsz[ld.x6_sz];

	/*
	 * fr0 & fr1 don't need to be checked because Illegal Instruction
	 * faults have higher priority than unaligned faults.
	 *
	 * r0 cannot be found as the base as it would never generate an
	 * unaligned reference.
	 */

	/*
	 * make sure we get clean buffers
	 */
	memset(&fpr_init, 0, sizeof(fpr_init));
	memset(&fpr_final, 0, sizeof(fpr_final));

	/*
	 * ldfX.a: we don't try to emulate anything but we must
	 * invalidate the ALAT entry.
	 * See comments in ldX for descriptions on how the various loads are handled.
	 */
	if (ld.x6_op != 0x2) {
		if (copy_from_user(&fpr_init, (void __user *) ifa, len))
			return -1;

		DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
		DDUMP("fpr_init =", &fpr_init, len);
		/*
		 * we only do something for x6_op={0,8,9}
		 */
		switch( ld.x6_sz ) {
		case 0:
			mem2float_extended(&fpr_init, &fpr_final);
			break;
		case 1:
			mem2float_integer(&fpr_init, &fpr_final);
			break;
		case 2:
			mem2float_single(&fpr_init, &fpr_final);
			break;
		case 3:
			mem2float_double(&fpr_init, &fpr_final);
			break;
		}
		DDUMP("fpr_final =", &fpr_final, len);
		/*
		 * XXX fixme
		 *
		 * A possible optimization would be to drop fpr_final and directly
		 * use the storage from the saved context i.e., the actual final
		 * destination (pt_regs, switch_stack or thread structure).
		 */
		setfpreg(ld.r1, &fpr_final, regs);
	}

	/*
	 * check for updates on any loads
	 */
	if (ld.op == 0x7 || ld.m)
		emulate_load_updates(ld.op == 0x7 ? UPD_IMMEDIATE : UPD_REG, ld, regs, ifa);

	/*
	 * invalidate ALAT entry in case of advanced floating point loads
	 */
	if (ld.x6_op == 0x2)
		invala_fr(ld.r1);

	return 0;
}
static int
emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
{
	struct ia64_fpreg fpr_init;
	struct ia64_fpreg fpr_final;
	unsigned long len = float_fsz[ld.x6_sz];

	/*
	 * make sure we get clean buffers
	 */
	memset(&fpr_init, 0, sizeof(fpr_init));
	memset(&fpr_final, 0, sizeof(fpr_final));

	/*
	 * if we get to this handler, Nat bits on both r3 and r2 have already
	 * been checked, so we don't need to do it
	 *
	 * extract the value to be stored
	 */
	getfpreg(ld.imm, &fpr_init, regs);
	/*
	 * during this step, we extract the spilled registers from the saved
	 * context i.e., we refill.  Then we store (no spill) to a temporary
	 * aligned location
	 */
	switch( ld.x6_sz ) {
	case 0:
		float2mem_extended(&fpr_init, &fpr_final);
		break;
	case 1:
		float2mem_integer(&fpr_init, &fpr_final);
		break;
	case 2:
		float2mem_single(&fpr_init, &fpr_final);
		break;
	case 3:
		float2mem_double(&fpr_init, &fpr_final);
		break;
	}
	DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
	DDUMP("fpr_init =", &fpr_init, len);
	DDUMP("fpr_final =", &fpr_final, len);

	if (copy_to_user((void __user *) ifa, &fpr_final, len))
		return -1;

	/*
	 * stfX [r3]=r2,imm(9)
	 *
	 * NOTE:
	 * ld.r3 can never be r0, because r0 would not generate an
	 * unaligned access.
	 */
	if (ld.op == 0x7) {
		unsigned long imm;

		/*
		 * form imm9: [12:6] contain the first 7 bits
		 */
		imm = ld.x << 7 | ld.r1;
		/*
		 * sign extend (8bits) if m set
		 */
		if (ld.m)
			imm |= SIGN_EXT9;
		/*
		 * ifa == r3 (NaT is necessarily cleared)
		 */
		ifa += imm;

		DPRINT("imm=%lx r3=%lx\n", imm, ifa);

		setreg(ld.r3, ifa, 0, regs);
	}
	/*
	 * we don't have alat_invalidate_multiple() so we need
	 * to do the complete flush :-<<
	 */
	ia64_invala();

	return 0;
}
/*
 * Make sure we log the unaligned access, so that user/sysadmin can notice it and
 * eventually fix the program.  However, we don't want to do that for every access so we
 * pace it with jiffies.  This isn't really MP-safe, but it doesn't really have to be
 * either...
 */
static int
within_logging_rate_limit (void)
{
	static unsigned long count, last_time;

	if (jiffies - last_time > 5*HZ)
		count = 0;
	if (++count < 5) {
		last_time = jiffies;
		return 1;
	}
	return 0;
}
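
/*
 * Net effect of the counter logic above: bursts of at most four messages
 * are logged, and the counter only resets once 5*HZ jiffies have elapsed
 * since the last *logged* message (last_time is not updated for
 * suppressed ones).
 */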
void
ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
{
	struct ia64_psr *ipsr = ia64_psr(regs);
	mm_segment_t old_fs = get_fs();
	unsigned long bundle[2];
	unsigned long opcode;
	struct siginfo si;
	const struct exception_table_entry *eh = NULL;
	union {
		unsigned long l;
		load_store_t insn;
	} u;
	int ret = -1;

	if (ia64_psr(regs)->be) {
		/* we don't support big-endian accesses */
		die_if_kernel("big-endian unaligned accesses are not supported", regs, 0);
		goto force_sigbus;
	}

	/*
	 * Treat kernel accesses for which there is an exception handler entry the same as
	 * user-level unaligned accesses.  Otherwise, a clever program could trick this
	 * handler into reading arbitrary kernel addresses...
	 */
	if (!user_mode(regs))
		eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
	if (user_mode(regs) || eh) {
		if ((current->thread.flags & IA64_THREAD_UAC_SIGBUS) != 0)
			goto force_sigbus;

		if (!(current->thread.flags & IA64_THREAD_UAC_NOPRINT)
		    && within_logging_rate_limit())
		{
			char buf[200];	/* comm[] is at most 16 bytes... */
			size_t len;

			len = sprintf(buf, "%s(%d): unaligned access to 0x%016lx, "
				      "ip=0x%016lx\n\r", current->comm, current->pid,
				      ifa, regs->cr_iip + ipsr->ri);
			/*
			 * Don't call tty_write_message() if we're in the kernel; we might
			 * be holding locks...
			 */
			if (user_mode(regs))
				tty_write_message(current->signal->tty, buf);
			buf[len-1] = '\0';	/* drop '\r' */
			printk(KERN_WARNING "%s", buf);	/* watch for command names containing %s */
		}
	} else {
		if (within_logging_rate_limit())
			printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
			       ifa, regs->cr_iip + ipsr->ri);
		set_fs(KERNEL_DS);
	}

	DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n",
	       regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);

	if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
		goto failure;

	/*
	 * extract the instruction from the bundle given the slot number
	 */
	switch (ipsr->ri) {
	case 0: u.l = (bundle[0] >>  5); break;
	case 1: u.l = (bundle[0] >> 46) | (bundle[1] << 18); break;
	case 2: u.l = (bundle[1] >> 23); break;
	}
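	/*
	 * Background for the slot extraction above: an IA-64 bundle is 128
	 * bits -- a 5-bit template followed by three 41-bit instruction
	 * slots.  Slot 0 starts at bit 5 of the first doubleword; slot 1
	 * straddles the two doublewords (18 bits in bundle[0], 23 bits in
	 * bundle[1]), hence the shift/or pair; slot 2 occupies the top 41
	 * bits of bundle[1].  Any garbage above bit 40 of u.l lands in the
	 * "pad" bitfield of load_store_t and is ignored.
	 */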
	opcode = (u.l >> IA64_OPCODE_SHIFT) & IA64_OPCODE_MASK;

	DPRINT("opcode=%lx ld.qp=%d ld.r1=%d ld.imm=%d ld.r3=%d ld.x=%d ld.hint=%d "
	       "ld.x6=0x%x ld.m=%d ld.op=%d\n", opcode, u.insn.qp, u.insn.r1, u.insn.imm,
	       u.insn.r3, u.insn.x, u.insn.hint, u.insn.x6_sz, u.insn.m, u.insn.op);

	/*
	 * IMPORTANT:
	 * Notice that the switch statement DOES NOT cover all possible instructions
	 * that DO generate unaligned references.  This is made on purpose because for some
	 * instructions it DOES NOT make sense to try and emulate the access.  Sometimes it
	 * is WRONG to try and emulate.  Here is a list of instructions we don't emulate i.e.,
	 * the program will get a signal and die:
	 *
	 *	load/store:
	 *		- ldX.spill
	 *		- stX.spill
	 *	Reason: RNATs are based on addresses
	 *		- ld16
	 *		- st16
	 *	Reason: ld16 and st16 are supposed to occur in a single
	 *		memory op
	 *
	 *	synchronization:
	 *		- cmpxchg
	 *		- fetchadd
	 *		- xchg
	 *	Reason: ATOMIC operations cannot be emulated properly using multiple
	 *	        instructions.
	 *
	 *	speculative loads:
	 *		- ldX.sZ
	 *	Reason: side effects, code must be ready to deal with failure so simpler
	 *		to let the load fail.
	 * ---------------------------------------------------------------------------------
	 * XXX fixme
	 *
	 * I would like to get rid of this switch case and do something
	 * more elegant.
	 */
	switch (opcode) {
	case LDS_OP:
	case LDSA_OP:
		if (u.insn.x)
			/* oops, really a semaphore op (cmpxchg, etc) */
			goto failure;
		/* no break */
	case LDS_IMM_OP:
	case LDSA_IMM_OP:
	case LDFS_OP:
	case LDFSA_OP:
	case LDFS_IMM_OP:
		/*
		 * The instruction will be retried with deferred exceptions turned on, and
		 * we should get the NaT bit installed
		 *
		 * IMPORTANT: When PSR_ED is set, the register & immediate update forms
		 * are actually executed even though the operation failed.  So we don't
		 * need to take care of this.
		 */
		DPRINT("forcing PSR_ED\n");
		regs->cr_ipsr |= IA64_PSR_ED;
		goto done;

	case LD_OP:
	case LDA_OP:
	case LDBIAS_OP:
	case LDACQ_OP:
	case LDCCLR_OP:
	case LDCNC_OP:
	case LDCCLRACQ_OP:
		if (u.insn.x)
			/* oops, really a semaphore op (cmpxchg, etc) */
			goto failure;
		/* no break */
	case LD_IMM_OP:
	case LDA_IMM_OP:
	case LDBIAS_IMM_OP:
	case LDACQ_IMM_OP:
	case LDCCLR_IMM_OP:
	case LDCNC_IMM_OP:
	case LDCCLRACQ_IMM_OP:
		ret = emulate_load_int(ifa, u.insn, regs);
		break;

	case ST_OP:
	case STREL_OP:
		if (u.insn.x)
			/* oops, really a semaphore op (cmpxchg, etc) */
			goto failure;
		/* no break */
	case ST_IMM_OP:
	case STREL_IMM_OP:
		ret = emulate_store_int(ifa, u.insn, regs);
		break;

	case LDF_OP:
	case LDFA_OP:
	case LDFCCLR_OP:
	case LDFCNC_OP:
	case LDF_IMM_OP:
	case LDFA_IMM_OP:
	case LDFCCLR_IMM_OP:
	case LDFCNC_IMM_OP:
		if (u.insn.x)
			ret = emulate_load_floatpair(ifa, u.insn, regs);
		else
			ret = emulate_load_float(ifa, u.insn, regs);
		break;

	case STF_OP:
	case STF_IMM_OP:
		ret = emulate_store_float(ifa, u.insn, regs);
		break;

	default:
		goto failure;
	}
	DPRINT("ret=%d\n", ret);
	if (ret)
		goto failure;

	if (ipsr->ri == 2)
		/*
		 * given today's architecture this case is not likely to happen because a
		 * memory access instruction (M) can never be in the last slot of a
		 * bundle.  But let's keep it for now.
		 */
		regs->cr_iip += 16;
	ipsr->ri = (ipsr->ri + 1) & 0x3;

	DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip);
  done:
	set_fs(old_fs);		/* restore original address limit */
	return;

  failure:
	/* something went wrong... */
	if (!user_mode(regs)) {
		if (eh) {
			ia64_handle_exception(regs, eh);
			goto done;
		}
		die_if_kernel("error during unaligned kernel access\n", regs, ret);
		/* NOT_REACHED */
	}
  force_sigbus:
	si.si_signo = SIGBUS;
	si.si_errno = 0;
	si.si_code = BUS_ADRALN;
	si.si_addr = (void __user *) ifa;
	si.si_flags = 0;
	si.si_isr = 0;
	si.si_imm = 0;
	force_sig_info(SIGBUS, &si, current);
	goto done;
}