unaligned_64.c

/*
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/perf_event.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <asm/fpumacro.h>
#include <asm/cacheflush.h>

#include "entry.h"

enum direction {
        load,    /* ld, ldd, ldh, ldsh */
        store,   /* st, std, sth, stsh */
        both,    /* Swap, ldstub, cas, ... */
        fpld,
        fpst,
        invalid,
};
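
/* Instruction bit 21 (op3 bit 2) clear means a load; set means a store,
 * except that a low op3 nibble of 0xf is the swap-style family, which
 * transfers data in both directions.
 */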
static inline enum direction decode_direction(unsigned int insn)
{
        unsigned long tmp = (insn >> 21) & 1;

        if (!tmp)
                return load;
        else {
                switch ((insn>>19)&0xf) {
                case 15: /* swap* */
                        return both;
                default:
                        return store;
                }
        }
}

/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
        unsigned int tmp;

        tmp = ((insn >> 19) & 0xf);
        if (tmp == 11 || tmp == 14) /* ldx/stx */
                return 8;
        tmp &= 3;
        if (!tmp)
                return 4;
        else if (tmp == 3)
                return 16;      /* ldd/std - Although it is actually 8 */
        else if (tmp == 2)
                return 2;
        else {
                printk("Impossible unaligned trap. insn=%08x\n", insn);
                die_if_kernel("Byte sized unaligned access?!?!", regs);

                /* GCC should never warn that control reaches the end
                 * of this function without returning a value because
                 * die_if_kernel() is marked with attribute 'noreturn'.
                 * Alas, some versions do...
                 */
                return 0;
        }
}
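
/* For alternate-space forms (insn bit 23 set), bit 13 selects between
 * the %asi register (held in TSTATE bits 31:24) and the immediate asi
 * field in insn bits 12:5.  Ordinary accesses use the primary ASI.
 */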
static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
        if (insn & 0x800000) {
                if (insn & 0x2000)
                        return (unsigned char)(regs->tstate >> 24);     /* %asi */
                else
                        return (unsigned char)(insn >> 5);      /* imm_asi */
        } else
                return ASI_P;
}

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
        return (insn & 0x400000);
}
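
/* If any operand is a window register (%l0-%i7, register numbers 16-31),
 * flush the register windows first so the copies saved on the stack are
 * current before we read or write them there.
 */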
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
                                       unsigned int rd, int from_kernel)
{
        if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
                if (from_kernel != 0)
                        __asm__ __volatile__("flushw");
                else
                        flushw_user();
        }
}
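
/* The 13-bit immediate sits in the low bits of a 64-bit long, so
 * shifting up by 51 and arithmetic-shifting back down replicates
 * bit 12 (the sign bit) across the upper bits.
 */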
static inline long sign_extend_imm13(long imm)
{
        return imm << 51 >> 51;
}
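
/* Registers 0-15 (%g0-%o7) are read straight out of pt_regs; window
 * registers 16-31 come from the register window saved at the frame
 * pointer, using the 32-bit window layout for compat-stack tasks.
 */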
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
        unsigned long value, fp;

        if (reg < 16)
                return (!reg ? 0 : regs->u_regs[reg]);

        fp = regs->u_regs[UREG_FP];

        if (regs->tstate & TSTATE_PRIV) {
                struct reg_window *win;
                win = (struct reg_window *)(fp + STACK_BIAS);
                value = win->locals[reg - 16];
        } else if (!test_thread_64bit_stack(fp)) {
                struct reg_window32 __user *win32;
                win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
                get_user(value, &win32->locals[reg - 16]);
        } else {
                struct reg_window __user *win;
                win = (struct reg_window __user *)(fp + STACK_BIAS);
                get_user(value, &win->locals[reg - 16]);
        }
        return value;
}

static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
        unsigned long fp;

        if (reg < 16)
                return &regs->u_regs[reg];

        fp = regs->u_regs[UREG_FP];

        if (regs->tstate & TSTATE_PRIV) {
                struct reg_window *win;
                win = (struct reg_window *)(fp + STACK_BIAS);
                return &win->locals[reg - 16];
        } else if (!test_thread_64bit_stack(fp)) {
                struct reg_window32 *win32;
                win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
                return (unsigned long *)&win32->locals[reg - 16];
        } else {
                struct reg_window *win;
                win = (struct reg_window *)(fp + STACK_BIAS);
                return &win->locals[reg - 16];
        }
}
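
/* The effective address is rs1 + simm13 when the i-bit (insn bit 13)
 * is set, otherwise rs1 + rs2.
 */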
unsigned long compute_effective_address(struct pt_regs *regs,
                                        unsigned int insn, unsigned int rd)
{
        unsigned int rs1 = (insn >> 14) & 0x1f;
        unsigned int rs2 = insn & 0x1f;
        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

        if (insn & 0x2000) {
                maybe_flush_windows(rs1, 0, rd, from_kernel);
                return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
        } else {
                maybe_flush_windows(rs1, rs2, rd, from_kernel);
                return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
        }
}

/* This is just to make gcc think die_if_kernel does return... */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
        die_if_kernel(str, regs);
}

extern int do_int_load(unsigned long *dest_reg, int size,
                       unsigned long *saddr, int is_signed, int asi);

extern int __do_int_store(unsigned long *dst_addr, int size,
                          unsigned long src_val, int asi);

static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
                               struct pt_regs *regs, int asi, int orig_asi)
{
        unsigned long zero = 0;
        unsigned long *src_val_p = &zero;
        unsigned long src_val;

        if (size == 16) {
                size = 8;
                zero = (((long)(reg_num ?
                        (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
                        (unsigned)fetch_reg(reg_num + 1, regs);
        } else if (reg_num) {
                src_val_p = fetch_reg_addr(reg_num, regs);
        }
        src_val = *src_val_p;
        if (unlikely(asi != orig_asi)) {
                switch (size) {
                case 2:
                        src_val = swab16(src_val);
                        break;
                case 4:
                        src_val = swab32(src_val);
                        break;
                case 8:
                        src_val = swab64(src_val);
                        break;
                case 16:
                default:
                        BUG();
                        break;
                }
        }
        return __do_int_store(dst_addr, size, src_val, asi);
}
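
/* Step past the emulated instruction: TNPC becomes TPC and TNPC
 * advances by one instruction word.
 */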
static inline void advance(struct pt_regs *regs)
{
        regs->tpc = regs->tnpc;
        regs->tnpc += 4;
        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
}

static inline int floating_point_load_or_store_p(unsigned int insn)
{
        return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
        return !floating_point_load_or_store_p(insn);
}

static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
        struct pt_regs *regs = current_thread_info()->kern_una_regs;
        unsigned int insn = current_thread_info()->kern_una_insn;
        const struct exception_table_entry *entry;

        entry = search_exception_tables(regs->tpc);
        if (!entry) {
                unsigned long address;

                address = compute_effective_address(regs, insn,
                                                    ((insn >> 25) & 0x1f));
                if (address < PAGE_SIZE) {
                        printk(KERN_ALERT "Unable to handle kernel NULL "
                               "pointer dereference in mna handler");
                } else
                        printk(KERN_ALERT "Unable to handle kernel paging "
                               "request in mna handler");
                printk(KERN_ALERT " at virtual address %016lx\n", address);
                printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
                        (current->mm ? CTX_HWBITS(current->mm->context) :
                        CTX_HWBITS(current->active_mm->context)));
                printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
                        (current->mm ? (unsigned long) current->mm->pgd :
                        (unsigned long) current->active_mm->pgd));
                die_if_kernel("Oops", regs);
                /* Not reached */
        }
        regs->tpc = entry->fixup;
        regs->tnpc = regs->tpc + 4;

        if (fixup_tstate_asi) {
                regs->tstate &= ~TSTATE_ASI;
                regs->tstate |= (ASI_AIUS << 24UL);
        }
}

static void log_unaligned(struct pt_regs *regs)
{
        static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

        if (__ratelimit(&ratelimit)) {
                printk("Kernel unaligned access at TPC[%lx] %pS\n",
                       regs->tpc, (void *) regs->tpc);
        }
}
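
/* Entry point for unaligned traps taken in kernel mode: decode the
 * instruction, compute the effective address, and emulate the access,
 * byte-swapping the data afterwards if a little-endian ASI was in use.
 */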
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
        enum direction dir = decode_direction(insn);
        int size = decode_access_size(regs, insn);
        int orig_asi, asi;

        current_thread_info()->kern_una_regs = regs;
        current_thread_info()->kern_una_insn = insn;

        orig_asi = asi = decode_asi(insn, regs);

        /* If this is a {get,put}_user() on an unaligned userspace pointer,
         * just signal a fault and do not log the event.
         */
        if (asi == ASI_AIUS) {
                kernel_mna_trap_fault(0);
                return;
        }

        log_unaligned(regs);

        if (!ok_for_kernel(insn) || dir == both) {
                printk("Unsupported unaligned load/store trap for kernel "
                       "at <%016lx>.\n", regs->tpc);
                unaligned_panic("Kernel does fpu/atomic "
                                "unaligned load/store.", regs);

                kernel_mna_trap_fault(0);
        } else {
                unsigned long addr, *reg_addr;
                int err;

                addr = compute_effective_address(regs, insn,
                                                 ((insn >> 25) & 0x1f));
                perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
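
                /* A little-endian ASI is its big-endian counterpart with
                 * bit 3 set; strip that bit here and byte-swap the data
                 * after the access whenever asi != orig_asi below.
                 */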
                switch (asi) {
                case ASI_NL:
                case ASI_AIUPL:
                case ASI_AIUSL:
                case ASI_PL:
                case ASI_SL:
                case ASI_PNFL:
                case ASI_SNFL:
                        asi &= ~0x08;
                        break;
                }
                switch (dir) {
                case load:
                        reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
                        err = do_int_load(reg_addr, size,
                                          (unsigned long *) addr,
                                          decode_signedness(insn), asi);
                        if (likely(!err) && unlikely(asi != orig_asi)) {
                                unsigned long val_in = *reg_addr;
                                switch (size) {
                                case 2:
                                        val_in = swab16(val_in);
                                        break;
                                case 4:
                                        val_in = swab32(val_in);
                                        break;
                                case 8:
                                        val_in = swab64(val_in);
                                        break;
                                case 16:
                                default:
                                        BUG();
                                        break;
                                }
                                *reg_addr = val_in;
                        }
                        break;

                case store:
                        err = do_int_store(((insn>>25)&0x1f), size,
                                           (unsigned long *) addr, regs,
                                           asi, orig_asi);
                        break;

                default:
                        panic("Impossible kernel unaligned trap.");
                        /* Not reached... */
                }
                if (unlikely(err))
                        kernel_mna_trap_fault(1);
                else
                        advance(regs);
        }
}
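
/* Emulate the popc (population count) instruction for CPUs that trap
 * on it, using hweight64 on either the sign-extended immediate or the
 * rs2 register value.
 */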
int handle_popc(u32 insn, struct pt_regs *regs)
{
        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
        int ret, rd = ((insn >> 25) & 0x1f);
        u64 value;

        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
        if (insn & 0x2000) {
                maybe_flush_windows(0, 0, rd, from_kernel);
                value = sign_extend_imm13(insn);
        } else {
                maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
                value = fetch_reg(insn & 0x1f, regs);
        }
        ret = hweight64(value);
        if (rd < 16) {
                if (rd)
                        regs->u_regs[rd] = ret;
        } else {
                unsigned long fp = regs->u_regs[UREG_FP];

                if (!test_thread_64bit_stack(fp)) {
                        struct reg_window32 __user *win32;
                        win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
                        put_user(ret, &win32->locals[rd - 16]);
                } else {
                        struct reg_window __user *win;
                        win = (struct reg_window __user *)(fp + STACK_BIAS);
                        put_user(ret, &win->locals[rd - 16]);
                }
        }
        advance(regs);
        return 1;
}

extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void sun4v_data_access_exception(struct pt_regs *regs,
                                        unsigned long addr,
                                        unsigned long type_ctx);
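
/* Emulate trapped floating-point loads and quad stores.  The freg
 * decode folds the low bit of the rd field (insn bit 25) in as bit 5
 * of the register number, per the V9 numbering of the upper FP
 * registers for double/quad operands.
 */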
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
        unsigned long addr = compute_effective_address(regs, insn, 0);
        int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
        struct fpustate *f = FPUSTATE;
        int asi = decode_asi(insn, regs);
        int flag = (freg < 32) ? FPRS_DL : FPRS_DU;

        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

        save_and_clear_fpu();
        current_thread_info()->xfsr[0] &= ~0x1c000;
        if (freg & 3) {
                current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
                do_fpother(regs);
                return 0;
        }
        if (insn & 0x200000) {
                /* STQ */
                u64 first = 0, second = 0;

                if (current_thread_info()->fpsaved[0] & flag) {
                        first = *(u64 *)&f->regs[freg];
                        second = *(u64 *)&f->regs[freg+2];
                }
                if (asi < 0x80) {
                        do_privact(regs);
                        return 1;
                }
                switch (asi) {
                case ASI_P:
                case ASI_S: break;
                case ASI_PL:
                case ASI_SL:
                        {
                                /* Need to convert endians */
                                u64 tmp = __swab64p(&first);

                                first = __swab64p(&second);
                                second = tmp;
                                break;
                        }
                default:
                        if (tlb_type == hypervisor)
                                sun4v_data_access_exception(regs, addr, 0);
                        else
                                spitfire_data_access_exception(regs, 0, addr);
                        return 1;
                }
                if (put_user (first >> 32, (u32 __user *)addr) ||
                    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
                    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
                    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
                        if (tlb_type == hypervisor)
                                sun4v_data_access_exception(regs, addr, 0);
                        else
                                spitfire_data_access_exception(regs, 0, addr);
                        return 1;
                }
        } else {
                /* LDF, LDDF, LDQF */
                u32 data[4] __attribute__ ((aligned(8)));
                int size, i;
                int err;

                if (asi < 0x80) {
                        do_privact(regs);
                        return 1;
                } else if (asi > ASI_SNFL) {
                        if (tlb_type == hypervisor)
                                sun4v_data_access_exception(regs, addr, 0);
                        else
                                spitfire_data_access_exception(regs, 0, addr);
                        return 1;
                }
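
                /* Bits 20:19 of op3 give the access width in 32-bit
                 * words: LDF is one word, LDQF four, LDDF two.
                 */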
                switch (insn & 0x180000) {
                case 0x000000: size = 1; break;
                case 0x100000: size = 4; break;
                default: size = 2; break;
                }
                for (i = 0; i < size; i++)
                        data[i] = 0;

                err = get_user (data[0], (u32 __user *) addr);
                if (!err) {
                        for (i = 1; i < size; i++)
                                err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
                }
                if (err && !(asi & 0x2 /* NF */)) {
                        if (tlb_type == hypervisor)
                                sun4v_data_access_exception(regs, addr, 0);
                        else
                                spitfire_data_access_exception(regs, 0, addr);
                        return 1;
                }
                if (asi & 0x8) /* Little */ {
                        u64 tmp;

                        switch (size) {
                        case 1: data[0] = le32_to_cpup(data + 0); break;
                        default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
                                break;
                        case 4: tmp = le64_to_cpup((u64 *)(data + 0));
                                *(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
                                *(u64 *)(data + 2) = tmp;
                                break;
                        }
                }
                if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
                        current_thread_info()->fpsaved[0] = FPRS_FEF;
                        current_thread_info()->gsr[0] = 0;
                }
                if (!(current_thread_info()->fpsaved[0] & flag)) {
                        if (freg < 32)
                                memset(f->regs, 0, 32*sizeof(u32));
                        else
                                memset(f->regs+32, 0, 32*sizeof(u32));
                }
                memcpy(f->regs + freg, data, size * 4);
                current_thread_info()->fpsaved[0] |= flag;
        }
        advance(regs);
        return 1;
}
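
/* A no-fault ASI load faulted anyway: per the no-fault semantics the
 * destination simply reads as zero, so store zeros into rd (and rd+1
 * for the double-word forms) and step past the instruction.
 */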
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
        int rd = ((insn >> 25) & 0x1f);
        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
        unsigned long *reg;

        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

        maybe_flush_windows(0, 0, rd, from_kernel);
        reg = fetch_reg_addr(rd, regs);
        if (from_kernel || rd < 16) {
                reg[0] = 0;
                if ((insn & 0x780000) == 0x180000)
                        reg[1] = 0;
        } else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
                put_user(0, (int __user *) reg);
                if ((insn & 0x780000) == 0x180000)
                        put_user(0, ((int __user *) reg) + 1);
        } else {
                put_user(0, (unsigned long __user *) reg);
                if ((insn & 0x780000) == 0x180000)
                        put_user(0, (unsigned long __user *) reg + 1);
        }
        advance(regs);
}
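
/* A user LDDF at an address that is 4-byte but not 8-byte aligned takes
 * this trap on the affected chips; emulate it with two 32-bit user
 * loads, honoring the no-fault and little-endian ASI variants.
 */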
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
        enum ctx_state prev_state = exception_enter();
        unsigned long pc = regs->tpc;
        unsigned long tstate = regs->tstate;
        u32 insn;
        u64 value;
        u8 freg;
        int flag;
        struct fpustate *f = FPUSTATE;

        if (tstate & TSTATE_PRIV)
                die_if_kernel("lddfmna from kernel", regs);
        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
        if (test_thread_flag(TIF_32BIT))
                pc = (u32)pc;
        if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
                int asi = decode_asi(insn, regs);
                u32 first, second;
                int err;

                if ((asi > ASI_SNFL) ||
                    (asi < ASI_P))
                        goto daex;
                first = second = 0;
                err = get_user(first, (u32 __user *)sfar);
                if (!err)
                        err = get_user(second, (u32 __user *)(sfar + 4));
                if (err) {
                        if (!(asi & 0x2))
                                goto daex;
                        first = second = 0;
                }
                save_and_clear_fpu();
                freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
                value = (((u64)first) << 32) | second;
                if (asi & 0x8) /* Little */
                        value = __swab64p(&value);
                flag = (freg < 32) ? FPRS_DL : FPRS_DU;
                if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
                        current_thread_info()->fpsaved[0] = FPRS_FEF;
                        current_thread_info()->gsr[0] = 0;
                }
                if (!(current_thread_info()->fpsaved[0] & flag)) {
                        if (freg < 32)
                                memset(f->regs, 0, 32*sizeof(u32));
                        else
                                memset(f->regs+32, 0, 32*sizeof(u32));
                }
                *(u64 *)(f->regs + freg) = value;
                current_thread_info()->fpsaved[0] |= flag;
        } else {
daex:
                if (tlb_type == hypervisor)
                        sun4v_data_access_exception(regs, sfar, sfsr);
                else
                        spitfire_data_access_exception(regs, sfsr, sfar);
                goto out;
        }
        advance(regs);
out:
        exception_exit(prev_state);
}
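
/* Mirror image of handle_lddfmna() for STDF: read the FP register pair
 * and emulate the unaligned store with two 32-bit user writes.
 */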
void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
        enum ctx_state prev_state = exception_enter();
        unsigned long pc = regs->tpc;
        unsigned long tstate = regs->tstate;
        u32 insn;
        u64 value;
        u8 freg;
        int flag;
        struct fpustate *f = FPUSTATE;

        if (tstate & TSTATE_PRIV)
                die_if_kernel("stdfmna from kernel", regs);
        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
        if (test_thread_flag(TIF_32BIT))
                pc = (u32)pc;
        if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
                int asi = decode_asi(insn, regs);

                freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
                value = 0;
                flag = (freg < 32) ? FPRS_DL : FPRS_DU;
                if ((asi > ASI_SNFL) ||
                    (asi < ASI_P))
                        goto daex;
                save_and_clear_fpu();
                if (current_thread_info()->fpsaved[0] & flag)
                        value = *(u64 *)&f->regs[freg];
                switch (asi) {
                case ASI_P:
                case ASI_S: break;
                case ASI_PL:
                case ASI_SL:
                        value = __swab64p(&value); break;
                default: goto daex;
                }
                if (put_user (value >> 32, (u32 __user *) sfar) ||
                    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
                        goto daex;
        } else {
daex:
                if (tlb_type == hypervisor)
                        sun4v_data_access_exception(regs, sfar, sfsr);
                else
                        spitfire_data_access_exception(regs, sfsr, sfar);
                goto out;
        }
        advance(regs);
out:
        exception_exit(prev_state);
}