unaligned_32.c

/*
 * unaligned.c: Unaligned load/store trap handling with special
 *              cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>

/* #define DEBUG_MNA */

enum direction {
        load,    /* ld, ldd, ldh, ldsh */
        store,   /* st, std, sth, stsh */
        both,    /* Swap, ldstub, etc. */
        fpload,
        fpstore,
        invalid,
};

#ifdef DEBUG_MNA
static char *dirstrings[] = {
        "load", "store", "both", "fpload", "fpstore", "invalid"
};
#endif
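
/*
 * The trapping instruction is a format 3 memory op, so op3 sits in
 * bits 24:19.  A clear bit 21 (op3 bit 2) means a plain load; otherwise
 * it is a store, except op3 == 0x0f (SWAP), which both loads and stores.
 */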
static inline enum direction decode_direction(unsigned int insn)
{
        unsigned long tmp = (insn >> 21) & 1;

        if (!tmp)
                return load;
        else {
                if (((insn >> 19) & 0x3f) == 15)
                        return both;
                else
                        return store;
        }
}

/* 8 = double-word, 4 = word, 2 = half-word */
static inline int decode_access_size(unsigned int insn)
{
        unsigned int tmp = (insn >> 19) & 3;

        if (!tmp)
                return 4;
        else if (tmp == 3)
                return 8;
        else if (tmp == 2)
                return 2;
        else {
                printk("Impossible unaligned trap. insn=%08x\n", insn);
                die_if_kernel("Byte sized unaligned access?!?!", current->thread.kregs);
                return 4; /* just to keep gcc happy. */
        }
}

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
        return (insn & 0x400000);
}
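
/*
 * Registers 16-31 (%l0-%l7/%i0-%i7) live in the current register
 * window rather than in pt_regs.  If the instruction names any of
 * them, force all windows out to the stack with a chain of saves
 * followed by matching restores, so fetch_reg() can read the values
 * from the saved window frame.
 */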
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
                                       unsigned int rd)
{
        if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
                /* Wheee... */
                __asm__ __volatile__("save %sp, -0x40, %sp\n\t"
                                     "save %sp, -0x40, %sp\n\t"
                                     "save %sp, -0x40, %sp\n\t"
                                     "save %sp, -0x40, %sp\n\t"
                                     "save %sp, -0x40, %sp\n\t"
                                     "save %sp, -0x40, %sp\n\t"
                                     "save %sp, -0x40, %sp\n\t"
                                     "restore; restore; restore; restore;\n\t"
                                     "restore; restore; restore;\n\t");
        }
}
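
/* Sign-extend the 13-bit immediate (simm13) field of the instruction. */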
static inline int sign_extend_imm13(int imm)
{
        return imm << 19 >> 19;
}
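
/*
 * Fetch the value of a source register.  %g0-%g7 and %o0-%o7 (0-15)
 * come straight from pt_regs, with %g0 always reading as zero;
 * %l0-%l7 and %i0-%i7 (16-31) are read from the register window saved
 * at the frame pointer.  safe_fetch_reg() is the user-space variant
 * and goes through get_user().
 */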
static inline unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
        struct reg_window32 *win;

        if (reg < 16)
                return (!reg ? 0 : regs->u_regs[reg]);

        /* Ho hum, the slightly complicated case. */
        win = (struct reg_window32 *) regs->u_regs[UREG_FP];
        return win->locals[reg - 16]; /* yes, I know what this does... */
}

static inline unsigned long safe_fetch_reg(unsigned int reg, struct pt_regs *regs)
{
        struct reg_window32 __user *win;
        unsigned long ret;

        if (reg < 16)
                return (!reg ? 0 : regs->u_regs[reg]);

        /* Ho hum, the slightly complicated case. */
        win = (struct reg_window32 __user *) regs->u_regs[UREG_FP];
        if ((unsigned long)win & 3)
                return -1;
        if (get_user(ret, &win->locals[reg - 16]))
                return -1;
        return ret;
}

static inline unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
        struct reg_window32 *win;

        if (reg < 16)
                return &regs->u_regs[reg];
        win = (struct reg_window32 *) regs->u_regs[UREG_FP];
        return &win->locals[reg - 16];
}
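
/*
 * Effective address of the access: rs1 + simm13 when the i bit
 * (bit 13) is set, rs1 + rs2 otherwise.
 */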
static unsigned long compute_effective_address(struct pt_regs *regs,
                                               unsigned int insn)
{
        unsigned int rs1 = (insn >> 14) & 0x1f;
        unsigned int rs2 = insn & 0x1f;
        unsigned int rd = (insn >> 25) & 0x1f;

        if (insn & 0x2000) {
                maybe_flush_windows(rs1, 0, rd);
                return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
        } else {
                maybe_flush_windows(rs1, rs2, rd);
                return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
        }
}

unsigned long safe_compute_effective_address(struct pt_regs *regs,
                                             unsigned int insn)
{
        unsigned int rs1 = (insn >> 14) & 0x1f;
        unsigned int rs2 = insn & 0x1f;
        unsigned int rd = (insn >> 25) & 0x1f;

        if (insn & 0x2000) {
                maybe_flush_windows(rs1, 0, rd);
                return (safe_fetch_reg(rs1, regs) + sign_extend_imm13(insn));
        } else {
                maybe_flush_windows(rs1, rs2, rd);
                return (safe_fetch_reg(rs1, regs) + safe_fetch_reg(rs2, regs));
        }
}

/* This is just to make gcc think panic does return... */
static void unaligned_panic(char *str)
{
        panic(str);
}

/* una_asm.S */
extern int do_int_load(unsigned long *dest_reg, int size,
                       unsigned long *saddr, int is_signed);
extern int __do_int_store(unsigned long *dst_addr, int size,
                          unsigned long *src_val);
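
/*
 * Store the value of register rd to dst_addr.  %g0 is special-cased:
 * the stored value is zero, and for an 8-byte std %g0 the second word
 * of the pair is taken from %g1.
 */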
static int do_int_store(int reg_num, int size, unsigned long *dst_addr,
                        struct pt_regs *regs)
{
        unsigned long zero[2] = { 0, 0 };
        unsigned long *src_val;

        if (reg_num)
                src_val = fetch_reg_addr(reg_num, regs);
        else {
                src_val = &zero[0];
                if (size == 8)
                        zero[1] = fetch_reg(1, regs);
        }
        return __do_int_store(dst_addr, size, src_val);
}

extern void smp_capture(void);
extern void smp_release(void);

static inline void advance(struct pt_regs *regs)
{
        regs->pc = regs->npc;
        regs->npc += 4;
}
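
/* Bit 24 (op3 bit 5) is set for floating-point and coprocessor loads/stores. */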
static inline int floating_point_load_or_store_p(unsigned int insn)
{
        return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
        return !floating_point_load_or_store_p(insn);
}
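
/*
 * The emulated kernel access faulted.  Look for an exception-table
 * fixup covering regs->pc and branch to it if one exists; otherwise
 * print the usual paging-failure report and die.
 */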
static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
        unsigned long g2 = regs->u_regs[UREG_G2];
        unsigned long fixup = search_extables_range(regs->pc, &g2);

        if (!fixup) {
                unsigned long address = compute_effective_address(regs, insn);

                if (address < PAGE_SIZE) {
                        printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
                } else
                        printk(KERN_ALERT "Unable to handle kernel paging request in mna handler");
                printk(KERN_ALERT " at virtual address %08lx\n", address);
                printk(KERN_ALERT "current->{mm,active_mm}->context = %08lx\n",
                       (current->mm ? current->mm->context :
                        current->active_mm->context));
                printk(KERN_ALERT "current->{mm,active_mm}->pgd = %08lx\n",
                       (current->mm ? (unsigned long) current->mm->pgd :
                        (unsigned long) current->active_mm->pgd));
                die_if_kernel("Oops", regs);
                /* Not reached */
        }
        regs->pc = fixup;
        regs->npc = regs->pc + 4;
        regs->u_regs[UREG_G2] = g2;
}
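
/*
 * Entry point for unaligned accesses taken in kernel mode: decode the
 * instruction, emulate the integer load or store by hand, and let
 * kernel_mna_trap_fault() run the exception-table fixup if the
 * emulated access itself faults.
 */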
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
        enum direction dir = decode_direction(insn);
        int size = decode_access_size(insn);

        if (!ok_for_kernel(insn) || dir == both) {
                printk("Unsupported unaligned load/store trap for kernel at <%08lx>.\n",
                       regs->pc);
                unaligned_panic("Wheee. Kernel does fpu/atomic unaligned load/store.");
        } else {
                unsigned long addr = compute_effective_address(regs, insn);
                int err;

#ifdef DEBUG_MNA
                printk("KMNA: pc=%08lx [dir=%s addr=%08lx size=%d] retpc[%08lx]\n",
                       regs->pc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]);
#endif
                switch (dir) {
                case load:
                        err = do_int_load(fetch_reg_addr(((insn >> 25) & 0x1f), regs),
                                          size, (unsigned long *) addr,
                                          decode_signedness(insn));
                        break;

                case store:
                        err = do_int_store(((insn >> 25) & 0x1f), size,
                                           (unsigned long *) addr, regs);
                        break;

                default:
                        panic("Impossible kernel unaligned trap.");
                        /* Not reached... */
                }
                if (err)
                        kernel_mna_trap_fault(regs, insn);
                else
                        advance(regs);
        }
}
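
/*
 * Returns 0 if the user access may be emulated, -EFAULT otherwise:
 * the PC/nPC must be word aligned and every register-window slot the
 * fixup would have to touch must pass access_ok().
 */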
static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
                              enum direction dir)
{
        unsigned int reg;
        int check = (dir == load) ? VERIFY_READ : VERIFY_WRITE;
        int size = ((insn >> 19) & 3) == 3 ? 8 : 4;

        if ((regs->pc | regs->npc) & 3)
                return -EFAULT;

        /* Must access_ok() in all the necessary places. */
#define WINREG_ADDR(regnum) \
        ((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))

        reg = (insn >> 25) & 0x1f;
        if (reg >= 16) {
                if (!access_ok(check, WINREG_ADDR(reg - 16), size))
                        return -EFAULT;
        }
        reg = (insn >> 14) & 0x1f;
        if (reg >= 16) {
                if (!access_ok(check, WINREG_ADDR(reg - 16), size))
                        return -EFAULT;
        }
        if (!(insn & 0x2000)) {
                reg = (insn & 0x1f);
                if (reg >= 16) {
                        if (!access_ok(check, WINREG_ADDR(reg - 16), size))
                                return -EFAULT;
                }
        }
#undef WINREG_ADDR
        return 0;
}

static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
        siginfo_t info;

        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRALN;
        info.si_addr = (void __user *)safe_compute_effective_address(regs, insn);
        info.si_trapno = 0;
        send_sig_info(SIGBUS, &info, current);
}
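
/*
 * Unaligned accesses from user space are fixed up only when the task
 * has asked for it (SPARC_FLAG_UNALIGNED) and the trapping instruction
 * is a format 3 (op == 3) load or store.  Everything else, including
 * FPU loads/stores and SWAP, gets a SIGBUS with si_code BUS_ADRALN.
 */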
asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
        enum direction dir;

        lock_kernel();
        if (!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
            (((insn >> 30) & 3) != 3))
                goto kill_user;
        dir = decode_direction(insn);
        if (ok_for_user(regs, insn, dir)) {
                goto kill_user;
        } else {
                int err, size = decode_access_size(insn);
                unsigned long addr;

                if (floating_point_load_or_store_p(insn)) {
                        printk("User FPU load/store unaligned unsupported.\n");
                        goto kill_user;
                }

                addr = compute_effective_address(regs, insn);
                switch (dir) {
                case load:
                        err = do_int_load(fetch_reg_addr(((insn >> 25) & 0x1f), regs),
                                          size, (unsigned long *) addr,
                                          decode_signedness(insn));
                        break;

                case store:
                        err = do_int_store(((insn >> 25) & 0x1f), size,
                                           (unsigned long *) addr, regs);
                        break;

                case both:
                        /*
                         * This was supported in 2.4.  However, we question
                         * the value of emulating a SWAP across a word boundary.
                         */
                        printk("Unaligned SWAP unsupported.\n");
                        err = -EFAULT;
                        break;

                default:
                        unaligned_panic("Impossible user unaligned trap.");
                        goto out;
                }
                if (err)
                        goto kill_user;
                else
                        advance(regs);
                goto out;
        }

kill_user:
        user_mna_trap_fault(regs, insn);
out:
        unlock_kernel();
}