single_step.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * A code-rewriter that enables instruction single-stepping.
 * Derived from iLib's single-stepping code.
 */
#ifndef __tilegx__   /* Hardware support for single step unavailable. */

/* These functions are only used on the TILE platform */
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <asm/cacheflush.h>
#include <asm/unaligned.h>
#include <arch/abi.h>
#include <arch/opcode.h>

#define signExtend17(val) sign_extend((val), 17)
#define TILE_X1_MASK (0xffffffffULL << 31)
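
/*
 * TILE_X1_MASK selects the X1 slot's bits within the 64-bit TILEPro
 * bundle (bits 31..62, per the mask above).  The helpers below use it
 * to blank out the X1 slot and splice in a replacement instruction
 * while leaving the X0 slot untouched.
 */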

int unaligned_printk;

static int __init setup_unaligned_printk(char *str)
{
	long val;
	if (strict_strtol(str, 0, &val) != 0)
		return 0;
	unaligned_printk = val;
	pr_info("Printk for each unaligned data access is %s\n",
		unaligned_printk ? "enabled" : "disabled");
	return 1;
}
__setup("unaligned_printk=", setup_unaligned_printk);

unsigned int unaligned_fixup_count;

enum mem_op {
	MEMOP_NONE,
	MEMOP_LOAD,
	MEMOP_STORE,
	MEMOP_LOAD_POSTINCR,
	MEMOP_STORE_POSTINCR
};
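
/*
 * Bundle-rewriting helpers.  Each one edits only the X1 slot of the
 * 64-bit bundle: set_BrOff_X1() patches the branch offset, move_X1()
 * replaces the slot with a register-to-register move (nop_X1() being a
 * move from zero to zero), and addi_X1() replaces it with an immediate
 * add, which is how the post-increment side effect of lwadd/swadd-style
 * instructions is preserved once the memory access has been emulated.
 */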

static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
{
	tile_bundle_bits result;

	/* mask out the old offset */
	tile_bundle_bits mask = create_BrOff_X1(-1);
	result = n & (~mask);

	/* or in the new offset */
	result |= create_BrOff_X1(offset);

	return result;
}

static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
{
	tile_bundle_bits result;
	tile_bundle_bits op;

	result = n & (~TILE_X1_MASK);

	op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
		create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
		create_Dest_X1(dest) |
		create_SrcB_X1(TREG_ZERO) |
		create_SrcA_X1(src) ;

	result |= op;
	return result;
}

static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
{
	return move_X1(n, TREG_ZERO, TREG_ZERO);
}

static inline tile_bundle_bits addi_X1(
	tile_bundle_bits n, int dest, int src, int imm)
{
	n &= ~TILE_X1_MASK;

	n |= (create_SrcA_X1(src) |
		create_Dest_X1(dest) |
		create_Imm8_X1(imm) |
		create_S_X1(0) |
		create_Opcode_X1(IMM_0_OPCODE_X1) |
		create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));

	return n;
}
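
/*
 * Emulate an unaligned load or store.  The access itself is performed
 * here with copy_from_user()/copy_to_user(); for loads, the result is
 * queued in *state so the destination register can be updated after the
 * step, and the returned bundle is rewritten so that re-executing it no
 * longer touches memory.  A return value of zero tells the caller that a
 * fatal signal has already been sent and no bundle should be executed.
 */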
static tile_bundle_bits rewrite_load_store_unaligned(
	struct single_step_state *state,
	tile_bundle_bits bundle,
	struct pt_regs *regs,
	enum mem_op mem_op,
	int size, int sign_ext)
{
	unsigned char __user *addr;
	int val_reg, addr_reg, err, val;

	/* Get address and value registers */
	if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
		addr_reg = get_SrcA_Y2(bundle);
		val_reg = get_SrcBDest_Y2(bundle);
	} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_Dest_X1(bundle);
	} else {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_SrcB_X1(bundle);
	}

	/*
	 * If registers are not GPRs, don't try to handle it.
	 *
	 * FIXME: we could handle non-GPR loads by getting the real value
	 * from memory, writing it to the single step buffer, using a
	 * temp_reg to hold a pointer to that memory, then executing that
	 * instruction and resetting temp_reg.  For non-GPR stores, it's a
	 * little trickier; we could use the single step buffer for that
	 * too, but we'd have to add some more state bits so that we could
	 * call back in here to copy that value to the real target.  For
	 * now, we just handle the simple case.
	 */
	if ((val_reg >= PTREGS_NR_GPRS &&
	     (val_reg != TREG_ZERO ||
	      mem_op == MEMOP_LOAD ||
	      mem_op == MEMOP_LOAD_POSTINCR)) ||
	    addr_reg >= PTREGS_NR_GPRS)
		return bundle;

	/* If it's aligned, don't handle it specially */
	addr = (void __user *)regs->regs[addr_reg];
	if (((unsigned long)addr % size) == 0)
		return bundle;

	/*
	 * Return SIGBUS with the unaligned address, if requested.
	 * Note that we return SIGBUS even for completely invalid addresses
	 * as long as they are in fact unaligned; this matches what the
	 * tilepro hardware would be doing, if it could provide us with the
	 * actual bad address in an SPR, which it doesn't.
	 */
	if (unaligned_fixup == 0) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		trace_unhandled_signal("unaligned trap", regs,
				       (unsigned long)addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return (tilepro_bundle_bits) 0;
	}

	/* Handle unaligned load/store */
	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		unsigned short val_16;
		switch (size) {
		case 2:
			err = copy_from_user(&val_16, addr, sizeof(val_16));
			val = sign_ext ? ((short)val_16) : val_16;
			break;
		case 4:
			err = copy_from_user(&val, addr, sizeof(val));
			break;
		default:
			BUG();
		}
		if (err == 0) {
			state->update_reg = val_reg;
			state->update_value = val;
			state->update = 1;
		}
	} else {
		unsigned short val_16;
		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
		switch (size) {
		case 2:
			val_16 = val;
			err = copy_to_user(addr, &val_16, sizeof(val_16));
			break;
		case 4:
			err = copy_to_user(addr, &val, sizeof(val));
			break;
		default:
			BUG();
		}
	}

	if (err) {
		siginfo_t info = {
			.si_signo = SIGSEGV,
			.si_code = SEGV_MAPERR,
			.si_addr = addr
		};
		trace_unhandled_signal("segfault", regs,
				       (unsigned long)addr, SIGSEGV);
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_printk || unaligned_fixup_count == 0) {
		pr_info("Process %d/%s: PC %#lx: Fixup of"
			" unaligned %s at %#lx.\n",
			current->pid, current->comm, regs->pc,
			(mem_op == MEMOP_LOAD ||
			 mem_op == MEMOP_LOAD_POSTINCR) ?
			"load" : "store",
			(unsigned long)addr);
		if (!unaligned_printk) {
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
		}
	}
	++unaligned_fixup_count;
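
	/*
	 * The access itself has already been emulated above; what remains
	 * is to neuter the memory side of the bundle before it is
	 * re-executed from the step buffer: a Y2 slot becomes a load to
	 * the zero register (effectively a prefetch), the post-increment
	 * forms keep only their address update (rewritten as an addi),
	 * and a plain X1 load/store becomes a nop.
	 */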
	if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
		/* Convert the Y2 instruction to a prefetch. */
		bundle &= ~(create_SrcBDest_Y2(-1) |
			    create_Opcode_Y2(-1));
		bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
			   create_Opcode_Y2(LW_OPCODE_Y2));
	/* Replace the load postincr with an addi */
	} else if (mem_op == MEMOP_LOAD_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Imm8_X1(bundle));
	/* Replace the store postincr with an addi */
	} else if (mem_op == MEMOP_STORE_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Dest_Imm8_X1(bundle));
	} else {
		/* Convert the X1 instruction to a nop. */
		bundle &= ~(create_Opcode_X1(-1) |
			    create_UnShOpcodeExtension_X1(-1) |
			    create_UnOpcodeExtension_X1(-1));
		bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
			   create_UnShOpcodeExtension_X1(
				   UN_0_SHUN_0_OPCODE_X1) |
			   create_UnOpcodeExtension_X1(
				   NOP_UN_0_SHUN_0_OPCODE_X1));
	}

	return bundle;
}

/*
 * Called after execve() has started the new image.  This allows us
 * to reset the info state.  Note that the mmap'ed memory, if there
 * was any, has already been unmapped by the exec.
 */
void single_step_execve(void)
{
	struct thread_info *ti = current_thread_info();
	kfree(ti->step_state);
	ti->step_state = NULL;
}

/**
 * single_step_once() - entry point when single stepping has been triggered.
 * @regs: The machine register state
 *
 * When we arrive at this routine via a trampoline, the single step
 * engine copies the executing bundle to the single step buffer.
 * If the instruction is a conditional branch, then the target is
 * reset to one past the next instruction.  If the instruction
 * sets the lr, then that is noted.  If the instruction is a jump
 * or call, then the new target pc is preserved and the jump in the
 * current bundle is replaced with a nop.
 *
 * The necessary post-single-step rewriting information is stored in
 * single_step_state.  We use data segment values because the
 * stack will be rewound when we run the rewritten single-stepped
 * instruction.
 */
void single_step_once(struct pt_regs *regs)
{
	extern tile_bundle_bits __single_step_ill_insn;
	extern tile_bundle_bits __single_step_j_insn;
	extern tile_bundle_bits __single_step_addli_insn;
	extern tile_bundle_bits __single_step_auli_insn;
	struct thread_info *info = (void *)current_thread_info();
	struct single_step_state *state = info->step_state;
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	tile_bundle_bits __user *buffer, *pc;
	tile_bundle_bits bundle;
	int temp_reg;
	int target_reg = TREG_LR;
	int err;
	enum mem_op mem_op = MEMOP_NONE;
	int size = 0, sign_ext = 0;	/* happy compiler */

	asm(
"	.pushsection .rodata.single_step\n"
"	.align 8\n"
"	.globl __single_step_ill_insn\n"
"__single_step_ill_insn:\n"
"	ill\n"
"	.globl __single_step_addli_insn\n"
"__single_step_addli_insn:\n"
"	{ nop; addli r0, zero, 0 }\n"
"	.globl __single_step_auli_insn\n"
"__single_step_auli_insn:\n"
"	{ nop; auli r0, r0, 0 }\n"
"	.globl __single_step_j_insn\n"
"__single_step_j_insn:\n"
"	j .\n"
"	.popsection\n"
	);
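
	/*
	 * Sketch of what the code below writes to the step buffer, one
	 * 8-byte bundle per slot:
	 *
	 *   buffer[0]: the (possibly rewritten) copy of the user's bundle
	 *
	 * followed, if TIF_SINGLESTEP is set, by
	 *   buffer[1]: ill   -- trap back to the kernel (branch not taken)
	 *   buffer[2]: ill   -- trap back to the kernel (branch taken)
	 *
	 * or, for a plain unaligned-access fixup, by
	 *   addli/auli       -- if there is a pending register update,
	 *                       materialize the 32-bit update_value into
	 *                       update_reg (e.g. the value just loaded)
	 *   j                -- jump straight back to the next user bundle
	 */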

	/*
	 * Enable interrupts here to allow touching userspace and the like.
	 * The callers expect this: do_trap() already has interrupts
	 * enabled, and do_work_pending() handles functions that enable
	 * interrupts internally.
	 */
	local_irq_enable();

	if (state == NULL) {
		/* allocate the per-thread single-step state */
		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
		if (state == NULL) {
			pr_err("Out of kernel memory trying to single-step\n");
			return;
		}

		/* allocate a cache line of writable, executable memory */
		buffer = (void __user *) vm_mmap(NULL, 0, 64,
					  PROT_EXEC | PROT_READ | PROT_WRITE,
					  MAP_PRIVATE | MAP_ANONYMOUS,
					  0);

		if (IS_ERR((void __force *)buffer)) {
			kfree(state);
			pr_err("Out of kernel pages trying to single-step\n");
			return;
		}

		state->buffer = buffer;
		state->is_enabled = 0;
		info->step_state = state;

		/* Validate our stored instruction patterns */
		BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
		       ADDLI_OPCODE_X1);
		BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
		       AULI_OPCODE_X1);
		BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
		BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
		BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
	}
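
	/*
	 * Note that the step buffer has to live in the user half of the
	 * address space (hence vm_mmap() rather than a kernel allocation):
	 * regs->pc is pointed at state->buffer below, so the rewritten
	 * bundle executes at user privilege in the user's own mapping.
	 */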

	/*
	 * If we are returning from a syscall, we still haven't hit the
	 * "ill" for the swint1 instruction.  So back the PC up to be
	 * pointing at the swint1, but we'll actually return directly
	 * back to the "ill" so we come back in via SIGILL as if we
	 * had "executed" the swint1 without ever being in kernel space.
	 */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc -= 8;

	pc = (tile_bundle_bits __user *)(regs->pc);
	if (get_user(bundle, pc) != 0) {
		pr_err("Couldn't read instruction at %p trying to step\n", pc);
		return;
	}

	/* We'll follow the instruction with 2 ill op bundles */
	state->orig_pc = (unsigned long)pc;
	state->next_pc = (unsigned long)(pc + 1);
	state->branch_next_pc = 0;
	state->update = 0;

	if (!(bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK)) {
		/* two wide, check for control flow */
		int opcode = get_Opcode_X1(bundle);

		switch (opcode) {
		/* branches */
		case BRANCH_OPCODE_X1:
		{
			s32 offset = signExtend17(get_BrOff_X1(bundle));

			/*
			 * For branches, we use a rewriting trick to let the
			 * hardware evaluate whether the branch is taken or
			 * untaken.  We record the target offset and then
			 * rewrite the branch instruction to target 1 insn
			 * ahead if the branch is taken.  We then follow the
			 * rewritten branch with two bundles, each containing
			 * an "ill" instruction.  The supervisor examines the
			 * pc after the single step code is executed, and if
			 * the pc is the first ill instruction, then the
			 * branch (if any) was not taken.  If the pc is the
			 * second ill instruction, then the branch was
			 * taken.  The new pc is computed for these cases, and
			 * inserted into the registers for the thread.  If
			 * the pc is the start of the single step code, then
			 * an exception or interrupt was taken before the
			 * code started processing, and the same "original"
			 * pc is restored.  This change, different from the
			 * original implementation, has the advantage of
			 * executing a single user instruction.
			 */
			state->branch_next_pc = (unsigned long)(pc + offset);

			/* rewrite branch offset to go forward one bundle */
			bundle = set_BrOff_X1(bundle, 2);
		}
		break;

		/* jumps */
		case JALB_OPCODE_X1:
		case JALF_OPCODE_X1:
			state->update = 1;
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			break;

		case JB_OPCODE_X1:
		case JF_OPCODE_X1:
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			bundle = nop_X1(bundle);
			break;

		case SPECIAL_0_OPCODE_X1:
			switch (get_RRROpcodeExtension_X1(bundle)) {
			/* jump-register */
			case JALRP_SPECIAL_0_OPCODE_X1:
			case JALR_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				break;

			case JRP_SPECIAL_0_OPCODE_X1:
			case JR_SPECIAL_0_OPCODE_X1:
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				bundle = nop_X1(bundle);
				break;

			case LNK_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				target_reg = get_Dest_X1(bundle);
				break;

			/* stores */
			case SH_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 2;
				break;

			case SW_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 4;
				break;
			}
			break;

		/* loads and iret */
		case SHUN_0_OPCODE_X1:
			if (get_UnShOpcodeExtension_X1(bundle) ==
			    UN_0_SHUN_0_OPCODE_X1) {
				switch (get_UnOpcodeExtension_X1(bundle)) {
				case LH_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 1;
					break;

				case LH_U_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 0;
					break;

				case LW_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 4;
					break;

				case IRET_UN_0_SHUN_0_OPCODE_X1:
				{
					unsigned long ex0_0 = __insn_mfspr(
						SPR_EX_CONTEXT_0_0);
					unsigned long ex0_1 = __insn_mfspr(
						SPR_EX_CONTEXT_0_1);
					/*
					 * Special-case it if we're iret'ing
					 * to PL0 again.  Otherwise just let
					 * it run and it will generate SIGILL.
					 */
					if (EX1_PL(ex0_1) == USER_PL) {
						state->next_pc = ex0_0;
						regs->ex1 = ex0_1;
						bundle = nop_X1(bundle);
					}
				}
				}
			}
			break;

#if CHIP_HAS_WH64()
		/* postincrement operations */
		case IMM_0_OPCODE_X1:
			switch (get_ImmOpcodeExtension_X1(bundle)) {
			case LWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 4;
				break;

			case LHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 1;
				break;

			case LHADD_U_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 0;
				break;

			case SWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 4;
				break;

			case SHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 2;
				break;

			default:
				break;
			}
			break;
#endif /* CHIP_HAS_WH64() */
		}
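
		/*
		 * For the cases above that set state->update (jal, jalr,
		 * lnk), the bundle about to be stepped is replaced by a
		 * plain move: a scratch register not used by the bundle is
		 * preloaded with the correct link value (pc + 1), and the
		 * move deposits it in the real target register (lr, or the
		 * lnk destination).  The scratch register's original value
		 * is saved in state->update_* and restored afterwards, and
		 * the actual control transfer happens when the supervisor
		 * later sets the pc from state->next_pc.
		 */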
		if (state->update) {
			/*
			 * Get an available register.  We start with a
			 * bitmask with 1's for available registers.
			 * We truncate to the low 32 registers since
			 * we are guaranteed to have set bits in the
			 * low 32 bits, then use ctz to pick the first.
			 */
			u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
					   (1ULL << get_SrcA_X0(bundle)) |
					   (1ULL << get_SrcB_X0(bundle)) |
					   (1ULL << target_reg));
			temp_reg = __builtin_ctz(mask);
			state->update_reg = temp_reg;
			state->update_value = regs->regs[temp_reg];
			regs->regs[temp_reg] = (unsigned long) (pc+1);
			regs->flags |= PT_FLAGS_RESTORE_REGS;
			bundle = move_X1(bundle, target_reg, temp_reg);
		}
	} else {
		int opcode = get_Opcode_Y2(bundle);

		switch (opcode) {
		/* loads */
		case LH_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 1;
			break;

		case LH_U_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 0;
			break;

		case LW_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 4;
			break;

		/* stores */
		case SH_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 2;
			break;

		case SW_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 4;
			break;
		}
	}

	/*
	 * Check if we need to rewrite an unaligned load/store.
	 * Returning zero is a special value meaning we need to SIGSEGV.
	 */
	if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
		bundle = rewrite_load_store_unaligned(state, bundle, regs,
						      mem_op, size, sign_ext);
		if (bundle == 0)
			return;
	}

	/* write the bundle to our execution area */
	buffer = state->buffer;
	err = __put_user(bundle, buffer++);

	/*
	 * If we're really single-stepping, we take an INT_ILL after.
	 * If we're just handling an unaligned access, we can just
	 * jump directly back to where we were in user code.
	 */
	if (is_single_step) {
		err |= __put_user(__single_step_ill_insn, buffer++);
		err |= __put_user(__single_step_ill_insn, buffer++);
	} else {
		long delta;

		if (state->update) {
			/* We have some state to update; do it inline */
			int ha16;
			bundle = __single_step_addli_insn;

			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_Imm16_X1(state->update_value);
			err |= __put_user(bundle, buffer++);
			bundle = __single_step_auli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_SrcA_X1(state->update_reg);
			ha16 = (state->update_value + 0x8000) >> 16;
			bundle |= create_Imm16_X1(ha16);
			err |= __put_user(bundle, buffer++);
			state->update = 0;
		}
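
		/*
		 * The addli/auli pair above reconstructs the full 32-bit
		 * update_value: addli sign-extends its 16-bit immediate, so
		 * the high half is biased by 0x8000 ("ha16") to cancel that
		 * sign extension when auli adds ha16 << 16.
		 */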

		/* End with a jump back to the next instruction */
		delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
			(unsigned long)buffer) >>
			TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
		bundle = __single_step_j_insn;
		bundle |= create_JOffLong_X1(delta);
		err |= __put_user(bundle, buffer++);
	}

	if (err) {
		pr_err("Fault when writing to single-step buffer\n");
		return;
	}

	/*
	 * Flush the buffer.
	 * We do a local flush only, since this is a thread-specific buffer.
	 */
	__flush_icache_range((unsigned long)state->buffer,
			     (unsigned long)buffer);

	/* Indicate enabled */
	state->is_enabled = is_single_step;
	regs->pc = (unsigned long)state->buffer;

	/* Fault immediately if we are coming back from a syscall. */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc += 8;
}

#else

#include <linux/smp.h>
#include <linux/ptrace.h>
#include <arch/spr_def.h>
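
/*
 * ss_saved_pc remembers, per cpu, the user PC at which single_step_once()
 * armed the hardware stepper; gx_singlestep_handle() compares it with the
 * trap PC to tell whether an instruction actually retired (see below).
 */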
static DEFINE_PER_CPU(unsigned long, ss_saved_pc);

/*
 * Called directly on the occasion of an interrupt.
 *
 * If the process doesn't have single step set, then we use this as an
 * opportunity to turn single step off.
 *
 * It has been mentioned that we could conditionally turn off single stepping
 * on each entry into the kernel and rely on single_step_once to turn it
 * on for the processes that matter (as we already do), but this
 * implementation is somewhat more efficient in that we muck with registers
 * once on a bum interrupt rather than on every entry into the kernel.
 *
 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
 * so we have to run through this process again before we can say that an
 * instruction has executed.
 *
 * swint will set CANCELED, but it's a legitimate instruction.  Fortunately
 * it changes the PC.  If it hasn't changed, then we know that the interrupt
 * wasn't generated by swint and we'll need to run this process again before
 * we can say an instruction has executed.
 *
 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
 * on with our lives.
 */
void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	struct thread_info *info = (void *)current_thread_info();
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	if (is_single_step == 0) {
		__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);

	} else if ((*ss_pc != regs->pc) ||
		   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {

		ptrace_notify(SIGTRAP);
		control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
		control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
		__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	}
}

/*
 * Called from need_singlestep.  Set up the control registers and the enable
 * register, then return back.
 */
void single_step_once(struct pt_regs *regs)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	*ss_pc = regs->pc;
	control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
	control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
	__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}

void single_step_execve(void)
{
	/* Nothing */
}

#endif /* !__tilegx__ */