/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * A code-rewriter that enables instruction single-stepping.
 * Derived from iLib's single-stepping code.
 */

#ifndef __tilegx__   /* Hardware support for single step unavailable. */

/* These functions are only used on the TILE platform */
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <asm/cacheflush.h>
#include <asm/opcode-tile.h>
#include <asm/opcode_constants.h>
#include <arch/abi.h>

#define signExtend17(val) sign_extend((val), 17)
#define TILE_X1_MASK (0xffffffffULL << 31)
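/* Presumably the bundle bits holding the X1 slot; move_X1() and
 * addi_X1() below clear these before OR'ing in a new X1 encoding. */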

int unaligned_printk;

static int __init setup_unaligned_printk(char *str)
{
	long val;
	if (strict_strtol(str, 0, &val) != 0)
		return 0;
	unaligned_printk = val;
	pr_info("Printk for each unaligned data access is %s\n",
		unaligned_printk ? "enabled" : "disabled");
	return 1;
}
__setup("unaligned_printk=", setup_unaligned_printk);

unsigned int unaligned_fixup_count;

enum mem_op {
	MEMOP_NONE,
	MEMOP_LOAD,
	MEMOP_STORE,
	MEMOP_LOAD_POSTINCR,
	MEMOP_STORE_POSTINCR
};
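
/*
 * Helpers for patching fields of an existing bundle in place: a
 * create_*(-1) call yields an all-ones mask for that field, which is
 * cleared from the bundle before the new field encoding is OR'd in.
 */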
static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
{
	tile_bundle_bits result;

	/* mask out the old offset */
	tile_bundle_bits mask = create_BrOff_X1(-1);
	result = n & (~mask);

	/* or in the new offset */
	result |= create_BrOff_X1(offset);

	return result;
}

static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
{
	tile_bundle_bits result;
	tile_bundle_bits op;

	result = n & (~TILE_X1_MASK);

	op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
		create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
		create_Dest_X1(dest) |
		create_SrcB_X1(TREG_ZERO) |
		create_SrcA_X1(src);

	result |= op;

	return result;
}

static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
{
	return move_X1(n, TREG_ZERO, TREG_ZERO);
}

static inline tile_bundle_bits addi_X1(
	tile_bundle_bits n, int dest, int src, int imm)
{
	n &= ~TILE_X1_MASK;

	n |= (create_SrcA_X1(src) |
		create_Dest_X1(dest) |
		create_Imm8_X1(imm) |
		create_S_X1(0) |
		create_Opcode_X1(IMM_0_OPCODE_X1) |
		create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));

	return n;
}
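
/*
 * Emulate an unaligned load or store with copy_{from,to}_user(), then
 * return a replacement bundle (a Y2 prefetch, an addi for the
 * postincrement forms, or an X1 nop) so that any remaining side effects
 * still happen when the bundle is run from the step buffer.  Returning
 * zero means a signal was raised and the caller should give up.
 */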
static tile_bundle_bits rewrite_load_store_unaligned(
	struct single_step_state *state,
	tile_bundle_bits bundle,
	struct pt_regs *regs,
	enum mem_op mem_op,
	int size, int sign_ext)
{
	unsigned char __user *addr;
	int val_reg, addr_reg, err, val;

	/* Get address and value registers */
	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
		addr_reg = get_SrcA_Y2(bundle);
		val_reg = get_SrcBDest_Y2(bundle);
	} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_Dest_X1(bundle);
	} else {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_SrcB_X1(bundle);
	}

	/*
	 * If registers are not GPRs, don't try to handle it.
	 *
	 * FIXME: we could handle non-GPR loads by getting the real value
	 * from memory, writing it to the single step buffer, using a
	 * temp_reg to hold a pointer to that memory, then executing that
	 * instruction and resetting temp_reg.  For non-GPR stores, it's a
	 * little trickier; we could use the single step buffer for that
	 * too, but we'd have to add some more state bits so that we could
	 * call back in here to copy that value to the real target.  For
	 * now, we just handle the simple case.
	 */
	if ((val_reg >= PTREGS_NR_GPRS &&
	     (val_reg != TREG_ZERO ||
	      mem_op == MEMOP_LOAD ||
	      mem_op == MEMOP_LOAD_POSTINCR)) ||
	    addr_reg >= PTREGS_NR_GPRS)
		return bundle;

	/* If it's aligned, don't handle it specially */
	addr = (void __user *)regs->regs[addr_reg];
	if (((unsigned long)addr % size) == 0)
		return bundle;

#ifndef __LITTLE_ENDIAN
# error We assume little-endian representation with copy_xx_user size 2 here
#endif
	/* Handle unaligned load/store */
	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		unsigned short val_16;
		switch (size) {
		case 2:
			err = copy_from_user(&val_16, addr, sizeof(val_16));
			val = sign_ext ? ((short)val_16) : val_16;
			break;
		case 4:
			err = copy_from_user(&val, addr, sizeof(val));
			break;
		default:
			BUG();
		}
		if (err == 0) {
			state->update_reg = val_reg;
			state->update_value = val;
			state->update = 1;
		}
	} else {
		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
		err = copy_to_user(addr, &val, size);
	}

	if (err) {
		siginfo_t info = {
			.si_signo = SIGSEGV,
			.si_code = SEGV_MAPERR,
			.si_addr = addr
		};
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_fixup == 0) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_printk || unaligned_fixup_count == 0) {
		pr_info("Process %d/%s: PC %#lx: Fixup of"
			" unaligned %s at %#lx.\n",
			current->pid, current->comm, regs->pc,
			(mem_op == MEMOP_LOAD ||
			 mem_op == MEMOP_LOAD_POSTINCR) ?
			"load" : "store",
			(unsigned long)addr);
		if (!unaligned_printk) {
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
		}
	}
	++unaligned_fixup_count;

	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
		/* Convert the Y2 instruction to a prefetch. */
		bundle &= ~(create_SrcBDest_Y2(-1) |
			    create_Opcode_Y2(-1));
		bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
			   create_Opcode_Y2(LW_OPCODE_Y2));
	/* Replace the load postincr with an addi */
	} else if (mem_op == MEMOP_LOAD_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Imm8_X1(bundle));
	/* Replace the store postincr with an addi */
	} else if (mem_op == MEMOP_STORE_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Dest_Imm8_X1(bundle));
	} else {
		/* Convert the X1 instruction to a nop. */
		bundle &= ~(create_Opcode_X1(-1) |
			    create_UnShOpcodeExtension_X1(-1) |
			    create_UnOpcodeExtension_X1(-1));
		bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
			   create_UnShOpcodeExtension_X1(
				   UN_0_SHUN_0_OPCODE_X1) |
			   create_UnOpcodeExtension_X1(
				   NOP_UN_0_SHUN_0_OPCODE_X1));
	}

	return bundle;
}

/*
 * Called after execve() has started the new image.  This allows us
 * to reset the info state.  Note that the mmap'ed memory, if there
 * was any, has already been unmapped by the exec.
 */
void single_step_execve(void)
{
	struct thread_info *ti = current_thread_info();
	kfree(ti->step_state);
	ti->step_state = NULL;
}

/**
 * single_step_once() - entry point when single stepping has been triggered.
 * @regs: The machine register state
 *
 * When we arrive at this routine via a trampoline, the single step
 * engine copies the executing bundle to the single step buffer.
 * If the instruction is a conditional branch, then the target is
 * reset to one past the next instruction.  If the instruction
 * sets the lr, then that is noted.  If the instruction is a jump
 * or call, then the new target pc is preserved and the current
 * bundle instruction set to null.
 *
 * The necessary post-single-step rewriting information is stored in
 * the single_step_state structure.  We use data segment values because
 * the stack will be rewound when we run the rewritten single-stepped
 * instruction.
 */
void single_step_once(struct pt_regs *regs)
{
	extern tile_bundle_bits __single_step_ill_insn;
	extern tile_bundle_bits __single_step_j_insn;
	extern tile_bundle_bits __single_step_addli_insn;
	extern tile_bundle_bits __single_step_auli_insn;
	struct thread_info *info = (void *)current_thread_info();
	struct single_step_state *state = info->step_state;
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	tile_bundle_bits __user *buffer, *pc;
	tile_bundle_bits bundle;
	int temp_reg;
	int target_reg = TREG_LR;
	int err;
	enum mem_op mem_op = MEMOP_NONE;
	int size = 0, sign_ext = 0;	/* happy compiler */
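
	/*
	 * We get here both for true single-stepping (TIF_SINGLESTEP set)
	 * and for unaligned-access fixups.  The bundles below are the
	 * templates copied into the per-thread step buffer: the "ill"
	 * pair follows a stepped bundle, the addli/auli pair is patched
	 * to restore a saved register, and the "j" is patched to jump
	 * back to user code after an unaligned fixup.
	 */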
	asm(
	"	.pushsection .rodata.single_step\n"
	"	.align 8\n"
	"	.globl __single_step_ill_insn\n"
	"__single_step_ill_insn:\n"
	"	ill\n"
	"	.globl __single_step_addli_insn\n"
	"__single_step_addli_insn:\n"
	"	{ nop; addli r0, zero, 0 }\n"
	"	.globl __single_step_auli_insn\n"
	"__single_step_auli_insn:\n"
	"	{ nop; auli r0, r0, 0 }\n"
	"	.globl __single_step_j_insn\n"
	"__single_step_j_insn:\n"
	"	j .\n"
	"	.popsection\n"
	);

	if (state == NULL) {
		/* allocate a page of writable, executable memory */
		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
		if (state == NULL) {
			pr_err("Out of kernel memory trying to single-step\n");
			return;
		}

		/* allocate a cache line of writable, executable memory */
		down_write(&current->mm->mmap_sem);
		buffer = (void __user *) do_mmap(NULL, 0, 64,
					PROT_EXEC | PROT_READ | PROT_WRITE,
					MAP_PRIVATE | MAP_ANONYMOUS,
					0);
		up_write(&current->mm->mmap_sem);

		if (IS_ERR((void __force *)buffer)) {
			kfree(state);
			pr_err("Out of kernel pages trying to single-step\n");
			return;
		}

		state->buffer = buffer;
		state->is_enabled = 0;
		info->step_state = state;

		/* Validate our stored instruction patterns */
		BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
		       ADDLI_OPCODE_X1);
		BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
		       AULI_OPCODE_X1);
		BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
		BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
		BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
	}

	/*
	 * If we are returning from a syscall, we still haven't hit the
	 * "ill" for the swint1 instruction.  So back the PC up to be
	 * pointing at the swint1, but we'll actually return directly
	 * back to the "ill" so we come back in via SIGILL as if we
	 * had "executed" the swint1 without ever being in kernel space.
	 */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc -= 8;

	pc = (tile_bundle_bits __user *)(regs->pc);
	if (get_user(bundle, pc) != 0) {
		pr_err("Couldn't read instruction at %p trying to step\n", pc);
		return;
	}

	/* We'll follow the instruction with 2 ill op bundles */
	state->orig_pc = (unsigned long)pc;
	state->next_pc = (unsigned long)(pc + 1);
	state->branch_next_pc = 0;
	state->update = 0;

	if (!(bundle & TILE_BUNDLE_Y_ENCODING_MASK)) {
		/* two wide, check for control flow */
		int opcode = get_Opcode_X1(bundle);

		switch (opcode) {
		/* branches */
		case BRANCH_OPCODE_X1:
		{
			s32 offset = signExtend17(get_BrOff_X1(bundle));

			/*
			 * For branches, we use a rewriting trick to let the
			 * hardware evaluate whether the branch is taken or
			 * untaken.  We record the target offset and then
			 * rewrite the branch instruction to target 1 insn
			 * ahead if the branch is taken.  We then follow the
			 * rewritten branch with two bundles, each containing
			 * an "ill" instruction.  The supervisor examines the
			 * pc after the single step code is executed, and if
			 * the pc is the first ill instruction, then the
			 * branch (if any) was not taken.  If the pc is the
			 * second ill instruction, then the branch was
			 * taken.  The new pc is computed for these cases, and
			 * inserted into the registers for the thread.  If
			 * the pc is the start of the single step code, then
			 * an exception or interrupt was taken before the
			 * code started processing, and the same "original"
			 * pc is restored.  This change, different from the
			 * original implementation, has the advantage of
			 * executing a single user instruction.
			 */
			state->branch_next_pc = (unsigned long)(pc + offset);

			/* rewrite branch offset to go forward one bundle */
			bundle = set_BrOff_X1(bundle, 2);
		}
		break;

		/* jumps */
		case JALB_OPCODE_X1:
		case JALF_OPCODE_X1:
			state->update = 1;
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			break;

		case JB_OPCODE_X1:
		case JF_OPCODE_X1:
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			bundle = nop_X1(bundle);
			break;

		case SPECIAL_0_OPCODE_X1:
			switch (get_RRROpcodeExtension_X1(bundle)) {
			/* jump-register */
			case JALRP_SPECIAL_0_OPCODE_X1:
			case JALR_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				break;

			case JRP_SPECIAL_0_OPCODE_X1:
			case JR_SPECIAL_0_OPCODE_X1:
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				bundle = nop_X1(bundle);
				break;

			case LNK_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				target_reg = get_Dest_X1(bundle);
				break;

			/* stores */
			case SH_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 2;
				break;

			case SW_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 4;
				break;
			}
			break;

		/* loads and iret */
		case SHUN_0_OPCODE_X1:
			if (get_UnShOpcodeExtension_X1(bundle) ==
			    UN_0_SHUN_0_OPCODE_X1) {
				switch (get_UnOpcodeExtension_X1(bundle)) {
				case LH_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 1;
					break;

				case LH_U_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 0;
					break;

				case LW_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 4;
					break;

				case IRET_UN_0_SHUN_0_OPCODE_X1:
				{
					unsigned long ex0_0 = __insn_mfspr(
						SPR_EX_CONTEXT_0_0);
					unsigned long ex0_1 = __insn_mfspr(
						SPR_EX_CONTEXT_0_1);
					/*
					 * Special-case it if we're iret'ing
					 * to PL0 again.  Otherwise just let
					 * it run and it will generate SIGILL.
					 */
					if (EX1_PL(ex0_1) == USER_PL) {
						state->next_pc = ex0_0;
						regs->ex1 = ex0_1;
						bundle = nop_X1(bundle);
					}
				}
				}
			}
			break;

#if CHIP_HAS_WH64()
		/* postincrement operations */
		case IMM_0_OPCODE_X1:
			switch (get_ImmOpcodeExtension_X1(bundle)) {
			case LWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 4;
				break;

			case LHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 1;
				break;

			case LHADD_U_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 0;
				break;

			case SWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 4;
				break;

			case SHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 2;
				break;

			default:
				break;
			}
			break;
#endif /* CHIP_HAS_WH64() */
		}

		if (state->update) {
			/*
			 * Get an available register.  We start with a
			 * bitmask with 1's for available registers.
			 * We truncate to the low 32 registers since
			 * we are guaranteed to have set bits in the
			 * low 32 bits, then use ctz to pick the first.
			 */
			u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
					   (1ULL << get_SrcA_X0(bundle)) |
					   (1ULL << get_SrcB_X0(bundle)) |
					   (1ULL << target_reg));
			temp_reg = __builtin_ctz(mask);
			state->update_reg = temp_reg;
			state->update_value = regs->regs[temp_reg];
			regs->regs[temp_reg] = (unsigned long) (pc+1);
			regs->flags |= PT_FLAGS_RESTORE_REGS;
			bundle = move_X1(bundle, target_reg, temp_reg);
		}
	} else {
		int opcode = get_Opcode_Y2(bundle);

		switch (opcode) {
		/* loads */
		case LH_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 1;
			break;

		case LH_U_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 0;
			break;

		case LW_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 4;
			break;

		/* stores */
		case SH_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 2;
			break;

		case SW_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 4;
			break;
		}
	}

	/*
	 * Check if we need to rewrite an unaligned load/store.
	 * Returning zero is a special value meaning we need to SIGSEGV.
	 */
	if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
		bundle = rewrite_load_store_unaligned(state, bundle, regs,
						      mem_op, size, sign_ext);
		if (bundle == 0)
			return;
	}
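
	/*
	 * Sketch of the step buffer built below:
	 *   real single-step:  [ rewritten bundle ][ ill ][ ill ]
	 *   unaligned fixup:   [ rewritten bundle ][ addli ][ auli ][ j back ]
	 * where the addli/auli pair appears only if a register update is
	 * pending (state->update).
	 */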

	/* write the bundle to our execution area */
	buffer = state->buffer;
	err = __put_user(bundle, buffer++);

	/*
	 * If we're really single-stepping, we take an INT_ILL after.
	 * If we're just handling an unaligned access, we can just
	 * jump directly back to where we were in user code.
	 */
	if (is_single_step) {
		err |= __put_user(__single_step_ill_insn, buffer++);
		err |= __put_user(__single_step_ill_insn, buffer++);
	} else {
		long delta;

		if (state->update) {
			/* We have some state to update; do it inline */
			int ha16;
			bundle = __single_step_addli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_Imm16_X1(state->update_value);
			err |= __put_user(bundle, buffer++);

			bundle = __single_step_auli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_SrcA_X1(state->update_reg);
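			/*
			 * addli sign-extends its 16-bit immediate, so bias
			 * the high half by 0x8000 ("high adjusted") so that
			 * addli followed by auli reproduces the full 32-bit
			 * update_value.
			 */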
			ha16 = (state->update_value + 0x8000) >> 16;
			bundle |= create_Imm16_X1(ha16);
			err |= __put_user(bundle, buffer++);
			state->update = 0;
		}

		/* End with a jump back to the next instruction */
		delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
			 (unsigned long)buffer) >>
			TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
		bundle = __single_step_j_insn;
		bundle |= create_JOffLong_X1(delta);
		err |= __put_user(bundle, buffer++);
	}

	if (err) {
		pr_err("Fault when writing to single-step buffer\n");
		return;
	}

	/*
	 * Flush the buffer.
	 * We do a local flush only, since this is a thread-specific buffer.
	 */
	__flush_icache_range((unsigned long)state->buffer,
			     (unsigned long)buffer);

	/* Indicate enabled */
	state->is_enabled = is_single_step;
	regs->pc = (unsigned long)state->buffer;

	/* Fault immediately if we are coming back from a syscall. */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc += 8;
}

#else

#include <linux/smp.h>
#include <linux/ptrace.h>
#include <arch/spr_def.h>
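
/*
 * PC recorded when single-step was last armed for this cpu; used by
 * gx_singlestep_handle() to tell a step that was cancelled by an
 * interrupt apart from one that actually executed an instruction.
 */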
static DEFINE_PER_CPU(unsigned long, ss_saved_pc);

/*
 * Called directly on the occasion of an interrupt.
 *
 * If the process doesn't have single step set, then we use this as an
 * opportunity to turn single step off.
 *
 * It has been mentioned that we could conditionally turn off single stepping
 * on each entry into the kernel and rely on single_step_once to turn it
 * on for the processes that matter (as we already do), but this
 * implementation is somewhat more efficient in that we muck with registers
 * once on a bum interrupt rather than on every entry into the kernel.
 *
 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
 * so we have to run through this process again before we can say that an
 * instruction has executed.
 *
 * swint will set CANCELED, but it's a legitimate instruction.  Fortunately
 * it changes the PC.  If it hasn't changed, then we know that the interrupt
 * wasn't generated by swint and we'll need to run this process again before
 * we can say an instruction has executed.
 *
 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
 * on with our lives.
 */
void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	struct thread_info *info = (void *)current_thread_info();
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	if (is_single_step == 0) {
		__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
	} else if ((*ss_pc != regs->pc) ||
		   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
		ptrace_notify(SIGTRAP);
		control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
		control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
		__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	}
}

/*
 * Called from need_singlestep.  Set up the control registers and the enable
 * register, then return back.
 */
void single_step_once(struct pt_regs *regs)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	*ss_pc = regs->pc;
	control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
	control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
	__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
}

void single_step_execve(void)
{
	/* Nothing */
}

#endif /* !__tilegx__ */