/* single_step.c */
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * A code-rewriter that enables instruction single-stepping.
 * Derived from iLib's single-stepping code.
 */
  17. #ifndef __tilegx__ /* No support for single-step yet. */
  18. /* These functions are only used on the TILE platform */
  19. #include <linux/slab.h>
  20. #include <linux/thread_info.h>
  21. #include <linux/uaccess.h>
  22. #include <linux/mman.h>
  23. #include <linux/types.h>
  24. #include <asm/cacheflush.h>
  25. #include <asm/opcode-tile.h>
  26. #include <asm/opcode_constants.h>
  27. #include <arch/abi.h>
  28. #define signExtend17(val) sign_extend((val), 17)
  29. #define TILE_X1_MASK (0xffffffffULL << 31)
  30. int unaligned_printk;
  31. static int __init setup_unaligned_printk(char *str)
  32. {
  33. long val;
  34. if (strict_strtol(str, 0, &val) != 0)
  35. return 0;
  36. unaligned_printk = val;
  37. printk("Printk for each unaligned data accesses is %s\n",
  38. unaligned_printk ? "enabled" : "disabled");
  39. return 1;
  40. }
  41. __setup("unaligned_printk=", setup_unaligned_printk);
/*
 * Running count of unaligned fixups performed; the hint message below
 * says it is reported via /proc/sys/tile/unaligned_fixup/count.
 */
unsigned int unaligned_fixup_count;

/*
 * Classification of the memory operation (if any) performed by the
 * bundle being single-stepped; MEMOP_NONE means "not a load/store".
 * The *_POSTINCR variants also bump the address register.
 */
enum mem_op {
	MEMOP_NONE,
	MEMOP_LOAD,
	MEMOP_STORE,
	MEMOP_LOAD_POSTINCR,
	MEMOP_STORE_POSTINCR
};
  50. static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, int32_t offset)
  51. {
  52. tile_bundle_bits result;
  53. /* mask out the old offset */
  54. tile_bundle_bits mask = create_BrOff_X1(-1);
  55. result = n & (~mask);
  56. /* or in the new offset */
  57. result |= create_BrOff_X1(offset);
  58. return result;
  59. }
  60. static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
  61. {
  62. tile_bundle_bits result;
  63. tile_bundle_bits op;
  64. result = n & (~TILE_X1_MASK);
  65. op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
  66. create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
  67. create_Dest_X1(dest) |
  68. create_SrcB_X1(TREG_ZERO) |
  69. create_SrcA_X1(src) ;
  70. result |= op;
  71. return result;
  72. }
/*
 * Rewrite the X1 slot of bundle "n" to a no-op, implemented as
 * "move zero, zero".
 */
static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
{
	return move_X1(n, TREG_ZERO, TREG_ZERO);
}
  77. static inline tile_bundle_bits addi_X1(
  78. tile_bundle_bits n, int dest, int src, int imm)
  79. {
  80. n &= ~TILE_X1_MASK;
  81. n |= (create_SrcA_X1(src) |
  82. create_Dest_X1(dest) |
  83. create_Imm8_X1(imm) |
  84. create_S_X1(0) |
  85. create_Opcode_X1(IMM_0_OPCODE_X1) |
  86. create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));
  87. return n;
  88. }
/*
 * Emulate an unaligned load or store performed by the current bundle.
 *
 * Loads are done here via copy_from_user(), with the result recorded
 * in *state so the supervisor can write it to the destination register
 * after the single-step; stores are done here via copy_to_user().  The
 * returned bundle is rewritten so the actual memory access no longer
 * occurs when it executes (only remaining side effects, such as a
 * postincrement add, are kept).  Returns 0 if a signal was forced and
 * the caller should abandon the single-step.
 */
static tile_bundle_bits rewrite_load_store_unaligned(
		struct single_step_state *state,
		tile_bundle_bits bundle,
		struct pt_regs *regs,
		enum mem_op mem_op,
		int size, int sign_ext)
{
	unsigned char *addr;
	int val_reg, addr_reg, err, val;

	/* Get address and value registers */
	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
		/* Y-format: the memory op is in the Y2 slot. */
		addr_reg = get_SrcA_Y2(bundle);
		val_reg = get_SrcBDest_Y2(bundle);
	} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_Dest_X1(bundle);
	} else {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_SrcB_X1(bundle);
	}

	/*
	 * If registers are not GPRs, don't try to handle it.
	 *
	 * FIXME: we could handle non-GPR loads by getting the real value
	 * from memory, writing it to the single step buffer, using a
	 * temp_reg to hold a pointer to that memory, then executing that
	 * instruction and resetting temp_reg.  For non-GPR stores, it's a
	 * little trickier; we could use the single step buffer for that
	 * too, but we'd have to add some more state bits so that we could
	 * call back in here to copy that value to the real target.  For
	 * now, we just handle the simple case.
	 */
	if ((val_reg >= PTREGS_NR_GPRS &&
	     (val_reg != TREG_ZERO ||
	      mem_op == MEMOP_LOAD ||
	      mem_op == MEMOP_LOAD_POSTINCR)) ||
	    addr_reg >= PTREGS_NR_GPRS)
		return bundle;

	/* If it's aligned, don't handle it specially */
	addr = (void *)regs->regs[addr_reg];
	if (((unsigned long)addr % size) == 0)
		return bundle;

#ifndef __LITTLE_ENDIAN
# error We assume little-endian representation with copy_xx_user size 2 here
#endif
	/* Handle unaligned load/store */
	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		unsigned short val_16;
		switch (size) {
		case 2:
			err = copy_from_user(&val_16, addr, sizeof(val_16));
			/* Sign- or zero-extend the 16-bit load to 32 bits. */
			val = sign_ext ? ((short)val_16) : val_16;
			break;
		case 4:
			err = copy_from_user(&val, addr, sizeof(val));
			break;
		default:
			BUG();
		}
		if (err == 0) {
			/*
			 * Record the loaded value so the supervisor can
			 * write it back to val_reg after the step.
			 */
			state->update_reg = val_reg;
			state->update_value = val;
			state->update = 1;
		}
	} else {
		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
		err = copy_to_user(addr, &val, size);
	}

	if (err) {
		/* The emulated access faulted: force SIGSEGV ourselves. */
		siginfo_t info = {
			.si_signo = SIGSEGV,
			.si_code = SEGV_MAPERR,
			.si_addr = (void __user *)addr
		};
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	if (unaligned_fixup == 0) {
		/* Fixups are disabled by policy: report SIGBUS instead. */
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = (void __user *)addr
		};
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	/* Log the first fixup always; later ones only if requested. */
	if (unaligned_printk || unaligned_fixup_count == 0) {
		printk("Process %d/%s: PC %#lx: Fixup of"
		       " unaligned %s at %#lx.\n",
		       current->pid, current->comm, regs->pc,
		       (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) ?
		       "load" : "store",
		       (unsigned long)addr);
		if (!unaligned_printk) {
			/* One-time hint about the slowdown and knobs. */
			printk("\n"
"Unaligned fixups in the kernel will slow your application considerably.\n"
"You can find them by writing \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n"
"which requests the kernel show all unaligned fixups, or writing a \"0\"\n"
"to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n"
"access will become a SIGBUS you can debug. No further warnings will be\n"
"shown so as to avoid additional slowdown, but you can track the number\n"
"of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n"
"Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n"
"\n");
		}
	}
	++unaligned_fixup_count;

	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
		/*
		 * Convert the Y2 instruction to a prefetch: a load whose
		 * destination is the zero register.
		 */
		bundle &= ~(create_SrcBDest_Y2(-1) |
			    create_Opcode_Y2(-1));
		bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
			   create_Opcode_Y2(LW_OPCODE_Y2));
	/* Replace the load postincr with an addi */
	} else if (mem_op == MEMOP_LOAD_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Imm8_X1(bundle));
	/* Replace the store postincr with an addi */
	} else if (mem_op == MEMOP_STORE_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Dest_Imm8_X1(bundle));
	} else {
		/* Convert the X1 instruction to a nop. */
		bundle &= ~(create_Opcode_X1(-1) |
			    create_UnShOpcodeExtension_X1(-1) |
			    create_UnOpcodeExtension_X1(-1));
		bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
			   create_UnShOpcodeExtension_X1(
				   UN_0_SHUN_0_OPCODE_X1) |
			   create_UnOpcodeExtension_X1(
				   NOP_UN_0_SHUN_0_OPCODE_X1));
	}

	return bundle;
}
/**
 * single_step_once() - entry point when single stepping has been triggered.
 * @regs: The machine register state
 *
 * When we arrive at this routine via a trampoline, the single step
 * engine copies the executing bundle to the single step buffer.
 * If the instruction is a condition branch, then the target is
 * reset to one past the next instruction.  If the instruction
 * sets the lr, then that is noted.  If the instruction is a jump
 * or call, then the new target pc is preserved and the current
 * bundle instruction set to null.
 *
 * The necessary post-single-step rewriting information is stored in
 * single_step_state->  We use data segment values because the
 * stack will be rewound when we run the rewritten single-stepped
 * instruction.
 */
void single_step_once(struct pt_regs *regs)
{
	/* Template instructions emitted by the asm() below. */
	extern tile_bundle_bits __single_step_ill_insn;
	extern tile_bundle_bits __single_step_j_insn;
	extern tile_bundle_bits __single_step_addli_insn;
	extern tile_bundle_bits __single_step_auli_insn;
	struct thread_info *info = (void *)current_thread_info();
	struct single_step_state *state = info->step_state;
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	tile_bundle_bits *buffer, *pc;
	tile_bundle_bits bundle;
	int temp_reg;
	/* Register that receives the link address (lr unless lnk says else). */
	int target_reg = TREG_LR;
	int err;
	enum mem_op mem_op = MEMOP_NONE;
	int size = 0, sign_ext = 0;	/* happy compiler */

	/*
	 * Emit the template bundles into .rodata so we can validate and
	 * copy/patch them below.
	 */
	asm(
" .pushsection .rodata.single_step\n"
" .align 8\n"
" .globl __single_step_ill_insn\n"
"__single_step_ill_insn:\n"
" ill\n"
" .globl __single_step_addli_insn\n"
"__single_step_addli_insn:\n"
" { nop; addli r0, zero, 0 }\n"
" .globl __single_step_auli_insn\n"
"__single_step_auli_insn:\n"
" { nop; auli r0, r0, 0 }\n"
" .globl __single_step_j_insn\n"
"__single_step_j_insn:\n"
" j .\n"
" .popsection\n"
	);

	/* First use by this thread: set up its per-thread step state. */
	if (state == NULL) {
		/* allocate a page of writable, executable memory */
		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
		if (state == NULL) {
			printk("Out of kernel memory trying to single-step\n");
			return;
		}

		/* allocate a cache line of writable, executable memory */
		down_write(&current->mm->mmap_sem);
		buffer = (void *) do_mmap(0, 0, 64,
					  PROT_EXEC | PROT_READ | PROT_WRITE,
					  MAP_PRIVATE | MAP_ANONYMOUS,
					  0);
		up_write(&current->mm->mmap_sem);

		/* do_mmap() returns a small negative value on failure. */
		if ((int)buffer < 0 && (int)buffer > -PAGE_SIZE) {
			kfree(state);
			printk("Out of kernel pages trying to single-step\n");
			return;
		}

		state->buffer = buffer;
		state->is_enabled = 0;
		info->step_state = state;

		/* Validate our stored instruction patterns */
		BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
		       ADDLI_OPCODE_X1);
		BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
		       AULI_OPCODE_X1);
		BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
		BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
		BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
	}

	/*
	 * If we are returning from a syscall, we still haven't hit the
	 * "ill" for the swint1 instruction.  So back the PC up to be
	 * pointing at the swint1, but we'll actually return directly
	 * back to the "ill" so we come back in via SIGILL as if we
	 * had "executed" the swint1 without ever being in kernel space.
	 */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc -= 8;

	pc = (tile_bundle_bits *)(regs->pc);
	bundle = pc[0];

	/* We'll follow the instruction with 2 ill op bundles */
	state->orig_pc = (unsigned long) pc;
	state->next_pc = (unsigned long)(pc + 1);
	state->branch_next_pc = 0;
	state->update = 0;

	if (!(bundle & TILE_BUNDLE_Y_ENCODING_MASK)) {
		/* two wide, check for control flow */
		int opcode = get_Opcode_X1(bundle);

		switch (opcode) {
		/* branches */
		case BRANCH_OPCODE_X1:
		{
			int32_t offset = signExtend17(get_BrOff_X1(bundle));

			/*
			 * For branches, we use a rewriting trick to let the
			 * hardware evaluate whether the branch is taken or
			 * untaken.  We record the target offset and then
			 * rewrite the branch instruction to target 1 insn
			 * ahead if the branch is taken.  We then follow the
			 * rewritten branch with two bundles, each containing
			 * an "ill" instruction.  The supervisor examines the
			 * pc after the single step code is executed, and if
			 * the pc is the first ill instruction, then the
			 * branch (if any) was not taken.  If the pc is the
			 * second ill instruction, then the branch was
			 * taken.  The new pc is computed for these cases, and
			 * inserted into the registers for the thread.  If
			 * the pc is the start of the single step code, then
			 * an exception or interrupt was taken before the
			 * code started processing, and the same "original"
			 * pc is restored.  This change, different from the
			 * original implementation, has the advantage of
			 * executing a single user instruction.
			 */
			state->branch_next_pc = (unsigned long)(pc + offset);

			/* rewrite branch offset to go forward one bundle */
			bundle = set_BrOff_X1(bundle, 2);
		}
		break;

		/* jumps */
		case JALB_OPCODE_X1:
		case JALF_OPCODE_X1:
			/* Calls: need to fix up the lr afterward. */
			state->update = 1;
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			break;

		case JB_OPCODE_X1:
		case JF_OPCODE_X1:
			/* Plain jumps: note the target and null the insn. */
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			bundle = nop_X1(bundle);
			break;

		case SPECIAL_0_OPCODE_X1:
			switch (get_RRROpcodeExtension_X1(bundle)) {
			/* jump-register */
			case JALRP_SPECIAL_0_OPCODE_X1:
			case JALR_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				break;

			case JRP_SPECIAL_0_OPCODE_X1:
			case JR_SPECIAL_0_OPCODE_X1:
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				bundle = nop_X1(bundle);
				break;

			case LNK_SPECIAL_0_OPCODE_X1:
				/* lnk writes its own dest, not lr. */
				state->update = 1;
				target_reg = get_Dest_X1(bundle);
				break;

			/* stores */
			case SH_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 2;
				break;

			case SW_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 4;
				break;
			}
			break;

		/* loads and iret */
		case SHUN_0_OPCODE_X1:
			if (get_UnShOpcodeExtension_X1(bundle) ==
			    UN_0_SHUN_0_OPCODE_X1) {
				switch (get_UnOpcodeExtension_X1(bundle)) {
				case LH_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 1;
					break;

				case LH_U_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 0;
					break;

				case LW_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 4;
					break;

				case IRET_UN_0_SHUN_0_OPCODE_X1:
				{
					unsigned long ex0_0 = __insn_mfspr(
						SPR_EX_CONTEXT_0_0);
					unsigned long ex0_1 = __insn_mfspr(
						SPR_EX_CONTEXT_0_1);
					/*
					 * Special-case it if we're iret'ing
					 * to PL0 again.  Otherwise just let
					 * it run and it will generate SIGILL.
					 */
					if (EX1_PL(ex0_1) == USER_PL) {
						state->next_pc = ex0_0;
						regs->ex1 = ex0_1;
						bundle = nop_X1(bundle);
					}
				}
				}
			}
			break;

#if CHIP_HAS_WH64()
		/* postincrement operations */
		case IMM_0_OPCODE_X1:
			switch (get_ImmOpcodeExtension_X1(bundle)) {
			case LWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 4;
				break;

			case LHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 1;
				break;

			case LHADD_U_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 0;
				break;

			case SWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 4;
				break;

			case SHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 2;
				break;

			default:
				break;
			}
			break;
#endif /* CHIP_HAS_WH64() */
		}

		if (state->update) {
			/*
			 * Get an available register.  We start with a
			 * bitmask with 1's for available registers.
			 * We truncate to the low 32 registers since
			 * we are guaranteed to have set bits in the
			 * low 32 bits, then use ctz to pick the first.
			 */
			u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
					   (1ULL << get_SrcA_X0(bundle)) |
					   (1ULL << get_SrcB_X0(bundle)) |
					   (1ULL << target_reg));
			temp_reg = __builtin_ctz(mask);
			/* Save the clobbered register for later restore. */
			state->update_reg = temp_reg;
			state->update_value = regs->regs[temp_reg];
			regs->regs[temp_reg] = (unsigned long) (pc+1);
			regs->flags |= PT_FLAGS_RESTORE_REGS;
			bundle = move_X1(bundle, target_reg, temp_reg);
		}
	} else {
		/* Y-format bundle: only loads/stores need special handling. */
		int opcode = get_Opcode_Y2(bundle);

		switch (opcode) {
		/* loads */
		case LH_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 1;
			break;

		case LH_U_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 0;
			break;

		case LW_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 4;
			break;

		/* stores */
		case SH_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 2;
			break;

		case SW_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 4;
			break;
		}
	}

	/*
	 * Check if we need to rewrite an unaligned load/store.
	 * Returning zero is a special value meaning we need to SIGSEGV.
	 */
	if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
		bundle = rewrite_load_store_unaligned(state, bundle, regs,
						      mem_op, size, sign_ext);
		if (bundle == 0)
			return;
	}

	/* write the bundle to our execution area */
	buffer = state->buffer;
	err = __put_user(bundle, buffer++);

	/*
	 * If we're really single-stepping, we take an INT_ILL after.
	 * If we're just handling an unaligned access, we can just
	 * jump directly back to where we were in user code.
	 */
	if (is_single_step) {
		err |= __put_user(__single_step_ill_insn, buffer++);
		err |= __put_user(__single_step_ill_insn, buffer++);
	} else {
		long delta;

		if (state->update) {
			/* We have some state to update; do it inline */
			int ha16;
			/* addli temp, zero, lo16(value) ... */
			bundle = __single_step_addli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_Imm16_X1(state->update_value);
			err |= __put_user(bundle, buffer++);
			/* ... auli temp, temp, ha16(value). */
			bundle = __single_step_auli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_SrcA_X1(state->update_reg);
			ha16 = (state->update_value + 0x8000) >> 16;
			bundle |= create_Imm16_X1(ha16);
			err |= __put_user(bundle, buffer++);
			state->update = 0;
		}

		/* End with a jump back to the next instruction */
		delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
			(unsigned long)buffer) >>
			TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
		bundle = __single_step_j_insn;
		bundle |= create_JOffLong_X1(delta);
		err |= __put_user(bundle, buffer++);
	}

	if (err) {
		printk("Fault when writing to single-step buffer\n");
		return;
	}

	/*
	 * Flush the buffer.
	 * We do a local flush only, since this is a thread-specific buffer.
	 */
	__flush_icache_range((unsigned long) state->buffer,
			     (unsigned long) buffer);

	/* Indicate enabled */
	state->is_enabled = is_single_step;
	regs->pc = (unsigned long) state->buffer;

	/* Fault immediately if we are coming back from a syscall. */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc += 8;
}
  579. #endif /* !__tilegx__ */