backtrace.c
  1. /*
  2. * Copyright 2010 Tilera Corporation. All Rights Reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation, version 2.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  11. * NON INFRINGEMENT. See the GNU General Public License for
  12. * more details.
  13. */
  14. #include <linux/kernel.h>
  15. #include <linux/string.h>
  16. #include <asm/backtrace.h>
  17. #include <arch/chip.h>
  18. #if TILE_CHIP < 10
  19. #include <asm/opcode-tile.h>
  20. #define TREG_SP 54
  21. #define TREG_LR 55
/** A decoded bundle used for backtracer analysis. */
struct BacktraceBundle {
	/* Raw encoded bits of this bundle as read from memory. */
	tile_bundle_bits bits;
	/* Number of valid entries in insns[], as returned by
	 * parse_insn_tile().
	 */
	int num_insns;
	/* The decoded instructions of the bundle. */
	struct tile_decoded_instruction
	insns[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
};
  29. /* This implementation only makes sense for native tools. */
  30. /** Default function to read memory. */
  31. static bool bt_read_memory(void *result, VirtualAddress addr,
  32. size_t size, void *extra)
  33. {
  34. /* FIXME: this should do some horrible signal stuff to catch
  35. * SEGV cleanly and fail.
  36. *
  37. * Or else the caller should do the setjmp for efficiency.
  38. */
  39. memcpy(result, (const void *)addr, size);
  40. return true;
  41. }
  42. /** Locates an instruction inside the given bundle that
  43. * has the specified mnemonic, and whose first 'num_operands_to_match'
  44. * operands exactly match those in 'operand_values'.
  45. */
  46. static const struct tile_decoded_instruction *find_matching_insn(
  47. const struct BacktraceBundle *bundle,
  48. tile_mnemonic mnemonic,
  49. const int *operand_values,
  50. int num_operands_to_match)
  51. {
  52. int i, j;
  53. bool match;
  54. for (i = 0; i < bundle->num_insns; i++) {
  55. const struct tile_decoded_instruction *insn =
  56. &bundle->insns[i];
  57. if (insn->opcode->mnemonic != mnemonic)
  58. continue;
  59. match = true;
  60. for (j = 0; j < num_operands_to_match; j++) {
  61. if (operand_values[j] != insn->operand_values[j]) {
  62. match = false;
  63. break;
  64. }
  65. }
  66. if (match)
  67. return insn;
  68. }
  69. return NULL;
  70. }
  71. /** Does this bundle contain an 'iret' instruction? */
  72. static inline bool bt_has_iret(const struct BacktraceBundle *bundle)
  73. {
  74. return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL;
  75. }
  76. /** Does this bundle contain an 'addi sp, sp, OFFSET' or
  77. * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET?
  78. */
  79. static bool bt_has_addi_sp(const struct BacktraceBundle *bundle, int *adjust)
  80. {
  81. static const int vals[2] = { TREG_SP, TREG_SP };
  82. const struct tile_decoded_instruction *insn =
  83. find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2);
  84. if (insn == NULL)
  85. insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2);
  86. if (insn == NULL)
  87. return false;
  88. *adjust = insn->operand_values[2];
  89. return true;
  90. }
  91. /** Does this bundle contain any 'info OP' or 'infol OP'
  92. * instruction, and if so, what are their OP? Note that OP is interpreted
  93. * as an unsigned value by this code since that's what the caller wants.
  94. * Returns the number of info ops found.
  95. */
  96. static int bt_get_info_ops(const struct BacktraceBundle *bundle,
  97. int operands[MAX_INFO_OPS_PER_BUNDLE])
  98. {
  99. int num_ops = 0;
  100. int i;
  101. for (i = 0; i < bundle->num_insns; i++) {
  102. const struct tile_decoded_instruction *insn =
  103. &bundle->insns[i];
  104. if (insn->opcode->mnemonic == TILE_OPC_INFO ||
  105. insn->opcode->mnemonic == TILE_OPC_INFOL) {
  106. operands[num_ops++] = insn->operand_values[0];
  107. }
  108. }
  109. return num_ops;
  110. }
  111. /** Does this bundle contain a jrp instruction, and if so, to which
  112. * register is it jumping?
  113. */
  114. static bool bt_has_jrp(const struct BacktraceBundle *bundle, int *target_reg)
  115. {
  116. const struct tile_decoded_instruction *insn =
  117. find_matching_insn(bundle, TILE_OPC_JRP, NULL, 0);
  118. if (insn == NULL)
  119. return false;
  120. *target_reg = insn->operand_values[0];
  121. return true;
  122. }
  123. /** Does this bundle modify the specified register in any way? */
  124. static bool bt_modifies_reg(const struct BacktraceBundle *bundle, int reg)
  125. {
  126. int i, j;
  127. for (i = 0; i < bundle->num_insns; i++) {
  128. const struct tile_decoded_instruction *insn =
  129. &bundle->insns[i];
  130. if (insn->opcode->implicitly_written_register == reg)
  131. return true;
  132. for (j = 0; j < insn->opcode->num_operands; j++)
  133. if (insn->operands[j]->is_dest_reg &&
  134. insn->operand_values[j] == reg)
  135. return true;
  136. }
  137. return false;
  138. }
  139. /** Does this bundle modify sp? */
  140. static inline bool bt_modifies_sp(const struct BacktraceBundle *bundle)
  141. {
  142. return bt_modifies_reg(bundle, TREG_SP);
  143. }
  144. /** Does this bundle modify lr? */
  145. static inline bool bt_modifies_lr(const struct BacktraceBundle *bundle)
  146. {
  147. return bt_modifies_reg(bundle, TREG_LR);
  148. }
  149. /** Does this bundle contain the instruction 'move fp, sp'? */
  150. static inline bool bt_has_move_r52_sp(const struct BacktraceBundle *bundle)
  151. {
  152. static const int vals[2] = { 52, TREG_SP };
  153. return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
  154. }
  155. /** Does this bundle contain the instruction 'sw sp, lr'? */
  156. static inline bool bt_has_sw_sp_lr(const struct BacktraceBundle *bundle)
  157. {
  158. static const int vals[2] = { TREG_SP, TREG_LR };
  159. return find_matching_insn(bundle, TILE_OPC_SW, vals, 2) != NULL;
  160. }
/** Locates the caller's PC and SP for a program starting at the
 * given address.
 *
 * Walks forward one bundle at a time from start_pc, classifying each
 * bundle (info ops, SP adjustments, lr spills, jrp/iret terminators)
 * until it can decide where the caller's PC (in lr, on the stack, or
 * unknown) and the caller's SP (offset from current SP, in r52, or
 * unknown) live.  The result is written into *location; on entry the
 * location defaults to "SP unchanged, PC unknown".
 */
static void find_caller_pc_and_caller_sp(CallerLocation *location,
					 const VirtualAddress start_pc,
					 BacktraceMemoryReader read_memory_func,
					 void *read_memory_func_extra)
{
	/* Have we explicitly decided what the sp is,
	 * rather than just the default?
	 */
	bool sp_determined = false;

	/* Has any bundle seen so far modified lr? */
	bool lr_modified = false;

	/* Have we seen a move from sp to fp? */
	bool sp_moved_to_r52 = false;

	/* Have we seen a terminating bundle? */
	bool seen_terminating_bundle = false;

	/* Cut down on round-trip reading overhead by reading several
	 * bundles at a time.
	 */
	tile_bundle_bits prefetched_bundles[32];
	int num_bundles_prefetched = 0;
	int next_bundle = 0;
	VirtualAddress pc;

	/* Default to assuming that the caller's sp is the current sp.
	 * This is necessary to handle the case where we start backtracing
	 * right at the end of the epilog.
	 */
	location->sp_location = SP_LOC_OFFSET;
	location->sp_offset = 0;

	/* Default to having no idea where the caller PC is. */
	location->pc_location = PC_LOC_UNKNOWN;

	/* Don't even try if the PC is not aligned. */
	if (start_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0)
		return;

	for (pc = start_pc;; pc += sizeof(tile_bundle_bits)) {

		struct BacktraceBundle bundle;
		int num_info_ops, info_operands[MAX_INFO_OPS_PER_BUNDLE];
		int one_ago, jrp_reg;
		bool has_jrp;

		/* Refill the prefetch window when it runs dry. */
		if (next_bundle >= num_bundles_prefetched) {
			/* Prefetch some bytes, but don't cross a page
			 * boundary since that might cause a read failure we
			 * don't care about if we only need the first few
			 * bytes.  Note: we don't care what the actual page
			 * size is; using the minimum possible page size will
			 * prevent any problems.
			 */
			unsigned int bytes_to_prefetch = 4096 - (pc & 4095);
			if (bytes_to_prefetch > sizeof prefetched_bundles)
				bytes_to_prefetch = sizeof prefetched_bundles;

			if (!read_memory_func(prefetched_bundles, pc,
					      bytes_to_prefetch,
					      read_memory_func_extra)) {
				if (pc == start_pc) {
					/* The program probably called a bad
					 * address, such as a NULL pointer.
					 * So treat this as if we are at the
					 * start of the function prolog so the
					 * backtrace will show how we got here.
					 */
					location->pc_location = PC_LOC_IN_LR;
					return;
				}

				/* Unreadable address.  Give up. */
				break;
			}

			next_bundle = 0;
			num_bundles_prefetched =
				bytes_to_prefetch / sizeof(tile_bundle_bits);
		}

		/* Decode the next bundle. */
		bundle.bits = prefetched_bundles[next_bundle++];
		bundle.num_insns =
			parse_insn_tile(bundle.bits, pc, bundle.insns);
		num_info_ops = bt_get_info_ops(&bundle, info_operands);

		/* First look at any one_ago info ops if they are interesting,
		 * since they should shadow any non-one-ago info ops.
		 * (one_ago starts at 1 unless this is the very first bundle,
		 * whose "one ago" annotations describe a different PC.)
		 */
		for (one_ago = (pc != start_pc) ? 1 : 0;
		     one_ago >= 0; one_ago--) {
			int i;
			for (i = 0; i < num_info_ops; i++) {
				int info_operand = info_operands[i];
				if (info_operand < CALLER_UNKNOWN_BASE) {
					/* Weird; reserved value, ignore it. */
					continue;
				}

				/* Skip info ops which are not in the
				 * "one_ago" mode we want right now.
				 */
				if (((info_operand & ONE_BUNDLE_AGO_FLAG) != 0)
				    != (one_ago != 0))
					continue;

				/* Clear the flag to make later checking
				 * easier. */
				info_operand &= ~ONE_BUNDLE_AGO_FLAG;

				/* Default to looking at PC_IN_LR_FLAG. */
				if (info_operand & PC_IN_LR_FLAG)
					location->pc_location =
						PC_LOC_IN_LR;
				else
					location->pc_location =
						PC_LOC_ON_STACK;

				switch (info_operand) {
				case CALLER_UNKNOWN_BASE:
					location->pc_location = PC_LOC_UNKNOWN;
					location->sp_location = SP_LOC_UNKNOWN;
					return;

				case CALLER_SP_IN_R52_BASE:
				case CALLER_SP_IN_R52_BASE | PC_IN_LR_FLAG:
					location->sp_location = SP_LOC_IN_R52;
					return;

				default:
				{
					/* Remaining encodings carry an SP
					 * offset in the high bits, scaled
					 * by 8 bytes.
					 */
					const unsigned int val = info_operand
						- CALLER_SP_OFFSET_BASE;
					const unsigned int sp_offset =
						(val >> NUM_INFO_OP_FLAGS) * 8;
					if (sp_offset < 32768) {
						/* This is a properly encoded
						 * SP offset. */
						location->sp_location =
							SP_LOC_OFFSET;
						location->sp_offset =
							sp_offset;
						return;
					} else {
						/* This looked like an SP
						 * offset, but it's outside
						 * the legal range, so this
						 * must be an unrecognized
						 * info operand.  Ignore it.
						 */
					}
				}
					break;
				}
			}
		}

		if (seen_terminating_bundle) {
			/* We saw a terminating bundle during the previous
			 * iteration, so we were only looking for an info op.
			 */
			break;
		}

		if (bundle.bits == 0) {
			/* Wacky terminating bundle.  Stop looping, and hope
			 * we've already seen enough to find the caller.
			 */
			break;
		}

		/*
		 * Try to determine caller's SP.
		 */

		if (!sp_determined) {
			int adjust;
			if (bt_has_addi_sp(&bundle, &adjust)) {
				location->sp_location = SP_LOC_OFFSET;

				if (adjust <= 0) {
					/* We are in prolog about to adjust
					 * SP. */
					location->sp_offset = 0;
				} else {
					/* We are in epilog restoring SP. */
					location->sp_offset = adjust;
				}

				sp_determined = true;
			} else {
				if (bt_has_move_r52_sp(&bundle)) {
					/* Maybe in prolog, creating an
					 * alloca-style frame.  But maybe in
					 * the middle of a fixed-size frame
					 * clobbering r52 with SP.
					 */
					sp_moved_to_r52 = true;
				}

				if (bt_modifies_sp(&bundle)) {
					if (sp_moved_to_r52) {
						/* We saw SP get saved into
						 * r52 earlier (or now), which
						 * must have been in the
						 * prolog, so we now know that
						 * SP is still holding the
						 * caller's sp value.
						 */
						location->sp_location =
							SP_LOC_OFFSET;
						location->sp_offset = 0;
					} else {
						/* Someone must have saved
						 * aside the caller's SP value
						 * into r52, so r52 holds the
						 * current value.
						 */
						location->sp_location =
							SP_LOC_IN_R52;
					}
					sp_determined = true;
				}
			}
		}

		if (bt_has_iret(&bundle)) {
			/* This is a terminating bundle. */
			seen_terminating_bundle = true;
			continue;
		}

		/*
		 * Try to determine caller's PC.
		 */

		jrp_reg = -1;
		has_jrp = bt_has_jrp(&bundle, &jrp_reg);
		if (has_jrp)
			seen_terminating_bundle = true;

		if (location->pc_location == PC_LOC_UNKNOWN) {
			if (has_jrp) {
				if (jrp_reg == TREG_LR && !lr_modified) {
					/* Looks like a leaf function, or else
					 * lr is already restored. */
					location->pc_location =
						PC_LOC_IN_LR;
				} else {
					location->pc_location =
						PC_LOC_ON_STACK;
				}
			} else if (bt_has_sw_sp_lr(&bundle)) {
				/* In prolog, spilling initial lr to stack. */
				location->pc_location = PC_LOC_IN_LR;
			} else if (bt_modifies_lr(&bundle)) {
				lr_modified = true;
			}
		}
	}
}
  396. void backtrace_init(BacktraceIterator *state,
  397. BacktraceMemoryReader read_memory_func,
  398. void *read_memory_func_extra,
  399. VirtualAddress pc, VirtualAddress lr,
  400. VirtualAddress sp, VirtualAddress r52)
  401. {
  402. CallerLocation location;
  403. VirtualAddress fp, initial_frame_caller_pc;
  404. if (read_memory_func == NULL) {
  405. read_memory_func = bt_read_memory;
  406. }
  407. /* Find out where we are in the initial frame. */
  408. find_caller_pc_and_caller_sp(&location, pc,
  409. read_memory_func, read_memory_func_extra);
  410. switch (location.sp_location) {
  411. case SP_LOC_UNKNOWN:
  412. /* Give up. */
  413. fp = -1;
  414. break;
  415. case SP_LOC_IN_R52:
  416. fp = r52;
  417. break;
  418. case SP_LOC_OFFSET:
  419. fp = sp + location.sp_offset;
  420. break;
  421. default:
  422. /* Give up. */
  423. fp = -1;
  424. break;
  425. }
  426. /* The frame pointer should theoretically be aligned mod 8. If
  427. * it's not even aligned mod 4 then something terrible happened
  428. * and we should mark it as invalid.
  429. */
  430. if (fp % 4 != 0)
  431. fp = -1;
  432. /* -1 means "don't know initial_frame_caller_pc". */
  433. initial_frame_caller_pc = -1;
  434. switch (location.pc_location) {
  435. case PC_LOC_UNKNOWN:
  436. /* Give up. */
  437. fp = -1;
  438. break;
  439. case PC_LOC_IN_LR:
  440. if (lr == 0 || lr % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
  441. /* Give up. */
  442. fp = -1;
  443. } else {
  444. initial_frame_caller_pc = lr;
  445. }
  446. break;
  447. case PC_LOC_ON_STACK:
  448. /* Leave initial_frame_caller_pc as -1,
  449. * meaning check the stack.
  450. */
  451. break;
  452. default:
  453. /* Give up. */
  454. fp = -1;
  455. break;
  456. }
  457. state->pc = pc;
  458. state->sp = sp;
  459. state->fp = fp;
  460. state->initial_frame_caller_pc = initial_frame_caller_pc;
  461. state->read_memory_func = read_memory_func;
  462. state->read_memory_func_extra = read_memory_func_extra;
  463. }
  464. bool backtrace_next(BacktraceIterator *state)
  465. {
  466. VirtualAddress next_fp, next_pc, next_frame[2];
  467. if (state->fp == -1) {
  468. /* No parent frame. */
  469. return false;
  470. }
  471. /* Try to read the frame linkage data chaining to the next function. */
  472. if (!state->read_memory_func(&next_frame, state->fp, sizeof next_frame,
  473. state->read_memory_func_extra)) {
  474. return false;
  475. }
  476. next_fp = next_frame[1];
  477. if (next_fp % 4 != 0) {
  478. /* Caller's frame pointer is suspect, so give up.
  479. * Technically it should be aligned mod 8, but we will
  480. * be forgiving here.
  481. */
  482. return false;
  483. }
  484. if (state->initial_frame_caller_pc != -1) {
  485. /* We must be in the initial stack frame and already know the
  486. * caller PC.
  487. */
  488. next_pc = state->initial_frame_caller_pc;
  489. /* Force reading stack next time, in case we were in the
  490. * initial frame. We don't do this above just to paranoidly
  491. * avoid changing the struct at all when we return false.
  492. */
  493. state->initial_frame_caller_pc = -1;
  494. } else {
  495. /* Get the caller PC from the frame linkage area. */
  496. next_pc = next_frame[0];
  497. if (next_pc == 0 ||
  498. next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
  499. /* The PC is suspect, so give up. */
  500. return false;
  501. }
  502. }
  503. /* Update state to become the caller's stack frame. */
  504. state->pc = next_pc;
  505. state->sp = state->fp;
  506. state->fp = next_fp;
  507. return true;
  508. }
  509. #else /* TILE_CHIP < 10 */
  510. void backtrace_init(BacktraceIterator *state,
  511. BacktraceMemoryReader read_memory_func,
  512. void *read_memory_func_extra,
  513. VirtualAddress pc, VirtualAddress lr,
  514. VirtualAddress sp, VirtualAddress r52)
  515. {
  516. state->pc = pc;
  517. state->sp = sp;
  518. state->fp = -1;
  519. state->initial_frame_caller_pc = -1;
  520. state->read_memory_func = read_memory_func;
  521. state->read_memory_func_extra = read_memory_func_extra;
  522. }
  523. bool backtrace_next(BacktraceIterator *state) { return false; }
  524. #endif /* TILE_CHIP < 10 */