backtrace.c
  1. /*
  2. * Copyright 2010 Tilera Corporation. All Rights Reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation, version 2.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  11. * NON INFRINGEMENT. See the GNU General Public License for
  12. * more details.
  13. */
  14. #include <linux/kernel.h>
  15. #include <linux/string.h>
  16. #include <asm/backtrace.h>
  17. #include <arch/chip.h>
  18. #if TILE_CHIP < 10
  19. #include <asm/opcode-tile.h>
  20. #define TREG_SP 54
  21. #define TREG_LR 55
/** A decoded bundle used for backtracer analysis. */
typedef struct {
	/* The raw bits of the bundle as read from memory. */
	tile_bundle_bits bits;

	/* Number of valid entries in insns[], as returned by
	 * parse_insn_tile().
	 */
	int num_insns;

	/* The decoded instructions packed into this bundle. */
	struct tile_decoded_instruction
	insns[TILE_MAX_INSTRUCTIONS_PER_BUNDLE];
} BacktraceBundle;
  29. /* This implementation only makes sense for native tools. */
  30. /** Default function to read memory. */
  31. static bool
  32. bt_read_memory(void *result, VirtualAddress addr, size_t size, void *extra)
  33. {
  34. /* FIXME: this should do some horrible signal stuff to catch
  35. * SEGV cleanly and fail.
  36. *
  37. * Or else the caller should do the setjmp for efficiency.
  38. */
  39. memcpy(result, (const void *)addr, size);
  40. return true;
  41. }
  42. /** Locates an instruction inside the given bundle that
  43. * has the specified mnemonic, and whose first 'num_operands_to_match'
  44. * operands exactly match those in 'operand_values'.
  45. */
  46. static const struct tile_decoded_instruction*
  47. find_matching_insn(const BacktraceBundle *bundle,
  48. tile_mnemonic mnemonic,
  49. const int *operand_values,
  50. int num_operands_to_match)
  51. {
  52. int i, j;
  53. bool match;
  54. for (i = 0; i < bundle->num_insns; i++) {
  55. const struct tile_decoded_instruction *insn =
  56. &bundle->insns[i];
  57. if (insn->opcode->mnemonic != mnemonic)
  58. continue;
  59. match = true;
  60. for (j = 0; j < num_operands_to_match; j++) {
  61. if (operand_values[j] != insn->operand_values[j]) {
  62. match = false;
  63. break;
  64. }
  65. }
  66. if (match)
  67. return insn;
  68. }
  69. return NULL;
  70. }
  71. /** Does this bundle contain an 'iret' instruction? */
  72. static inline bool
  73. bt_has_iret(const BacktraceBundle *bundle)
  74. {
  75. return find_matching_insn(bundle, TILE_OPC_IRET, NULL, 0) != NULL;
  76. }
  77. /** Does this bundle contain an 'addi sp, sp, OFFSET' or
  78. * 'addli sp, sp, OFFSET' instruction, and if so, what is OFFSET?
  79. */
  80. static bool
  81. bt_has_addi_sp(const BacktraceBundle *bundle, int *adjust)
  82. {
  83. static const int vals[2] = { TREG_SP, TREG_SP };
  84. const struct tile_decoded_instruction *insn =
  85. find_matching_insn(bundle, TILE_OPC_ADDI, vals, 2);
  86. if (insn == NULL)
  87. insn = find_matching_insn(bundle, TILE_OPC_ADDLI, vals, 2);
  88. if (insn == NULL)
  89. return false;
  90. *adjust = insn->operand_values[2];
  91. return true;
  92. }
  93. /** Does this bundle contain any 'info OP' or 'infol OP'
  94. * instruction, and if so, what are their OP? Note that OP is interpreted
  95. * as an unsigned value by this code since that's what the caller wants.
  96. * Returns the number of info ops found.
  97. */
  98. static int
  99. bt_get_info_ops(const BacktraceBundle *bundle,
  100. int operands[MAX_INFO_OPS_PER_BUNDLE])
  101. {
  102. int num_ops = 0;
  103. int i;
  104. for (i = 0; i < bundle->num_insns; i++) {
  105. const struct tile_decoded_instruction *insn =
  106. &bundle->insns[i];
  107. if (insn->opcode->mnemonic == TILE_OPC_INFO ||
  108. insn->opcode->mnemonic == TILE_OPC_INFOL) {
  109. operands[num_ops++] = insn->operand_values[0];
  110. }
  111. }
  112. return num_ops;
  113. }
  114. /** Does this bundle contain a jrp instruction, and if so, to which
  115. * register is it jumping?
  116. */
  117. static bool
  118. bt_has_jrp(const BacktraceBundle *bundle, int *target_reg)
  119. {
  120. const struct tile_decoded_instruction *insn =
  121. find_matching_insn(bundle, TILE_OPC_JRP, NULL, 0);
  122. if (insn == NULL)
  123. return false;
  124. *target_reg = insn->operand_values[0];
  125. return true;
  126. }
  127. /** Does this bundle modify the specified register in any way? */
  128. static bool
  129. bt_modifies_reg(const BacktraceBundle *bundle, int reg)
  130. {
  131. int i, j;
  132. for (i = 0; i < bundle->num_insns; i++) {
  133. const struct tile_decoded_instruction *insn =
  134. &bundle->insns[i];
  135. if (insn->opcode->implicitly_written_register == reg)
  136. return true;
  137. for (j = 0; j < insn->opcode->num_operands; j++)
  138. if (insn->operands[j]->is_dest_reg &&
  139. insn->operand_values[j] == reg)
  140. return true;
  141. }
  142. return false;
  143. }
/** Does this bundle modify sp?  (TREG_SP is register 54.) */
static inline bool
bt_modifies_sp(const BacktraceBundle *bundle)
{
	return bt_modifies_reg(bundle, TREG_SP);
}
/** Does this bundle modify lr?  (TREG_LR is register 55.) */
static inline bool
bt_modifies_lr(const BacktraceBundle *bundle)
{
	return bt_modifies_reg(bundle, TREG_LR);
}
  156. /** Does this bundle contain the instruction 'move fp, sp'? */
  157. static inline bool
  158. bt_has_move_r52_sp(const BacktraceBundle *bundle)
  159. {
  160. static const int vals[2] = { 52, TREG_SP };
  161. return find_matching_insn(bundle, TILE_OPC_MOVE, vals, 2) != NULL;
  162. }
  163. /** Does this bundle contain the instruction 'sw sp, lr'? */
  164. static inline bool
  165. bt_has_sw_sp_lr(const BacktraceBundle *bundle)
  166. {
  167. static const int vals[2] = { TREG_SP, TREG_LR };
  168. return find_matching_insn(bundle, TILE_OPC_SW, vals, 2) != NULL;
  169. }
/** Locates the caller's PC and SP for a program starting at the
 * given address.
 *
 * Scans instruction bundles forward from start_pc, interpreting
 * compiler-emitted "info" operands and recognizable prolog/epilog
 * patterns to decide where the caller's PC lives (in lr, or spilled to
 * the stack) and where the caller's SP can be recovered (at an offset
 * from sp, or saved in r52).  Results are written into *location.
 * Instruction memory is read via read_memory_func(...,
 * read_memory_func_extra).
 */
static void
find_caller_pc_and_caller_sp(CallerLocation *location,
			     const VirtualAddress start_pc,
			     BacktraceMemoryReader read_memory_func,
			     void *read_memory_func_extra)
{
	/* Have we explicitly decided what the sp is,
	 * rather than just the default?
	 */
	bool sp_determined = false;

	/* Has any bundle seen so far modified lr? */
	bool lr_modified = false;

	/* Have we seen a move from sp to fp? */
	bool sp_moved_to_r52 = false;

	/* Have we seen a terminating bundle? */
	bool seen_terminating_bundle = false;

	/* Cut down on round-trip reading overhead by reading several
	 * bundles at a time.
	 */
	tile_bundle_bits prefetched_bundles[32];
	int num_bundles_prefetched = 0;
	int next_bundle = 0;
	VirtualAddress pc;

	/* Default to assuming that the caller's sp is the current sp.
	 * This is necessary to handle the case where we start backtracing
	 * right at the end of the epilog.
	 */
	location->sp_location = SP_LOC_OFFSET;
	location->sp_offset = 0;

	/* Default to having no idea where the caller PC is. */
	location->pc_location = PC_LOC_UNKNOWN;

	/* Don't even try if the PC is not aligned. */
	if (start_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0)
		return;

	for (pc = start_pc;; pc += sizeof(tile_bundle_bits)) {

		BacktraceBundle bundle;
		int num_info_ops, info_operands[MAX_INFO_OPS_PER_BUNDLE];
		int one_ago, jrp_reg;
		bool has_jrp;

		if (next_bundle >= num_bundles_prefetched) {
			/* Prefetch some bytes, but don't cross a page
			 * boundary since that might cause a read failure we
			 * don't care about if we only need the first few
			 * bytes.  Note: we don't care what the actual page
			 * size is; using the minimum possible page size will
			 * prevent any problems.
			 */
			unsigned int bytes_to_prefetch = 4096 - (pc & 4095);
			if (bytes_to_prefetch > sizeof prefetched_bundles)
				bytes_to_prefetch = sizeof prefetched_bundles;

			if (!read_memory_func(prefetched_bundles, pc,
					      bytes_to_prefetch,
					      read_memory_func_extra)) {
				if (pc == start_pc) {
					/* The program probably called a bad
					 * address, such as a NULL pointer.
					 * So treat this as if we are at the
					 * start of the function prolog so the
					 * backtrace will show how we got here.
					 */
					location->pc_location = PC_LOC_IN_LR;
					return;
				}

				/* Unreadable address. Give up. */
				break;
			}

			next_bundle = 0;
			num_bundles_prefetched =
				bytes_to_prefetch / sizeof(tile_bundle_bits);
		}

		/* Decode the next bundle. */
		bundle.bits = prefetched_bundles[next_bundle++];
		bundle.num_insns =
			parse_insn_tile(bundle.bits, pc, bundle.insns);
		num_info_ops = bt_get_info_ops(&bundle, info_operands);

		/* First look at any one_ago info ops if they are interesting,
		 * since they should shadow any non-one-ago info ops.
		 * (The first bundle examined has no "previous" bundle, so
		 * one_ago starts at 0 in that case.)
		 */
		for (one_ago = (pc != start_pc) ? 1 : 0;
		     one_ago >= 0; one_ago--) {
			int i;
			for (i = 0; i < num_info_ops; i++) {
				int info_operand = info_operands[i];
				if (info_operand < CALLER_UNKNOWN_BASE) {
					/* Weird; reserved value, ignore it. */
					continue;
				}

				/* Skip info ops which are not in the
				 * "one_ago" mode we want right now.
				 */
				if (((info_operand & ONE_BUNDLE_AGO_FLAG) != 0)
				    != (one_ago != 0))
					continue;

				/* Clear the flag to make later checking
				 * easier. */
				info_operand &= ~ONE_BUNDLE_AGO_FLAG;

				/* Default to looking at PC_IN_LR_FLAG. */
				if (info_operand & PC_IN_LR_FLAG)
					location->pc_location =
						PC_LOC_IN_LR;
				else
					location->pc_location =
						PC_LOC_ON_STACK;

				switch (info_operand) {
				case CALLER_UNKNOWN_BASE:
					location->pc_location = PC_LOC_UNKNOWN;
					location->sp_location = SP_LOC_UNKNOWN;
					return;

				case CALLER_SP_IN_R52_BASE:
				case CALLER_SP_IN_R52_BASE | PC_IN_LR_FLAG:
					location->sp_location = SP_LOC_IN_R52;
					return;

				default:
				{
					/* Remaining encodings carry the SP
					 * offset (in units of 8 bytes) above
					 * the flag bits.
					 */
					const unsigned int val = info_operand
						- CALLER_SP_OFFSET_BASE;
					const unsigned int sp_offset =
						(val >> NUM_INFO_OP_FLAGS) * 8;
					if (sp_offset < 32768) {
						/* This is a properly encoded
						 * SP offset. */
						location->sp_location =
							SP_LOC_OFFSET;
						location->sp_offset =
							sp_offset;
						return;
					} else {
						/* This looked like an SP
						 * offset, but it's outside
						 * the legal range, so this
						 * must be an unrecognized
						 * info operand.  Ignore it.
						 */
					}
				}
				break;
				}
			}
		}

		if (seen_terminating_bundle) {
			/* We saw a terminating bundle during the previous
			 * iteration, so we were only looking for an info op.
			 */
			break;
		}

		if (bundle.bits == 0) {
			/* Wacky terminating bundle. Stop looping, and hope
			 * we've already seen enough to find the caller.
			 */
			break;
		}

		/*
		 * Try to determine caller's SP.
		 */

		if (!sp_determined) {
			int adjust;
			if (bt_has_addi_sp(&bundle, &adjust)) {
				location->sp_location = SP_LOC_OFFSET;

				if (adjust <= 0) {
					/* We are in prolog about to adjust
					 * SP. */
					location->sp_offset = 0;
				} else {
					/* We are in epilog restoring SP. */
					location->sp_offset = adjust;
				}

				sp_determined = true;
			} else {
				if (bt_has_move_r52_sp(&bundle)) {
					/* Maybe in prolog, creating an
					 * alloca-style frame.  But maybe in
					 * the middle of a fixed-size frame
					 * clobbering r52 with SP.
					 */
					sp_moved_to_r52 = true;
				}

				if (bt_modifies_sp(&bundle)) {
					if (sp_moved_to_r52) {
						/* We saw SP get saved into
						 * r52 earlier (or now), which
						 * must have been in the
						 * prolog, so we now know that
						 * SP is still holding the
						 * caller's sp value.
						 */
						location->sp_location =
							SP_LOC_OFFSET;
						location->sp_offset = 0;
					} else {
						/* Someone must have saved
						 * aside the caller's SP value
						 * into r52, so r52 holds the
						 * current value.
						 */
						location->sp_location =
							SP_LOC_IN_R52;
					}
					sp_determined = true;
				}
			}
		}

		if (bt_has_iret(&bundle)) {
			/* This is a terminating bundle. */
			seen_terminating_bundle = true;
			continue;
		}

		/*
		 * Try to determine caller's PC.
		 */

		jrp_reg = -1;
		has_jrp = bt_has_jrp(&bundle, &jrp_reg);
		if (has_jrp)
			seen_terminating_bundle = true;

		if (location->pc_location == PC_LOC_UNKNOWN) {
			if (has_jrp) {
				if (jrp_reg == TREG_LR && !lr_modified) {
					/* Looks like a leaf function, or else
					 * lr is already restored. */
					location->pc_location =
						PC_LOC_IN_LR;
				} else {
					location->pc_location =
						PC_LOC_ON_STACK;
				}
			} else if (bt_has_sw_sp_lr(&bundle)) {
				/* In prolog, spilling initial lr to stack. */
				location->pc_location = PC_LOC_IN_LR;
			} else if (bt_modifies_lr(&bundle)) {
				lr_modified = true;
			}
		}
	}
}
  406. void
  407. backtrace_init(BacktraceIterator *state,
  408. BacktraceMemoryReader read_memory_func,
  409. void *read_memory_func_extra,
  410. VirtualAddress pc, VirtualAddress lr,
  411. VirtualAddress sp, VirtualAddress r52)
  412. {
  413. CallerLocation location;
  414. VirtualAddress fp, initial_frame_caller_pc;
  415. if (read_memory_func == NULL) {
  416. read_memory_func = bt_read_memory;
  417. }
  418. /* Find out where we are in the initial frame. */
  419. find_caller_pc_and_caller_sp(&location, pc,
  420. read_memory_func, read_memory_func_extra);
  421. switch (location.sp_location) {
  422. case SP_LOC_UNKNOWN:
  423. /* Give up. */
  424. fp = -1;
  425. break;
  426. case SP_LOC_IN_R52:
  427. fp = r52;
  428. break;
  429. case SP_LOC_OFFSET:
  430. fp = sp + location.sp_offset;
  431. break;
  432. default:
  433. /* Give up. */
  434. fp = -1;
  435. break;
  436. }
  437. /* The frame pointer should theoretically be aligned mod 8. If
  438. * it's not even aligned mod 4 then something terrible happened
  439. * and we should mark it as invalid.
  440. */
  441. if (fp % 4 != 0)
  442. fp = -1;
  443. /* -1 means "don't know initial_frame_caller_pc". */
  444. initial_frame_caller_pc = -1;
  445. switch (location.pc_location) {
  446. case PC_LOC_UNKNOWN:
  447. /* Give up. */
  448. fp = -1;
  449. break;
  450. case PC_LOC_IN_LR:
  451. if (lr == 0 || lr % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
  452. /* Give up. */
  453. fp = -1;
  454. } else {
  455. initial_frame_caller_pc = lr;
  456. }
  457. break;
  458. case PC_LOC_ON_STACK:
  459. /* Leave initial_frame_caller_pc as -1,
  460. * meaning check the stack.
  461. */
  462. break;
  463. default:
  464. /* Give up. */
  465. fp = -1;
  466. break;
  467. }
  468. state->pc = pc;
  469. state->sp = sp;
  470. state->fp = fp;
  471. state->initial_frame_caller_pc = initial_frame_caller_pc;
  472. state->read_memory_func = read_memory_func;
  473. state->read_memory_func_extra = read_memory_func_extra;
  474. }
  475. bool
  476. backtrace_next(BacktraceIterator *state)
  477. {
  478. VirtualAddress next_fp, next_pc, next_frame[2];
  479. if (state->fp == -1) {
  480. /* No parent frame. */
  481. return false;
  482. }
  483. /* Try to read the frame linkage data chaining to the next function. */
  484. if (!state->read_memory_func(&next_frame, state->fp, sizeof next_frame,
  485. state->read_memory_func_extra)) {
  486. return false;
  487. }
  488. next_fp = next_frame[1];
  489. if (next_fp % 4 != 0) {
  490. /* Caller's frame pointer is suspect, so give up.
  491. * Technically it should be aligned mod 8, but we will
  492. * be forgiving here.
  493. */
  494. return false;
  495. }
  496. if (state->initial_frame_caller_pc != -1) {
  497. /* We must be in the initial stack frame and already know the
  498. * caller PC.
  499. */
  500. next_pc = state->initial_frame_caller_pc;
  501. /* Force reading stack next time, in case we were in the
  502. * initial frame. We don't do this above just to paranoidly
  503. * avoid changing the struct at all when we return false.
  504. */
  505. state->initial_frame_caller_pc = -1;
  506. } else {
  507. /* Get the caller PC from the frame linkage area. */
  508. next_pc = next_frame[0];
  509. if (next_pc == 0 ||
  510. next_pc % TILE_BUNDLE_ALIGNMENT_IN_BYTES != 0) {
  511. /* The PC is suspect, so give up. */
  512. return false;
  513. }
  514. }
  515. /* Update state to become the caller's stack frame. */
  516. state->pc = next_pc;
  517. state->sp = state->fp;
  518. state->fp = next_fp;
  519. return true;
  520. }
  521. #else /* TILE_CHIP < 10 */
  522. void
  523. backtrace_init(BacktraceIterator *state,
  524. BacktraceMemoryReader read_memory_func,
  525. void *read_memory_func_extra,
  526. VirtualAddress pc, VirtualAddress lr,
  527. VirtualAddress sp, VirtualAddress r52)
  528. {
  529. state->pc = pc;
  530. state->sp = sp;
  531. state->fp = -1;
  532. state->initial_frame_caller_pc = -1;
  533. state->read_memory_func = read_memory_func;
  534. state->read_memory_func_extra = read_memory_func_extra;
  535. }
  536. bool backtrace_next(BacktraceIterator *state) { return false; }
  537. #endif /* TILE_CHIP < 10 */