unwind.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184
  1. /*
  2. * Copyright (C) 2002-2006 Novell, Inc.
  3. * Jan Beulich <jbeulich@novell.com>
  4. * This code is released under version 2 of the GNU GPL.
  5. *
  6. * A simple API for unwinding kernel stacks. This is used for
  7. * debugging and error reporting purposes. The kernel doesn't need
  8. * full-blown stack unwinding with all the bells and whistles, so there
  9. * is not much point in implementing the full Dwarf2 unwind API.
  10. */
  11. #include <linux/unwind.h>
  12. #include <linux/module.h>
  13. #include <linux/bootmem.h>
  14. #include <linux/sort.h>
  15. #include <linux/stop_machine.h>
  16. #include <linux/uaccess.h>
  17. #include <asm/sections.h>
  18. #include <asm/uaccess.h>
  19. #include <asm/unaligned.h>
/* Bounds of the linker-gathered .eh_frame data and its search header. */
extern char __start_unwind[], __end_unwind[];
extern const u8 __start_unwind_hdr[], __end_unwind_hdr[];

/* Maximum nesting of DW_CFA_remember_state while interpreting CFI. */
#define MAX_STACK_DEPTH 8

/*
 * Describe where a register lives inside struct unwind_frame_info:
 * the field's index in units of its own size, and its width in bytes.
 * BUILD_BUG_ON_ZERO() rejects fields not naturally aligned to their size.
 */
#define EXTRA_INFO(f) { \
	BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
	                  % FIELD_SIZEOF(struct unwind_frame_info, f)) \
	+ offsetof(struct unwind_frame_info, f) \
	  / FIELD_SIZEOF(struct unwind_frame_info, f), \
	FIELD_SIZEOF(struct unwind_frame_info, f) \
}
#define PTREGS_INFO(f) EXTRA_INFO(regs.f)

/* DWARF register number -> frame field mapping, provided per-arch. */
static const struct {
	unsigned offs:BITS_PER_LONG / 2;
	unsigned width:BITS_PER_LONG / 2;
} reg_info[] = {
	UNW_REGISTER_INFO
};

#undef PTREGS_INFO
#undef EXTRA_INFO

/* A zero width marks DWARF register numbers this architecture doesn't map. */
#ifndef REG_INVALID
#define REG_INVALID(r) (reg_info[r].width == 0)
#endif

/* DWARF Call Frame Instruction opcodes (DWARF 2/3 plus GNU extensions). */
#define DW_CFA_nop 0x00
#define DW_CFA_set_loc 0x01
#define DW_CFA_advance_loc1 0x02
#define DW_CFA_advance_loc2 0x03
#define DW_CFA_advance_loc4 0x04
#define DW_CFA_offset_extended 0x05
#define DW_CFA_restore_extended 0x06
#define DW_CFA_undefined 0x07
#define DW_CFA_same_value 0x08
#define DW_CFA_register 0x09
#define DW_CFA_remember_state 0x0a
#define DW_CFA_restore_state 0x0b
#define DW_CFA_def_cfa 0x0c
#define DW_CFA_def_cfa_register 0x0d
#define DW_CFA_def_cfa_offset 0x0e
#define DW_CFA_def_cfa_expression 0x0f
#define DW_CFA_expression 0x10
#define DW_CFA_offset_extended_sf 0x11
#define DW_CFA_def_cfa_sf 0x12
#define DW_CFA_def_cfa_offset_sf 0x13
#define DW_CFA_val_offset 0x14
#define DW_CFA_val_offset_sf 0x15
#define DW_CFA_val_expression 0x16
#define DW_CFA_lo_user 0x1c
#define DW_CFA_GNU_window_save 0x2d
#define DW_CFA_GNU_args_size 0x2e
#define DW_CFA_GNU_negative_offset_extended 0x2f
#define DW_CFA_hi_user 0x3f

/* .eh_frame pointer encodings: low nibble = data form, high = adjustment. */
#define DW_EH_PE_FORM 0x07
#define DW_EH_PE_native 0x00
#define DW_EH_PE_leb128 0x01
#define DW_EH_PE_data2 0x02
#define DW_EH_PE_data4 0x03
#define DW_EH_PE_data8 0x04
#define DW_EH_PE_signed 0x08
#define DW_EH_PE_ADJUST 0x70
#define DW_EH_PE_abs 0x00
#define DW_EH_PE_pcrel 0x10
#define DW_EH_PE_textrel 0x20
#define DW_EH_PE_datarel 0x30
#define DW_EH_PE_funcrel 0x40
#define DW_EH_PE_aligned 0x50
#define DW_EH_PE_indirect 0x80
#define DW_EH_PE_omit 0xff

/* DWARF variable-length (LEB128) integers, decoded into a machine word. */
typedef unsigned long uleb128_t;
typedef signed long sleb128_t;

/* One registered .eh_frame section; root_table covers the core kernel. */
static struct unwind_table {
	struct {
		unsigned long pc;	/* start of covered code range */
		unsigned long range;	/* length of covered code range */
	} core, init;
	const void *address;		/* the .eh_frame data itself */
	unsigned long size;		/* size of that data in bytes */
	const unsigned char *header;	/* binary-search header, if valid */
	unsigned long hdrsz;		/* size of the header in bytes */
	struct unwind_table *link;	/* next table (module tables) */
	const char *name;
} root_table;

/* How one register is recovered when unwinding a frame. */
struct unwind_item {
	enum item_location {
		Nowhere,		/* no rule: not restorable */
		Memory,			/* saved at CFA + value * dataAlign */
		Register,		/* held in register number 'value' */
		Value			/* computed from CFA and 'value' */
	} where;
	uleb128_t value;
};

/* Interpreter state while executing the CFI program of a CIE/FDE pair. */
struct unwind_state {
	uleb128_t loc, org;		/* current and initial code location */
	const u8 *cieStart, *cieEnd;	/* CIE initial-instruction range */
	uleb128_t codeAlign;		/* code alignment factor (from CIE) */
	sleb128_t dataAlign;		/* data alignment factor (from CIE) */
	struct cfa {
		uleb128_t reg, offs;	/* CFA = register 'reg' + 'offs' */
	} cfa;
	struct unwind_item regs[ARRAY_SIZE(reg_info)];
	unsigned stackDepth:8;		/* DW_CFA_remember_state nesting */
	unsigned version:8;		/* CIE version (only 1 supported) */
	const u8 *label;		/* resume point for restore_state */
	const u8 *stack[MAX_STACK_DEPTH];
};

/* A CFA rule that can never validate (register out of range, odd offset). */
static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
  124. static struct unwind_table *find_table(unsigned long pc)
  125. {
  126. struct unwind_table *table;
  127. for (table = &root_table; table; table = table->link)
  128. if ((pc >= table->core.pc
  129. && pc < table->core.pc + table->core.range)
  130. || (pc >= table->init.pc
  131. && pc < table->init.pc + table->init.range))
  132. break;
  133. return table;
  134. }
static unsigned long read_pointer(const u8 **pLoc,
				  const void *end,
				  signed ptrType);

/*
 * Fill in @table for the code ranges [core_start, +core_size) and
 * [init_start, +init_size), backed by the .eh_frame data at
 * @table_start.  A linker-provided .eh_frame_hdr is accepted only after
 * sanity checks; otherwise table->header is left NULL so that
 * setup_unwind_table() may generate one later.
 */
static void init_unwind_table(struct unwind_table *table,
			      const char *name,
			      const void *core_start,
			      unsigned long core_size,
			      const void *init_start,
			      unsigned long init_size,
			      const void *table_start,
			      unsigned long table_size,
			      const u8 *header_start,
			      unsigned long header_size)
{
	/* skip the 4 leading encoding bytes of the .eh_frame_hdr */
	const u8 *ptr = header_start + 4;
	const u8 *end = header_start + header_size;

	table->core.pc = (unsigned long)core_start;
	table->core.range = core_size;
	table->init.pc = (unsigned long)init_start;
	table->init.range = init_size;
	table->address = table_start;
	table->size = table_size;
	/* See if the linker provided table looks valid. */
	if (header_size <= 4
	    || header_start[0] != 1	/* .eh_frame_hdr version must be 1 */
	    || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
	    || header_start[2] == DW_EH_PE_omit	/* fde_count encoding */
	    /* value is unsigned, so "<= 0" here only rejects a 0 count */
	    || read_pointer(&ptr, end, header_start[2]) <= 0
	    || header_start[3] == DW_EH_PE_omit)	/* table encoding */
		header_start = NULL;
	table->hdrsz = header_size;
	/* publish the size before the header pointer; readers pair smp_rmb() */
	smp_wmb();
	table->header = header_start;
	table->link = NULL;
	table->name = name;
}
  171. void __init unwind_init(void)
  172. {
  173. init_unwind_table(&root_table, "kernel",
  174. _text, _end - _text,
  175. NULL, 0,
  176. __start_unwind, __end_unwind - __start_unwind,
  177. __start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);
  178. }
/* Sentinels letting cie_for_fde() distinguish "corrupt entry" and
 * "entry is itself a CIE" from an ordinary NULL failure. */
static const u32 bad_cie, not_fde;
static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *);
static signed fde_pointer_type(const u32 *cie);

/* One binary-search entry of the (generated) .eh_frame_hdr table. */
struct eh_frame_hdr_table_entry {
	unsigned long start, fde;
};
  185. static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2)
  186. {
  187. const struct eh_frame_hdr_table_entry *e1 = p1;
  188. const struct eh_frame_hdr_table_entry *e2 = p2;
  189. return (e1->start > e2->start) - (e1->start < e2->start);
  190. }
  191. static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
  192. {
  193. struct eh_frame_hdr_table_entry *e1 = p1;
  194. struct eh_frame_hdr_table_entry *e2 = p2;
  195. unsigned long v;
  196. v = e1->start;
  197. e1->start = e2->start;
  198. e2->start = v;
  199. v = e1->fde;
  200. e1->fde = e2->fde;
  201. e2->fde = v;
  202. }
/*
 * Build a binary-search header (in .eh_frame_hdr format, version 1) for
 * @table when the linker didn't provide a usable one.  A first pass
 * validates every .eh_frame entry and counts the FDEs; a second pass
 * collects each FDE's (initial location, address) pair; the pairs are
 * then sorted by location.  @alloc supplies memory for the header.
 */
static void __init setup_unwind_table(struct unwind_table *table,
				      void *(*alloc)(unsigned long))
{
	const u8 *ptr;
	unsigned long tableSize = table->size, hdrSize;
	unsigned n;
	const u32 *fde;
	struct {
		u8 version;
		u8 eh_frame_ptr_enc;
		u8 fde_count_enc;
		u8 table_enc;
		unsigned long eh_frame_ptr;
		unsigned int fde_count;
		struct eh_frame_hdr_table_entry table[];
	} __attribute__((__packed__)) *header;

	if (table->header)
		return;		/* a usable header already exists */
	if (table->hdrsz)
		printk(KERN_WARNING ".eh_frame_hdr for '%s' present but unusable\n",
		       table->name);
	if (tableSize & (sizeof(*fde) - 1))
		return;		/* data size not a multiple of the length field */
	/* first pass: validate each entry and count the FDEs */
	for (fde = table->address, n = 0;
	     tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
	     tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
		const u32 *cie = cie_for_fde(fde, table);
		signed ptrType;

		if (cie == &not_fde)
			continue;	/* CIEs don't get search entries */
		if (cie == NULL
		    || cie == &bad_cie
		    || (ptrType = fde_pointer_type(cie)) < 0)
			return;		/* give up on any corruption */
		ptr = (const u8 *)(fde + 2);
		if (!read_pointer(&ptr,
				  (const u8 *)(fde + 1) + *fde,
				  ptrType))
			return;		/* initial location unreadable */
		++n;
	}

	if (tableSize || !n)
		return;		/* trailing garbage, or nothing to index */

	/* 4 encoding bytes + frame pointer + count + n (start, fde) pairs */
	hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
		+ 2 * n * sizeof(unsigned long);
	header = alloc(hdrSize);
	if (!header)
		return;
	header->version = 1;
	header->eh_frame_ptr_enc = DW_EH_PE_abs|DW_EH_PE_native;
	header->fde_count_enc = DW_EH_PE_abs|DW_EH_PE_data4;
	header->table_enc = DW_EH_PE_abs|DW_EH_PE_native;
	put_unaligned((unsigned long)table->address, &header->eh_frame_ptr);
	BUILD_BUG_ON(offsetof(typeof(*header), fde_count)
	             % __alignof(typeof(header->fde_count)));
	header->fde_count = n;

	BUILD_BUG_ON(offsetof(typeof(*header), table)
	             % __alignof(typeof(*header->table)));
	/* second pass: record each FDE's initial location and address */
	for (fde = table->address, tableSize = table->size, n = 0;
	     tableSize;
	     tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
		/* fde[1] is the self-relative backwards offset to the CIE */
		const u32 *cie = fde + 1 - fde[1] / sizeof(*fde);

		if (!fde[1])
			continue; /* this is a CIE */
		ptr = (const u8 *)(fde + 2);
		header->table[n].start = read_pointer(&ptr,
						      (const u8 *)(fde + 1) + *fde,
						      fde_pointer_type(cie));
		header->table[n].fde = (unsigned long)fde;
		++n;
	}
	WARN_ON(n != header->fde_count);

	sort(header->table,
	     n,
	     sizeof(*header->table),
	     cmp_eh_frame_hdr_table_entries,
	     swap_eh_frame_hdr_table_entries);

	table->hdrsz = hdrSize;
	/* publish the size before the header pointer; readers pair smp_rmb() */
	smp_wmb();
	table->header = (const void *)header;
}
/* Boot-time allocator callback for setup_unwind_table(). */
static void *__init balloc(unsigned long sz)
{
	return __alloc_bootmem_nopanic(sz,
				       sizeof(unsigned int),
				       __pa(MAX_DMA_ADDRESS));
}
/* Build the search header for the kernel's unwind table, if needed. */
void __init unwind_setup(void)
{
	setup_unwind_table(&root_table, balloc);
}
#ifdef CONFIG_MODULES

/* Tail of the table list, so module tables can be appended in O(1). */
static struct unwind_table *last_table;
  296. /* Must be called with module_mutex held. */
  297. void *unwind_add_table(struct module *module,
  298. const void *table_start,
  299. unsigned long table_size)
  300. {
  301. struct unwind_table *table;
  302. if (table_size <= 0)
  303. return NULL;
  304. table = kmalloc(sizeof(*table), GFP_KERNEL);
  305. if (!table)
  306. return NULL;
  307. init_unwind_table(table, module->name,
  308. module->module_core, module->core_size,
  309. module->module_init, module->init_size,
  310. table_start, table_size,
  311. NULL, 0);
  312. if (last_table)
  313. last_table->link = table;
  314. else
  315. root_table.link = table;
  316. last_table = table;
  317. return table;
  318. }
/* Arguments for unlink_table(), run via stop_machine_run(). */
struct unlink_table_info
{
	struct unwind_table *table;
	int init_only;		/* only forget the module's init range */
};

/*
 * Runs with all other CPUs stopped, so nobody can be walking the table
 * list concurrently.  Unlinks info->table (or just clears its init
 * range).  info->table is set to NULL whenever the caller must NOT
 * free the table afterwards.
 */
static int unlink_table(void *arg)
{
	struct unlink_table_info *info = arg;
	struct unwind_table *table = info->table, *prev;

	for (prev = &root_table; prev->link && prev->link != table; prev = prev->link)
		;

	if (prev->link) {
		if (info->init_only) {
			table->init.pc = 0;
			table->init.range = 0;
			info->table = NULL;	/* table stays allocated */
		} else {
			prev->link = table->link;
			if (!prev->link)
				last_table = prev;	/* removed the tail */
		}
	} else
		info->table = NULL;	/* not found: nothing to free */
	return 0;
}
/* Must be called with module_mutex held. */
void unwind_remove_table(void *handle, int init_only)
{
	struct unwind_table *table = handle;
	struct unlink_table_info info;

	if (!table || table == &root_table)
		return;		/* the kernel's own table is never removed */

	/* fast path: tail table and only the init range goes away */
	if (init_only && table == last_table) {
		table->init.pc = 0;
		table->init.range = 0;
		return;
	}

	info.table = table;
	info.init_only = init_only;
	/* quiesce all CPUs so no one walks the list while we unlink */
	stop_machine_run(unlink_table, &info, NR_CPUS);
	/* info.table stays non-NULL only when fully unlinked */
	if (info.table)
		kfree(table);
}
#endif /* CONFIG_MODULES */
/*
 * Decode an unsigned LEB128 value at *pcur without reading at or past
 * @end.  On return *pcur is advanced past the value; if the value would
 * overflow uleb128_t, *pcur is set past @end so callers can detect the
 * failure with a bounds check.
 */
static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
{
	const u8 *cur = *pcur;
	uleb128_t value;
	unsigned shift;

	for (shift = 0, value = 0; cur < end; shift += 7) {
		if (shift + 7 > 8 * sizeof(value)
		    && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
			/* would overflow: poison the cursor and stop */
			cur = end + 1;
			break;
		}
		value |= (uleb128_t)(*cur & 0x7f) << shift;
		if (!(*cur++ & 0x80))
			break;	/* high bit clear: this was the last byte */
	}
	*pcur = cur;
	return value;
}
/*
 * Decode a signed LEB128 value at *pcur without reading at or past
 * @end.  Mirrors get_uleb128(): overflow poisons *pcur past @end.
 */
static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
{
	const u8 *cur = *pcur;
	sleb128_t value;
	unsigned shift;

	for (shift = 0, value = 0; cur < end; shift += 7) {
		if (shift + 7 > 8 * sizeof(value)
		    && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
			/* would overflow: poison the cursor and stop */
			cur = end + 1;
			break;
		}
		value |= (sleb128_t)(*cur & 0x7f) << shift;
		if (!(*cur & 0x80)) {
			/* last byte: sign-extend from its bit 6 */
			value |= -(*cur++ & 0x40) << shift;
			break;
		}
	}
	*pcur = cur;
	return value;
}
/*
 * Map an .eh_frame entry to its CIE.  Returns &bad_cie for a corrupt
 * entry, &not_fde when @fde is itself a CIE, NULL when the CIE pointer
 * or the referenced CIE is invalid, and the CIE otherwise.
 */
static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
{
	const u32 *cie;

	if (!*fde || (*fde & (sizeof(*fde) - 1)))
		return &bad_cie;	/* zero or misaligned entry length */
	if (!fde[1])
		return &not_fde; /* this is a CIE */
	/* fde[1] is the self-relative backwards offset to the CIE; it must
	 * be aligned and stay within the table */
	if ((fde[1] & (sizeof(*fde) - 1))
	    || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address)
		return NULL; /* this is not a valid FDE */
	cie = fde + 1 - fde[1] / sizeof(*fde);
	if (*cie <= sizeof(*cie) + 4		/* too small to be a CIE */
	    || *cie >= fde[1] - sizeof(*fde)	/* would reach into the FDE */
	    || (*cie & (sizeof(*cie) - 1))	/* misaligned length */
	    || cie[1])				/* CIE id must be 0 */
		return NULL; /* this is not a (valid) CIE */
	return cie;
}
/*
 * Read one pointer encoded per @ptrType (a DW_EH_PE_* value) from
 * *pLoc, never reading at or past @end.  Applies pc-relative
 * adjustment and DW_EH_PE_indirect dereferencing as requested.  On
 * success *pLoc is advanced past the datum and the value is returned;
 * on any failure 0 is returned and *pLoc is left unchanged.
 */
static unsigned long read_pointer(const u8 **pLoc,
				  const void *end,
				  signed ptrType)
{
	unsigned long value = 0;
	union {
		const u8 *p8;
		const u16 *p16u;
		const s16 *p16s;
		const u32 *p32u;
		const s32 *p32s;
		const unsigned long *pul;
	} ptr;

	if (ptrType < 0 || ptrType == DW_EH_PE_omit)
		return 0;
	ptr.p8 = *pLoc;
	switch(ptrType & DW_EH_PE_FORM) {
	case DW_EH_PE_data2:
		if (end < (const void *)(ptr.p16u + 1))
			return 0;
		if(ptrType & DW_EH_PE_signed)
			value = get_unaligned(ptr.p16s++);
		else
			value = get_unaligned(ptr.p16u++);
		break;
	case DW_EH_PE_data4:
#ifdef CONFIG_64BIT
		if (end < (const void *)(ptr.p32u + 1))
			return 0;
		if(ptrType & DW_EH_PE_signed)
			value = get_unaligned(ptr.p32s++);
		else
			value = get_unaligned(ptr.p32u++);
		break;
	case DW_EH_PE_data8:
		BUILD_BUG_ON(sizeof(u64) != sizeof(value));
		/* fall through: data8 is the native width on 64-bit */
#else
		BUILD_BUG_ON(sizeof(u32) != sizeof(value));
		/* fall through: data4 is the native width on 32-bit */
#endif
	case DW_EH_PE_native:
		if (end < (const void *)(ptr.pul + 1))
			return 0;
		value = get_unaligned(ptr.pul++);
		break;
	case DW_EH_PE_leb128:
		BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value));
		value = ptrType & DW_EH_PE_signed
			? get_sleb128(&ptr.p8, end)
			: get_uleb128(&ptr.p8, end);
		if ((const void *)ptr.p8 > end)
			return 0;	/* LEB128 decode overran the bounds */
		break;
	default:
		return 0;
	}
	switch(ptrType & DW_EH_PE_ADJUST) {
	case DW_EH_PE_abs:
		break;
	case DW_EH_PE_pcrel:
		value += (unsigned long)*pLoc;	/* relative to encoding site */
		break;
	default:
		return 0;	/* text/data/func-relative unsupported here */
	}
	if ((ptrType & DW_EH_PE_indirect)
	    && probe_kernel_address((unsigned long *)value, value))
		return 0;	/* the pointed-to word is unreadable */
	*pLoc = ptr.p8;

	return value;
}
/*
 * Determine the pointer encoding (the 'R' augmentation) used by FDEs
 * referencing @cie.  Returns the DW_EH_PE_* value, or -1 when the CIE
 * is unsupported or malformed.  Without an augmentation string the
 * default is a native-width absolute pointer.
 */
static signed fde_pointer_type(const u32 *cie)
{
	const u8 *ptr = (const u8 *)(cie + 2);
	unsigned version = *ptr;

	if (version != 1)
		return -1; /* unsupported */
	if (*++ptr) {
		const char *aug;
		const u8 *end = (const u8 *)(cie + 1) + *cie;
		uleb128_t len;

		/* check if augmentation size is first (and thus present) */
		if (*ptr != 'z')
			return -1;
		/* check if augmentation string is nul-terminated */
		if ((ptr = memchr(aug = (const void *)ptr, 0, end - ptr)) == NULL)
			return -1;
		++ptr; /* skip terminator */
		get_uleb128(&ptr, end); /* skip code alignment */
		get_sleb128(&ptr, end); /* skip data alignment */
		/* skip return address column */
		version <= 1 ? (void)++ptr : (void)get_uleb128(&ptr, end);
		len = get_uleb128(&ptr, end); /* augmentation length */
		/* reject wrap-around or data running past the CIE */
		if (ptr + len < ptr || ptr + len > end)
			return -1;
		end = ptr + len;
		/* walk the augmentation string; 'z' itself already handled */
		while (*++aug) {
			if (ptr >= end)
				return -1;
			switch(*aug) {
			case 'L':	/* LSDA encoding: one byte */
				++ptr;
				break;
			case 'P': {	/* personality: encoding + pointer */
				signed ptrType = *ptr++;

				if (!read_pointer(&ptr, end, ptrType) || ptr > end)
					return -1;
				}
				break;
			case 'R':	/* FDE pointer encoding: what we want */
				return *ptr;
			default:
				return -1;
			}
		}
	}
	return DW_EH_PE_native|DW_EH_PE_abs;
}
  536. static int advance_loc(unsigned long delta, struct unwind_state *state)
  537. {
  538. state->loc += delta * state->codeAlign;
  539. return delta > 0;
  540. }
  541. static void set_rule(uleb128_t reg,
  542. enum item_location where,
  543. uleb128_t value,
  544. struct unwind_state *state)
  545. {
  546. if (reg < ARRAY_SIZE(state->regs)) {
  547. state->regs[reg].where = where;
  548. state->regs[reg].value = value;
  549. }
  550. }
  551. static int processCFI(const u8 *start,
  552. const u8 *end,
  553. unsigned long targetLoc,
  554. signed ptrType,
  555. struct unwind_state *state)
  556. {
  557. union {
  558. const u8 *p8;
  559. const u16 *p16;
  560. const u32 *p32;
  561. } ptr;
  562. int result = 1;
  563. if (start != state->cieStart) {
  564. state->loc = state->org;
  565. result = processCFI(state->cieStart, state->cieEnd, 0, ptrType, state);
  566. if (targetLoc == 0 && state->label == NULL)
  567. return result;
  568. }
  569. for (ptr.p8 = start; result && ptr.p8 < end; ) {
  570. switch(*ptr.p8 >> 6) {
  571. uleb128_t value;
  572. case 0:
  573. switch(*ptr.p8++) {
  574. case DW_CFA_nop:
  575. break;
  576. case DW_CFA_set_loc:
  577. if ((state->loc = read_pointer(&ptr.p8, end, ptrType)) == 0)
  578. result = 0;
  579. break;
  580. case DW_CFA_advance_loc1:
  581. result = ptr.p8 < end && advance_loc(*ptr.p8++, state);
  582. break;
  583. case DW_CFA_advance_loc2:
  584. result = ptr.p8 <= end + 2
  585. && advance_loc(*ptr.p16++, state);
  586. break;
  587. case DW_CFA_advance_loc4:
  588. result = ptr.p8 <= end + 4
  589. && advance_loc(*ptr.p32++, state);
  590. break;
  591. case DW_CFA_offset_extended:
  592. value = get_uleb128(&ptr.p8, end);
  593. set_rule(value, Memory, get_uleb128(&ptr.p8, end), state);
  594. break;
  595. case DW_CFA_val_offset:
  596. value = get_uleb128(&ptr.p8, end);
  597. set_rule(value, Value, get_uleb128(&ptr.p8, end), state);
  598. break;
  599. case DW_CFA_offset_extended_sf:
  600. value = get_uleb128(&ptr.p8, end);
  601. set_rule(value, Memory, get_sleb128(&ptr.p8, end), state);
  602. break;
  603. case DW_CFA_val_offset_sf:
  604. value = get_uleb128(&ptr.p8, end);
  605. set_rule(value, Value, get_sleb128(&ptr.p8, end), state);
  606. break;
  607. case DW_CFA_restore_extended:
  608. case DW_CFA_undefined:
  609. case DW_CFA_same_value:
  610. set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0, state);
  611. break;
  612. case DW_CFA_register:
  613. value = get_uleb128(&ptr.p8, end);
  614. set_rule(value,
  615. Register,
  616. get_uleb128(&ptr.p8, end), state);
  617. break;
  618. case DW_CFA_remember_state:
  619. if (ptr.p8 == state->label) {
  620. state->label = NULL;
  621. return 1;
  622. }
  623. if (state->stackDepth >= MAX_STACK_DEPTH)
  624. return 0;
  625. state->stack[state->stackDepth++] = ptr.p8;
  626. break;
  627. case DW_CFA_restore_state:
  628. if (state->stackDepth) {
  629. const uleb128_t loc = state->loc;
  630. const u8 *label = state->label;
  631. state->label = state->stack[state->stackDepth - 1];
  632. memcpy(&state->cfa, &badCFA, sizeof(state->cfa));
  633. memset(state->regs, 0, sizeof(state->regs));
  634. state->stackDepth = 0;
  635. result = processCFI(start, end, 0, ptrType, state);
  636. state->loc = loc;
  637. state->label = label;
  638. } else
  639. return 0;
  640. break;
  641. case DW_CFA_def_cfa:
  642. state->cfa.reg = get_uleb128(&ptr.p8, end);
  643. /*nobreak*/
  644. case DW_CFA_def_cfa_offset:
  645. state->cfa.offs = get_uleb128(&ptr.p8, end);
  646. break;
  647. case DW_CFA_def_cfa_sf:
  648. state->cfa.reg = get_uleb128(&ptr.p8, end);
  649. /*nobreak*/
  650. case DW_CFA_def_cfa_offset_sf:
  651. state->cfa.offs = get_sleb128(&ptr.p8, end)
  652. * state->dataAlign;
  653. break;
  654. case DW_CFA_def_cfa_register:
  655. state->cfa.reg = get_uleb128(&ptr.p8, end);
  656. break;
  657. /*todo case DW_CFA_def_cfa_expression: */
  658. /*todo case DW_CFA_expression: */
  659. /*todo case DW_CFA_val_expression: */
  660. case DW_CFA_GNU_args_size:
  661. get_uleb128(&ptr.p8, end);
  662. break;
  663. case DW_CFA_GNU_negative_offset_extended:
  664. value = get_uleb128(&ptr.p8, end);
  665. set_rule(value,
  666. Memory,
  667. (uleb128_t)0 - get_uleb128(&ptr.p8, end), state);
  668. break;
  669. case DW_CFA_GNU_window_save:
  670. default:
  671. result = 0;
  672. break;
  673. }
  674. break;
  675. case 1:
  676. result = advance_loc(*ptr.p8++ & 0x3f, state);
  677. break;
  678. case 2:
  679. value = *ptr.p8++ & 0x3f;
  680. set_rule(value, Memory, get_uleb128(&ptr.p8, end), state);
  681. break;
  682. case 3:
  683. set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
  684. break;
  685. }
  686. if (ptr.p8 > end)
  687. result = 0;
  688. if (result && targetLoc != 0 && targetLoc < state->loc)
  689. return 1;
  690. }
  691. return result
  692. && ptr.p8 == end
  693. && (targetLoc == 0
  694. || (/*todo While in theory this should apply, gcc in practice omits
  695. everything past the function prolog, and hence the location
  696. never reaches the end of the function.
  697. targetLoc < state->loc &&*/ state->label == NULL));
  698. }
  699. /* Unwind to previous to frame. Returns 0 if successful, negative
  700. * number in case of an error. */
  701. int unwind(struct unwind_frame_info *frame)
  702. {
  703. #define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
  704. const u32 *fde = NULL, *cie = NULL;
  705. const u8 *ptr = NULL, *end = NULL;
  706. unsigned long pc = UNW_PC(frame) - frame->call_frame;
  707. unsigned long startLoc = 0, endLoc = 0, cfa;
  708. unsigned i;
  709. signed ptrType = -1;
  710. uleb128_t retAddrReg = 0;
  711. const struct unwind_table *table;
  712. struct unwind_state state;
  713. if (UNW_PC(frame) == 0)
  714. return -EINVAL;
  715. if ((table = find_table(pc)) != NULL
  716. && !(table->size & (sizeof(*fde) - 1))) {
  717. const u8 *hdr = table->header;
  718. unsigned long tableSize;
  719. smp_rmb();
  720. if (hdr && hdr[0] == 1) {
  721. switch(hdr[3] & DW_EH_PE_FORM) {
  722. case DW_EH_PE_native: tableSize = sizeof(unsigned long); break;
  723. case DW_EH_PE_data2: tableSize = 2; break;
  724. case DW_EH_PE_data4: tableSize = 4; break;
  725. case DW_EH_PE_data8: tableSize = 8; break;
  726. default: tableSize = 0; break;
  727. }
  728. ptr = hdr + 4;
  729. end = hdr + table->hdrsz;
  730. if (tableSize
  731. && read_pointer(&ptr, end, hdr[1])
  732. == (unsigned long)table->address
  733. && (i = read_pointer(&ptr, end, hdr[2])) > 0
  734. && i == (end - ptr) / (2 * tableSize)
  735. && !((end - ptr) % (2 * tableSize))) {
  736. do {
  737. const u8 *cur = ptr + (i / 2) * (2 * tableSize);
  738. startLoc = read_pointer(&cur,
  739. cur + tableSize,
  740. hdr[3]);
  741. if (pc < startLoc)
  742. i /= 2;
  743. else {
  744. ptr = cur - tableSize;
  745. i = (i + 1) / 2;
  746. }
  747. } while (startLoc && i > 1);
  748. if (i == 1
  749. && (startLoc = read_pointer(&ptr,
  750. ptr + tableSize,
  751. hdr[3])) != 0
  752. && pc >= startLoc)
  753. fde = (void *)read_pointer(&ptr,
  754. ptr + tableSize,
  755. hdr[3]);
  756. }
  757. }
  758. if (fde != NULL) {
  759. cie = cie_for_fde(fde, table);
  760. ptr = (const u8 *)(fde + 2);
  761. if(cie != NULL
  762. && cie != &bad_cie
  763. && cie != &not_fde
  764. && (ptrType = fde_pointer_type(cie)) >= 0
  765. && read_pointer(&ptr,
  766. (const u8 *)(fde + 1) + *fde,
  767. ptrType) == startLoc) {
  768. if (!(ptrType & DW_EH_PE_indirect))
  769. ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed;
  770. endLoc = startLoc
  771. + read_pointer(&ptr,
  772. (const u8 *)(fde + 1) + *fde,
  773. ptrType);
  774. if(pc >= endLoc)
  775. fde = NULL;
  776. } else
  777. fde = NULL;
  778. }
  779. if (fde == NULL) {
  780. for (fde = table->address, tableSize = table->size;
  781. cie = NULL, tableSize > sizeof(*fde)
  782. && tableSize - sizeof(*fde) >= *fde;
  783. tableSize -= sizeof(*fde) + *fde,
  784. fde += 1 + *fde / sizeof(*fde)) {
  785. cie = cie_for_fde(fde, table);
  786. if (cie == &bad_cie) {
  787. cie = NULL;
  788. break;
  789. }
  790. if (cie == NULL
  791. || cie == &not_fde
  792. || (ptrType = fde_pointer_type(cie)) < 0)
  793. continue;
  794. ptr = (const u8 *)(fde + 2);
  795. startLoc = read_pointer(&ptr,
  796. (const u8 *)(fde + 1) + *fde,
  797. ptrType);
  798. if (!startLoc)
  799. continue;
  800. if (!(ptrType & DW_EH_PE_indirect))
  801. ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed;
  802. endLoc = startLoc
  803. + read_pointer(&ptr,
  804. (const u8 *)(fde + 1) + *fde,
  805. ptrType);
  806. if (pc >= startLoc && pc < endLoc)
  807. break;
  808. }
  809. }
  810. }
  811. if (cie != NULL) {
  812. memset(&state, 0, sizeof(state));
  813. state.cieEnd = ptr; /* keep here temporarily */
  814. ptr = (const u8 *)(cie + 2);
  815. end = (const u8 *)(cie + 1) + *cie;
  816. frame->call_frame = 1;
  817. if ((state.version = *ptr) != 1)
  818. cie = NULL; /* unsupported version */
  819. else if (*++ptr) {
  820. /* check if augmentation size is first (and thus present) */
  821. if (*ptr == 'z') {
  822. while (++ptr < end && *ptr) {
  823. switch(*ptr) {
  824. /* check for ignorable (or already handled)
  825. * nul-terminated augmentation string */
  826. case 'L':
  827. case 'P':
  828. case 'R':
  829. continue;
  830. case 'S':
  831. frame->call_frame = 0;
  832. continue;
  833. default:
  834. break;
  835. }
  836. break;
  837. }
  838. }
  839. if (ptr >= end || *ptr)
  840. cie = NULL;
  841. }
  842. ++ptr;
  843. }
  844. if (cie != NULL) {
  845. /* get code aligment factor */
  846. state.codeAlign = get_uleb128(&ptr, end);
  847. /* get data aligment factor */
  848. state.dataAlign = get_sleb128(&ptr, end);
  849. if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
  850. cie = NULL;
  851. else {
  852. retAddrReg = state.version <= 1 ? *ptr++ : get_uleb128(&ptr, end);
  853. /* skip augmentation */
  854. if (((const char *)(cie + 2))[1] == 'z') {
  855. uleb128_t augSize = get_uleb128(&ptr, end);
  856. ptr += augSize;
  857. }
  858. if (ptr > end
  859. || retAddrReg >= ARRAY_SIZE(reg_info)
  860. || REG_INVALID(retAddrReg)
  861. || reg_info[retAddrReg].width != sizeof(unsigned long))
  862. cie = NULL;
  863. }
  864. }
  865. if (cie != NULL) {
  866. state.cieStart = ptr;
  867. ptr = state.cieEnd;
  868. state.cieEnd = end;
  869. end = (const u8 *)(fde + 1) + *fde;
  870. /* skip augmentation */
  871. if (((const char *)(cie + 2))[1] == 'z') {
  872. uleb128_t augSize = get_uleb128(&ptr, end);
  873. if ((ptr += augSize) > end)
  874. fde = NULL;
  875. }
  876. }
  877. if (cie == NULL || fde == NULL) {
  878. #ifdef CONFIG_FRAME_POINTER
  879. unsigned long top, bottom;
  880. top = STACK_TOP(frame->task);
  881. bottom = STACK_BOTTOM(frame->task);
  882. # if FRAME_RETADDR_OFFSET < 0
  883. if (UNW_SP(frame) < top
  884. && UNW_FP(frame) <= UNW_SP(frame)
  885. && bottom < UNW_FP(frame)
  886. # else
  887. if (UNW_SP(frame) > top
  888. && UNW_FP(frame) >= UNW_SP(frame)
  889. && bottom > UNW_FP(frame)
  890. # endif
  891. && !((UNW_SP(frame) | UNW_FP(frame))
  892. & (sizeof(unsigned long) - 1))) {
  893. unsigned long link;
  894. if (!probe_kernel_address(
  895. (unsigned long *)(UNW_FP(frame)
  896. + FRAME_LINK_OFFSET),
  897. link)
  898. # if FRAME_RETADDR_OFFSET < 0
  899. && link > bottom && link < UNW_FP(frame)
  900. # else
  901. && link > UNW_FP(frame) && link < bottom
  902. # endif
  903. && !(link & (sizeof(link) - 1))
  904. && !probe_kernel_address(
  905. (unsigned long *)(UNW_FP(frame)
  906. + FRAME_RETADDR_OFFSET), UNW_PC(frame))) {
  907. UNW_SP(frame) = UNW_FP(frame) + FRAME_RETADDR_OFFSET
  908. # if FRAME_RETADDR_OFFSET < 0
  909. -
  910. # else
  911. +
  912. # endif
  913. sizeof(UNW_PC(frame));
  914. UNW_FP(frame) = link;
  915. return 0;
  916. }
  917. }
  918. #endif
  919. return -ENXIO;
  920. }
  921. state.org = startLoc;
  922. memcpy(&state.cfa, &badCFA, sizeof(state.cfa));
  923. /* process instructions */
  924. if (!processCFI(ptr, end, pc, ptrType, &state)
  925. || state.loc > endLoc
  926. || state.regs[retAddrReg].where == Nowhere
  927. || state.cfa.reg >= ARRAY_SIZE(reg_info)
  928. || reg_info[state.cfa.reg].width != sizeof(unsigned long)
  929. || state.cfa.offs % sizeof(unsigned long))
  930. return -EIO;
  931. /* update frame */
  932. #ifndef CONFIG_AS_CFI_SIGNAL_FRAME
  933. if(frame->call_frame
  934. && !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign))
  935. frame->call_frame = 0;
  936. #endif
  937. cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
  938. startLoc = min((unsigned long)UNW_SP(frame), cfa);
  939. endLoc = max((unsigned long)UNW_SP(frame), cfa);
  940. if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) {
  941. startLoc = min(STACK_LIMIT(cfa), cfa);
  942. endLoc = max(STACK_LIMIT(cfa), cfa);
  943. }
  944. #ifndef CONFIG_64BIT
  945. # define CASES CASE(8); CASE(16); CASE(32)
  946. #else
  947. # define CASES CASE(8); CASE(16); CASE(32); CASE(64)
  948. #endif
  949. for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
  950. if (REG_INVALID(i)) {
  951. if (state.regs[i].where == Nowhere)
  952. continue;
  953. return -EIO;
  954. }
  955. switch(state.regs[i].where) {
  956. default:
  957. break;
  958. case Register:
  959. if (state.regs[i].value >= ARRAY_SIZE(reg_info)
  960. || REG_INVALID(state.regs[i].value)
  961. || reg_info[i].width > reg_info[state.regs[i].value].width)
  962. return -EIO;
  963. switch(reg_info[state.regs[i].value].width) {
  964. #define CASE(n) \
  965. case sizeof(u##n): \
  966. state.regs[i].value = FRAME_REG(state.regs[i].value, \
  967. const u##n); \
  968. break
  969. CASES;
  970. #undef CASE
  971. default:
  972. return -EIO;
  973. }
  974. break;
  975. }
  976. }
  977. for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
  978. if (REG_INVALID(i))
  979. continue;
  980. switch(state.regs[i].where) {
  981. case Nowhere:
  982. if (reg_info[i].width != sizeof(UNW_SP(frame))
  983. || &FRAME_REG(i, __typeof__(UNW_SP(frame)))
  984. != &UNW_SP(frame))
  985. continue;
  986. UNW_SP(frame) = cfa;
  987. break;
  988. case Register:
  989. switch(reg_info[i].width) {
  990. #define CASE(n) case sizeof(u##n): \
  991. FRAME_REG(i, u##n) = state.regs[i].value; \
  992. break
  993. CASES;
  994. #undef CASE
  995. default:
  996. return -EIO;
  997. }
  998. break;
  999. case Value:
  1000. if (reg_info[i].width != sizeof(unsigned long))
  1001. return -EIO;
  1002. FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
  1003. * state.dataAlign;
  1004. break;
  1005. case Memory: {
  1006. unsigned long addr = cfa + state.regs[i].value
  1007. * state.dataAlign;
  1008. if ((state.regs[i].value * state.dataAlign)
  1009. % sizeof(unsigned long)
  1010. || addr < startLoc
  1011. || addr + sizeof(unsigned long) < addr
  1012. || addr + sizeof(unsigned long) > endLoc)
  1013. return -EIO;
  1014. switch(reg_info[i].width) {
  1015. #define CASE(n) case sizeof(u##n): \
  1016. probe_kernel_address((u##n *)addr, FRAME_REG(i, u##n)); \
  1017. break
  1018. CASES;
  1019. #undef CASE
  1020. default:
  1021. return -EIO;
  1022. }
  1023. }
  1024. break;
  1025. }
  1026. }
  1027. return 0;
  1028. #undef CASES
  1029. #undef FRAME_REG
  1030. }
  1031. EXPORT_SYMBOL(unwind);
  1032. int unwind_init_frame_info(struct unwind_frame_info *info,
  1033. struct task_struct *tsk,
  1034. /*const*/ struct pt_regs *regs)
  1035. {
  1036. info->task = tsk;
  1037. info->call_frame = 0;
  1038. arch_unw_init_frame_info(info, regs);
  1039. return 0;
  1040. }
  1041. EXPORT_SYMBOL(unwind_init_frame_info);
  1042. /*
  1043. * Prepare to unwind a blocked task.
  1044. */
  1045. int unwind_init_blocked(struct unwind_frame_info *info,
  1046. struct task_struct *tsk)
  1047. {
  1048. info->task = tsk;
  1049. info->call_frame = 0;
  1050. arch_unw_init_blocked(info);
  1051. return 0;
  1052. }
  1053. EXPORT_SYMBOL(unwind_init_blocked);
  1054. /*
  1055. * Prepare to unwind the currently running thread.
  1056. */
  1057. int unwind_init_running(struct unwind_frame_info *info,
  1058. asmlinkage int (*callback)(struct unwind_frame_info *,
  1059. void *arg),
  1060. void *arg)
  1061. {
  1062. info->task = current;
  1063. info->call_frame = 0;
  1064. return arch_unwind_init_running(info, callback, arg);
  1065. }
  1066. EXPORT_SYMBOL(unwind_init_running);
  1067. /*
  1068. * Unwind until the return pointer is in user-land (or until an error
  1069. * occurs). Returns 0 if successful, negative number in case of
  1070. * error.
  1071. */
  1072. int unwind_to_user(struct unwind_frame_info *info)
  1073. {
  1074. while (!arch_unw_user_mode(info)) {
  1075. int err = unwind(info);
  1076. if (err < 0)
  1077. return err;
  1078. }
  1079. return 0;
  1080. }
  1081. EXPORT_SYMBOL(unwind_to_user);