/* arch/blackfin/kernel/traps.c */
  1. /*
  2. * Copyright 2004-2009 Analog Devices Inc.
  3. *
  4. * Licensed under the GPL-2 or later
  5. */
  6. #include <linux/bug.h>
  7. #include <linux/uaccess.h>
  8. #include <linux/interrupt.h>
  9. #include <linux/module.h>
  10. #include <linux/kallsyms.h>
  11. #include <linux/fs.h>
  12. #include <linux/rbtree.h>
  13. #include <asm/traps.h>
  14. #include <asm/cacheflush.h>
  15. #include <asm/cplb.h>
  16. #include <asm/dma.h>
  17. #include <asm/blackfin.h>
  18. #include <asm/irq_handler.h>
  19. #include <linux/irq.h>
  20. #include <asm/trace.h>
  21. #include <asm/fixed_code.h>
  22. #ifdef CONFIG_KGDB
  23. # include <linux/kgdb.h>
  24. # define CHK_DEBUGGER_TRAP() \
  25. do { \
  26. kgdb_handle_exception(trapnr, sig, info.si_code, fp); \
  27. } while (0)
  28. # define CHK_DEBUGGER_TRAP_MAYBE() \
  29. do { \
  30. if (kgdb_connected) \
  31. CHK_DEBUGGER_TRAP(); \
  32. } while (0)
  33. #else
  34. # define CHK_DEBUGGER_TRAP() do { } while (0)
  35. # define CHK_DEBUGGER_TRAP_MAYBE() do { } while (0)
  36. #endif
  37. #ifdef CONFIG_DEBUG_VERBOSE
  38. #define verbose_printk(fmt, arg...) \
  39. printk(fmt, ##arg)
  40. #else
  41. #define verbose_printk(fmt, arg...) \
  42. ({ if (0) printk(fmt, ##arg); 0; })
  43. #endif
  44. #if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
  45. u32 last_seqstat;
  46. #ifdef CONFIG_DEBUG_MMRS_MODULE
  47. EXPORT_SYMBOL(last_seqstat);
  48. #endif
  49. #endif
/* Initiate the event table handler */
void __init trap_init(void)
{
	CSYNC();
	/* Install the low-level exception entry point ("trap", an assembly
	 * symbol defined elsewhere) into core event vector 3, which is the
	 * exception vector on Blackfin.
	 */
	bfin_write_EVT3(trap);
	CSYNC();	/* ensure the MMR write has completed before returning */
}
/*
 * decode_address - format @address as a human-readable location into @buf
 *
 * Output starts with "<0xADDR> " and then, in order of preference:
 *   - "{ module:symbol + 0xOFF }" via kallsyms for kernel text,
 *   - a fixed-region guess (fixed code, NULL area, core/system MMRs, L1 ROM),
 *   - "[ comm + 0xOFF ]" by walking every process's VMAs for user addresses,
 *   - "/ * kernel dynamic memory * /" as the fallback.
 *
 * With CONFIG_DEBUG_VERBOSE off this writes a single space.
 *
 * NOTE(review): callers pass ~150-byte buffers; the sprintf formats here are
 * assumed to fit -- confirm if buffer sizes change.
 */
static void decode_address(char *buf, unsigned long address)
{
#ifdef CONFIG_DEBUG_VERBOSE
	struct task_struct *p;
	struct mm_struct *mm;
	unsigned long flags, offset;
	/* In exception context (IPEND bit 4 set) or atomic context we must not
	 * sleep, so the VMA walk below uses p->mm directly instead of
	 * get_task_mm()/mmput().
	 */
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
	struct rb_node *n;

#ifdef CONFIG_KALLSYMS
	unsigned long symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];
#endif

	buf += sprintf(buf, "<0x%08lx> ", address);
#ifdef CONFIG_KALLSYMS
	/* look up the address and see if we are in kernel space */
	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);

	if (symname) {
		/* yeah! kernel space! */
		if (!modname)
			modname = delim = "";
		sprintf(buf, "{ %s%s%s%s + 0x%lx }",
			delim, modname, delim, symname,
			(unsigned long)offset);
		return;
	}
#endif

	if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
		/* Problem in fixed code section? */
		strcat(buf, "/* Maybe fixed code section */");
		return;

	} else if (address < CONFIG_BOOT_LOAD) {
		/* Problem somewhere before the kernel start address */
		strcat(buf, "/* Maybe null pointer? */");
		return;

	} else if (address >= COREMMR_BASE) {
		strcat(buf, "/* core mmrs */");
		return;

	} else if (address >= SYSMMR_BASE) {
		strcat(buf, "/* system mmrs */");
		return;

	} else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
		strcat(buf, "/* on-chip L1 ROM */");
		return;
	}

	/*
	 * Don't walk any of the vmas if we are oopsing, it has been known
	 * to cause problems - corrupt vmas (kernel crashes) cause double faults
	 */
	if (oops_in_progress) {
		strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
		return;
	}

	/* looks like we're off in user-land, so let's walk all the
	 * mappings of all our processes and see if we can't be a whee
	 * bit more specific
	 */
	write_lock_irqsave(&tasklist_lock, flags);
	for_each_process(p) {
		mm = (in_atomic ? p->mm : get_task_mm(p));
		if (!mm)
			continue;

		/* trylock: a busy mm is simply skipped rather than risking a
		 * deadlock from this (possibly atomic) context */
		if (!down_read_trylock(&mm->mmap_sem)) {
			if (!in_atomic)
				mmput(mm);
			continue;
		}

		for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
			struct vm_area_struct *vma;

			vma = rb_entry(n, struct vm_area_struct, vm_rb);

			if (address >= vma->vm_start && address < vma->vm_end) {
				char _tmpbuf[256];
				char *name = p->comm;
				struct file *file = vma->vm_file;

				/* prefer the backing file's path over the
				 * process name when the mapping is file-backed */
				if (file) {
					char *d_name = d_path(&file->f_path, _tmpbuf,
						      sizeof(_tmpbuf));
					if (!IS_ERR(d_name))
						name = d_name;
				}

				/* FLAT does not have its text aligned to the start of
				 * the map while FDPIC ELF does ...
				 */

				/* before we can check flat/fdpic, we need to
				 * make sure current is valid
				 */
				if ((unsigned long)current >= FIXED_CODE_START &&
				    !((unsigned long)current & 0x3)) {
					if (current->mm &&
					    (address > current->mm->start_code) &&
					    (address < current->mm->end_code))
						offset = address - current->mm->start_code;
					else
						offset = (address - vma->vm_start) +
							 (vma->vm_pgoff << PAGE_SHIFT);

					sprintf(buf, "[ %s + 0x%lx ]", name, offset);
				} else
					sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
						name, vma->vm_start, vma->vm_end);

				up_read(&mm->mmap_sem);
				if (!in_atomic)
					mmput(mm);

				if (buf[0] == '\0')
					sprintf(buf, "[ %s ] dynamic memory", name);

				goto done;
			}
		}

		up_read(&mm->mmap_sem);
		if (!in_atomic)
			mmput(mm);
	}

	/*
	 * we were unable to find this address anywhere,
	 * or some MMs were skipped because they were in use.
	 */
	sprintf(buf, "/* kernel dynamic memory */");

done:
	write_unlock_irqrestore(&tasklist_lock, flags);
#else
	sprintf(buf, " ");
#endif
}
/*
 * double_fault_c - terminal handler for a double fault
 *
 * Called from assembly when an exception occurs while already handling an
 * exception.  Prints whatever diagnostics are safe (using the per-cpu
 * doublefault snapshot registers when CONFIG_DEBUG_DOUBLEFAULT_PRINT is on)
 * and then panics unconditionally -- there is no recovery from this state.
 */
asmlinkage void double_fault_c(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int j;
	/* freeze the hardware trace so the dump below reflects the fault */
	trace_buffer_save(j);
#endif
	console_verbose();
	oops_in_progress = 1;
#ifdef CONFIG_DEBUG_VERBOSE
	printk(KERN_EMERG "Double Fault\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) {
		unsigned int cpu = raw_smp_processor_id();
		char buf[150];
		/* report the state saved at the time of the first exception */
		decode_address(buf, cpu_pda[cpu].retx_doublefault);
		printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n",
			(unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf);
		decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr);
		printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf);
		decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr);
		printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf);

		decode_address(buf, fp->retx);
		printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf);
	} else
#endif
	{
		dump_bfin_process(fp);
		dump_bfin_mem(fp);
		show_regs(fp);
		dump_bfin_trace_buffer();
	}
#endif
	panic("Double Fault - unrecoverable event");
}
  215. static int kernel_mode_regs(struct pt_regs *regs)
  216. {
  217. return regs->ipend & 0xffc0;
  218. }
/*
 * trap_c - common C-level handler for Blackfin exceptions
 *
 * Decodes the exception cause from SEQSTAT, selects the signal and siginfo
 * code to deliver to the offending task, prints diagnostics (escalating to
 * panic for faults taken in kernel context), and finally forces the signal.
 * Some vectors are swallowed entirely -- kernel-space breakpoints/steps/
 * watchpoints, KGDB traps, WARN()s, and known hardware anomalies -- via
 * "goto traps_done".
 */
asmlinkage notrace void trap_c(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int j;
#endif
	unsigned int cpu = raw_smp_processor_id();
	const char *strerror = NULL;
	int sig = 0;
	siginfo_t info;
	unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;

	/* freeze the hardware trace buffer so diagnostics show the fault,
	 * not our own handling (no-op macro when tracing is off) */
	trace_buffer_save(j);
#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
	last_seqstat = (u32)fp->seqstat;
#endif

	/* Important - be very careful dereferncing pointers - will lead to
	 * double faults if the stack has become corrupt
	 */

	/* trap_c() will be called for exceptions. During exceptions
	 * processing, the pc value should be set with retx value.
	 * With this change we can cleanup some code in signal.c- TODO
	 */
	fp->orig_pc = fp->retx;
	/* printk("exception: 0x%x, ipend=%x, reti=%x, retx=%x\n",
		trapnr, fp->ipend, fp->pc, fp->retx); */

	/* send the appropriate signal to the user program */
	switch (trapnr) {

	/* This table works in conjuction with the one in ./mach-common/entry.S
	 * Some exceptions are handled there (in assembly, in exception space)
	 * Some are handled here, (in C, in interrupt space)
	 * Some, like CPLB, are handled in both, where the normal path is
	 * handled in assembly/exception space, and the error path is handled
	 * here
	 */

	/* 0x00 - Linux Syscall, getting here is an error */
	/* 0x01 - userspace gdb breakpoint, handled here */
	case VEC_EXCPT01:
		info.si_code = TRAP_ILLTRAP;
		sig = SIGTRAP;
		CHK_DEBUGGER_TRAP_MAYBE();
		/* Check if this is a breakpoint in kernel space */
		if (kernel_mode_regs(fp))
			goto traps_done;
		else
			break;
	/* 0x03 - User Defined, userspace stack overflow */
	case VEC_EXCPT03:
		info.si_code = SEGV_STACKFLOW;
		sig = SIGSEGV;
		strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x02 - KGDB initial connection and break signal trap */
	case VEC_EXCPT02:
#ifdef CONFIG_KGDB
		info.si_code = TRAP_ILLTRAP;
		sig = SIGTRAP;
		CHK_DEBUGGER_TRAP();
		goto traps_done;
#endif
		/* without CONFIG_KGDB this falls through to the
		 * "custom exception handler" case below */
	/* 0x04 - User Defined */
	/* 0x05 - User Defined */
	/* 0x06 - User Defined */
	/* 0x07 - User Defined */
	/* 0x08 - User Defined */
	/* 0x09 - User Defined */
	/* 0x0A - User Defined */
	/* 0x0B - User Defined */
	/* 0x0C - User Defined */
	/* 0x0D - User Defined */
	/* 0x0E - User Defined */
	/* 0x0F - User Defined */
	/* If we got here, it is most likely that someone was trying to use a
	 * custom exception handler, and it is not actually installed properly
	 */
	case VEC_EXCPT04 ... VEC_EXCPT15:
		info.si_code = ILL_ILLPARAOP;
		sig = SIGILL;
		strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x10 HW Single step, handled here */
	case VEC_STEP:
		info.si_code = TRAP_STEP;
		sig = SIGTRAP;
		CHK_DEBUGGER_TRAP_MAYBE();
		/* Check if this is a single step in kernel space */
		if (kernel_mode_regs(fp))
			goto traps_done;
		else
			break;
	/* 0x11 - Trace Buffer Full, handled here */
	case VEC_OVFLOW:
		info.si_code = TRAP_TRACEFLOW;
		sig = SIGTRAP;
		strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x12 - Reserved, Caught by default */
	/* 0x13 - Reserved, Caught by default */
	/* 0x14 - Reserved, Caught by default */
	/* 0x15 - Reserved, Caught by default */
	/* 0x16 - Reserved, Caught by default */
	/* 0x17 - Reserved, Caught by default */
	/* 0x18 - Reserved, Caught by default */
	/* 0x19 - Reserved, Caught by default */
	/* 0x1A - Reserved, Caught by default */
	/* 0x1B - Reserved, Caught by default */
	/* 0x1C - Reserved, Caught by default */
	/* 0x1D - Reserved, Caught by default */
	/* 0x1E - Reserved, Caught by default */
	/* 0x1F - Reserved, Caught by default */
	/* 0x20 - Reserved, Caught by default */
	/* 0x21 - Undefined Instruction, handled here */
	case VEC_UNDEF_I:
#ifdef CONFIG_BUG
		if (kernel_mode_regs(fp)) {
			switch (report_bug(fp->pc, fp)) {
			case BUG_TRAP_TYPE_NONE:
				break;
			case BUG_TRAP_TYPE_WARN:
				dump_bfin_trace_buffer();
				/* skip the 16-bit BUG opcode and resume */
				fp->pc += 2;
				goto traps_done;
			case BUG_TRAP_TYPE_BUG:
				/* call to panic() will dump trace, and it is
				 * off at this point, so it won't be clobbered
				 */
				panic("BUG()");
			}
		}
#endif
		info.si_code = ILL_ILLOPC;
		sig = SIGILL;
		strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x22 - Illegal Instruction Combination, handled here */
	case VEC_ILGAL_I:
		info.si_code = ILL_ILLPARAOP;
		sig = SIGILL;
		strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x23 - Data CPLB protection violation, handled here */
	case VEC_CPLB_VL:
		info.si_code = ILL_CPLB_VI;
		sig = SIGSEGV;
		strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x24 - Data access misaligned, handled here */
	case VEC_MISALI_D:
		info.si_code = BUS_ADRALN;
		sig = SIGBUS;
		strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x25 - Unrecoverable Event, handled here */
	case VEC_UNCOV:
		info.si_code = ILL_ILLEXCPT;
		sig = SIGILL;
		strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr,
	   error case is handled here */
	case VEC_CPLB_M:
		info.si_code = BUS_ADRALN;
		sig = SIGBUS;
		strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE);
		break;
	/* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */
	case VEC_CPLB_MHIT:
		info.si_code = ILL_CPLB_MULHIT;
		sig = SIGSEGV;
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
		if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START)
			strerror = KERN_NOTICE "NULL pointer access\n";
		else
#endif
			strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x28 - Emulation Watchpoint, handled here */
	case VEC_WATCH:
		info.si_code = TRAP_WATCHPT;
		sig = SIGTRAP;
		pr_debug(EXC_0x28(KERN_DEBUG));
		CHK_DEBUGGER_TRAP_MAYBE();
		/* Check if this is a watchpoint in kernel space */
		if (kernel_mode_regs(fp))
			goto traps_done;
		else
			break;
#ifdef CONFIG_BF535
	/* 0x29 - Instruction fetch access error (535 only) */
	case VEC_ISTRU_VL:      /* ADSP-BF535 only (MH) */
		info.si_code = BUS_OPFETCH;
		sig = SIGBUS;
		strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n";
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
#else
	/* 0x29 - Reserved, Caught by default */
#endif
	/* 0x2A - Instruction fetch misaligned, handled here */
	case VEC_MISALI_I:
		info.si_code = BUS_ADRALN;
		sig = SIGBUS;
		strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x2B - Instruction CPLB protection violation, handled here */
	case VEC_CPLB_I_VL:
		info.si_code = ILL_CPLB_VI;
		sig = SIGBUS;
		strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */
	case VEC_CPLB_I_M:
		info.si_code = ILL_CPLB_MISS;
		sig = SIGBUS;
		strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE);
		break;
	/* 0x2D - Instruction CPLB Multiple Hits, handled here */
	case VEC_CPLB_I_MHIT:
		info.si_code = ILL_CPLB_MULHIT;
		sig = SIGSEGV;
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
		if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START)
			strerror = KERN_NOTICE "Jump to NULL address\n";
		else
#endif
			strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x2E - Illegal use of Supervisor Resource, handled here */
	case VEC_ILL_RES:
		info.si_code = ILL_PRVOPC;
		sig = SIGILL;
		strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x2F - Reserved, Caught by default */
	/* 0x30 - Reserved, Caught by default */
	/* 0x31 - Reserved, Caught by default */
	/* 0x32 - Reserved, Caught by default */
	/* 0x33 - Reserved, Caught by default */
	/* 0x34 - Reserved, Caught by default */
	/* 0x35 - Reserved, Caught by default */
	/* 0x36 - Reserved, Caught by default */
	/* 0x37 - Reserved, Caught by default */
	/* 0x38 - Reserved, Caught by default */
	/* 0x39 - Reserved, Caught by default */
	/* 0x3A - Reserved, Caught by default */
	/* 0x3B - Reserved, Caught by default */
	/* 0x3C - Reserved, Caught by default */
	/* 0x3D - Reserved, Caught by default */
	/* 0x3E - Reserved, Caught by default */
	/* 0x3F - Reserved, Caught by default */
	case VEC_HWERR:
		info.si_code = BUS_ADRALN;
		sig = SIGBUS;
		/* refine the cause from the HWERRCAUSE field of SEQSTAT */
		switch (fp->seqstat & SEQSTAT_HWERRCAUSE) {
		/* System MMR Error */
		case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR):
			info.si_code = BUS_ADRALN;
			sig = SIGBUS;
			strerror = KERN_NOTICE HWC_x2(KERN_NOTICE);
			break;
		/* External Memory Addressing Error */
		case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR):
			if (ANOMALY_05000310) {
				/* persists across invocations to match a later
				 * return against the boundary fetch that armed it */
				static unsigned long anomaly_rets;

				if ((fp->pc >= (L1_CODE_START + L1_CODE_LENGTH - 512)) &&
				    (fp->pc < (L1_CODE_START + L1_CODE_LENGTH))) {
					/*
					 * A false hardware error will happen while fetching at
					 * the L1 instruction SRAM boundary.  Ignore it.
					 */
					anomaly_rets = fp->rets;
					goto traps_done;
				} else if (fp->rets == anomaly_rets) {
					/*
					 * While boundary code returns to a function, at the ret
					 * point, a new false hardware error might occur too based
					 * on tests.  Ignore it too.
					 */
					goto traps_done;
				} else if ((fp->rets >= (L1_CODE_START + L1_CODE_LENGTH - 512)) &&
					   (fp->rets < (L1_CODE_START + L1_CODE_LENGTH))) {
					/*
					 * If boundary code calls a function, at the entry point,
					 * a new false hardware error maybe happen based on tests.
					 * Ignore it too.
					 */
					goto traps_done;
				} else
					anomaly_rets = 0;
			}

			info.si_code = BUS_ADRERR;
			sig = SIGBUS;
			strerror = KERN_NOTICE HWC_x3(KERN_NOTICE);
			break;
		/* Performance Monitor Overflow */
		case (SEQSTAT_HWERRCAUSE_PERF_FLOW):
			strerror = KERN_NOTICE HWC_x12(KERN_NOTICE);
			break;
		/* RAISE 5 instruction */
		case (SEQSTAT_HWERRCAUSE_RAISE_5):
			printk(KERN_NOTICE HWC_x18(KERN_NOTICE));
			break;
		default:        /* Reserved */
			printk(KERN_NOTICE HWC_default(KERN_NOTICE));
			break;
		}
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/*
	 * We should be handling all known exception types above,
	 * if we get here we hit a reserved one, so panic
	 */
	default:
		info.si_code = ILL_ILLPARAOP;
		sig = SIGILL;
		verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n",
			(fp->seqstat & SEQSTAT_EXCAUSE));
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	}

	/* every handled case above must have chosen a signal */
	BUG_ON(sig == 0);

	/* If the fault was caused by a kernel thread, or interrupt handler
	 * we will kernel panic, so the system reboots.
	 */
	if (kernel_mode_regs(fp) || (current && !current->mm)) {
		console_verbose();
		oops_in_progress = 1;
	}

	if (sig != SIGTRAP) {
		if (strerror)
			verbose_printk(strerror);

		dump_bfin_process(fp);
		dump_bfin_mem(fp);
		show_regs(fp);

		/* Print out the trace buffer if it makes sense */
#ifndef CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE
		if (trapnr == VEC_CPLB_I_M || trapnr == VEC_CPLB_M)
			verbose_printk(KERN_NOTICE "No trace since you do not have "
			       "CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE enabled\n\n");
		else
#endif
			dump_bfin_trace_buffer();

		if (oops_in_progress) {
			/* Dump the current kernel stack */
			verbose_printk(KERN_NOTICE "Kernel Stack\n");
			show_stack(current, NULL);
			print_modules();
#ifndef CONFIG_ACCESS_CHECK
			verbose_printk(KERN_EMERG "Please turn on "
			       "CONFIG_ACCESS_CHECK\n");
#endif
			panic("Kernel exception");
		} else {
#ifdef CONFIG_DEBUG_VERBOSE
			unsigned long *stack;
			/* Dump the user space stack */
			stack = (unsigned long *)rdusp();
			verbose_printk(KERN_NOTICE "Userspace Stack\n");
			show_stack(NULL, stack);
#endif
		}
	}

#ifdef CONFIG_IPIPE
	if (!ipipe_trap_notify(fp->seqstat & 0x3f, fp))
#endif
	{
		info.si_signo = sig;
		info.si_errno = 0;
		/* data faults report the faulting data address; everything
		 * else reports the faulting instruction address */
		switch (trapnr) {
		case VEC_CPLB_VL:
		case VEC_MISALI_D:
		case VEC_CPLB_M:
		case VEC_CPLB_MHIT:
			info.si_addr = (void __user *)cpu_pda[cpu].dcplb_fault_addr;
			break;
		default:
			info.si_addr = (void __user *)fp->pc;
			break;
		}
		force_sig_info(sig, &info, current);
	}

	/* anomaly workarounds: park the PC on a known-safe instruction so the
	 * faulting fetch is not replayed */
	if ((ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8)) ||
	    (ANOMALY_05000281 && trapnr == VEC_HWERR) ||
	    (ANOMALY_05000189 && (trapnr == VEC_CPLB_I_VL || trapnr == VEC_CPLB_VL)))
		fp->pc = SAFE_USER_INSTRUCTION;

 traps_done:
	trace_buffer_restore(j);
}
  618. /* Typical exception handling routines */
  619. #define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
  620. /*
  621. * Similar to get_user, do some address checking, then dereference
  622. * Return true on success, false on bad address
  623. */
  624. static bool get_instruction(unsigned short *val, unsigned short *address)
  625. {
  626. unsigned long addr = (unsigned long)address;
  627. /* Check for odd addresses */
  628. if (addr & 0x1)
  629. return false;
  630. /* MMR region will never have instructions */
  631. if (addr >= SYSMMR_BASE)
  632. return false;
  633. switch (bfin_mem_access_type(addr, 2)) {
  634. case BFIN_MEM_ACCESS_CORE:
  635. case BFIN_MEM_ACCESS_CORE_ONLY:
  636. *val = *address;
  637. return true;
  638. case BFIN_MEM_ACCESS_DMA:
  639. dma_memcpy(val, address, 2);
  640. return true;
  641. case BFIN_MEM_ACCESS_ITEST:
  642. isram_memcpy(val, address, 2);
  643. return true;
  644. default: /* invalid access */
  645. return false;
  646. }
  647. }
/*
 * decode the instruction if we are printing out the trace, as it
 * makes things easier to follow, without running it through objdump
 * These are the normal instructions which cause change of flow, which
 * would be at the source of the trace buffer
 */
#if defined(CONFIG_DEBUG_VERBOSE) && defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
/* Print a short mnemonic for the 16-bit opcode at @address; silently
 * prints nothing if the address cannot be read.  Unknown opcodes are
 * printed as raw hex.  No trailing newline -- the caller adds it. */
static void decode_instruction(unsigned short *address)
{
	unsigned short opcode;

	if (get_instruction(&opcode, address)) {
		if (opcode == 0x0010)
			verbose_printk("RTS");
		else if (opcode == 0x0011)
			verbose_printk("RTI");
		else if (opcode == 0x0012)
			verbose_printk("RTX");
		else if (opcode == 0x0013)
			verbose_printk("RTN");
		else if (opcode == 0x0014)
			verbose_printk("RTE");
		else if (opcode == 0x0025)
			verbose_printk("EMUEXCPT");
		else if (opcode >= 0x0040 && opcode <= 0x0047)
			verbose_printk("STI R%i", opcode & 7);
		else if (opcode >= 0x0050 && opcode <= 0x0057)
			verbose_printk("JUMP (P%i)", opcode & 7);
		else if (opcode >= 0x0060 && opcode <= 0x0067)
			verbose_printk("CALL (P%i)", opcode & 7);
		else if (opcode >= 0x0070 && opcode <= 0x0077)
			verbose_printk("CALL (PC+P%i)", opcode & 7);
		else if (opcode >= 0x0080 && opcode <= 0x0087)
			verbose_printk("JUMP (PC+P%i)", opcode & 7);
		else if (opcode >= 0x0090 && opcode <= 0x009F)
			verbose_printk("RAISE 0x%x", opcode & 0xF);
		else if (opcode >= 0x00A0 && opcode <= 0x00AF)
			verbose_printk("EXCPT 0x%x", opcode & 0xF);
		else if ((opcode >= 0x1000 && opcode <= 0x13FF) || (opcode >= 0x1800 && opcode <= 0x1BFF))
			verbose_printk("IF !CC JUMP");
		else if ((opcode >= 0x1400 && opcode <= 0x17ff) || (opcode >= 0x1c00 && opcode <= 0x1fff))
			verbose_printk("IF CC JUMP");
		else if (opcode >= 0x2000 && opcode <= 0x2fff)
			verbose_printk("JUMP.S");
		else if (opcode >= 0xe080 && opcode <= 0xe0ff)
			verbose_printk("LSETUP");
		else if (opcode >= 0xe200 && opcode <= 0xe2ff)
			verbose_printk("JUMP.L");
		else if (opcode >= 0xe300 && opcode <= 0xe3ff)
			verbose_printk("CALL pcrel");
		else
			verbose_printk("0x%04x", opcode);
	}
}
#endif
/*
 * dump_bfin_trace_buffer - print the hardware change-of-flow trace
 *
 * Drains the on-chip trace buffer (TBUF), printing each target/source
 * pair with a decoded address and mnemonic, then (optionally) replays
 * the expanded software trace buffer.  Reading TBUF consumes entries,
 * so tracing is suspended around the dump with trace_buffer_save().
 * Compiled to an empty function unless CONFIG_DEBUG_VERBOSE and
 * CONFIG_DEBUG_BFIN_HWTRACE_ON are both enabled.
 */
void dump_bfin_trace_buffer(void)
{
#ifdef CONFIG_DEBUG_VERBOSE
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags, i = 0;
	char buf[150];
	unsigned short *addr;
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	int j, index;
#endif

	trace_buffer_save(tflags);

	printk(KERN_NOTICE "Hardware Trace:\n");

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	printk(KERN_NOTICE "WARNING: Expanded trace turned on - can not trace exceptions\n");
#endif

	if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
		/* each loop iteration pops one target/source pair off TBUF */
		for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
			decode_address(buf, (unsigned long)bfin_read_TBUF());
			printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			printk(KERN_NOTICE " Source : %s ", buf);
			decode_instruction(addr);
			printk("\n");
		}
	}

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	/* walk the software ring buffer backwards from the last entry,
	 * wrapping at the start */
	if (trace_buff_offset)
		index = trace_buff_offset / 4;
	else
		index = EXPAND_LEN;

	j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
	while (j) {
		decode_address(buf, software_trace_buff[index]);
		printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		decode_address(buf, software_trace_buff[index]);
		printk(KERN_NOTICE " Source : %s ", buf);
		decode_instruction((unsigned short *)software_trace_buff[index]);
		printk("\n");
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		j--;
		i++;
	}
#endif

	trace_buffer_restore(tflags);
#endif
#endif
}
EXPORT_SYMBOL(dump_bfin_trace_buffer);
  756. #ifdef CONFIG_BUG
  757. int is_valid_bugaddr(unsigned long addr)
  758. {
  759. unsigned short opcode;
  760. if (!get_instruction(&opcode, (unsigned short *)addr))
  761. return 0;
  762. return opcode == BFIN_BUG_OPCODE;
  763. }
  764. #endif
  765. /*
  766. * Checks to see if the address pointed to is either a
  767. * 16-bit CALL instruction, or a 32-bit CALL instruction
  768. */
  769. static bool is_bfin_call(unsigned short *addr)
  770. {
  771. unsigned short opcode = 0, *ins_addr;
  772. ins_addr = (unsigned short *)addr;
  773. if (!get_instruction(&opcode, ins_addr))
  774. return false;
  775. if ((opcode >= 0x0060 && opcode <= 0x0067) ||
  776. (opcode >= 0x0070 && opcode <= 0x0077))
  777. return true;
  778. ins_addr--;
  779. if (!get_instruction(&opcode, ins_addr))
  780. return false;
  781. if (opcode >= 0xE300 && opcode <= 0xE3FF)
  782. return true;
  783. return false;
  784. }
/*
 * Dump a kernel stack: first as raw memory words, then again listing
 * only the words that look like return addresses (words pointing just
 * past a CALL instruction).  A frame-pointer chain is searched for
 * first so that, when one is found, frames can be labelled.
 *
 * @task:  task whose kernel stack to walk when @stack is NULL
 * @stack: explicit starting stack pointer, or NULL to derive one
 */
void show_stack(struct task_struct *task, unsigned long *stack)
{
#ifdef CONFIG_PRINTK
	unsigned int *addr, *endstack, *fp = 0, *frame;
	unsigned short *ins_addr;
	char buf[150];
	unsigned int i, j, ret_addr, frame_no = 0;

	/*
	 * If we have been passed a specific stack, use that one otherwise
	 *    if we have been passed a task structure, use that, otherwise
	 *    use the stack of where the variable "stack" exists
	 */
	if (stack == NULL) {
		if (task) {
			/* We know this is a kernel stack, so this is the start/end */
			stack = (unsigned long *)task->thread.ksp;
			endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE);
		} else {
			/* print out the existing stack info */
			stack = (unsigned long *)&stack;
			endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
		}
	} else
		endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);

	printk(KERN_NOTICE "Stack info:\n");
	decode_address(buf, (unsigned int)stack);
	printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf);

	/* Bail out before touching a bogus stack range. */
	if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) {
		printk(KERN_NOTICE "Invalid stack pointer\n");
		return;
	}

	/* First thing is to look for a frame pointer */
	for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) {
		/* instruction addresses are even; odd words can't be returns */
		if (*addr & 0x1)
			continue;
		ins_addr = (unsigned short *)*addr;
		ins_addr--;
		/* a word just past a CALL suggests the slot below is the FP */
		if (is_bfin_call(ins_addr))
			fp = addr - 1;

		if (fp) {
			/* Let's check to see if it is a frame pointer */
			while (fp >= (addr - 1) && fp < endstack
			       && fp && ((unsigned int) fp & 0x3) == 0)
				fp = (unsigned int *)*fp;
			/* chain ended cleanly (NULL or stack top): keep this FP */
			if (fp == 0 || fp == endstack) {
				fp = addr - 1;
				break;
			}
			/* chain went somewhere bogus - keep looking */
			fp = 0;
		}
	}
	if (fp) {
		frame = fp;
		printk(KERN_NOTICE " FP: (0x%p)\n", fp);
	} else
		frame = 0;

	/*
	 * Now that we think we know where things are, we
	 * walk the stack again, this time printing things out
	 * incase there is no frame pointer, we still look for
	 * valid return addresses
	 */

	/* First time print out data, next time, print out symbols */
	for (j = 0; j <= 1; j++) {
		if (j)
			printk(KERN_NOTICE "Return addresses in stack:\n");
		else
			printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack);

		/* restart the FP chain for each pass */
		fp = frame;
		frame_no = 0;

		for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0;
		     addr < endstack; addr++, i++) {
			ret_addr = 0;
			/* raw pass: 8 words per printed row */
			if (!j && i % 8 == 0)
				printk(KERN_NOTICE "%p:",addr);

			/* if it is an odd address, or zero, just skip it */
			if (*addr & 0x1 || !*addr)
				goto print;

			ins_addr = (unsigned short *)*addr;

			/* Go back one instruction, and see if it is a CALL */
			ins_addr--;
			ret_addr = is_bfin_call(ins_addr);
print:
			if (!j && stack == (unsigned long *)addr)
				printk("[%08x]", *addr);	/* current SP slot */
			else if (ret_addr)
				if (j) {
					decode_address(buf, (unsigned int)*addr);
					if (frame == addr) {
						printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf);
						continue;
					}
					printk(KERN_NOTICE " address : %s\n", buf);
				} else
					printk("<%08x>", *addr);	/* likely return addr */
			else if (fp == addr) {
				if (j)
					frame = addr+1;
				else
					printk("(%08x)", *addr);	/* frame pointer link */
				/* follow the FP chain to the next frame */
				fp = (unsigned int *)*addr;
				frame_no++;
			} else if (!j)
				printk(" %08x ", *addr);
		}
		if (!j)
			printk("\n");
	}
#endif
}
EXPORT_SYMBOL(show_stack);
/*
 * Arch implementation of dump_stack(): freeze the hardware trace
 * buffer, dump it, then dump the current kernel stack.
 */
void dump_stack(void)
{
	/* local just to obtain a current stack address for show_stack() */
	unsigned long stack;
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags;
#endif
	/* NOTE(review): tflags only exists under
	 * CONFIG_DEBUG_BFIN_HWTRACE_ON, so trace_buffer_save/restore are
	 * presumably no-op macros otherwise - confirm in the header. */
	trace_buffer_save(tflags);
	dump_bfin_trace_buffer();
	show_stack(current, &stack);
	trace_buffer_restore(tflags);
}
EXPORT_SYMBOL(dump_stack);
/*
 * Print which execution context (derived from IPEND) the crash
 * happened in, then - after defensively validating the pointers -
 * the current process's identity and memory layout.
 */
void dump_bfin_process(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
	/* We should be able to look at fp->ipend, but we don't push it on the
	 * stack all the time, so do this until we fix that */
	unsigned int context = bfin_read_IPEND();

	if (oops_in_progress)
		verbose_printk(KERN_EMERG "Kernel OOPS in progress\n");

	/* Classify by IPEND bits (0x0020, 0x3FC0, 0x4000, 0x8000); the
	 * order of these tests matters as several bits may be set. */
	if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)
		verbose_printk(KERN_NOTICE "HW Error context\n");
	else if (context & 0x0020)
		verbose_printk(KERN_NOTICE "Deferred Exception context\n");
	else if (context & 0x3FC0)
		verbose_printk(KERN_NOTICE "Interrupt context\n");
	else if (context & 0x4000)
		verbose_printk(KERN_NOTICE "Deferred Interrupt context\n");
	else if (context & 0x8000)
		verbose_printk(KERN_NOTICE "Kernel process context\n");

	/* Because we are crashing, and pointers could be bad, we check things
	 * pretty closely before we use them
	 */
	if ((unsigned long)current >= FIXED_CODE_START &&
	    !((unsigned long)current & 0x3) && current->pid) {
		verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n");
		if (current->comm >= (char *)FIXED_CODE_START)
			verbose_printk(KERN_NOTICE "COMM=%s PID=%d",
				current->comm, current->pid);
		else
			verbose_printk(KERN_NOTICE "COMM= invalid");

		printk(KERN_CONT " CPU=%d\n", current_thread_info()->cpu);
		/* mm pointer must be aligned and above the fixed-code area */
		if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START)
			verbose_printk(KERN_NOTICE
				"TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
				" BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
				(void *)current->mm->start_code,
				(void *)current->mm->end_code,
				(void *)current->mm->start_data,
				(void *)current->mm->end_data,
				/* BSS begins where DATA ends, hence end_data twice */
				(void *)current->mm->end_data,
				(void *)current->mm->brk,
				(void *)current->mm->start_stack);
		else
			verbose_printk(KERN_NOTICE "invalid mm\n");
	} else
		verbose_printk(KERN_NOTICE
			       "No Valid process in current context\n");
#endif
}
/*
 * Hex-dump the 16-bit instruction words around the faulting PC,
 * bracketing the word at the PC itself, and warn when the trap looks
 * like a deferred hardware error (an earlier instruction re-enabled
 * interrupts before the error was delivered).
 */
void dump_bfin_mem(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
	unsigned short *addr, *erraddr, val = 0, err = 0;
	/* buf holds "????" or a 4-digit hex word plus NUL */
	char sti = 0, buf[6];

	erraddr = (void *)fp->pc;

	verbose_printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr);

	/* Walk a window of 16-bit words around the 16-byte-aligned PC. */
	for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
	     addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
	     addr++) {
		/* start a new output row at each 16-byte boundary */
		if (!((unsigned long)addr & 0xF))
			verbose_printk(KERN_NOTICE "0x%p: ", addr);

		if (!get_instruction(&val, addr)) {
			/* unreadable - show placeholder, treat word as 0 */
			val = 0;
			sprintf(buf, "????");
		} else
			sprintf(buf, "%04x", val);

		if (addr == erraddr) {
			/* bracket the faulting word; err is recorded here
			 * but not otherwise used below */
			verbose_printk("[%s]", buf);
			err = val;
		} else
			verbose_printk(" %s ", buf);

		/* Do any previous instructions turn on interrupts? */
		if (addr <= erraddr && /* in the past */
		    ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */
		     val == 0x017b)) /* [SP++] = RETI */
			sti = 1;
	}

	verbose_printk("\n");

	/* Hardware error interrupts can be deferred */
	if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
	    oops_in_progress)){
		verbose_printk(KERN_NOTICE "Looks like this was a deferred error - sorry\n");
#ifndef CONFIG_DEBUG_HWERR
		verbose_printk(KERN_NOTICE
			"The remaining message may be meaningless\n"
			"You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
#else
		/* If we are handling only one peripheral interrupt
		 * and current mm and pid are valid, and the last error
		 * was in that user space process's text area
		 * print it out - because that is where the problem exists
		 */
		if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
		     (current->pid && current->mm)) {
			/* And the last RETI points to the current userspace context */
			if ((fp + 1)->pc >= current->mm->start_code &&
			    (fp + 1)->pc <= current->mm->end_code) {
				verbose_printk(KERN_NOTICE "It might be better to look around here :\n");
				verbose_printk(KERN_NOTICE "-------------------------------------------\n");
				show_regs(fp + 1);
				verbose_printk(KERN_NOTICE "-------------------------------------------\n");
			}
		}
#endif
	}
#endif
}
/*
 * Print the complete processor state: CPU identification, sequencer
 * status, interrupt state (with decoded handler addresses), the
 * return registers, CPLB fault addresses where relevant, and every
 * data/pointer/loop/DAG register from the saved pt_regs.
 */
void show_regs(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
	char buf [150];
	struct irqaction *action;
	unsigned int i;
	unsigned long flags = 0;
	unsigned int cpu = raw_smp_processor_id();
	/* avoid taking irq_desc locks when we can't sleep/spin safely */
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();

	verbose_printk(KERN_NOTICE "\n");
	/* warn when the kernel was built for a different CPU/silicon rev */
	if (CPUID != bfin_cpuid())
		verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), "
			"but running on:0x%04x (Rev %d)\n",
			CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());

	verbose_printk(KERN_NOTICE "ADSP-%s-0.%d",
		CPU, bfin_compiled_revid());
	if (bfin_compiled_revid() != bfin_revid())
		verbose_printk("(Detected 0.%d)", bfin_revid());
	verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
		get_cclk()/1000000, get_sclk()/1000000,
#ifdef CONFIG_MPU
		"mpu on"
#else
		"mpu off"
#endif
	);

	verbose_printk(KERN_NOTICE "%s", linux_banner);

	verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
	verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
		(long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
	if (fp->ipend & EVT_IRPTEN)
		verbose_printk(KERN_NOTICE " Global Interrupts Disabled (IPEND[4])\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
			EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
		verbose_printk(KERN_NOTICE " Peripheral interrupts masked off\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
		verbose_printk(KERN_NOTICE " Kernel interrupts masked off\n");

	/* decode the hardware-error cause when this is a HW error trap */
	if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
		verbose_printk(KERN_NOTICE " HWERRCAUSE: 0x%lx\n",
			(fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
#ifdef EBIU_ERRMST
		/* If the error was from the EBIU, print it out */
		if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
			verbose_printk(KERN_NOTICE " EBIU Error Reason : 0x%04x\n",
				bfin_read_EBIU_ERRMST());
			verbose_printk(KERN_NOTICE " EBIU Error Address : 0x%08x\n",
				bfin_read_EBIU_ERRADD());
		}
#endif
	}

	verbose_printk(KERN_NOTICE " EXCAUSE : 0x%lx\n",
		fp->seqstat & SEQSTAT_EXCAUSE);
	/* show which event levels are pending; IVG4 means irqs disabled */
	for (i = 2; i <= 15 ; i++) {
		if (fp->ipend & (1 << i)) {
			if (i != 4) {
				decode_address(buf, bfin_read32(EVT0 + 4*i));
				verbose_printk(KERN_NOTICE " physical IVG%i asserted : %s\n", i, buf);
			} else
				verbose_printk(KERN_NOTICE " interrupts disabled\n");
		}
	}

	/* if no interrupts are going off, don't print this out */
	if (fp->ipend & ~0x3F) {
		for (i = 0; i < (NR_IRQS - 1); i++) {
			if (!in_atomic)
				raw_spin_lock_irqsave(&irq_desc[i].lock, flags);

			action = irq_desc[i].action;
			if (!action)
				goto unlock;

			/* print this irq's handler chain, decoded to symbols */
			decode_address(buf, (unsigned int)action->handler);
			verbose_printk(KERN_NOTICE " logical irq %3d mapped : %s", i, buf);
			for (action = action->next; action; action = action->next) {
				decode_address(buf, (unsigned int)action->handler);
				verbose_printk(", %s", buf);
			}
			verbose_printk("\n");
unlock:
			if (!in_atomic)
				raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
		}
	}

	decode_address(buf, fp->rete);
	verbose_printk(KERN_NOTICE " RETE: %s\n", buf);
	decode_address(buf, fp->retn);
	verbose_printk(KERN_NOTICE " RETN: %s\n", buf);
	decode_address(buf, fp->retx);
	verbose_printk(KERN_NOTICE " RETX: %s\n", buf);
	decode_address(buf, fp->rets);
	verbose_printk(KERN_NOTICE " RETS: %s\n", buf);
	decode_address(buf, fp->pc);
	verbose_printk(KERN_NOTICE " PC : %s\n", buf);

	/* CPLB fault addresses only make sense for non-HWERR exceptions */
	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
	    (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
		verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf);
		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
		verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf);
	}

	verbose_printk(KERN_NOTICE "PROCESSOR STATE:\n");
	verbose_printk(KERN_NOTICE " R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
		fp->r0, fp->r1, fp->r2, fp->r3);
	verbose_printk(KERN_NOTICE " R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
		fp->r4, fp->r5, fp->r6, fp->r7);
	verbose_printk(KERN_NOTICE " P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
		fp->p0, fp->p1, fp->p2, fp->p3);
	verbose_printk(KERN_NOTICE " P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
		fp->p4, fp->p5, fp->fp, (long)fp);
	verbose_printk(KERN_NOTICE " LB0: %08lx LT0: %08lx LC0: %08lx\n",
		fp->lb0, fp->lt0, fp->lc0);
	verbose_printk(KERN_NOTICE " LB1: %08lx LT1: %08lx LC1: %08lx\n",
		fp->lb1, fp->lt1, fp->lc1);
	verbose_printk(KERN_NOTICE " B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
		fp->b0, fp->l0, fp->m0, fp->i0);
	verbose_printk(KERN_NOTICE " B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
		fp->b1, fp->l1, fp->m1, fp->i1);
	verbose_printk(KERN_NOTICE " B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
		fp->b2, fp->l2, fp->m2, fp->i2);
	verbose_printk(KERN_NOTICE " B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
		fp->b3, fp->l3, fp->m3, fp->i3);
	verbose_printk(KERN_NOTICE "A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
		fp->a0w, fp->a0x, fp->a1w, fp->a1x);

	verbose_printk(KERN_NOTICE "USP : %08lx ASTAT: %08lx\n",
		rdusp(), fp->astat);

	verbose_printk(KERN_NOTICE "\n");
#endif
}
  1140. #ifdef CONFIG_SYS_BFIN_SPINLOCK_L1
  1141. asmlinkage int sys_bfin_spinlock(int *spinlock)__attribute__((l1_text));
  1142. #endif
  1143. static DEFINE_SPINLOCK(bfin_spinlock_lock);
  1144. asmlinkage int sys_bfin_spinlock(int *p)
  1145. {
  1146. int ret, tmp = 0;
  1147. spin_lock(&bfin_spinlock_lock); /* This would also hold kernel preemption. */
  1148. ret = get_user(tmp, p);
  1149. if (likely(ret == 0)) {
  1150. if (unlikely(tmp))
  1151. ret = 1;
  1152. else
  1153. put_user(1, p);
  1154. }
  1155. spin_unlock(&bfin_spinlock_lock);
  1156. return ret;
  1157. }
  1158. int bfin_request_exception(unsigned int exception, void (*handler)(void))
  1159. {
  1160. void (*curr_handler)(void);
  1161. if (exception > 0x3F)
  1162. return -EINVAL;
  1163. curr_handler = ex_table[exception];
  1164. if (curr_handler != ex_replaceable)
  1165. return -EBUSY;
  1166. ex_table[exception] = handler;
  1167. return 0;
  1168. }
  1169. EXPORT_SYMBOL(bfin_request_exception);
  1170. int bfin_free_exception(unsigned int exception, void (*handler)(void))
  1171. {
  1172. void (*curr_handler)(void);
  1173. if (exception > 0x3F)
  1174. return -EINVAL;
  1175. curr_handler = ex_table[exception];
  1176. if (curr_handler != handler)
  1177. return -EBUSY;
  1178. ex_table[exception] = ex_replaceable;
  1179. return 0;
  1180. }
  1181. EXPORT_SYMBOL(bfin_free_exception);
  1182. void panic_cplb_error(int cplb_panic, struct pt_regs *fp)
  1183. {
  1184. switch (cplb_panic) {
  1185. case CPLB_NO_UNLOCKED:
  1186. printk(KERN_EMERG "All CPLBs are locked\n");
  1187. break;
  1188. case CPLB_PROT_VIOL:
  1189. return;
  1190. case CPLB_NO_ADDR_MATCH:
  1191. return;
  1192. case CPLB_UNKNOWN_ERR:
  1193. printk(KERN_EMERG "Unknown CPLB Exception\n");
  1194. break;
  1195. }
  1196. oops_in_progress = 1;
  1197. dump_bfin_process(fp);
  1198. dump_bfin_mem(fp);
  1199. show_regs(fp);
  1200. dump_stack();
  1201. panic("Unrecoverable event");
  1202. }