traps.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331
  1. /*
  2. * Copyright 2004-2009 Analog Devices Inc.
  3. *
  4. * Licensed under the GPL-2 or later
  5. */
  6. #include <linux/bug.h>
  7. #include <linux/uaccess.h>
  8. #include <linux/interrupt.h>
  9. #include <linux/module.h>
  10. #include <linux/kallsyms.h>
  11. #include <linux/fs.h>
  12. #include <linux/rbtree.h>
  13. #include <asm/traps.h>
  14. #include <asm/cacheflush.h>
  15. #include <asm/cplb.h>
  16. #include <asm/dma.h>
  17. #include <asm/blackfin.h>
  18. #include <asm/irq_handler.h>
  19. #include <linux/irq.h>
  20. #include <asm/trace.h>
  21. #include <asm/fixed_code.h>
  22. #ifdef CONFIG_KGDB
  23. # include <linux/kgdb.h>
  24. # define CHK_DEBUGGER_TRAP() \
  25. do { \
  26. kgdb_handle_exception(trapnr, sig, info.si_code, fp); \
  27. } while (0)
  28. # define CHK_DEBUGGER_TRAP_MAYBE() \
  29. do { \
  30. if (kgdb_connected) \
  31. CHK_DEBUGGER_TRAP(); \
  32. } while (0)
  33. #else
  34. # define CHK_DEBUGGER_TRAP() do { } while (0)
  35. # define CHK_DEBUGGER_TRAP_MAYBE() do { } while (0)
  36. #endif
  37. #ifdef CONFIG_DEBUG_VERBOSE
  38. #define verbose_printk(fmt, arg...) \
  39. printk(fmt, ##arg)
  40. #else
  41. #define verbose_printk(fmt, arg...) \
  42. ({ if (0) printk(fmt, ##arg); 0; })
  43. #endif
  44. #if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
  45. u32 last_seqstat;
  46. #ifdef CONFIG_DEBUG_MMRS_MODULE
  47. EXPORT_SYMBOL(last_seqstat);
  48. #endif
  49. #endif
/* Initiate the event table handler */
void __init trap_init(void)
{
	/* Install the exception handler ("trap", from assembly) into core
	 * event vector 3.  CSYNC() before and after orders the MMR write
	 * with respect to speculative instruction flow.
	 */
	CSYNC();
	bfin_write_EVT3(trap);
	CSYNC();
}
/*
 * decode_address - render @address into @buf as "<0xADDR> where".
 *
 * Resolution order: kernel symbol via kallsyms, fixed-code region,
 * below-kernel (likely NULL deref), core/system MMR space, on-chip L1
 * ROM, then - unless an oops is in progress - a walk of every process'
 * VMAs to attribute a user-space address to a mapped file.
 *
 * Callers pass a buffer of ~150 bytes.  Compiles to a single space
 * when !CONFIG_DEBUG_VERBOSE.
 */
static void decode_address(char *buf, unsigned long address)
{
#ifdef CONFIG_DEBUG_VERBOSE
	struct task_struct *p;
	struct mm_struct *mm;
	unsigned long flags, offset;
	/* In exception context (IPEND bit 4 set) or atomic context we must
	 * not sleep, so get_task_mm()/mmput() are skipped below. */
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
	struct rb_node *n;
#ifdef CONFIG_KALLSYMS
	unsigned long symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];
#endif

	buf += sprintf(buf, "<0x%08lx> ", address);

#ifdef CONFIG_KALLSYMS
	/* look up the address and see if we are in kernel space */
	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);

	if (symname) {
		/* yeah! kernel space! */
		if (!modname)
			modname = delim = "";
		sprintf(buf, "{ %s%s%s%s + 0x%lx }",
			delim, modname, delim, symname,
			(unsigned long)offset);
		return;
	}
#endif

	if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
		/* Problem in fixed code section? */
		strcat(buf, "/* Maybe fixed code section */");
		return;

	} else if (address < CONFIG_BOOT_LOAD) {
		/* Problem somewhere before the kernel start address */
		strcat(buf, "/* Maybe null pointer? */");
		return;

	} else if (address >= COREMMR_BASE) {
		strcat(buf, "/* core mmrs */");
		return;

	} else if (address >= SYSMMR_BASE) {
		strcat(buf, "/* system mmrs */");
		return;

	} else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
		strcat(buf, "/* on-chip L1 ROM */");
		return;
	}

	/*
	 * Don't walk any of the vmas if we are oopsing, it has been known
	 * to cause problems - corrupt vmas (kernel crashes) cause double faults
	 */
	if (oops_in_progress) {
		strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
		return;
	}

	/* looks like we're off in user-land, so let's walk all the
	 * mappings of all our processes and see if we can't be a whee
	 * bit more specific
	 */
	/* write lock: keep the task list stable while we walk every process */
	write_lock_irqsave(&tasklist_lock, flags);
	for_each_process(p) {
		mm = (in_atomic ? p->mm : get_task_mm(p));
		if (!mm)
			continue;

		for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
			struct vm_area_struct *vma;

			vma = rb_entry(n, struct vm_area_struct, vm_rb);

			if (address >= vma->vm_start && address < vma->vm_end) {
				char _tmpbuf[256];
				char *name = p->comm;
				struct file *file = vma->vm_file;

				if (file) {
					char *d_name = d_path(&file->f_path, _tmpbuf,
						      sizeof(_tmpbuf));
					if (!IS_ERR(d_name))
						name = d_name;
				}

				/* FLAT does not have its text aligned to the start of
				 * the map while FDPIC ELF does ...
				 */

				/* before we can check flat/fdpic, we need to
				 * make sure current is valid
				 */
				if ((unsigned long)current >= FIXED_CODE_START &&
				    !((unsigned long)current & 0x3)) {
					if (current->mm &&
					    (address > current->mm->start_code) &&
					    (address < current->mm->end_code))
						offset = address - current->mm->start_code;
					else
						offset = (address - vma->vm_start) +
							 (vma->vm_pgoff << PAGE_SHIFT);

					sprintf(buf, "[ %s + 0x%lx ]", name, offset);
				} else
					sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
						name, vma->vm_start, vma->vm_end);

				if (!in_atomic)
					mmput(mm);

				if (buf[0] == '\0')
					sprintf(buf, "[ %s ] dynamic memory", name);

				goto done;
			}
		}
		if (!in_atomic)
			mmput(mm);
	}

	/* we were unable to find this address anywhere */
	sprintf(buf, "/* kernel dynamic memory */");

done:
	write_unlock_irqrestore(&tasklist_lock, flags);
#else
	sprintf(buf, " ");
#endif
}
/*
 * double_fault_c - C handler for a double fault (an exception raised
 * while already servicing an exception).  Dumps whatever diagnostic
 * state is configured, then panics; there is no recovery path.
 */
asmlinkage void double_fault_c(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int j;
	/* freeze the hardware trace so the dump below isn't clobbered */
	trace_buffer_save(j);
#endif

	console_verbose();
	oops_in_progress = 1;
#ifdef CONFIG_DEBUG_VERBOSE
	printk(KERN_EMERG "Double Fault\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) {
		unsigned int cpu = raw_smp_processor_id();
		char buf[150];
		/* report the first fault using the state stashed in the
		 * per-cpu pda when the original exception was taken */
		decode_address(buf, cpu_pda[cpu].retx_doublefault);
		printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n",
			(unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf);
		decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr);
		printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf);
		decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr);
		printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf);

		decode_address(buf, fp->retx);
		printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf);
	} else
#endif
	{
		dump_bfin_process(fp);
		dump_bfin_mem(fp);
		show_regs(fp);
		dump_bfin_trace_buffer();
	}
#endif
	panic("Double Fault - unrecoverable event");
}
/*
 * kernel_mode_regs - nonzero if @regs were captured in kernel context,
 * i.e. any of IPEND bits 6..15 (mask 0xffc0) is set.  Returns the raw
 * masked value; callers treat it as a boolean.
 */
static int kernel_mode_regs(struct pt_regs *regs)
{
	return regs->ipend & 0xffc0;
}
/*
 * trap_c - common C handler for CPU exceptions routed through EVT3.
 *
 * Decodes SEQSTAT's EXCAUSE field, selects a signal number and si_code,
 * prints diagnostics for fatal (non-SIGTRAP) faults, and either forces
 * the signal on the current task or panics when the fault happened in
 * kernel context.  Breakpoint/step/watchpoint traps taken in kernel
 * mode are handed to the debugger and then silently resumed
 * (goto traps_done).
 */
asmlinkage notrace void trap_c(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int j;
#endif
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
	unsigned int cpu = raw_smp_processor_id();
#endif
	const char *strerror = NULL;
	int sig = 0;
	siginfo_t info;
	unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;

	trace_buffer_save(j);
#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
	last_seqstat = (u32)fp->seqstat;
#endif

	/* Important - be very careful dereferncing pointers - will lead to
	 * double faults if the stack has become corrupt
	 */

	/* trap_c() will be called for exceptions. During exceptions
	 * processing, the pc value should be set with retx value.
	 * With this change we can cleanup some code in signal.c- TODO
	 */
	fp->orig_pc = fp->retx;
	/* printk("exception: 0x%x, ipend=%x, reti=%x, retx=%x\n",
		trapnr, fp->ipend, fp->pc, fp->retx); */

	/* send the appropriate signal to the user program */
	switch (trapnr) {

	/* This table works in conjuction with the one in ./mach-common/entry.S
	 * Some exceptions are handled there (in assembly, in exception space)
	 * Some are handled here, (in C, in interrupt space)
	 * Some, like CPLB, are handled in both, where the normal path is
	 * handled in assembly/exception space, and the error path is handled
	 * here
	 */

	/* 0x00 - Linux Syscall, getting here is an error */
	/* 0x01 - userspace gdb breakpoint, handled here */
	case VEC_EXCPT01:
		info.si_code = TRAP_ILLTRAP;
		sig = SIGTRAP;
		CHK_DEBUGGER_TRAP_MAYBE();
		/* Check if this is a breakpoint in kernel space */
		if (kernel_mode_regs(fp))
			goto traps_done;
		else
			break;
	/* 0x03 - User Defined, userspace stack overflow */
	case VEC_EXCPT03:
		info.si_code = SEGV_STACKFLOW;
		sig = SIGSEGV;
		strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x02 - KGDB initial connection and break signal trap */
	case VEC_EXCPT02:
#ifdef CONFIG_KGDB
		info.si_code = TRAP_ILLTRAP;
		sig = SIGTRAP;
		CHK_DEBUGGER_TRAP();
		goto traps_done;
#endif
		/* without CONFIG_KGDB this case intentionally falls through
		 * to the "user defined" handling below */
	/* 0x04 - User Defined */
	/* 0x05 - User Defined */
	/* 0x06 - User Defined */
	/* 0x07 - User Defined */
	/* 0x08 - User Defined */
	/* 0x09 - User Defined */
	/* 0x0A - User Defined */
	/* 0x0B - User Defined */
	/* 0x0C - User Defined */
	/* 0x0D - User Defined */
	/* 0x0E - User Defined */
	/* 0x0F - User Defined */
	/* If we got here, it is most likely that someone was trying to use a
	 * custom exception handler, and it is not actually installed properly
	 */
	case VEC_EXCPT04 ... VEC_EXCPT15:
		info.si_code = ILL_ILLPARAOP;
		sig = SIGILL;
		strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x10 HW Single step, handled here */
	case VEC_STEP:
		info.si_code = TRAP_STEP;
		sig = SIGTRAP;
		CHK_DEBUGGER_TRAP_MAYBE();
		/* Check if this is a single step in kernel space */
		if (kernel_mode_regs(fp))
			goto traps_done;
		else
			break;
	/* 0x11 - Trace Buffer Full, handled here */
	case VEC_OVFLOW:
		info.si_code = TRAP_TRACEFLOW;
		sig = SIGTRAP;
		strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x12 - Reserved, Caught by default */
	/* 0x13 - Reserved, Caught by default */
	/* 0x14 - Reserved, Caught by default */
	/* 0x15 - Reserved, Caught by default */
	/* 0x16 - Reserved, Caught by default */
	/* 0x17 - Reserved, Caught by default */
	/* 0x18 - Reserved, Caught by default */
	/* 0x19 - Reserved, Caught by default */
	/* 0x1A - Reserved, Caught by default */
	/* 0x1B - Reserved, Caught by default */
	/* 0x1C - Reserved, Caught by default */
	/* 0x1D - Reserved, Caught by default */
	/* 0x1E - Reserved, Caught by default */
	/* 0x1F - Reserved, Caught by default */
	/* 0x20 - Reserved, Caught by default */
	/* 0x21 - Undefined Instruction, handled here */
	case VEC_UNDEF_I:
#ifdef CONFIG_BUG
		if (kernel_mode_regs(fp)) {
			switch (report_bug(fp->pc, fp)) {
			case BUG_TRAP_TYPE_NONE:
				break;
			case BUG_TRAP_TYPE_WARN:
				dump_bfin_trace_buffer();
				/* skip the 16-bit BUG opcode and resume */
				fp->pc += 2;
				goto traps_done;
			case BUG_TRAP_TYPE_BUG:
				/* call to panic() will dump trace, and it is
				 * off at this point, so it won't be clobbered
				 */
				panic("BUG()");
			}
		}
#endif
		info.si_code = ILL_ILLOPC;
		sig = SIGILL;
		strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x22 - Illegal Instruction Combination, handled here */
	case VEC_ILGAL_I:
		info.si_code = ILL_ILLPARAOP;
		sig = SIGILL;
		strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x23 - Data CPLB protection violation, handled here */
	case VEC_CPLB_VL:
		info.si_code = ILL_CPLB_VI;
		sig = SIGSEGV;
		strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x24 - Data access misaligned, handled here */
	case VEC_MISALI_D:
		info.si_code = BUS_ADRALN;
		sig = SIGBUS;
		strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x25 - Unrecoverable Event, handled here */
	case VEC_UNCOV:
		info.si_code = ILL_ILLEXCPT;
		sig = SIGILL;
		strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr,
	   error case is handled here */
	case VEC_CPLB_M:
		info.si_code = BUS_ADRALN;
		sig = SIGBUS;
		strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE);
		break;
	/* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */
	case VEC_CPLB_MHIT:
		info.si_code = ILL_CPLB_MULHIT;
		sig = SIGSEGV;
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
		if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START)
			strerror = KERN_NOTICE "NULL pointer access\n";
		else
#endif
			strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x28 - Emulation Watchpoint, handled here */
	case VEC_WATCH:
		info.si_code = TRAP_WATCHPT;
		sig = SIGTRAP;
		pr_debug(EXC_0x28(KERN_DEBUG));
		CHK_DEBUGGER_TRAP_MAYBE();
		/* Check if this is a watchpoint in kernel space */
		if (kernel_mode_regs(fp))
			goto traps_done;
		else
			break;
#ifdef CONFIG_BF535
	/* 0x29 - Instruction fetch access error (535 only) */
	case VEC_ISTRU_VL:	/* ADSP-BF535 only (MH) */
		info.si_code = BUS_OPFETCH;
		sig = SIGBUS;
		strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n";
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
#else
	/* 0x29 - Reserved, Caught by default */
#endif
	/* 0x2A - Instruction fetch misaligned, handled here */
	case VEC_MISALI_I:
		info.si_code = BUS_ADRALN;
		sig = SIGBUS;
		strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x2B - Instruction CPLB protection violation, handled here */
	case VEC_CPLB_I_VL:
		info.si_code = ILL_CPLB_VI;
		sig = SIGBUS;
		strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */
	case VEC_CPLB_I_M:
		info.si_code = ILL_CPLB_MISS;
		sig = SIGBUS;
		strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE);
		break;
	/* 0x2D - Instruction CPLB Multiple Hits, handled here */
	case VEC_CPLB_I_MHIT:
		info.si_code = ILL_CPLB_MULHIT;
		sig = SIGSEGV;
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
		if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START)
			strerror = KERN_NOTICE "Jump to NULL address\n";
		else
#endif
			strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x2E - Illegal use of Supervisor Resource, handled here */
	case VEC_ILL_RES:
		info.si_code = ILL_PRVOPC;
		sig = SIGILL;
		strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE);
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/* 0x2F - Reserved, Caught by default */
	/* 0x30 - Reserved, Caught by default */
	/* 0x31 - Reserved, Caught by default */
	/* 0x32 - Reserved, Caught by default */
	/* 0x33 - Reserved, Caught by default */
	/* 0x34 - Reserved, Caught by default */
	/* 0x35 - Reserved, Caught by default */
	/* 0x36 - Reserved, Caught by default */
	/* 0x37 - Reserved, Caught by default */
	/* 0x38 - Reserved, Caught by default */
	/* 0x39 - Reserved, Caught by default */
	/* 0x3A - Reserved, Caught by default */
	/* 0x3B - Reserved, Caught by default */
	/* 0x3C - Reserved, Caught by default */
	/* 0x3D - Reserved, Caught by default */
	/* 0x3E - Reserved, Caught by default */
	/* 0x3F - Reserved, Caught by default */
	case VEC_HWERR:
		info.si_code = BUS_ADRALN;
		sig = SIGBUS;
		switch (fp->seqstat & SEQSTAT_HWERRCAUSE) {
		/* System MMR Error */
		case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR):
			info.si_code = BUS_ADRALN;
			sig = SIGBUS;
			strerror = KERN_NOTICE HWC_x2(KERN_NOTICE);
			break;
		/* External Memory Addressing Error */
		case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR):
			if (ANOMALY_05000310) {
				static unsigned long anomaly_rets;

				if ((fp->pc >= (L1_CODE_START + L1_CODE_LENGTH - 512)) &&
				    (fp->pc < (L1_CODE_START + L1_CODE_LENGTH))) {
					/*
					 * A false hardware error will happen while fetching at
					 * the L1 instruction SRAM boundary. Ignore it.
					 */
					anomaly_rets = fp->rets;
					goto traps_done;
				} else if (fp->rets == anomaly_rets) {
					/*
					 * While boundary code returns to a function, at the ret
					 * point, a new false hardware error might occur too based
					 * on tests. Ignore it too.
					 */
					goto traps_done;
				} else if ((fp->rets >= (L1_CODE_START + L1_CODE_LENGTH - 512)) &&
					   (fp->rets < (L1_CODE_START + L1_CODE_LENGTH))) {
					/*
					 * If boundary code calls a function, at the entry point,
					 * a new false hardware error maybe happen based on tests.
					 * Ignore it too.
					 */
					goto traps_done;
				} else
					anomaly_rets = 0;
			}

			info.si_code = BUS_ADRERR;
			sig = SIGBUS;
			strerror = KERN_NOTICE HWC_x3(KERN_NOTICE);
			break;
		/* Performance Monitor Overflow */
		case (SEQSTAT_HWERRCAUSE_PERF_FLOW):
			strerror = KERN_NOTICE HWC_x12(KERN_NOTICE);
			break;
		/* RAISE 5 instruction */
		case (SEQSTAT_HWERRCAUSE_RAISE_5):
			printk(KERN_NOTICE HWC_x18(KERN_NOTICE));
			break;
		default:	/* Reserved */
			printk(KERN_NOTICE HWC_default(KERN_NOTICE));
			break;
		}
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	/*
	 * We should be handling all known exception types above,
	 * if we get here we hit a reserved one, so panic
	 */
	default:
		info.si_code = ILL_ILLPARAOP;
		sig = SIGILL;
		verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n",
			(fp->seqstat & SEQSTAT_EXCAUSE));
		CHK_DEBUGGER_TRAP_MAYBE();
		break;
	}

	BUG_ON(sig == 0);

	/* If the fault was caused by a kernel thread, or interrupt handler
	 * we will kernel panic, so the system reboots.
	 */
	if (kernel_mode_regs(fp) || (current && !current->mm)) {
		console_verbose();
		oops_in_progress = 1;
	}

	if (sig != SIGTRAP) {
		if (strerror)
			/* NOTE(review): strerror is passed as a non-literal
			 * format string - safe only while none of the EXC_*
			 * strings contain a '%'; confirm when editing them. */
			verbose_printk(strerror);

		dump_bfin_process(fp);
		dump_bfin_mem(fp);
		show_regs(fp);

		/* Print out the trace buffer if it makes sense */
#ifndef CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE
		if (trapnr == VEC_CPLB_I_M || trapnr == VEC_CPLB_M)
			verbose_printk(KERN_NOTICE "No trace since you do not have "
			       "CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE enabled\n\n");
		else
#endif
			dump_bfin_trace_buffer();

		if (oops_in_progress) {
			/* Dump the current kernel stack */
			verbose_printk(KERN_NOTICE "Kernel Stack\n");
			show_stack(current, NULL);
			print_modules();
#ifndef CONFIG_ACCESS_CHECK
			verbose_printk(KERN_EMERG "Please turn on "
			       "CONFIG_ACCESS_CHECK\n");
#endif
			panic("Kernel exception");
		} else {
#ifdef CONFIG_DEBUG_VERBOSE
			unsigned long *stack;
			/* Dump the user space stack */
			stack = (unsigned long *)rdusp();
			verbose_printk(KERN_NOTICE "Userspace Stack\n");
			show_stack(NULL, stack);
#endif
		}
	}

#ifdef CONFIG_IPIPE
	if (!ipipe_trap_notify(fp->seqstat & 0x3f, fp))
#endif
	{
		info.si_signo = sig;
		info.si_errno = 0;
		info.si_addr = (void __user *)fp->pc;
		force_sig_info(sig, &info, current);
	}

	/* hardware-anomaly workarounds: resume at SAFE_USER_INSTRUCTION
	 * instead of the faulting pc for the affected anomaly/trap combos */
	if ((ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8)) ||
	    (ANOMALY_05000281 && trapnr == VEC_HWERR) ||
	    (ANOMALY_05000189 && (trapnr == VEC_CPLB_I_VL || trapnr == VEC_CPLB_VL)))
		fp->pc = SAFE_USER_INSTRUCTION;

 traps_done:
	trace_buffer_restore(j);
}
/* Typical exception handling routines */

/* highest valid index into the software expanded trace buffer; used as
 * the wrap point when walking it backwards in dump_bfin_trace_buffer() */
#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
  602. /*
  603. * Similar to get_user, do some address checking, then dereference
  604. * Return true on success, false on bad address
  605. */
  606. static bool get_instruction(unsigned short *val, unsigned short *address)
  607. {
  608. unsigned long addr = (unsigned long)address;
  609. /* Check for odd addresses */
  610. if (addr & 0x1)
  611. return false;
  612. /* MMR region will never have instructions */
  613. if (addr >= SYSMMR_BASE)
  614. return false;
  615. switch (bfin_mem_access_type(addr, 2)) {
  616. case BFIN_MEM_ACCESS_CORE:
  617. case BFIN_MEM_ACCESS_CORE_ONLY:
  618. *val = *address;
  619. return true;
  620. case BFIN_MEM_ACCESS_DMA:
  621. dma_memcpy(val, address, 2);
  622. return true;
  623. case BFIN_MEM_ACCESS_ITEST:
  624. isram_memcpy(val, address, 2);
  625. return true;
  626. default: /* invalid access */
  627. return false;
  628. }
  629. }
  630. /*
  631. * decode the instruction if we are printing out the trace, as it
  632. * makes things easier to follow, without running it through objdump
  633. * These are the normal instructions which cause change of flow, which
  634. * would be at the source of the trace buffer
  635. */
  636. #if defined(CONFIG_DEBUG_VERBOSE) && defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
  637. static void decode_instruction(unsigned short *address)
  638. {
  639. unsigned short opcode;
  640. if (get_instruction(&opcode, address)) {
  641. if (opcode == 0x0010)
  642. verbose_printk("RTS");
  643. else if (opcode == 0x0011)
  644. verbose_printk("RTI");
  645. else if (opcode == 0x0012)
  646. verbose_printk("RTX");
  647. else if (opcode == 0x0013)
  648. verbose_printk("RTN");
  649. else if (opcode == 0x0014)
  650. verbose_printk("RTE");
  651. else if (opcode == 0x0025)
  652. verbose_printk("EMUEXCPT");
  653. else if (opcode == 0x0040 && opcode <= 0x0047)
  654. verbose_printk("STI R%i", opcode & 7);
  655. else if (opcode >= 0x0050 && opcode <= 0x0057)
  656. verbose_printk("JUMP (P%i)", opcode & 7);
  657. else if (opcode >= 0x0060 && opcode <= 0x0067)
  658. verbose_printk("CALL (P%i)", opcode & 7);
  659. else if (opcode >= 0x0070 && opcode <= 0x0077)
  660. verbose_printk("CALL (PC+P%i)", opcode & 7);
  661. else if (opcode >= 0x0080 && opcode <= 0x0087)
  662. verbose_printk("JUMP (PC+P%i)", opcode & 7);
  663. else if (opcode >= 0x0090 && opcode <= 0x009F)
  664. verbose_printk("RAISE 0x%x", opcode & 0xF);
  665. else if (opcode >= 0x00A0 && opcode <= 0x00AF)
  666. verbose_printk("EXCPT 0x%x", opcode & 0xF);
  667. else if ((opcode >= 0x1000 && opcode <= 0x13FF) || (opcode >= 0x1800 && opcode <= 0x1BFF))
  668. verbose_printk("IF !CC JUMP");
  669. else if ((opcode >= 0x1400 && opcode <= 0x17ff) || (opcode >= 0x1c00 && opcode <= 0x1fff))
  670. verbose_printk("IF CC JUMP");
  671. else if (opcode >= 0x2000 && opcode <= 0x2fff)
  672. verbose_printk("JUMP.S");
  673. else if (opcode >= 0xe080 && opcode <= 0xe0ff)
  674. verbose_printk("LSETUP");
  675. else if (opcode >= 0xe200 && opcode <= 0xe2ff)
  676. verbose_printk("JUMP.L");
  677. else if (opcode >= 0xe300 && opcode <= 0xe3ff)
  678. verbose_printk("CALL pcrel");
  679. else
  680. verbose_printk("0x%04x", opcode);
  681. }
  682. }
  683. #endif
/*
 * dump_bfin_trace_buffer - print the hardware trace buffer (and, when
 * CONFIG_DEBUG_BFIN_HWTRACE_EXPAND, the software-expanded buffer) as
 * target/source pairs, decoding the source instruction where possible.
 * Compiles to an empty function unless CONFIG_DEBUG_VERBOSE and
 * CONFIG_DEBUG_BFIN_HWTRACE_ON are both enabled.
 */
void dump_bfin_trace_buffer(void)
{
#ifdef CONFIG_DEBUG_VERBOSE
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags, i = 0;
	char buf[150];
	unsigned short *addr;
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	int j, index;
#endif

	/* stop tracing while we drain TBUF so the dump itself isn't traced */
	trace_buffer_save(tflags);

	printk(KERN_NOTICE "Hardware Trace:\n");

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	printk(KERN_NOTICE "WARNING: Expanded trace turned on - can not trace exceptions\n");
#endif

	if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
		for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
			/* each pair of TBUF reads yields target, then source */
			decode_address(buf, (unsigned long)bfin_read_TBUF());
			printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			printk(KERN_NOTICE " Source : %s ", buf);
			decode_instruction(addr);
			printk("\n");
		}
	}

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	/* walk the software expansion buffer backwards (newest entry
	 * first), wrapping the index at EXPAND_LEN */
	if (trace_buff_offset)
		index = trace_buff_offset / 4;
	else
		index = EXPAND_LEN;

	j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
	while (j) {
		decode_address(buf, software_trace_buff[index]);
		printk(KERN_NOTICE "%4i Target : %s\n", i, buf);
		index -= 1;
		if (index < 0 )
			index = EXPAND_LEN;
		decode_address(buf, software_trace_buff[index]);
		printk(KERN_NOTICE " Source : %s ", buf);
		decode_instruction((unsigned short *)software_trace_buff[index]);
		printk("\n");
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		j--;
		i++;
	}
#endif

	trace_buffer_restore(tflags);
#endif
#endif
}
EXPORT_SYMBOL(dump_bfin_trace_buffer);
  738. #ifdef CONFIG_BUG
  739. int is_valid_bugaddr(unsigned long addr)
  740. {
  741. unsigned short opcode;
  742. if (!get_instruction(&opcode, (unsigned short *)addr))
  743. return 0;
  744. return opcode == BFIN_BUG_OPCODE;
  745. }
  746. #endif
  747. /*
  748. * Checks to see if the address pointed to is either a
  749. * 16-bit CALL instruction, or a 32-bit CALL instruction
  750. */
  751. static bool is_bfin_call(unsigned short *addr)
  752. {
  753. unsigned short opcode = 0, *ins_addr;
  754. ins_addr = (unsigned short *)addr;
  755. if (!get_instruction(&opcode, ins_addr))
  756. return false;
  757. if ((opcode >= 0x0060 && opcode <= 0x0067) ||
  758. (opcode >= 0x0070 && opcode <= 0x0077))
  759. return true;
  760. ins_addr--;
  761. if (!get_instruction(&opcode, ins_addr))
  762. return false;
  763. if (opcode >= 0xE300 && opcode <= 0xE3FF)
  764. return true;
  765. return false;
  766. }
/*
 * Dump a kernel stack twice: first as raw hex words, then as a decoded
 * list of probable return addresses (and frames, when a frame-pointer
 * chain can be discovered heuristically).
 *
 * @task:  task whose kernel stack to use when @stack is NULL;
 *         NULL means use the current stack.
 * @stack: explicit starting stack pointer, or NULL.
 */
void show_stack(struct task_struct *task, unsigned long *stack)
{
#ifdef CONFIG_PRINTK
	unsigned int *addr, *endstack, *fp = 0, *frame;
	unsigned short *ins_addr;
	char buf[150];
	unsigned int i, j, ret_addr, frame_no = 0;

	/*
	 * If we have been passed a specific stack, use that one otherwise
	 * if we have been passed a task structure, use that, otherwise
	 * use the stack of where the variable "stack" exists
	 */
	if (stack == NULL) {
		if (task) {
			/* We know this is a kernel stack, so this is the start/end */
			stack = (unsigned long *)task->thread.ksp;
			endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE);
		} else {
			/* print out the existing stack info */
			stack = (unsigned long *)&stack;
			endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);
		}
	} else
		endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack);

	printk(KERN_NOTICE "Stack info:\n");
	decode_address(buf, (unsigned int)stack);
	printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf);

	if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) {
		printk(KERN_NOTICE "Invalid stack pointer\n");
		return;
	}

	/* First thing is to look for a frame pointer */
	for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) {
		/* instructions are even-aligned, so odd words cannot be
		 * return addresses */
		if (*addr & 0x1)
			continue;
		ins_addr = (unsigned short *)*addr;
		/* the CALL, if any, is the instruction just before the
		 * candidate return address */
		ins_addr--;
		if (is_bfin_call(ins_addr))
			/* the word below a return address may be a saved FP */
			fp = addr - 1;

		if (fp) {
			/* Let's check to see if it is a frame pointer */
			while (fp >= (addr - 1) && fp < endstack
			       && fp && ((unsigned int) fp & 0x3) == 0)
				/* follow the chain of saved frame pointers */
				fp = (unsigned int *)*fp;
			if (fp == 0 || fp == endstack) {
				/* chain terminated cleanly -> accept it */
				fp = addr - 1;
				break;
			}
			/* chain wandered out of the stack: false positive */
			fp = 0;
		}
	}
	if (fp) {
		frame = fp;
		printk(KERN_NOTICE " FP: (0x%p)\n", fp);
	} else
		frame = 0;

	/*
	 * Now that we think we know where things are, we
	 * walk the stack again, this time printing things out
	 * incase there is no frame pointer, we still look for
	 * valid return addresses
	 */

	/* First time print out data, next time, print out symbols */
	for (j = 0; j <= 1; j++) {
		if (j)
			printk(KERN_NOTICE "Return addresses in stack:\n");
		else
			printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack);

		fp = frame;
		frame_no = 0;

		for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0;
		     addr < endstack; addr++, i++) {

			ret_addr = 0;
			if (!j && i % 8 == 0)
				printk(KERN_NOTICE "%p:",addr);

			/* if it is an odd address, or zero, just skip it */
			if (*addr & 0x1 || !*addr)
				goto print;

			ins_addr = (unsigned short *)*addr;

			/* Go back one instruction, and see if it is a CALL */
			ins_addr--;
			ret_addr = is_bfin_call(ins_addr);
 print:
			/* [word] marks the current SP position in the hex pass */
			if (!j && stack == (unsigned long *)addr)
				printk("[%08x]", *addr);
			else if (ret_addr)
				if (j) {
					decode_address(buf, (unsigned int)*addr);
					if (frame == addr) {
						printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf);
						continue;
					}
					printk(KERN_NOTICE " address : %s\n", buf);
				} else
					/* <word> marks probable return addresses */
					printk("<%08x>", *addr);
			else if (fp == addr) {
				if (j)
					frame = addr+1;
				else
					/* (word) marks saved frame pointers */
					printk("(%08x)", *addr);
				fp = (unsigned int *)*addr;
				frame_no++;
			} else if (!j)
				printk(" %08x ", *addr);
		}
		if (!j)
			printk("\n");
	}
#endif
}
EXPORT_SYMBOL(show_stack);
/*
 * Arch dump_stack(): freeze and dump the hardware trace buffer, then
 * walk the current kernel stack.  The local "stack" exists only so
 * that its address marks the current stack position for show_stack().
 */
void dump_stack(void)
{
	unsigned long stack;
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags;
#endif
	/* trace_buffer_save/restore are macros; presumably they expand to
	 * nothing when CONFIG_DEBUG_BFIN_HWTRACE_ON is off, which is why
	 * tflags is only declared under that ifdef -- NOTE(review):
	 * confirm against the trace-buffer header. */
	trace_buffer_save(tflags);
	dump_bfin_trace_buffer();
	show_stack(current, &stack);
	trace_buffer_restore(tflags);
}
EXPORT_SYMBOL(dump_stack);
/*
 * Print which execution context the crash occurred in (decoded from
 * the live IPEND register) and identify the current process, guarding
 * every pointer dereference since state may be corrupt mid-crash.
 */
void dump_bfin_process(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
	/* We should be able to look at fp->ipend, but we don't push it on the
	 * stack all the time, so do this until we fix that */
	unsigned int context = bfin_read_IPEND();

	if (oops_in_progress)
		verbose_printk(KERN_EMERG "Kernel OOPS in progress\n");

	/* NOTE(review): the masks below (0x0020, 0x3FC0, 0x4000, 0x8000)
	 * appear to map IPEND bits to exception / peripheral-interrupt /
	 * IVG14 / IVG15 levels -- confirm against the Blackfin IPEND
	 * register layout. */
	if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)
		verbose_printk(KERN_NOTICE "HW Error context\n");
	else if (context & 0x0020)
		verbose_printk(KERN_NOTICE "Deferred Exception context\n");
	else if (context & 0x3FC0)
		verbose_printk(KERN_NOTICE "Interrupt context\n");
	else if (context & 0x4000)
		verbose_printk(KERN_NOTICE "Deferred Interrupt context\n");
	else if (context & 0x8000)
		verbose_printk(KERN_NOTICE "Kernel process context\n");

	/* Because we are crashing, and pointers could be bad, we check things
	 * pretty closely before we use them
	 */
	if ((unsigned long)current >= FIXED_CODE_START &&
	    !((unsigned long)current & 0x3) && current->pid) {
		verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n");
		if (current->comm >= (char *)FIXED_CODE_START)
			verbose_printk(KERN_NOTICE "COMM=%s PID=%d",
				current->comm, current->pid);
		else
			verbose_printk(KERN_NOTICE "COMM= invalid");

		printk(KERN_CONT " CPU=%d\n", current_thread_info()->cpu);
		/* only trust mm if it is word-aligned and above the
		 * fixed-code region; BSS is reported as end_data..brk */
		if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START)
			verbose_printk(KERN_NOTICE
				"TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
				" BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
				(void *)current->mm->start_code,
				(void *)current->mm->end_code,
				(void *)current->mm->start_data,
				(void *)current->mm->end_data,
				(void *)current->mm->end_data,
				(void *)current->mm->brk,
				(void *)current->mm->start_stack);
		else
			verbose_printk(KERN_NOTICE "invalid mm\n");
	} else
		verbose_printk(KERN_NOTICE
			       "No Valid process in current context\n");
#endif
}
/*
 * Dump the instruction words surrounding the faulting PC (fp->pc), and
 * when a preceding STI or RETI-pop is seen together with a hardware
 * error, warn that the error was likely deferred and the PC may be
 * misleading.
 */
void dump_bfin_mem(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
	unsigned short *addr, *erraddr, val = 0, err = 0;
	char sti = 0, buf[6];

	erraddr = (void *)fp->pc;

	verbose_printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr);

	/* walk 0x20 shorts around the 16-byte-aligned fault address */
	for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
	     addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
	     addr++) {
		if (!((unsigned long)addr & 0xF))
			verbose_printk(KERN_NOTICE "0x%p: ", addr);

		if (!get_instruction(&val, addr)) {
			/* unreadable memory prints as ???? */
			val = 0;
			sprintf(buf, "????");
		} else
			sprintf(buf, "%04x", val);

		if (addr == erraddr) {
			/* bracket the word at the faulting PC; err is
			 * recorded here but not otherwise used below */
			verbose_printk("[%s]", buf);
			err = val;
		} else
			verbose_printk(" %s ", buf);

		/* Do any previous instructions turn on interrupts? */
		if (addr <= erraddr && /* in the past */
		    ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */
		     val == 0x017b)) /* [SP++] = RETI */
			sti = 1;
	}

	verbose_printk("\n");

	/* Hardware error interrupts can be deferred */
	if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
	    oops_in_progress)){
		verbose_printk(KERN_NOTICE "Looks like this was a deferred error - sorry\n");
#ifndef CONFIG_DEBUG_HWERR
		verbose_printk(KERN_NOTICE
			"The remaining message may be meaningless\n"
			"You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
#else
		/* If we are handling only one peripheral interrupt
		 * and current mm and pid are valid, and the last error
		 * was in that user space process's text area
		 * print it out - because that is where the problem exists
		 */
		if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
		     (current->pid && current->mm)) {
			/* And the last RETI points to the current userspace context */
			if ((fp + 1)->pc >= current->mm->start_code &&
			    (fp + 1)->pc <= current->mm->end_code) {
				verbose_printk(KERN_NOTICE "It might be better to look around here : \n");
				verbose_printk(KERN_NOTICE "-------------------------------------------\n");
				show_regs(fp + 1);
				verbose_printk(KERN_NOTICE "-------------------------------------------\n");
			}
		}
#endif
	}
#endif
}
/*
 * Verbose dump of processor state from @fp: CPU identification,
 * sequencer status, pending-interrupt decoding, active irq handlers,
 * the return registers, and the full data / pointer / loop /
 * accumulator register file.
 */
void show_regs(struct pt_regs *fp)
{
#ifdef CONFIG_DEBUG_VERBOSE
	char buf [150];
	struct irqaction *action;
	unsigned int i;
	unsigned long flags = 0;
	unsigned int cpu = raw_smp_processor_id();
	/* in exception context (IPEND bit 4) or atomic context we must
	 * not take the irq_desc spinlocks below */
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();

	verbose_printk(KERN_NOTICE "\n");
	if (CPUID != bfin_cpuid())
		verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), "
			"but running on:0x%04x (Rev %d)\n",
			CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());

	verbose_printk(KERN_NOTICE "ADSP-%s-0.%d",
		CPU, bfin_compiled_revid());
	if (bfin_compiled_revid() != bfin_revid())
		verbose_printk("(Detected 0.%d)", bfin_revid());
	verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
		get_cclk()/1000000, get_sclk()/1000000,
#ifdef CONFIG_MPU
		"mpu on"
#else
		"mpu off"
#endif
	);

	verbose_printk(KERN_NOTICE "%s", linux_banner);

	verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
	verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
		(long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
	if (fp->ipend & EVT_IRPTEN)
		verbose_printk(KERN_NOTICE " Global Interrupts Disabled (IPEND[4])\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
			EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
		verbose_printk(KERN_NOTICE " Peripheral interrupts masked off\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
		verbose_printk(KERN_NOTICE " Kernel interrupts masked off\n");

	/* decode hardware-error cause when that is the exception */
	if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
		verbose_printk(KERN_NOTICE " HWERRCAUSE: 0x%lx\n",
			(fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
#ifdef EBIU_ERRMST
		/* If the error was from the EBIU, print it out */
		if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
			verbose_printk(KERN_NOTICE " EBIU Error Reason : 0x%04x\n",
				bfin_read_EBIU_ERRMST());
			verbose_printk(KERN_NOTICE " EBIU Error Address : 0x%08x\n",
				bfin_read_EBIU_ERRADD());
		}
#endif
	}
	verbose_printk(KERN_NOTICE " EXCAUSE : 0x%lx\n",
		fp->seqstat & SEQSTAT_EXCAUSE);

	/* walk IPEND bits 2..15 and decode each asserted event vector;
	 * bit 4 is the global interrupt-disable flag, not a vector */
	for (i = 2; i <= 15 ; i++) {
		if (fp->ipend & (1 << i)) {
			if (i != 4) {
				decode_address(buf, bfin_read32(EVT0 + 4*i));
				verbose_printk(KERN_NOTICE " physical IVG%i asserted : %s\n", i, buf);
			} else
				verbose_printk(KERN_NOTICE " interrupts disabled\n");
		}
	}

	/* if no interrupts are going off, don't print this out */
	if (fp->ipend & ~0x3F) {
		for (i = 0; i < (NR_IRQS - 1); i++) {
			if (!in_atomic)
				raw_spin_lock_irqsave(&irq_desc[i].lock, flags);

			action = irq_desc[i].action;
			if (!action)
				goto unlock;

			decode_address(buf, (unsigned int)action->handler);
			verbose_printk(KERN_NOTICE " logical irq %3d mapped : %s", i, buf);
			/* chained handlers sharing this irq line */
			for (action = action->next; action; action = action->next) {
				decode_address(buf, (unsigned int)action->handler);
				verbose_printk(", %s", buf);
			}
			verbose_printk("\n");
 unlock:
			if (!in_atomic)
				raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
		}
	}

	decode_address(buf, fp->rete);
	verbose_printk(KERN_NOTICE " RETE: %s\n", buf);
	decode_address(buf, fp->retn);
	verbose_printk(KERN_NOTICE " RETN: %s\n", buf);
	decode_address(buf, fp->retx);
	verbose_printk(KERN_NOTICE " RETX: %s\n", buf);
	decode_address(buf, fp->rets);
	verbose_printk(KERN_NOTICE " RETS: %s\n", buf);
	decode_address(buf, fp->pc);
	verbose_printk(KERN_NOTICE " PC : %s\n", buf);

	/* CPLB fault addresses are only meaningful for non-HWERR
	 * exceptions */
	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
	    (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
		verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf);
		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
		verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf);
	}

	verbose_printk(KERN_NOTICE "PROCESSOR STATE:\n");
	verbose_printk(KERN_NOTICE " R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
		fp->r0, fp->r1, fp->r2, fp->r3);
	verbose_printk(KERN_NOTICE " R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
		fp->r4, fp->r5, fp->r6, fp->r7);
	verbose_printk(KERN_NOTICE " P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
		fp->p0, fp->p1, fp->p2, fp->p3);
	/* SP is the trap-frame address itself, not a saved register */
	verbose_printk(KERN_NOTICE " P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
		fp->p4, fp->p5, fp->fp, (long)fp);
	verbose_printk(KERN_NOTICE " LB0: %08lx LT0: %08lx LC0: %08lx\n",
		fp->lb0, fp->lt0, fp->lc0);
	verbose_printk(KERN_NOTICE " LB1: %08lx LT1: %08lx LC1: %08lx\n",
		fp->lb1, fp->lt1, fp->lc1);
	verbose_printk(KERN_NOTICE " B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
		fp->b0, fp->l0, fp->m0, fp->i0);
	verbose_printk(KERN_NOTICE " B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
		fp->b1, fp->l1, fp->m1, fp->i1);
	verbose_printk(KERN_NOTICE " B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
		fp->b2, fp->l2, fp->m2, fp->i2);
	verbose_printk(KERN_NOTICE " B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
		fp->b3, fp->l3, fp->m3, fp->i3);
	verbose_printk(KERN_NOTICE "A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
		fp->a0w, fp->a0x, fp->a1w, fp->a1x);

	verbose_printk(KERN_NOTICE "USP : %08lx ASTAT: %08lx\n",
		rdusp(), fp->astat);

	verbose_printk(KERN_NOTICE "\n");
#endif
}
  1122. #ifdef CONFIG_SYS_BFIN_SPINLOCK_L1
  1123. asmlinkage int sys_bfin_spinlock(int *spinlock)__attribute__((l1_text));
  1124. #endif
  1125. static DEFINE_SPINLOCK(bfin_spinlock_lock);
  1126. asmlinkage int sys_bfin_spinlock(int *p)
  1127. {
  1128. int ret, tmp = 0;
  1129. spin_lock(&bfin_spinlock_lock); /* This would also hold kernel preemption. */
  1130. ret = get_user(tmp, p);
  1131. if (likely(ret == 0)) {
  1132. if (unlikely(tmp))
  1133. ret = 1;
  1134. else
  1135. put_user(1, p);
  1136. }
  1137. spin_unlock(&bfin_spinlock_lock);
  1138. return ret;
  1139. }
  1140. int bfin_request_exception(unsigned int exception, void (*handler)(void))
  1141. {
  1142. void (*curr_handler)(void);
  1143. if (exception > 0x3F)
  1144. return -EINVAL;
  1145. curr_handler = ex_table[exception];
  1146. if (curr_handler != ex_replaceable)
  1147. return -EBUSY;
  1148. ex_table[exception] = handler;
  1149. return 0;
  1150. }
  1151. EXPORT_SYMBOL(bfin_request_exception);
  1152. int bfin_free_exception(unsigned int exception, void (*handler)(void))
  1153. {
  1154. void (*curr_handler)(void);
  1155. if (exception > 0x3F)
  1156. return -EINVAL;
  1157. curr_handler = ex_table[exception];
  1158. if (curr_handler != handler)
  1159. return -EBUSY;
  1160. ex_table[exception] = ex_replaceable;
  1161. return 0;
  1162. }
  1163. EXPORT_SYMBOL(bfin_free_exception);
  1164. void panic_cplb_error(int cplb_panic, struct pt_regs *fp)
  1165. {
  1166. switch (cplb_panic) {
  1167. case CPLB_NO_UNLOCKED:
  1168. printk(KERN_EMERG "All CPLBs are locked\n");
  1169. break;
  1170. case CPLB_PROT_VIOL:
  1171. return;
  1172. case CPLB_NO_ADDR_MATCH:
  1173. return;
  1174. case CPLB_UNKNOWN_ERR:
  1175. printk(KERN_EMERG "Unknown CPLB Exception\n");
  1176. break;
  1177. }
  1178. oops_in_progress = 1;
  1179. dump_bfin_process(fp);
  1180. dump_bfin_mem(fp);
  1181. show_regs(fp);
  1182. dump_stack();
  1183. panic("Unrecoverable event");
  1184. }