trace.c
  1. /* provide some functions which dump the trace buffer, in a nice way for people
  2. * to read it, and understand what is going on
  3. *
  4. * Copyright 2004-2010 Analog Devices Inc.
  5. *
  6. * Licensed under the GPL-2 or later
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/hardirq.h>
  10. #include <linux/thread_info.h>
  11. #include <linux/mm.h>
  12. #include <linux/uaccess.h>
  13. #include <linux/module.h>
  14. #include <linux/kallsyms.h>
  15. #include <linux/err.h>
  16. #include <linux/fs.h>
  17. #include <asm/dma.h>
  18. #include <asm/trace.h>
  19. #include <asm/fixed_code.h>
  20. #include <asm/traps.h>
  21. #include <asm/irq_handler.h>
/*
 * decode_address() - render @address as a human-readable location in @buf.
 *
 * Resolution order: kernel symbol (kallsyms), fixed-code region, likely
 * NULL-pointer range, core/system MMRs, on-chip L1 ROM, and finally a walk
 * of every process's VMAs to attribute a user-space address to a mapping.
 * @buf must be large enough for the longest description (callers in this
 * file use ~150 bytes).
 */
void decode_address(char *buf, unsigned long address)
{
	struct task_struct *p;
	struct mm_struct *mm;
	unsigned long flags, offset;
	/* true in exception context (IPEND bit 4) or atomic context; in that
	 * case we must not take mm references (get_task_mm can sleep) */
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
	struct rb_node *n;

#ifdef CONFIG_KALLSYMS
	unsigned long symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];
#endif

	buf += sprintf(buf, "<0x%08lx> ", address);

#ifdef CONFIG_KALLSYMS
	/* look up the address and see if we are in kernel space */
	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);

	if (symname) {
		/* yeah! kernel space! */
		if (!modname)
			modname = delim = "";
		sprintf(buf, "{ %s%s%s%s + 0x%lx }",
			delim, modname, delim, symname,
			(unsigned long)offset);
		return;
	}
#endif

	if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
		/* Problem in fixed code section? */
		strcat(buf, "/* Maybe fixed code section */");
		return;

	} else if (address < CONFIG_BOOT_LOAD) {
		/* Problem somewhere before the kernel start address */
		strcat(buf, "/* Maybe null pointer? */");
		return;

	} else if (address >= COREMMR_BASE) {
		strcat(buf, "/* core mmrs */");
		return;

	} else if (address >= SYSMMR_BASE) {
		strcat(buf, "/* system mmrs */");
		return;

	} else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
		strcat(buf, "/* on-chip L1 ROM */");
		return;
	}

	/*
	 * Don't walk any of the vmas if we are oopsing, it has been known
	 * to cause problems - corrupt vmas (kernel crashes) cause double faults
	 */
	if (oops_in_progress) {
		strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
		return;
	}

	/* looks like we're off in user-land, so let's walk all the
	 * mappings of all our processes and see if we can't be a whee
	 * bit more specific
	 */
	write_lock_irqsave(&tasklist_lock, flags);
	for_each_process(p) {
		/* atomic context: borrow p->mm without a reference */
		mm = (in_atomic ? p->mm : get_task_mm(p));
		if (!mm)
			continue;

		/* trylock: never block here, skip busy MMs instead */
		if (!down_read_trylock(&mm->mmap_sem)) {
			if (!in_atomic)
				mmput(mm);
			continue;
		}

		for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
			struct vm_area_struct *vma;

			vma = rb_entry(n, struct vm_area_struct, vm_rb);

			if (address >= vma->vm_start && address < vma->vm_end) {
				char _tmpbuf[256];
				char *name = p->comm;
				struct file *file = vma->vm_file;

				/* prefer the backing file's path over comm */
				if (file) {
					char *d_name = d_path(&file->f_path, _tmpbuf,
						      sizeof(_tmpbuf));
					if (!IS_ERR(d_name))
						name = d_name;
				}

				/* FLAT does not have its text aligned to the start of
				 * the map while FDPIC ELF does ...
				 */

				/* before we can check flat/fdpic, we need to
				 * make sure current is valid
				 */
				if ((unsigned long)current >= FIXED_CODE_START &&
				    !((unsigned long)current & 0x3)) {
					if (current->mm &&
					    (address > current->mm->start_code) &&
					    (address < current->mm->end_code))
						offset = address - current->mm->start_code;
					else
						offset = (address - vma->vm_start) +
							 (vma->vm_pgoff << PAGE_SHIFT);

					sprintf(buf, "[ %s + 0x%lx ]", name, offset);
				} else
					sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
						name, vma->vm_start, vma->vm_end);

				up_read(&mm->mmap_sem);
				if (!in_atomic)
					mmput(mm);

				/* NOTE(review): both sprintf branches above always
				 * write into buf, so this looks unreachable --
				 * confirm before removing */
				if (buf[0] == '\0')
					sprintf(buf, "[ %s ] dynamic memory", name);

				goto done;
			}
		}

		up_read(&mm->mmap_sem);
		if (!in_atomic)
			mmput(mm);
	}

	/*
	 * we were unable to find this address anywhere,
	 * or some MMs were skipped because they were in use.
	 */
	sprintf(buf, "/* kernel dynamic memory */");

done:
	write_unlock_irqrestore(&tasklist_lock, flags);
}
/* Last valid index into software_trace_buff (its length minus one);
 * used below as the wrap-around bound for the circular expanded trace. */
#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
  143. /*
  144. * Similar to get_user, do some address checking, then dereference
  145. * Return true on success, false on bad address
  146. */
  147. bool get_instruction(unsigned short *val, unsigned short *address)
  148. {
  149. unsigned long addr = (unsigned long)address;
  150. /* Check for odd addresses */
  151. if (addr & 0x1)
  152. return false;
  153. /* MMR region will never have instructions */
  154. if (addr >= SYSMMR_BASE)
  155. return false;
  156. switch (bfin_mem_access_type(addr, 2)) {
  157. case BFIN_MEM_ACCESS_CORE:
  158. case BFIN_MEM_ACCESS_CORE_ONLY:
  159. *val = *address;
  160. return true;
  161. case BFIN_MEM_ACCESS_DMA:
  162. dma_memcpy(val, address, 2);
  163. return true;
  164. case BFIN_MEM_ACCESS_ITEST:
  165. isram_memcpy(val, address, 2);
  166. return true;
  167. default: /* invalid access */
  168. return false;
  169. }
  170. }
  171. /*
  172. * decode the instruction if we are printing out the trace, as it
  173. * makes things easier to follow, without running it through objdump
  174. * These are the normal instructions which cause change of flow, which
  175. * would be at the source of the trace buffer
  176. */
  177. #if defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
  178. static void decode_instruction(unsigned short *address)
  179. {
  180. unsigned short opcode;
  181. if (get_instruction(&opcode, address)) {
  182. if (opcode == 0x0010)
  183. pr_cont("RTS");
  184. else if (opcode == 0x0011)
  185. pr_cont("RTI");
  186. else if (opcode == 0x0012)
  187. pr_cont("RTX");
  188. else if (opcode == 0x0013)
  189. pr_cont("RTN");
  190. else if (opcode == 0x0014)
  191. pr_cont("RTE");
  192. else if (opcode == 0x0025)
  193. pr_cont("EMUEXCPT");
  194. else if (opcode >= 0x0040 && opcode <= 0x0047)
  195. pr_cont("STI R%i", opcode & 7);
  196. else if (opcode >= 0x0050 && opcode <= 0x0057)
  197. pr_cont("JUMP (P%i)", opcode & 7);
  198. else if (opcode >= 0x0060 && opcode <= 0x0067)
  199. pr_cont("CALL (P%i)", opcode & 7);
  200. else if (opcode >= 0x0070 && opcode <= 0x0077)
  201. pr_cont("CALL (PC+P%i)", opcode & 7);
  202. else if (opcode >= 0x0080 && opcode <= 0x0087)
  203. pr_cont("JUMP (PC+P%i)", opcode & 7);
  204. else if (opcode >= 0x0090 && opcode <= 0x009F)
  205. pr_cont("RAISE 0x%x", opcode & 0xF);
  206. else if (opcode >= 0x00A0 && opcode <= 0x00AF)
  207. pr_cont("EXCPT 0x%x", opcode & 0xF);
  208. else if ((opcode >= 0x1000 && opcode <= 0x13FF) || (opcode >= 0x1800 && opcode <= 0x1BFF))
  209. pr_cont("IF !CC JUMP");
  210. else if ((opcode >= 0x1400 && opcode <= 0x17ff) || (opcode >= 0x1c00 && opcode <= 0x1fff))
  211. pr_cont("IF CC JUMP");
  212. else if (opcode >= 0x2000 && opcode <= 0x2fff)
  213. pr_cont("JUMP.S");
  214. else if (opcode >= 0xe080 && opcode <= 0xe0ff)
  215. pr_cont("LSETUP");
  216. else if (opcode >= 0xe200 && opcode <= 0xe2ff)
  217. pr_cont("JUMP.L");
  218. else if (opcode >= 0xe300 && opcode <= 0xe3ff)
  219. pr_cont("CALL pcrel");
  220. else
  221. pr_cont("0x%04x", opcode);
  222. }
  223. }
  224. #endif
/*
 * dump_bfin_trace_buffer() - print the hardware trace buffer (and, with
 * CONFIG_DEBUG_BFIN_HWTRACE_EXPAND, the software expansion buffer) as
 * Target/Source change-of-flow pairs.
 *
 * Reading TBUF pops entries, so the trace unit state is saved on entry
 * and restored on exit via trace_buffer_save/restore.
 */
void dump_bfin_trace_buffer(void)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags, i = 0, fault = 0;
	char buf[150];
	unsigned short *addr;
	unsigned int cpu = raw_smp_processor_id();
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	int j, index;
#endif

	trace_buffer_save(tflags);

	pr_notice("Hardware Trace:\n");

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	pr_notice("WARNING: Expanded trace turned on - can not trace exceptions\n");
#endif

	/* drain the hardware buffer while it reports pending entries */
	if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
		for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
			/* each pair of TBUF reads yields target, then source */
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			pr_notice("%4i Target : %s\n", i, buf);
			/* Normally, the faulting instruction doesn't go into
			 * the trace buffer, (since it doesn't commit), so
			 * we print out the fault address here
			 */
			if (!fault && addr == (unsigned short *)trap &&
			    (cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE) > VEC_EXCPT15) {
				decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
				pr_notice(" FAULT : %s ", buf);
				decode_instruction((unsigned short *)cpu_pda[cpu].icplb_fault_addr);
				pr_cont("\n");
				fault = 1;	/* only report the fault once */
			}
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			pr_notice(" Source : %s ", buf);
			decode_instruction(addr);
			pr_cont("\n");
		}
	}

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	/* walk the circular software buffer backwards from the most recent
	 * entry; trace_buff_offset is a byte offset, entries are 4 bytes */
	if (trace_buff_offset)
		index = trace_buff_offset / 4;
	else
		index = EXPAND_LEN;

	/* half the buffer length: entries are consumed two at a time */
	j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
	while (j) {
		decode_address(buf, software_trace_buff[index]);
		pr_notice("%4i Target : %s\n", i, buf);
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		decode_address(buf, software_trace_buff[index]);
		pr_notice(" Source : %s ", buf);
		decode_instruction((unsigned short *)software_trace_buff[index]);
		pr_cont("\n");
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		j--;
		i++;
	}
#endif

	trace_buffer_restore(tflags);
#endif
}
EXPORT_SYMBOL(dump_bfin_trace_buffer);
/*
 * dump_bfin_process() - describe the execution context we crashed in and,
 * when 'current' looks sane, the task's comm/pid and mm layout.  Because
 * this runs during a crash, every pointer is range/alignment checked
 * before it is dereferenced.
 */
void dump_bfin_process(struct pt_regs *fp)
{
	/* We should be able to look at fp->ipend, but we don't push it on the
	 * stack all the time, so do this until we fix that */
	unsigned int context = bfin_read_IPEND();

	if (oops_in_progress)
		pr_emerg("Kernel OOPS in progress\n");

	/* classify by which IPEND bits are live */
	if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)
		pr_notice("HW Error context\n");
	else if (context & 0x0020)
		pr_notice("Deferred Exception context\n");
	else if (context & 0x3FC0)
		pr_notice("Interrupt context\n");
	else if (context & 0x4000)
		pr_notice("Deferred Interrupt context\n");
	else if (context & 0x8000)
		pr_notice("Kernel process context\n");

	/* Because we are crashing, and pointers could be bad, we check things
	 * pretty closely before we use them
	 */
	if ((unsigned long)current >= FIXED_CODE_START &&
	    !((unsigned long)current & 0x3) && current->pid) {
		pr_notice("CURRENT PROCESS:\n");
		if (current->comm >= (char *)FIXED_CODE_START)
			pr_notice("COMM=%s PID=%d",
				current->comm, current->pid);
		else
			pr_notice("COMM= invalid");

		pr_cont(" CPU=%d\n", current_thread_info()->cpu);
		/* mm must be aligned and above the fixed-code window too */
		if (!((unsigned long)current->mm & 0x3) &&
		    (unsigned long)current->mm >= FIXED_CODE_START) {
			pr_notice("TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n",
				(void *)current->mm->start_code,
				(void *)current->mm->end_code,
				(void *)current->mm->start_data,
				(void *)current->mm->end_data);
			pr_notice(" BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
				(void *)current->mm->end_data,
				(void *)current->mm->brk,
				(void *)current->mm->start_stack);
		} else
			pr_notice("invalid mm\n");
	} else
		pr_notice("No Valid process in current context\n");
}
/*
 * dump_bfin_mem() - dump the 16-bit instruction words around the faulting
 * PC (fp->pc), marking the faulting word with [brackets].  Unreadable
 * words print as "????".  Also scans the words at/before the PC for an
 * STI or "[SP++] = RETI" encoding to detect that a hardware error may
 * have been deferred, and if so points at the interrupted context.
 */
void dump_bfin_mem(struct pt_regs *fp)
{
	unsigned short *addr, *erraddr, val = 0, err = 0;
	char sti = 0, buf[6];

	erraddr = (void *)fp->pc;

	pr_notice("return address: [0x%p]; contents of:", erraddr);

	/* window: 0x10 words either side of the 16-byte-aligned PC */
	for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
	     addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
	     addr++) {
		/* new output row every 16 bytes */
		if (!((unsigned long)addr & 0xF))
			pr_notice("0x%p: ", addr);

		if (!get_instruction(&val, addr)) {
			val = 0;
			sprintf(buf, "????");
		} else
			sprintf(buf, "%04x", val);

		if (addr == erraddr) {
			pr_cont("[%s]", buf);
			/* NOTE(review): 'err' is written but never read
			 * in this function -- confirm before removing */
			err = val;
		} else
			pr_cont(" %s ", buf);

		/* Do any previous instructions turn on interrupts? */
		if (addr <= erraddr && /* in the past */
		    ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */
		     val == 0x017b)) /* [SP++] = RETI */
			sti = 1;
	}

	pr_cont("\n");

	/* Hardware error interrupts can be deferred */
	if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
	    oops_in_progress)){
		pr_notice("Looks like this was a deferred error - sorry\n");
#ifndef CONFIG_DEBUG_HWERR
		pr_notice("The remaining message may be meaningless\n");
		pr_notice("You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
#else
		/* If we are handling only one peripheral interrupt
		 * and current mm and pid are valid, and the last error
		 * was in that user space process's text area
		 * print it out - because that is where the problem exists
		 */
		if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
		     (current->pid && current->mm)) {
			/* And the last RETI points to the current userspace context */
			if ((fp + 1)->pc >= current->mm->start_code &&
			    (fp + 1)->pc <= current->mm->end_code) {
				pr_notice("It might be better to look around here :\n");
				pr_notice("-------------------------------------------\n");
				show_regs(fp + 1);
				pr_notice("-------------------------------------------\n");
			}
		}
#endif
	}
}
/*
 * show_regs() - dump the full processor state from @fp: CPU/silicon
 * revision, sequencer status (SEQSTAT/IPEND/IMASK/SYSCFG), asserted
 * interrupt vectors with their handlers, return registers, CPLB fault
 * addresses, and the data/pointer/loop/DAG register files.
 */
void show_regs(struct pt_regs *fp)
{
	char buf[150];
	struct irqaction *action;
	unsigned int i;
	unsigned long flags = 0;
	unsigned int cpu = raw_smp_processor_id();
	/* exception context (IPEND[4]) or atomic: skip irq_desc locking below */
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();

	pr_notice("\n");
	/* warn when the binary was built for a different part/revision */
	if (CPUID != bfin_cpuid())
		pr_notice("Compiled for cpu family 0x%04x (Rev %d), "
			"but running on:0x%04x (Rev %d)\n",
			CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());

	pr_notice("ADSP-%s-0.%d",
		CPU, bfin_compiled_revid());
	if (bfin_compiled_revid() != bfin_revid())
		pr_cont("(Detected 0.%d)", bfin_revid());
	pr_cont(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
		get_cclk()/1000000, get_sclk()/1000000,
#ifdef CONFIG_MPU
		"mpu on"
#else
		"mpu off"
#endif
	);

	pr_notice("%s", linux_banner);

	pr_notice("\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
	pr_notice(" SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
		(long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
	if (fp->ipend & EVT_IRPTEN)
		pr_notice(" Global Interrupts Disabled (IPEND[4])\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
			EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
		pr_notice(" Peripheral interrupts masked off\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
		pr_notice(" Kernel interrupts masked off\n");
	if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
		pr_notice(" HWERRCAUSE: 0x%lx\n",
			(fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
#ifdef EBIU_ERRMST
		/* If the error was from the EBIU, print it out */
		if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
			pr_notice(" EBIU Error Reason : 0x%04x\n",
				bfin_read_EBIU_ERRMST());
			pr_notice(" EBIU Error Address : 0x%08x\n",
				bfin_read_EBIU_ERRADD());
		}
#endif
	}

	pr_notice(" EXCAUSE : 0x%lx\n",
		fp->seqstat & SEQSTAT_EXCAUSE);
	/* report each asserted IVG level with its vector's target address */
	for (i = 2; i <= 15 ; i++) {
		if (fp->ipend & (1 << i)) {
			if (i != 4) {
				decode_address(buf, bfin_read32(EVT0 + 4*i));
				pr_notice(" physical IVG%i asserted : %s\n", i, buf);
			} else
				pr_notice(" interrupts disabled\n");
		}
	}

	/* if no interrupts are going off, don't print this out */
	if (fp->ipend & ~0x3F) {
		for (i = 0; i < (NR_IRQS - 1); i++) {
			/* can't take the descriptor lock in atomic context */
			if (!in_atomic)
				raw_spin_lock_irqsave(&irq_desc[i].lock, flags);

			action = irq_desc[i].action;
			if (!action)
				goto unlock;

			decode_address(buf, (unsigned int)action->handler);
			pr_notice(" logical irq %3d mapped : %s", i, buf);
			/* shared irqs: list every chained handler */
			for (action = action->next; action; action = action->next) {
				decode_address(buf, (unsigned int)action->handler);
				pr_cont(", %s", buf);
			}
			pr_cont("\n");
unlock:
			if (!in_atomic)
				raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
		}
	}

	decode_address(buf, fp->rete);
	pr_notice(" RETE: %s\n", buf);
	decode_address(buf, fp->retn);
	pr_notice(" RETN: %s\n", buf);
	decode_address(buf, fp->retx);
	pr_notice(" RETX: %s\n", buf);
	decode_address(buf, fp->rets);
	pr_notice(" RETS: %s\n", buf);
	decode_address(buf, fp->pc);
	pr_notice(" PC : %s\n", buf);

	/* CPLB fault addresses are only meaningful for non-HWERR exceptions */
	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
	    (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
		pr_notice("DCPLB_FAULT_ADDR: %s\n", buf);
		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
		pr_notice("ICPLB_FAULT_ADDR: %s\n", buf);
	}

	pr_notice("PROCESSOR STATE:\n");
	pr_notice(" R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
		fp->r0, fp->r1, fp->r2, fp->r3);
	pr_notice(" R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
		fp->r4, fp->r5, fp->r6, fp->r7);
	pr_notice(" P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
		fp->p0, fp->p1, fp->p2, fp->p3);
	pr_notice(" P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
		fp->p4, fp->p5, fp->fp, (long)fp);
	pr_notice(" LB0: %08lx LT0: %08lx LC0: %08lx\n",
		fp->lb0, fp->lt0, fp->lc0);
	pr_notice(" LB1: %08lx LT1: %08lx LC1: %08lx\n",
		fp->lb1, fp->lt1, fp->lc1);
	pr_notice(" B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
		fp->b0, fp->l0, fp->m0, fp->i0);
	pr_notice(" B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
		fp->b1, fp->l1, fp->m1, fp->i1);
	pr_notice(" B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
		fp->b2, fp->l2, fp->m2, fp->i2);
	pr_notice(" B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
		fp->b3, fp->l3, fp->m3, fp->i3);
	pr_notice("A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
		fp->a0w, fp->a0x, fp->a1w, fp->a1x);
	pr_notice("USP : %08lx ASTAT: %08lx\n",
		rdusp(), fp->astat);

	pr_notice("\n");
}