/* provide some functions which dump the trace buffer, in a nice way for people
 * to read it, and understand what is going on
 *
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/thread_info.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/irq.h>
#include <asm/dma.h>
#include <asm/trace.h>
#include <asm/fixed_code.h>
#include <asm/traps.h>
#include <asm/irq_handler.h>
#include <asm/pda.h>

void decode_address(char *buf, unsigned long address)
{
	struct task_struct *p;
	struct mm_struct *mm;
	unsigned long flags, offset;
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
	struct rb_node *n;

#ifdef CONFIG_KALLSYMS
	unsigned long symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];
#endif

	buf += sprintf(buf, "<0x%08lx> ", address);

#ifdef CONFIG_KALLSYMS
	/* look up the address and see if we are in kernel space */
	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);

	if (symname) {
		/* yeah! kernel space! */
		if (!modname)
			modname = delim = "";
		sprintf(buf, "{ %s%s%s%s + 0x%lx }",
			delim, modname, delim, symname,
			(unsigned long)offset);
		return;
	}
#endif

	if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
		/* Problem in fixed code section? */
		strcat(buf, "/* Maybe fixed code section */");
		return;

	} else if (address < CONFIG_BOOT_LOAD) {
		/* Problem somewhere before the kernel start address */
		strcat(buf, "/* Maybe null pointer? */");
		return;

	} else if (address >= COREMMR_BASE) {
		strcat(buf, "/* core mmrs */");
		return;

	} else if (address >= SYSMMR_BASE) {
		strcat(buf, "/* system mmrs */");
		return;

	} else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
		strcat(buf, "/* on-chip L1 ROM */");
		return;

	} else if (address >= L1_SCRATCH_START && address < L1_SCRATCH_START + L1_SCRATCH_LENGTH) {
		strcat(buf, "/* on-chip scratchpad */");
		return;

	} else if (address >= physical_mem_end && address < ASYNC_BANK0_BASE) {
		strcat(buf, "/* unconnected memory */");
		return;

	} else if (address >= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE && address < BOOT_ROM_START) {
		strcat(buf, "/* reserved memory */");
		return;

	} else if (address >= L1_DATA_A_START && address < L1_DATA_A_START + L1_DATA_A_LENGTH) {
		strcat(buf, "/* on-chip Data Bank A */");
		return;

	} else if (address >= L1_DATA_B_START && address < L1_DATA_B_START + L1_DATA_B_LENGTH) {
		strcat(buf, "/* on-chip Data Bank B */");
		return;
	}

	/*
	 * Don't walk any of the vmas if we are oopsing, it has been known
	 * to cause problems - corrupt vmas (kernel crashes) cause double faults
	 */
	if (oops_in_progress) {
		strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
		return;
	}

	/* looks like we're off in user-land, so let's walk all the
	 * mappings of all our processes and see if we can't be a wee
	 * bit more specific
	 */
	write_lock_irqsave(&tasklist_lock, flags);
	for_each_process(p) {
		mm = (in_atomic ? p->mm : get_task_mm(p));
		if (!mm)
			continue;

		if (!down_read_trylock(&mm->mmap_sem)) {
			if (!in_atomic)
				mmput(mm);
			continue;
		}

		for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
			struct vm_area_struct *vma;

			vma = rb_entry(n, struct vm_area_struct, vm_rb);

			if (address >= vma->vm_start && address < vma->vm_end) {
				char _tmpbuf[256];
				char *name = p->comm;
				struct file *file = vma->vm_file;

				if (file) {
					char *d_name = d_path(&file->f_path, _tmpbuf,
						      sizeof(_tmpbuf));
					if (!IS_ERR(d_name))
						name = d_name;
				}

				/* FLAT does not have its text aligned to the start of
				 * the map while FDPIC ELF does ...
				 */

				/* before we can check flat/fdpic, we need to
				 * make sure current is valid
				 */
				if ((unsigned long)current >= FIXED_CODE_START &&
				    !((unsigned long)current & 0x3)) {
					if (current->mm &&
					    (address > current->mm->start_code) &&
					    (address < current->mm->end_code))
						offset = address - current->mm->start_code;
					else
						offset = (address - vma->vm_start) +
							 (vma->vm_pgoff << PAGE_SHIFT);

					sprintf(buf, "[ %s + 0x%lx ]", name, offset);
				} else
					sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
						name, vma->vm_start, vma->vm_end);

				up_read(&mm->mmap_sem);
				if (!in_atomic)
					mmput(mm);

				if (buf[0] == '\0')
					sprintf(buf, "[ %s ] dynamic memory", name);

				goto done;
			}
		}

		up_read(&mm->mmap_sem);
		if (!in_atomic)
			mmput(mm);
	}

	/*
	 * we were unable to find this address anywhere,
	 * or some MMs were skipped because they were in use.
	 */
	sprintf(buf, "/* kernel dynamic memory */");

done:
	write_unlock_irqrestore(&tasklist_lock, flags);
}
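
/*
 * For reference, callers in this file hand decode_address() a stack buffer of
 * about 150 bytes and print the result; show_regs() below does roughly:
 *
 *	char buf[150];
 *
 *	decode_address(buf, fp->pc);
 *	pr_notice(" PC : %s\n", buf);
 *
 * The string always starts with "<0x%08lx> " and, depending on the address,
 * is followed by a kernel symbol, a user mapping such as "[ comm + 0xoff ]",
 * or one of the memory-region annotations above.
 */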

#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)

/*
 * Similar to get_user, do some address checking, then dereference
 * Return true on success, false on bad address
 */
bool get_mem16(unsigned short *val, unsigned short *address)
{
	unsigned long addr = (unsigned long)address;

	/* Check for odd addresses */
	if (addr & 0x1)
		return false;

	switch (bfin_mem_access_type(addr, 2)) {
	case BFIN_MEM_ACCESS_CORE:
	case BFIN_MEM_ACCESS_CORE_ONLY:
		*val = *address;
		return true;
	case BFIN_MEM_ACCESS_DMA:
		dma_memcpy(val, address, 2);
		return true;
	case BFIN_MEM_ACCESS_ITEST:
		isram_memcpy(val, address, 2);
		return true;
	default: /* invalid access */
		return false;
	}
}
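
/*
 * For reference, callers treat a false return as "this address cannot be read
 * safely" rather than risking a fault; dump_bfin_mem() below does roughly:
 *
 *	unsigned short val;
 *
 *	if (!get_mem16(&val, addr))
 *		sprintf(buf, "????");
 *	else
 *		sprintf(buf, "%04x", val);
 */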

bool get_instruction(unsigned int *val, unsigned short *address)
{
	unsigned long addr = (unsigned long)address;
	unsigned short opcode0, opcode1;

	/* Check for odd addresses */
	if (addr & 0x1)
		return false;

	/* MMR region will never have instructions */
	if (addr >= SYSMMR_BASE)
		return false;

	/* Scratchpad will never have instructions */
	if (addr >= L1_SCRATCH_START && addr < L1_SCRATCH_START + L1_SCRATCH_LENGTH)
		return false;

	/* Data banks will never have instructions */
	if (addr >= BOOT_ROM_START + BOOT_ROM_LENGTH && addr < L1_CODE_START)
		return false;

	if (!get_mem16(&opcode0, address))
		return false;

	/* was this a 32-bit instruction? If so, get the next 16 bits */
	if ((opcode0 & 0xc000) == 0xc000) {
		if (!get_mem16(&opcode1, address + 1))
			return false;
		*val = (opcode0 << 16) + opcode1;
	} else
		*val = opcode0;

	return true;
}
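
/*
 * Worked example of the 16/32-bit check above (values are illustrative): a
 * first word of 0xe400 has its top two bits set, so it starts a 32-bit
 * instruction (the LDSTidxI group decoded below); with a second word of
 * 0x1234 the caller gets *val == 0xe4001234.  A first word of 0x0010 (RTS,
 * per decode_ProgCtrl_0() below) fails the 0xc000 test and is returned as a
 * 16-bit opcode.
 */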

#if defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
/*
 * decode the instruction if we are printing out the trace, as it
 * makes things easier to follow, without running it through objdump
 * Decode the change of flow, and the common load/store instructions
 * which are the main cause for faults, and discontinuities in the trace
 * buffer.
 */

#define ProgCtrl_opcode 0x0000
#define ProgCtrl_poprnd_bits 0
#define ProgCtrl_poprnd_mask 0xf
#define ProgCtrl_prgfunc_bits 4
#define ProgCtrl_prgfunc_mask 0xf
#define ProgCtrl_code_bits 8
#define ProgCtrl_code_mask 0xff

static void decode_ProgCtrl_0(unsigned int opcode)
{
	int poprnd = ((opcode >> ProgCtrl_poprnd_bits) & ProgCtrl_poprnd_mask);
	int prgfunc = ((opcode >> ProgCtrl_prgfunc_bits) & ProgCtrl_prgfunc_mask);

	if (prgfunc == 0 && poprnd == 0)
		pr_cont("NOP");
	else if (prgfunc == 1 && poprnd == 0)
		pr_cont("RTS");
	else if (prgfunc == 1 && poprnd == 1)
		pr_cont("RTI");
	else if (prgfunc == 1 && poprnd == 2)
		pr_cont("RTX");
	else if (prgfunc == 1 && poprnd == 3)
		pr_cont("RTN");
	else if (prgfunc == 1 && poprnd == 4)
		pr_cont("RTE");
	else if (prgfunc == 2 && poprnd == 0)
		pr_cont("IDLE");
	else if (prgfunc == 2 && poprnd == 3)
		pr_cont("CSYNC");
	else if (prgfunc == 2 && poprnd == 4)
		pr_cont("SSYNC");
	else if (prgfunc == 2 && poprnd == 5)
		pr_cont("EMUEXCPT");
	else if (prgfunc == 3)
		pr_cont("CLI R%i", poprnd);
	else if (prgfunc == 4)
		pr_cont("STI R%i", poprnd);
	else if (prgfunc == 5)
		pr_cont("JUMP (P%i)", poprnd);
	else if (prgfunc == 6)
		pr_cont("CALL (P%i)", poprnd);
	else if (prgfunc == 7)
		pr_cont("CALL (PC + P%i)", poprnd);
	else if (prgfunc == 8)
		pr_cont("JUMP (PC + P%i)", poprnd);
	else if (prgfunc == 9)
		pr_cont("RAISE %i", poprnd);
	else if (prgfunc == 10)
		pr_cont("EXCPT %i", poprnd);
	else
		pr_cont("0x%04x", opcode);
}

#define BRCC_opcode 0x1000
#define BRCC_offset_bits 0
#define BRCC_offset_mask 0x3ff
#define BRCC_B_bits 10
#define BRCC_B_mask 0x1
#define BRCC_T_bits 11
#define BRCC_T_mask 0x1
#define BRCC_code_bits 12
#define BRCC_code_mask 0xf

static void decode_BRCC_0(unsigned int opcode)
{
	int B = ((opcode >> BRCC_B_bits) & BRCC_B_mask);
	int T = ((opcode >> BRCC_T_bits) & BRCC_T_mask);

	pr_cont("IF %sCC JUMP pcrel %s", T ? "" : "!", B ? "(BP)" : "");
}

#define CALLa_opcode 0xe2000000
#define CALLa_addr_bits 0
#define CALLa_addr_mask 0xffffff
#define CALLa_S_bits 24
#define CALLa_S_mask 0x1
#define CALLa_code_bits 25
#define CALLa_code_mask 0x7f

static void decode_CALLa_0(unsigned int opcode)
{
	int S = ((opcode >> (CALLa_S_bits - 16)) & CALLa_S_mask);

	if (S)
		pr_cont("CALL pcrel");
	else
		pr_cont("JUMP.L");
}

#define LoopSetup_opcode 0xe0800000
#define LoopSetup_eoffset_bits 0
#define LoopSetup_eoffset_mask 0x3ff
#define LoopSetup_dontcare_bits 10
#define LoopSetup_dontcare_mask 0x3
#define LoopSetup_reg_bits 12
#define LoopSetup_reg_mask 0xf
#define LoopSetup_soffset_bits 16
#define LoopSetup_soffset_mask 0xf
#define LoopSetup_c_bits 20
#define LoopSetup_c_mask 0x1
#define LoopSetup_rop_bits 21
#define LoopSetup_rop_mask 0x3
#define LoopSetup_code_bits 23
#define LoopSetup_code_mask 0x1ff

static void decode_LoopSetup_0(unsigned int opcode)
{
	int c = ((opcode >> LoopSetup_c_bits) & LoopSetup_c_mask);
	int reg = ((opcode >> LoopSetup_reg_bits) & LoopSetup_reg_mask);
	int rop = ((opcode >> LoopSetup_rop_bits) & LoopSetup_rop_mask);

	pr_cont("LSETUP <> LC%i", c);
	if ((rop & 1) == 1)
		pr_cont("= P%i", reg);
	if ((rop & 2) == 2)
		pr_cont(" >> 0x1");
}

#define DspLDST_opcode 0x9c00
#define DspLDST_reg_bits 0
#define DspLDST_reg_mask 0x7
#define DspLDST_i_bits 3
#define DspLDST_i_mask 0x3
#define DspLDST_m_bits 5
#define DspLDST_m_mask 0x3
#define DspLDST_aop_bits 7
#define DspLDST_aop_mask 0x3
#define DspLDST_W_bits 9
#define DspLDST_W_mask 0x1
#define DspLDST_code_bits 10
#define DspLDST_code_mask 0x3f

static void decode_dspLDST_0(unsigned int opcode)
{
	int i = ((opcode >> DspLDST_i_bits) & DspLDST_i_mask);
	int m = ((opcode >> DspLDST_m_bits) & DspLDST_m_mask);
	int W = ((opcode >> DspLDST_W_bits) & DspLDST_W_mask);
	int aop = ((opcode >> DspLDST_aop_bits) & DspLDST_aop_mask);
	int reg = ((opcode >> DspLDST_reg_bits) & DspLDST_reg_mask);

	if (W == 0) {
		pr_cont("R%i", reg);
		switch (m) {
		case 0:
			pr_cont(" = ");
			break;
		case 1:
			pr_cont(".L = ");
			break;
		case 2:
			pr_cont(".W = ");
			break;
		}
	}

	pr_cont("[ I%i", i);

	switch (aop) {
	case 0:
		pr_cont("++ ]");
		break;
	case 1:
		pr_cont("-- ]");
		break;
	}

	if (W == 1) {
		pr_cont(" = R%i", reg);
		switch (m) {
		case 1:
			pr_cont(".L = ");
			break;
		case 2:
			pr_cont(".W = ");
			break;
		}
	}
}

#define LDST_opcode 0x9000
#define LDST_reg_bits 0
#define LDST_reg_mask 0x7
#define LDST_ptr_bits 3
#define LDST_ptr_mask 0x7
#define LDST_Z_bits 6
#define LDST_Z_mask 0x1
#define LDST_aop_bits 7
#define LDST_aop_mask 0x3
#define LDST_W_bits 9
#define LDST_W_mask 0x1
#define LDST_sz_bits 10
#define LDST_sz_mask 0x3
#define LDST_code_bits 12
#define LDST_code_mask 0xf

static void decode_LDST_0(unsigned int opcode)
{
	int Z = ((opcode >> LDST_Z_bits) & LDST_Z_mask);
	int W = ((opcode >> LDST_W_bits) & LDST_W_mask);
	int sz = ((opcode >> LDST_sz_bits) & LDST_sz_mask);
	int aop = ((opcode >> LDST_aop_bits) & LDST_aop_mask);
	int reg = ((opcode >> LDST_reg_bits) & LDST_reg_mask);
	int ptr = ((opcode >> LDST_ptr_bits) & LDST_ptr_mask);

	if (W == 0)
		pr_cont("%s%i = ", (sz == 0 && Z == 1) ? "P" : "R", reg);

	switch (sz) {
	case 1:
		pr_cont("W");
		break;
	case 2:
		pr_cont("B");
		break;
	}

	pr_cont("[P%i", ptr);

	switch (aop) {
	case 0:
		pr_cont("++");
		break;
	case 1:
		pr_cont("--");
		break;
	}
	pr_cont("]");

	if (W == 1)
		pr_cont(" = %s%i ", (sz == 0 && Z == 1) ? "P" : "R", reg);

	if (sz) {
		if (Z)
			pr_cont(" (X)");
		else
			pr_cont(" (Z)");
	}
}

#define LDSTii_opcode 0xa000
#define LDSTii_reg_bit 0
#define LDSTii_reg_mask 0x7
#define LDSTii_ptr_bit 3
#define LDSTii_ptr_mask 0x7
#define LDSTii_offset_bit 6
#define LDSTii_offset_mask 0xf
#define LDSTii_op_bit 10
#define LDSTii_op_mask 0x3
#define LDSTii_W_bit 12
#define LDSTii_W_mask 0x1
#define LDSTii_code_bit 13
#define LDSTii_code_mask 0x7

static void decode_LDSTii_0(unsigned int opcode)
{
	int reg = ((opcode >> LDSTii_reg_bit) & LDSTii_reg_mask);
	int ptr = ((opcode >> LDSTii_ptr_bit) & LDSTii_ptr_mask);
	int offset = ((opcode >> LDSTii_offset_bit) & LDSTii_offset_mask);
	int op = ((opcode >> LDSTii_op_bit) & LDSTii_op_mask);
	int W = ((opcode >> LDSTii_W_bit) & LDSTii_W_mask);

	if (W == 0) {
		pr_cont("%s%i = %s[P%i + %i]", op == 3 ? "R" : "P", reg,
			op == 1 || op == 2 ? "" : "W", ptr, offset);
		if (op == 2)
			pr_cont("(Z)");
		if (op == 3)
			pr_cont("(X)");
	} else {
		pr_cont("%s[P%i + %i] = %s%i", op == 0 ? "" : "W", ptr,
			offset, op == 3 ? "P" : "R", reg);
	}
}

#define LDSTidxI_opcode 0xe4000000
#define LDSTidxI_offset_bits 0
#define LDSTidxI_offset_mask 0xffff
#define LDSTidxI_reg_bits 16
#define LDSTidxI_reg_mask 0x7
#define LDSTidxI_ptr_bits 19
#define LDSTidxI_ptr_mask 0x7
#define LDSTidxI_sz_bits 22
#define LDSTidxI_sz_mask 0x3
#define LDSTidxI_Z_bits 24
#define LDSTidxI_Z_mask 0x1
#define LDSTidxI_W_bits 25
#define LDSTidxI_W_mask 0x1
#define LDSTidxI_code_bits 26
#define LDSTidxI_code_mask 0x3f

static void decode_LDSTidxI_0(unsigned int opcode)
{
	int Z = ((opcode >> LDSTidxI_Z_bits) & LDSTidxI_Z_mask);
	int W = ((opcode >> LDSTidxI_W_bits) & LDSTidxI_W_mask);
	int sz = ((opcode >> LDSTidxI_sz_bits) & LDSTidxI_sz_mask);
	int reg = ((opcode >> LDSTidxI_reg_bits) & LDSTidxI_reg_mask);
	int ptr = ((opcode >> LDSTidxI_ptr_bits) & LDSTidxI_ptr_mask);
	int offset = ((opcode >> LDSTidxI_offset_bits) & LDSTidxI_offset_mask);

	if (W == 0)
		pr_cont("%s%i = ", sz == 0 && Z == 1 ? "P" : "R", reg);

	if (sz == 1)
		pr_cont("W");
	if (sz == 2)
		pr_cont("B");

	pr_cont("[P%i + %s0x%x]", ptr, offset & 0x20 ? "-" : "",
		(offset & 0x1f) << 2);

	if (W == 0 && sz != 0) {
		if (Z)
			pr_cont("(X)");
		else
			pr_cont("(Z)");
	}

	if (W == 1)
		pr_cont("= %s%i", (sz == 0 && Z == 1) ? "P" : "R", reg);
}

static void decode_opcode(unsigned int opcode)
{
#ifdef CONFIG_BUG
	if (opcode == BFIN_BUG_OPCODE)
		pr_cont("BUG");
	else
#endif
	if ((opcode & 0xffffff00) == ProgCtrl_opcode)
		decode_ProgCtrl_0(opcode);
	else if ((opcode & 0xfffff000) == BRCC_opcode)
		decode_BRCC_0(opcode);
	else if ((opcode & 0xfffff000) == 0x2000)
		pr_cont("JUMP.S");
	else if ((opcode & 0xfe000000) == CALLa_opcode)
		decode_CALLa_0(opcode);
	else if ((opcode & 0xff8000C0) == LoopSetup_opcode)
		decode_LoopSetup_0(opcode);
	else if ((opcode & 0xfffffc00) == DspLDST_opcode)
		decode_dspLDST_0(opcode);
	else if ((opcode & 0xfffff000) == LDST_opcode)
		decode_LDST_0(opcode);
	else if ((opcode & 0xffffe000) == LDSTii_opcode)
		decode_LDSTii_0(opcode);
	else if ((opcode & 0xfc000000) == LDSTidxI_opcode)
		decode_LDSTidxI_0(opcode);
	else if (opcode & 0xffff0000)
		pr_cont("0x%08x", opcode);
	else
		pr_cont("0x%04x", opcode);
}

#define BIT_MULTI_INS 0x08000000

static void decode_instruction(unsigned short *address)
{
	unsigned int opcode;

	if (!get_instruction(&opcode, address))
		return;

	decode_opcode(opcode);

	/* If this is a 32-bit instruction, it has the possibility of being
	 * a multi-issue instruction (a 32-bit op plus two 16-bit ops).
	 * This test collides with the unlink instruction, so disallow that.
	 */
	if ((opcode & 0xc0000000) == 0xc0000000 &&
	    (opcode & BIT_MULTI_INS) &&
	    (opcode & 0xe8000000) != 0xe8000000) {
		pr_cont(" || ");
		if (!get_instruction(&opcode, address + 2))
			return;
		decode_opcode(opcode);
		pr_cont(" || ");
		if (!get_instruction(&opcode, address + 3))
			return;
		decode_opcode(opcode);
	}
}
#endif
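
/*
 * With CONFIG_DEBUG_BFIN_HWTRACE_ON, the dumper below prints each hardware
 * trace entry as a Target/Source pair, with the source instruction run
 * through the decoder above.  A single entry comes out roughly like this
 * (addresses and symbols are illustrative only):
 *
 *	   0 Target : <0x00203a14> { _trap_c + 0x0 }
 *	     Source : <0x00203912> { _exception_to_level5 + 0xa2 } CALL (P5)
 */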

void dump_bfin_trace_buffer(void)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags, i = 0, fault = 0;
	char buf[150];
	unsigned short *addr;
	unsigned int cpu = raw_smp_processor_id();
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	int j, index;
#endif

	trace_buffer_save(tflags);

	pr_notice("Hardware Trace:\n");

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	pr_notice("WARNING: Expanded trace turned on - can not trace exceptions\n");
#endif

	if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
		for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			pr_notice("%4i Target : %s\n", i, buf);
			/* Normally, the faulting instruction doesn't go into
			 * the trace buffer (since it doesn't commit), so
			 * we print out the fault address here
			 */
			if (!fault && addr == ((unsigned short *)evt_ivhw)) {
				addr = (unsigned short *)bfin_read_TBUF();
				decode_address(buf, (unsigned long)addr);
				pr_notice(" FAULT : %s ", buf);
				decode_instruction(addr);
				pr_cont("\n");
				fault = 1;
				continue;
			}

			if (!fault && addr == (unsigned short *)trap &&
			    (cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE) > VEC_EXCPT15) {
				decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
				pr_notice(" FAULT : %s ", buf);
				decode_instruction((unsigned short *)cpu_pda[cpu].icplb_fault_addr);
				pr_cont("\n");
				fault = 1;
			}

			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			pr_notice(" Source : %s ", buf);
			decode_instruction(addr);
			pr_cont("\n");
		}
	}

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	if (trace_buff_offset)
		index = trace_buff_offset / 4;
	else
		index = EXPAND_LEN;

	j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
	while (j) {
		decode_address(buf, software_trace_buff[index]);
		pr_notice("%4i Target : %s\n", i, buf);
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		decode_address(buf, software_trace_buff[index]);
		pr_notice(" Source : %s ", buf);
		decode_instruction((unsigned short *)software_trace_buff[index]);
		pr_cont("\n");
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		j--;
		i++;
	}
#endif

	trace_buffer_restore(tflags);
#endif
}
EXPORT_SYMBOL(dump_bfin_trace_buffer);

void dump_bfin_process(struct pt_regs *fp)
{
	/* We should be able to look at fp->ipend, but we don't push it on the
	 * stack all the time, so do this until we fix that */
	unsigned int context = bfin_read_IPEND();

	if (oops_in_progress)
		pr_emerg("Kernel OOPS in progress\n");

	if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)
		pr_notice("HW Error context\n");
	else if (context & 0x0020)
		pr_notice("Deferred Exception context\n");
	else if (context & 0x3FC0)
		pr_notice("Interrupt context\n");
	else if (context & 0x4000)
		pr_notice("Deferred Interrupt context\n");
	else if (context & 0x8000)
		pr_notice("Kernel process context\n");

	/* Because we are crashing, and pointers could be bad, we check things
	 * pretty closely before we use them
	 */
	if ((unsigned long)current >= FIXED_CODE_START &&
	    !((unsigned long)current & 0x3) && current->pid) {
		pr_notice("CURRENT PROCESS:\n");
		if (current->comm >= (char *)FIXED_CODE_START)
			pr_notice("COMM=%s PID=%d",
				current->comm, current->pid);
		else
			pr_notice("COMM= invalid");

		pr_cont(" CPU=%d\n", current_thread_info()->cpu);
		if (!((unsigned long)current->mm & 0x3) &&
		    (unsigned long)current->mm >= FIXED_CODE_START) {
			pr_notice("TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n",
				(void *)current->mm->start_code,
				(void *)current->mm->end_code,
				(void *)current->mm->start_data,
				(void *)current->mm->end_data);
			pr_notice(" BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
				(void *)current->mm->end_data,
				(void *)current->mm->brk,
				(void *)current->mm->start_stack);
		} else
			pr_notice("invalid mm\n");
	} else
		pr_notice("No Valid process in current context\n");
}

void dump_bfin_mem(struct pt_regs *fp)
{
	unsigned short *addr, *erraddr, val = 0, err = 0;
	char sti = 0, buf[6];

	erraddr = (void *)fp->pc;

	pr_notice("return address: [0x%p]; contents of:", erraddr);

	for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
	     addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
	     addr++) {
		if (!((unsigned long)addr & 0xF))
			pr_notice("0x%p: ", addr);

		if (!get_mem16(&val, addr)) {
			val = 0;
			sprintf(buf, "????");
		} else
			sprintf(buf, "%04x", val);

		if (addr == erraddr) {
			pr_cont("[%s]", buf);
			err = val;
		} else
			pr_cont(" %s ", buf);

		/* Do any previous instructions turn on interrupts? */
		if (addr <= erraddr && /* in the past */
		    ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */
		     val == 0x017b)) /* [SP++] = RETI */
			sti = 1;
	}

	pr_cont("\n");

	/* Hardware error interrupts can be deferred */
	if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
	    oops_in_progress)) {
		pr_notice("Looks like this was a deferred error - sorry\n");
#ifndef CONFIG_DEBUG_HWERR
		pr_notice("The remaining message may be meaningless\n");
		pr_notice("You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
#else
		/* If we are handling only one peripheral interrupt
		 * and current mm and pid are valid, and the last error
		 * was in that user space process's text area
		 * print it out - because that is where the problem exists
		 */
		if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
		    (current->pid && current->mm)) {
			/* And the last RETI points to the current userspace context */
			if ((fp + 1)->pc >= current->mm->start_code &&
			    (fp + 1)->pc <= current->mm->end_code) {
				pr_notice("It might be better to look around here :\n");
				pr_notice("-------------------------------------------\n");
				show_regs(fp + 1);
				pr_notice("-------------------------------------------\n");
			}
		}
#endif
	}
}
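
/*
 * For reference, the loop above walks 64 bytes around the faulting PC and
 * prints 8 words per 16-byte line, with the word at the PC itself shown in
 * [brackets] and unreadable words printed as "????" (see get_mem16() above).
 */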

void show_regs(struct pt_regs *fp)
{
	char buf[150];
	struct irqaction *action;
	unsigned int i;
	unsigned long flags = 0;
	unsigned int cpu = raw_smp_processor_id();
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();

	pr_notice("\n");
	if (CPUID != bfin_cpuid())
		pr_notice("Compiled for cpu family 0x%04x (Rev %d), "
			"but running on:0x%04x (Rev %d)\n",
			CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());

	pr_notice("ADSP-%s-0.%d",
		CPU, bfin_compiled_revid());

	if (bfin_compiled_revid() != bfin_revid())
		pr_cont("(Detected 0.%d)", bfin_revid());

	pr_cont(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
		get_cclk()/1000000, get_sclk()/1000000,
#ifdef CONFIG_MPU
		"mpu on"
#else
		"mpu off"
#endif
		);

	pr_notice("%s", linux_banner);

	pr_notice("\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
	pr_notice(" SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
		(long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
	if (fp->ipend & EVT_IRPTEN)
		pr_notice(" Global Interrupts Disabled (IPEND[4])\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
			EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
		pr_notice(" Peripheral interrupts masked off\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
		pr_notice(" Kernel interrupts masked off\n");
	if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
		pr_notice(" HWERRCAUSE: 0x%lx\n",
			(fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
#ifdef EBIU_ERRMST
		/* If the error was from the EBIU, print it out */
		if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
			pr_notice(" EBIU Error Reason : 0x%04x\n",
				bfin_read_EBIU_ERRMST());
			pr_notice(" EBIU Error Address : 0x%08x\n",
				bfin_read_EBIU_ERRADD());
		}
#endif
	}
	pr_notice(" EXCAUSE : 0x%lx\n",
		fp->seqstat & SEQSTAT_EXCAUSE);
	for (i = 2; i <= 15; i++) {
		if (fp->ipend & (1 << i)) {
			if (i != 4) {
				decode_address(buf, bfin_read32(EVT0 + 4*i));
				pr_notice(" physical IVG%i asserted : %s\n", i, buf);
			} else
				pr_notice(" interrupts disabled\n");
		}
	}

	/* if no interrupts are going off, don't print this out */
	if (fp->ipend & ~0x3F) {
		for (i = 0; i < (NR_IRQS - 1); i++) {
			struct irq_desc *desc = irq_to_desc(i);

			if (!in_atomic)
				raw_spin_lock_irqsave(&desc->lock, flags);

			action = desc->action;
			if (!action)
				goto unlock;

			decode_address(buf, (unsigned int)action->handler);
			pr_notice(" logical irq %3d mapped : %s", i, buf);
			for (action = action->next; action; action = action->next) {
				decode_address(buf, (unsigned int)action->handler);
				pr_cont(", %s", buf);
			}
			pr_cont("\n");
unlock:
			if (!in_atomic)
				raw_spin_unlock_irqrestore(&desc->lock, flags);
		}
	}

	decode_address(buf, fp->rete);
	pr_notice(" RETE: %s\n", buf);
	decode_address(buf, fp->retn);
	pr_notice(" RETN: %s\n", buf);
	decode_address(buf, fp->retx);
	pr_notice(" RETX: %s\n", buf);
	decode_address(buf, fp->rets);
	pr_notice(" RETS: %s\n", buf);
	decode_address(buf, fp->pc);
	pr_notice(" PC : %s\n", buf);

	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
	    (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
		pr_notice("DCPLB_FAULT_ADDR: %s\n", buf);
		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
		pr_notice("ICPLB_FAULT_ADDR: %s\n", buf);
	}

	pr_notice("PROCESSOR STATE:\n");
	pr_notice(" R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
		fp->r0, fp->r1, fp->r2, fp->r3);
	pr_notice(" R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
		fp->r4, fp->r5, fp->r6, fp->r7);
	pr_notice(" P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
		fp->p0, fp->p1, fp->p2, fp->p3);
	pr_notice(" P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
		fp->p4, fp->p5, fp->fp, (long)fp);
	pr_notice(" LB0: %08lx LT0: %08lx LC0: %08lx\n",
		fp->lb0, fp->lt0, fp->lc0);
	pr_notice(" LB1: %08lx LT1: %08lx LC1: %08lx\n",
		fp->lb1, fp->lt1, fp->lc1);
	pr_notice(" B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
		fp->b0, fp->l0, fp->m0, fp->i0);
	pr_notice(" B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
		fp->b1, fp->l1, fp->m1, fp->i1);
	pr_notice(" B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
		fp->b2, fp->l2, fp->m2, fp->i2);
	pr_notice(" B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
		fp->b3, fp->l3, fp->m3, fp->i3);
	pr_notice("A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
		fp->a0w, fp->a0x, fp->a1w, fp->a1x);
	pr_notice("USP : %08lx ASTAT: %08lx\n",
		rdusp(), fp->astat);

	pr_notice("\n");
}