adreno_postmortem.c

/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/vmalloc.h>
#include <mach/board.h>

#include "kgsl.h"
#include "kgsl_sharedmem.h"

#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
#include "kgsl_cffdump.h"
#include "kgsl_pwrctrl.h"
#include "adreno_trace.h"

#include "a2xx_reg.h"
#include "a3xx_reg.h"

#define INVALID_RB_CMD 0xaaaaaaaa
#define NUM_DWORDS_OF_RINGBUFFER_HISTORY 100
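
/*
 * Lookup tables used when decoding the ring buffer: registers written by
 * type-0 packets, type-3 packet opcodes, and the KGSL identifier values
 * carried in NOP packets are each mapped to a fixed eight-character
 * mnemonic that is printed in place of the raw hex word.
 */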
struct pm_id_name {
        enum adreno_regs id;
        char name[9];
};

static const struct pm_id_name pm0_types[] = {
        {ADRENO_REG_PA_SC_AA_CONFIG, "RPASCAAC"},
        {ADRENO_REG_RBBM_PM_OVERRIDE2, "RRBBPMO2"},
        {ADRENO_REG_SCRATCH_REG2, "RSCRTRG2"},
        {ADRENO_REG_SQ_GPR_MANAGEMENT, "RSQGPRMN"},
        {ADRENO_REG_SQ_INST_STORE_MANAGMENT, "RSQINSTS"},
        {ADRENO_REG_TC_CNTL_STATUS, "RTCCNTLS"},
        {ADRENO_REG_TP0_CHICKEN, "RTP0CHCK"},
        {ADRENO_REG_CP_TIMESTAMP, "CP_TM_ST"},
};

static const struct pm_id_name pm3_types[] = {
        {CP_COND_EXEC, "CND_EXEC"},
        {CP_CONTEXT_UPDATE, "CX__UPDT"},
        {CP_DRAW_INDX, "DRW_NDX_"},
        {CP_DRAW_INDX_BIN, "DRW_NDXB"},
        {CP_EVENT_WRITE, "EVENT_WT"},
        {CP_MEM_WRITE, "MEM_WRIT"},
        {CP_IM_LOAD, "IN__LOAD"},
        {CP_IM_LOAD_IMMEDIATE, "IM_LOADI"},
        {CP_IM_STORE, "IM_STORE"},
        {CP_INDIRECT_BUFFER_PFE, "IND_BUF_"},
        {CP_INDIRECT_BUFFER_PFD, "IND_BUFP"},
        {CP_INTERRUPT, "PM4_INTR"},
        {CP_INVALIDATE_STATE, "INV_STAT"},
        {CP_LOAD_CONSTANT_CONTEXT, "LD_CN_CX"},
        {CP_ME_INIT, "ME__INIT"},
        {CP_NOP, "PM4__NOP"},
        {CP_REG_RMW, "REG__RMW"},
        {CP_REG_TO_MEM, "REG2_MEM"},
        {CP_SET_BIN_BASE_OFFSET, "ST_BIN_O"},
        {CP_SET_CONSTANT, "ST_CONST"},
        {CP_SET_PROTECTED_MODE, "ST_PRT_M"},
        {CP_SET_SHADER_BASES, "ST_SHD_B"},
        {CP_WAIT_FOR_IDLE, "WAIT4IDL"},
        {CP_WAIT_FOR_ME, "WAIT4ME"},
        {CP_WAIT_REG_EQ, "WAITRGEQ"},
};

static const struct pm_id_name pm3_nop_values[] = {
        {KGSL_CONTEXT_TO_MEM_IDENTIFIER, "CTX_SWCH"},
        {KGSL_CMD_IDENTIFIER, "CMD__EXT"},
        {KGSL_CMD_INTERNAL_IDENTIFIER, "CMD__INT"},
        {KGSL_START_OF_IB_IDENTIFIER, "IB_START"},
        {KGSL_END_OF_IB_IDENTIFIER, "IB___END"},
        {KGSL_START_OF_PROFILE_IDENTIFIER, "PRO_STRT"},
        {KGSL_END_OF_PROFILE_IDENTIFIER, "PRO__END"},
};
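
/*
 * Return the count field encoded in bits 29:16 of a PM4 packet header,
 * or 0 for the driver's INVALID_RB_CMD filler word.
 */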
static uint32_t adreno_is_pm4_len(uint32_t word)
{
        if (word == INVALID_RB_CMD)
                return 0;

        return (word >> 16) & 0x3FFF;
}
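
/*
 * Decide whether a ring buffer word looks like a PM4 packet header that
 * the dump code can label: either a type-0 write to one of the registers
 * in pm0_types or a type-3 packet whose opcode appears in pm3_types.
 * Headers claiming more than 16 DWORDs are rejected so that stray data is
 * not mistaken for a packet.
 */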
static bool adreno_is_pm4_type(struct kgsl_device *device, uint32_t word)
{
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        int i;

        if (word == INVALID_RB_CMD)
                return 1;

        if (adreno_is_pm4_len(word) > 16)
                return 0;

        if ((word & (3<<30)) == CP_TYPE0_PKT) {
                for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
                        if ((word & 0x7FFF) == adreno_getreg(adreno_dev,
                                                        pm0_types[i].id))
                                return 1;
                }
                return 0;
        }
        if ((word & (3<<30)) == CP_TYPE3_PKT) {
                for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
                        if ((word & 0xFFFF) == (pm3_types[i].id << 8))
                                return 1;
                }
                return 0;
        }
        return 0;
}
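
/* Return the eight-character mnemonic for a recognized PM4 header word. */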
static const char *adreno_pm4_name(uint32_t word)
{
        int i;

        if (word == INVALID_RB_CMD)
                return "--------";

        if ((word & (3<<30)) == CP_TYPE0_PKT) {
                for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
                        if ((word & 0x7FFF) == pm0_types[i].id)
                                return pm0_types[i].name;
                }
                return "????????";
        }
        if ((word & (3<<30)) == CP_TYPE3_PKT) {
                for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
                        if ((word & 0xFFFF) == (pm3_types[i].id << 8))
                                return pm3_types[i].name;
                }
                return "????????";
        }
        return "????????";
}
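
/*
 * KGSL marks some NOP packets with well-known identifier values (context
 * switch markers, IB start/end markers, profiling markers); the two
 * helpers below recognize those payload words and name them in the dump.
 */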
static bool adreno_is_pm3_nop_value(uint32_t word)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pm3_nop_values); ++i) {
                if (word == pm3_nop_values[i].id)
                        return 1;
        }
        return 0;
}

static const char *adreno_pm3_nop_name(uint32_t word)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pm3_nop_values); ++i) {
                if (word == pm3_nop_values[i].id)
                        return pm3_nop_values[i].name;
        }
        return "????????";
}
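
/*
 * Dump ranges of registers. The table is a flat array of (start, end)
 * pairs of DWORD offsets; each range is read back through kgsl_regread()
 * and printed eight registers per line.
 */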
static void adreno_dump_regs(struct kgsl_device *device,
        const int *registers, int size)
{
        int range = 0, offset = 0;

        for (range = 0; range < size; range++) {
                /* start and end are in dword offsets */
                int start = registers[range * 2];
                int end = registers[range * 2 + 1];

                unsigned char linebuf[32 * 3 + 2 + 32 + 1];
                int linelen, i;

                for (offset = start; offset <= end; offset += linelen) {
                        unsigned int regvals[32/4];

                        linelen = min(end+1-offset, 32/4);
                        for (i = 0; i < linelen; ++i)
                                kgsl_regread(device, offset+i, regvals+i);

                        hex_dump_to_buffer(regvals, linelen*4, 32, 4,
                                linebuf, sizeof(linebuf), 0);
                        KGSL_LOG_DUMP(device,
                                "REG: %5.5X: %s\n", offset, linebuf);
                }
        }
}
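
/*
 * Map an indirect buffer (IB) into the kernel and either hex dump its
 * contents or just log its base, size and the ring buffer offset that
 * referenced it; "[Invalid]" is appended when the GPU address cannot be
 * resolved against the given pagetable.
 */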
static void dump_ib(struct kgsl_device *device, char *buffId,
        phys_addr_t pt_base, uint32_t base_offset, uint32_t ib_base,
        uint32_t ib_size, bool dump)
{
        struct kgsl_mem_entry *ent = NULL;
        uint8_t *base_addr = adreno_convertaddr(device, pt_base,
                ib_base, ib_size*sizeof(uint32_t), &ent);

        if (base_addr && dump)
                print_hex_dump(KERN_ERR, buffId, DUMP_PREFIX_OFFSET,
                        32, 4, base_addr, ib_size*4, 0);
        else
                KGSL_LOG_DUMP(device, "%s base:%8.8X ib_size:%d "
                        "offset:%5.5X%s\n",
                        buffId, ib_base, ib_size*4, base_offset,
                        base_addr ? "" : " [Invalid]");

        if (ent) {
                kgsl_memdesc_unmap(&ent->memdesc);
                kgsl_mem_entry_put(ent);
        }
}

#define IB_LIST_SIZE 64
struct ib_list {
        int count;
        uint32_t bases[IB_LIST_SIZE];
        uint32_t sizes[IB_LIST_SIZE];
        uint32_t offsets[IB_LIST_SIZE];
};
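
/*
 * Dump an IB1 buffer and scan it for indirect-buffer packets, recording
 * each distinct IB2 base/size it references in ib_list so that the caller
 * can dump those buffers as well.
 */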
static void dump_ib1(struct kgsl_device *device, phys_addr_t pt_base,
        uint32_t base_offset,
        uint32_t ib1_base, uint32_t ib1_size,
        struct ib_list *ib_list, bool dump)
{
        int i, j;
        uint32_t value;
        uint32_t *ib1_addr;
        struct kgsl_mem_entry *ent = NULL;

        dump_ib(device, "IB1:", pt_base, base_offset, ib1_base,
                ib1_size, dump);

        /* fetch virtual address for given IB base */
        ib1_addr = (uint32_t *)adreno_convertaddr(device, pt_base,
                ib1_base, ib1_size*sizeof(uint32_t), &ent);
        if (!ib1_addr)
                return;

        for (i = 0; i+3 < ib1_size; ) {
                value = ib1_addr[i++];
                if (adreno_cmd_is_ib(value)) {
                        uint32_t ib2_base = ib1_addr[i++];
                        uint32_t ib2_size = ib1_addr[i++];

                        /* find previous match */
                        for (j = 0; j < ib_list->count; ++j)
                                if (ib_list->sizes[j] == ib2_size
                                        && ib_list->bases[j] == ib2_base)
                                        break;

                        if (j < ib_list->count || ib_list->count
                                >= IB_LIST_SIZE)
                                continue;

                        /* store match */
                        ib_list->sizes[ib_list->count] = ib2_size;
                        ib_list->bases[ib_list->count] = ib2_base;
                        ib_list->offsets[ib_list->count] = i<<2;
                        ++ib_list->count;
                }
        }

        if (ent) {
                kgsl_memdesc_unmap(&ent->memdesc);
                kgsl_mem_entry_put(ent);
        }
}
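
/*
 * Format one row of the decoded ring buffer dump. Recognized PM4 headers
 * are replaced by their mnemonic and their payload is wrapped in <...>
 * (tracked across rows through *argp); known NOP identifier values are
 * printed by name and everything else as raw hex.
 */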
static void adreno_dump_rb_buffer(struct kgsl_device *device, const void *buf,
        size_t len, char *linebuf, size_t linebuflen, int *argp)
{
        const u32 *ptr4 = buf;
        const int ngroups = len;
        int lx = 0, j;
        bool nxsp = 1;

        for (j = 0; j < ngroups; j++) {
                if (*argp < 0) {
                        lx += scnprintf(linebuf + lx, linebuflen - lx, " <");
                        *argp = -*argp;
                } else if (nxsp)
                        lx += scnprintf(linebuf + lx, linebuflen - lx, " ");
                else
                        nxsp = 1;
                if (!*argp && adreno_is_pm4_type(device, ptr4[j])) {
                        lx += scnprintf(linebuf + lx, linebuflen - lx,
                                "%s", adreno_pm4_name(ptr4[j]));
                        *argp = -(adreno_is_pm4_len(ptr4[j])+1);
                } else {
                        if (adreno_is_pm3_nop_value(ptr4[j]))
                                lx += scnprintf(linebuf + lx, linebuflen - lx,
                                        "%s", adreno_pm3_nop_name(ptr4[j]));
                        else
                                lx += scnprintf(linebuf + lx, linebuflen - lx,
                                        "%8.8X", ptr4[j]);
                        if (*argp > 1)
                                --*argp;
                        else if (*argp == 1) {
                                *argp = 0;
                                nxsp = 0;
                                lx += scnprintf(linebuf + lx, linebuflen - lx,
                                        "> ");
                        }
                }
        }
        linebuf[lx] = '\0';
}

static bool adreno_rb_use_hex(void)
{
#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX
        return 1;
#else
        return 0;
#endif
}
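
/*
 * Dump the copied ring buffer contents eight DWORDs per line, either as a
 * plain hex dump (CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX) or decoded into PM4
 * mnemonics; addresses are printed modulo the ring buffer size.
 */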
void adreno_dump_rb(struct kgsl_device *device, const void *buf,
        size_t len, int start, int size)
{
        const uint32_t *ptr = buf;
        int i, remaining, args = 0;
        unsigned char linebuf[32 * 3 + 2 + 32 + 1];
        const int rowsize = 8;

        len >>= 2;
        remaining = len;
        for (i = 0; i < len; i += rowsize) {
                int linelen = min(remaining, rowsize);

                remaining -= rowsize;
                if (adreno_rb_use_hex())
                        hex_dump_to_buffer(ptr+i, linelen*4, rowsize*4, 4,
                                linebuf, sizeof(linebuf), 0);
                else
                        adreno_dump_rb_buffer(device, ptr+i, linelen, linebuf,
                                sizeof(linebuf), &args);
                KGSL_LOG_DUMP(device,
                        "RB: %4.4X:%s\n", (start+i)%size, linebuf);
        }
}
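
/*
 * Print a set of named status fields as "A | B | C" lines, starting a new
 * line whenever the next field would not fit in the buffer. Only fields
 * whose ->show flag is set are printed.
 */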
static int adreno_dump_fields_line(struct kgsl_device *device,
        const char *start, char *str, int slen,
        const struct log_field **lines,
        int num)
{
        const struct log_field *l = *lines;
        int sptr, count = 0;

        sptr = snprintf(str, slen, "%s", start);

        for ( ; num && sptr < slen; num--, l++) {
                int ilen = strlen(l->display);

                if (!l->show)
                        continue;

                if (count)
                        ilen += strlen(" | ");

                if (ilen > (slen - sptr))
                        break;

                if (count++)
                        sptr += snprintf(str + sptr, slen - sptr, " | ");

                sptr += snprintf(str + sptr, slen - sptr, "%s", l->display);
        }

        KGSL_LOG_DUMP(device, "%s\n", str);

        *lines = l;
        return num;
}

void adreno_dump_fields(struct kgsl_device *device,
        const char *start, const struct log_field *lines,
        int num)
{
        char lb[90];
        const char *sstr = start;

        lb[sizeof(lb) - 1] = '\0';

        while (num) {
                int ret = adreno_dump_fields_line(device, sstr, lb,
                        sizeof(lb) - 1, &lines, num);

                if (ret == num)
                        break;

                num = ret;
                sstr = " ";
        }
}
EXPORT_SYMBOL(adreno_dump_fields);
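
/*
 * Main postmortem entry point: snapshot the CP ring buffer registers and
 * the current pagetable, copy the ring buffer contents around the read
 * pointer, decode them, dump any IB1/IB2 buffers that were in flight and
 * finally dump the register ranges for the detected GPU family. When the
 * dump was triggered manually the dispatcher is restarted before
 * returning.
 */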
int adreno_dump(struct kgsl_device *device, int manual)
{
        unsigned int cp_ib1_base;
        unsigned int cp_ib2_base;
        phys_addr_t pt_base, cur_pt_base;
        unsigned int cp_rb_base, cp_rb_ctrl, rb_count;
        unsigned int cp_rb_wptr, cp_rb_rptr;
        unsigned int i;
        int result = 0;
        uint32_t *rb_copy;
        const uint32_t *rb_vaddr;
        int num_item = 0;
        int read_idx, write_idx;
        unsigned int ts_processed = 0xdeaddead;
        struct kgsl_context *context;
        unsigned int context_id;
        static struct ib_list ib_list;
        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
        int num_iommu_units = 0;

        mb();

        msm_clk_dump_debug_info();

        if (adreno_dev->gpudev->postmortem_dump)
                adreno_dev->gpudev->postmortem_dump(adreno_dev);
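
        /*
         * Snapshot the CP ring buffer registers; rb_count is the ring
         * size in DWORDs, derived from the size field of CP_RB_CNTL.
         */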
        pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
        cur_pt_base = pt_base;

        kgsl_regread(device,
                adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_BASE),
                &cp_rb_base);
        kgsl_regread(device,
                adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_CNTL),
                &cp_rb_ctrl);
        rb_count = 2 << (cp_rb_ctrl & (BIT(6) - 1));
        kgsl_regread(device,
                adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_RPTR),
                &cp_rb_rptr);
        kgsl_regread(device,
                adreno_getreg(adreno_dev, ADRENO_REG_CP_RB_WPTR),
                &cp_rb_wptr);
        kgsl_regread(device,
                adreno_getreg(adreno_dev, ADRENO_REG_CP_IB1_BASE),
                &cp_ib1_base);
        kgsl_regread(device,
                adreno_getreg(adreno_dev, ADRENO_REG_CP_IB2_BASE),
                &cp_ib2_base);

        kgsl_sharedmem_readl(&device->memstore,
                (unsigned int *) &context_id,
                KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
                        current_context));

        context = kgsl_context_get(device, context_id);

        if (context) {
                ts_processed = kgsl_readtimestamp(device, context,
                        KGSL_TIMESTAMP_RETIRED);
                KGSL_LOG_DUMP(device, "FT CTXT: %d TIMESTM RTRD: %08X\n",
                        context->id, ts_processed);
        } else
                KGSL_LOG_DUMP(device, "BAD CTXT: %d\n", context_id);

        kgsl_context_put(context);

        num_item = adreno_ringbuffer_count(&adreno_dev->ringbuffer,
                cp_rb_rptr);
        if (num_item <= 0)
                KGSL_LOG_POSTMORTEM_WRITE(device, "Ringbuffer is Empty.\n");

        rb_copy = vmalloc(rb_count<<2);
        if (!rb_copy) {
                KGSL_LOG_POSTMORTEM_WRITE(device,
                        "vmalloc(%d) failed\n", rb_count << 2);
                result = -ENOMEM;
                goto end;
        }

        KGSL_LOG_DUMP(device, "RB: rd_addr:%8.8x rb_size:%d num_item:%d\n",
                cp_rb_base, rb_count<<2, num_item);

        if (adreno_dev->ringbuffer.buffer_desc.gpuaddr != cp_rb_base)
                KGSL_LOG_POSTMORTEM_WRITE(device,
                        "rb address mismatch, should be 0x%08x\n",
                        adreno_dev->ringbuffer.buffer_desc.gpuaddr);

        rb_vaddr = adreno_dev->ringbuffer.buffer_desc.hostptr;
        if (!rb_vaddr) {
                KGSL_LOG_POSTMORTEM_WRITE(device,
                        "rb has no kernel mapping!\n");
                goto error_vfree;
        }
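
        /*
         * Copy a window of the ring buffer into rb_copy: from
         * NUM_DWORDS_OF_RINGBUFFER_HISTORY DWORDs before the read pointer
         * to 16 DWORDs past the write pointer, handling wrap-around and
         * clamping to the ring size.
         */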
        read_idx = (int)cp_rb_rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY;
        if (read_idx < 0)
                read_idx += rb_count;
        write_idx = (int)cp_rb_wptr + 16;
        if (write_idx > rb_count)
                write_idx -= rb_count;
        num_item += NUM_DWORDS_OF_RINGBUFFER_HISTORY+16;
        if (num_item > rb_count)
                num_item = rb_count;
        if (write_idx >= read_idx)
                memcpy(rb_copy, rb_vaddr+read_idx, num_item<<2);
        else {
                int part1_c = rb_count-read_idx;
                memcpy(rb_copy, rb_vaddr+read_idx, part1_c<<2);
                memcpy(rb_copy+part1_c, rb_vaddr, (num_item-part1_c)<<2);
        }

        /* extract the latest ib commands from the buffer */
        ib_list.count = 0;
        i = 0;
        /* get the register mapped array in case we are using IOMMU */
        num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);

        for (read_idx = 0; read_idx < num_item; ) {
                uint32_t this_cmd = rb_copy[read_idx++];
                if (adreno_cmd_is_ib(this_cmd)) {
                        uint32_t ib_addr = rb_copy[read_idx++];
                        uint32_t ib_size = rb_copy[read_idx++];
                        dump_ib1(device, cur_pt_base, (read_idx-3)<<2, ib_addr,
                                ib_size, &ib_list, 0);
                        for (; i < ib_list.count; ++i)
                                dump_ib(device, "IB2:", cur_pt_base,
                                        ib_list.offsets[i],
                                        ib_list.bases[i],
                                        ib_list.sizes[i], 0);
                } else if (this_cmd == cp_type0_packet(MH_MMU_PT_BASE, 1) ||
                        (num_iommu_units && this_cmd ==
                        kgsl_mmu_get_reg_gpuaddr(&device->mmu, 0,
                                KGSL_IOMMU_CONTEXT_USER,
                                KGSL_IOMMU_CTX_TTBR0)) ||
                        (num_iommu_units && this_cmd == cp_type0_packet(
                                kgsl_mmu_get_reg_ahbaddr(
                                        &device->mmu, 0,
                                        KGSL_IOMMU_CONTEXT_USER,
                                        KGSL_IOMMU_CTX_TTBR0), 1))) {
                        KGSL_LOG_DUMP(device,
                                "Current pagetable: %x\t pagetable base: %pa\n",
                                kgsl_mmu_get_ptname_from_ptbase(&device->mmu,
                                        cur_pt_base),
                                &cur_pt_base);
                        /* Set cur_pt_base to the new pagetable base */
                        cur_pt_base = rb_copy[read_idx++];
                        KGSL_LOG_DUMP(device,
                                "New pagetable: %x\t pagetable base: %pa\n",
                                kgsl_mmu_get_ptname_from_ptbase(&device->mmu,
                                        cur_pt_base),
                                &cur_pt_base);
                }
        }

        /*
         * Restore cur_pt_base back to the pt_base of the process in whose
         * context the GPU hung.
         */
        cur_pt_base = pt_base;

        read_idx = (int)cp_rb_rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY;
        if (read_idx < 0)
                read_idx += rb_count;
        KGSL_LOG_DUMP(device,
                "RB: addr=%8.8x window:%4.4x-%4.4x, start:%4.4x\n",
                cp_rb_base, cp_rb_rptr, cp_rb_wptr, read_idx);
        adreno_dump_rb(device, rb_copy, num_item<<2, read_idx, rb_count);
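
        /*
         * If IB dumping is enabled, walk back through the copied history
         * to find the IB1 that matches CP_IB1_BASE and hex dump it, along
         * with any recorded IB2 that matches CP_IB2_BASE.
         */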
        if (device->pm_ib_enabled) {
                for (read_idx = NUM_DWORDS_OF_RINGBUFFER_HISTORY;
                        read_idx >= 0; --read_idx) {
                        uint32_t this_cmd = rb_copy[read_idx];
                        if (adreno_cmd_is_ib(this_cmd)) {
                                uint32_t ib_addr = rb_copy[read_idx+1];
                                uint32_t ib_size = rb_copy[read_idx+2];
                                if (ib_size && cp_ib1_base == ib_addr) {
                                        KGSL_LOG_DUMP(device,
                                                "IB1: base:%8.8X "
                                                "count:%d\n", ib_addr, ib_size);
                                        dump_ib(device, "IB1: ", cur_pt_base,
                                                read_idx<<2, ib_addr, ib_size,
                                                1);
                                }
                        }
                }
                for (i = 0; i < ib_list.count; ++i) {
                        uint32_t ib_size = ib_list.sizes[i];
                        uint32_t ib_offset = ib_list.offsets[i];
                        if (ib_size && cp_ib2_base == ib_list.bases[i]) {
                                KGSL_LOG_DUMP(device,
                                        "IB2: base:%8.8X count:%d\n",
                                        cp_ib2_base, ib_size);
                                dump_ib(device, "IB2: ", cur_pt_base, ib_offset,
                                        ib_list.bases[i], ib_size, 1);
                        }
                }
        }

        /* Dump the registers if the user asked for it */
        if (device->pm_regs_enabled) {
                if (adreno_is_a20x(adreno_dev))
                        adreno_dump_regs(device, a200_registers,
                                a200_registers_count);
                else if (adreno_is_a22x(adreno_dev))
                        adreno_dump_regs(device, a220_registers,
                                a220_registers_count);
                else if (adreno_is_a225(adreno_dev))
                        adreno_dump_regs(device, a225_registers,
                                a225_registers_count);
                else if (adreno_is_a3xx(adreno_dev)) {
                        adreno_dump_regs(device, a3xx_registers,
                                a3xx_registers_count);

                        if (adreno_is_a330(adreno_dev) ||
                                adreno_is_a305b(adreno_dev))
                                adreno_dump_regs(device, a330_registers,
                                        a330_registers_count);
                }
        }

error_vfree:
        vfree(rb_copy);
end:
        /* Restart the dispatcher after a manually triggered dump */
        if (manual)
                adreno_dispatcher_start(device);

        return result;
}