qlcnic_minidump.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452
  1. /*
  2. * QLogic qlcnic NIC Driver
  3. * Copyright (c) 2009-2013 QLogic Corporation
  4. *
  5. * See LICENSE.qlcnic for copyright and licensing details.
  6. */
  7. #include <net/ip.h>
  8. #include "qlcnic.h"
  9. #include "qlcnic_hdr.h"
  10. #include "qlcnic_83xx_hw.h"
  11. #include "qlcnic_hw.h"
  12. #define QLC_83XX_MINIDUMP_FLASH 0x520000
  13. #define QLC_83XX_OCM_INDEX 3
  14. #define QLC_83XX_PCI_INDEX 0
  15. #define QLC_83XX_DMA_ENGINE_INDEX 8
  16. static const u32 qlcnic_ms_read_data[] = {
  17. 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
  18. };
  19. #define QLCNIC_DUMP_WCRB BIT_0
  20. #define QLCNIC_DUMP_RWCRB BIT_1
  21. #define QLCNIC_DUMP_ANDCRB BIT_2
  22. #define QLCNIC_DUMP_ORCRB BIT_3
  23. #define QLCNIC_DUMP_POLLCRB BIT_4
  24. #define QLCNIC_DUMP_RD_SAVE BIT_5
  25. #define QLCNIC_DUMP_WRT_SAVED BIT_6
  26. #define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
  27. #define QLCNIC_DUMP_SKIP BIT_7
  28. #define QLCNIC_DUMP_MASK_MAX 0xff
/* PEX DMA descriptor written into MS (card) memory; consumed by the
 * on-chip DMA engine, so the layout must match hardware exactly.
 */
struct qlcnic_pex_dma_descriptor {
	u32	read_data_size;
	u32	dma_desc_cmd;
	u32	src_addr_low;
	u32	src_addr_high;
	u32	dma_bus_addr_low;
	u32	dma_bus_addr_high;
	u32	rsvd[6];
} __packed;
/* Header shared by every minidump template entry. The byte fields are
 * ordered per host endianness so ->mask and ->flags land on the same
 * byte offsets in the on-disk template regardless of CPU byte order.
 */
struct qlcnic_common_entry_hdr {
	u32 type;	/* enum qlcnic_minidump_opcode */
	u32 offset;
	u32 cap_size;	/* expected capture size in bytes */
#if defined(__LITTLE_ENDIAN)
	u8 mask;	/* capture mask this entry belongs to */
	u8 rsvd[2];
	u8 flags;	/* e.g. QLCNIC_DUMP_SKIP */
#else
	u8 flags;
	u8 rsvd[2];
	u8 mask;
#endif
} __packed;
/* QLCNIC_DUMP_READ_CRB entry body: read ->no_ops CRB registers starting
 * at ->addr, stepping by ->stride.
 */
struct __crb {
	u32 addr;
#if defined(__LITTLE_ENDIAN)
	u8 stride;
	u8 rsvd1[3];
#else
	u8 rsvd1[3];
	u8 stride;
#endif
	u32 data_size;
	u32 no_ops;
	u32 rsvd2[4];
} __packed;
/* QLCNIC_DUMP_READ_CTRL/PEG_REG entry body: a small opcode program
 * (see QLCNIC_DUMP_* bit flags) executed against CRB register ->addr.
 * ->index_a/->index_v index the template's saved_state scratch array.
 */
struct __ctrl {
	u32 addr;
#if defined(__LITTLE_ENDIAN)
	u8 stride;
	u8 index_a;
	u16 timeout;
#else
	u16 timeout;
	u8 index_a;
	u8 stride;
#endif
	u32 data_size;
	u32 no_ops;
#if defined(__LITTLE_ENDIAN)
	u8 opcode;	/* bitmask of QLCNIC_DUMP_* operations */
	u8 index_v;
	u8 shl_val;
	u8 shr_val;
#else
	u8 shr_val;
	u8 shl_val;
	u8 index_v;
	u8 opcode;
#endif
	u32 val1;
	u32 val2;
	u32 val3;
} __packed;
/* L1/L2 cache dump entry body: select a tag via ->addr, trigger via
 * ->ctrl_addr/->ctrl_val, then read ->read_addr_num words starting at
 * ->read_addr, stepping by ->read_addr_stride.
 */
struct __cache {
	u32 addr;
#if defined(__LITTLE_ENDIAN)
	u16 stride;
	u16 init_tag_val;
#else
	u16 init_tag_val;
	u16 stride;
#endif
	u32 size;
	u32 no_ops;
	u32 ctrl_addr;
	u32 ctrl_val;
	u32 read_addr;
#if defined(__LITTLE_ENDIAN)
	u8 read_addr_stride;
	u8 read_addr_num;
	u8 rsvd1[2];
#else
	u8 rsvd1[2];
	u8 read_addr_num;
	u8 read_addr_stride;
#endif
} __packed;
/* QLCNIC_DUMP_READ_OCM entry body: ->no_ops MMIO reads starting at
 * BAR0 offset ->read_addr, stepping by ->read_addr_stride.
 */
struct __ocm {
	u8 rsvd[8];
	u32 size;
	u32 no_ops;
	u8 rsvd1[8];
	u32 read_addr;
	u32 read_addr_stride;
} __packed;
/* Memory/ROM dump entry body; also carries the PEX DMA parameters
 * (descriptor location in card memory and DMA command words).
 */
struct __mem {
	u32 desc_card_addr;
	u32 dma_desc_cmd;
	u32 start_dma_cmd;
	u32 rsvd[3];
	u32 addr;
	u32 size;
} __packed;
/* QLCNIC_DUMP_READ_MUX entry body: write select value (->val, stepped
 * by ->val_stride) to ->addr, then read ->read_addr, ->no_ops times.
 */
struct __mux {
	u32 addr;
	u8 rsvd[4];
	u32 size;
	u32 no_ops;
	u32 val;
	u32 val_stride;
	u32 read_addr;
	u8 rsvd2[4];
} __packed;
/* QLCNIC_DUMP_QUEUE entry body: select queue id via ->sel_addr, then
 * read ->read_addr_cnt registers per queue, for ->no_ops queues.
 */
struct __queue {
	u32 sel_addr;
#if defined(__LITTLE_ENDIAN)
	u16 stride;
	u8 rsvd[2];
#else
	u8 rsvd[2];
	u16 stride;
#endif
	u32 size;
	u32 no_ops;
	u8 rsvd2[8];
	u32 read_addr;
#if defined(__LITTLE_ENDIAN)
	u8 read_addr_stride;
	u8 read_addr_cnt;
	u8 rsvd3[2];
#else
	u8 rsvd3[2];
	u8 read_addr_cnt;
	u8 read_addr_stride;
#endif
} __packed;
/* QLCNIC_DUMP_POLL_RD entry body: write ->sel_val to ->sel_addr, poll
 * ->sel_addr against ->poll_mask for up to ->poll_wait iterations,
 * then read ->read_addr; repeated ->no_ops times.
 */
struct __pollrd {
	u32 sel_addr;
	u32 read_addr;
	u32 sel_val;
#if defined(__LITTLE_ENDIAN)
	u16 sel_val_stride;
	u16 no_ops;
#else
	u16 no_ops;
	u16 sel_val_stride;
#endif
	u32 poll_wait;
	u32 poll_mask;
	u32 data_size;
	u8 rsvd[4];
} __packed;
/* QLCNIC_READ_MUX2 entry body: two-level mux selection — two select
 * values written through ->sel_addr1/->sel_addr2 (masked with
 * ->sel_val_mask), each followed by a read of ->read_addr.
 */
struct __mux2 {
	u32 sel_addr1;
	u32 sel_addr2;
	u32 sel_val1;
	u32 sel_val2;
	u32 no_ops;
	u32 sel_val_mask;
	u32 read_addr;
#if defined(__LITTLE_ENDIAN)
	u8 sel_val_stride;
	u8 data_size;
	u8 rsvd[2];
#else
	u8 rsvd[2];
	u8 data_size;
	u8 sel_val_stride;
#endif
} __packed;
/* QLCNIC_READ_POLLRDMWR entry body: poll ->addr1, read-modify-write
 * ->addr2 with ->mod_mask, then poll ->addr1 again.
 */
struct __pollrdmwr {
	u32 addr1;
	u32 addr2;
	u32 val1;
	u32 val2;
	u32 poll_wait;	/* max poll iterations */
	u32 poll_mask;
	u32 mod_mask;
	u32 data_size;
} __packed;
/* One parsed minidump template entry: common header plus the
 * opcode-specific body (interpreted according to hdr.type).
 */
struct qlcnic_dump_entry {
	struct qlcnic_common_entry_hdr hdr;
	union {
		struct __crb	crb;
		struct __cache	cache;
		struct __ocm	ocm;
		struct __mem	mem;
		struct __mux	mux;
		struct __queue	que;
		struct __ctrl	ctrl;
		struct __pollrdmwr	pollrdmwr;
		struct __mux2	mux2;
		struct __pollrd	pollrd;
	} region;
} __packed;
/* Opcodes appearing in template entry headers; dispatched through the
 * qlcnic_dump_operations tables below. Values are fixed by the
 * firmware template format — do not renumber.
 */
enum qlcnic_minidump_opcode {
	QLCNIC_DUMP_NOP		= 0,
	QLCNIC_DUMP_READ_CRB	= 1,
	QLCNIC_DUMP_READ_MUX	= 2,
	QLCNIC_DUMP_QUEUE	= 3,
	QLCNIC_DUMP_BRD_CONFIG	= 4,
	QLCNIC_DUMP_READ_OCM	= 6,
	QLCNIC_DUMP_PEG_REG	= 7,
	QLCNIC_DUMP_L1_DTAG	= 8,
	QLCNIC_DUMP_L1_ITAG	= 9,
	QLCNIC_DUMP_L1_DATA	= 11,
	QLCNIC_DUMP_L1_INST	= 12,
	QLCNIC_DUMP_L2_DTAG	= 21,
	QLCNIC_DUMP_L2_ITAG	= 22,
	QLCNIC_DUMP_L2_DATA	= 23,
	QLCNIC_DUMP_L2_INST	= 24,
	QLCNIC_DUMP_POLL_RD	= 35,
	QLCNIC_READ_MUX2	= 36,
	QLCNIC_READ_POLLRDMWR	= 37,
	QLCNIC_DUMP_READ_ROM	= 71,
	QLCNIC_DUMP_READ_MEM	= 72,
	QLCNIC_DUMP_READ_CTRL	= 98,
	QLCNIC_DUMP_TLHDR	= 99,
	QLCNIC_DUMP_RDEND	= 255
};
  250. inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
  251. {
  252. struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
  253. return hdr->saved_state[index];
  254. }
  255. inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
  256. u32 value)
  257. {
  258. struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
  259. hdr->saved_state[index] = value;
  260. }
  261. void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
  262. {
  263. struct qlcnic_82xx_dump_template_hdr *hdr;
  264. hdr = fw_dump->tmpl_hdr;
  265. fw_dump->tmpl_hdr_size = hdr->size;
  266. fw_dump->version = hdr->version;
  267. fw_dump->num_entries = hdr->num_entries;
  268. fw_dump->offset = hdr->offset;
  269. hdr->drv_cap_mask = hdr->cap_mask;
  270. fw_dump->cap_mask = hdr->cap_mask;
  271. fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
  272. }
  273. inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
  274. {
  275. struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
  276. return hdr->cap_sizes[index];
  277. }
  278. void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
  279. {
  280. struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
  281. hdr->sys_info[idx] = value;
  282. }
  283. void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
  284. {
  285. struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
  286. hdr->drv_cap_mask = mask;
  287. }
  288. inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
  289. {
  290. struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
  291. return hdr->saved_state[index];
  292. }
  293. inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
  294. u32 value)
  295. {
  296. struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
  297. hdr->saved_state[index] = value;
  298. }
  299. #define QLCNIC_TEMPLATE_VERSION (0x20001)
  300. void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
  301. {
  302. struct qlcnic_83xx_dump_template_hdr *hdr;
  303. hdr = fw_dump->tmpl_hdr;
  304. fw_dump->tmpl_hdr_size = hdr->size;
  305. fw_dump->version = hdr->version;
  306. fw_dump->num_entries = hdr->num_entries;
  307. fw_dump->offset = hdr->offset;
  308. hdr->drv_cap_mask = hdr->cap_mask;
  309. fw_dump->cap_mask = hdr->cap_mask;
  310. fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
  311. QLCNIC_TEMPLATE_VERSION;
  312. }
  313. inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
  314. {
  315. struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
  316. return hdr->cap_sizes[index];
  317. }
  318. void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
  319. {
  320. struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
  321. hdr->sys_info[idx] = value;
  322. }
  323. void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
  324. {
  325. struct qlcnic_83xx_dump_template_hdr *hdr;
  326. hdr = tmpl_hdr;
  327. hdr->drv_cap_mask = mask;
  328. }
/* Maps a minidump opcode to the handler that captures that region.
 * A handler returns the number of bytes written to the buffer (or 0
 * on failure) and may set QLCNIC_DUMP_SKIP in the entry header.
 */
struct qlcnic_dump_operations {
	enum qlcnic_minidump_opcode opcode;
	u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
		       __le32 *);
};
  334. static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
  335. struct qlcnic_dump_entry *entry, __le32 *buffer)
  336. {
  337. int i;
  338. u32 addr, data;
  339. struct __crb *crb = &entry->region.crb;
  340. addr = crb->addr;
  341. for (i = 0; i < crb->no_ops; i++) {
  342. data = qlcnic_ind_rd(adapter, addr);
  343. *buffer++ = cpu_to_le32(addr);
  344. *buffer++ = cpu_to_le32(data);
  345. addr += crb->stride;
  346. }
  347. return crb->no_ops * 2 * sizeof(u32);
  348. }
  349. static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
  350. struct qlcnic_dump_entry *entry, __le32 *buffer)
  351. {
  352. void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
  353. struct __ctrl *ctr = &entry->region.ctrl;
  354. int i, k, timeout = 0;
  355. u32 addr, data, temp;
  356. u8 no_ops;
  357. addr = ctr->addr;
  358. no_ops = ctr->no_ops;
  359. for (i = 0; i < no_ops; i++) {
  360. k = 0;
  361. for (k = 0; k < 8; k++) {
  362. if (!(ctr->opcode & (1 << k)))
  363. continue;
  364. switch (1 << k) {
  365. case QLCNIC_DUMP_WCRB:
  366. qlcnic_ind_wr(adapter, addr, ctr->val1);
  367. break;
  368. case QLCNIC_DUMP_RWCRB:
  369. data = qlcnic_ind_rd(adapter, addr);
  370. qlcnic_ind_wr(adapter, addr, data);
  371. break;
  372. case QLCNIC_DUMP_ANDCRB:
  373. data = qlcnic_ind_rd(adapter, addr);
  374. qlcnic_ind_wr(adapter, addr,
  375. (data & ctr->val2));
  376. break;
  377. case QLCNIC_DUMP_ORCRB:
  378. data = qlcnic_ind_rd(adapter, addr);
  379. qlcnic_ind_wr(adapter, addr,
  380. (data | ctr->val3));
  381. break;
  382. case QLCNIC_DUMP_POLLCRB:
  383. while (timeout <= ctr->timeout) {
  384. data = qlcnic_ind_rd(adapter, addr);
  385. if ((data & ctr->val2) == ctr->val1)
  386. break;
  387. usleep_range(1000, 2000);
  388. timeout++;
  389. }
  390. if (timeout > ctr->timeout) {
  391. dev_info(&adapter->pdev->dev,
  392. "Timed out, aborting poll CRB\n");
  393. return -EINVAL;
  394. }
  395. break;
  396. case QLCNIC_DUMP_RD_SAVE:
  397. temp = ctr->index_a;
  398. if (temp)
  399. addr = qlcnic_get_saved_state(adapter,
  400. hdr,
  401. temp);
  402. data = qlcnic_ind_rd(adapter, addr);
  403. qlcnic_set_saved_state(adapter, hdr,
  404. ctr->index_v, data);
  405. break;
  406. case QLCNIC_DUMP_WRT_SAVED:
  407. temp = ctr->index_v;
  408. if (temp)
  409. data = qlcnic_get_saved_state(adapter,
  410. hdr,
  411. temp);
  412. else
  413. data = ctr->val1;
  414. temp = ctr->index_a;
  415. if (temp)
  416. addr = qlcnic_get_saved_state(adapter,
  417. hdr,
  418. temp);
  419. qlcnic_ind_wr(adapter, addr, data);
  420. break;
  421. case QLCNIC_DUMP_MOD_SAVE_ST:
  422. data = qlcnic_get_saved_state(adapter, hdr,
  423. ctr->index_v);
  424. data <<= ctr->shl_val;
  425. data >>= ctr->shr_val;
  426. if (ctr->val2)
  427. data &= ctr->val2;
  428. data |= ctr->val3;
  429. data += ctr->val1;
  430. qlcnic_set_saved_state(adapter, hdr,
  431. ctr->index_v, data);
  432. break;
  433. default:
  434. dev_info(&adapter->pdev->dev,
  435. "Unknown opcode\n");
  436. break;
  437. }
  438. }
  439. addr += ctr->stride;
  440. }
  441. return 0;
  442. }
  443. static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
  444. struct qlcnic_dump_entry *entry, __le32 *buffer)
  445. {
  446. int loop;
  447. u32 val, data = 0;
  448. struct __mux *mux = &entry->region.mux;
  449. val = mux->val;
  450. for (loop = 0; loop < mux->no_ops; loop++) {
  451. qlcnic_ind_wr(adapter, mux->addr, val);
  452. data = qlcnic_ind_rd(adapter, mux->read_addr);
  453. *buffer++ = cpu_to_le32(val);
  454. *buffer++ = cpu_to_le32(data);
  455. val += mux->val_stride;
  456. }
  457. return 2 * mux->no_ops * sizeof(u32);
  458. }
  459. static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
  460. struct qlcnic_dump_entry *entry, __le32 *buffer)
  461. {
  462. int i, loop;
  463. u32 cnt, addr, data, que_id = 0;
  464. struct __queue *que = &entry->region.que;
  465. addr = que->read_addr;
  466. cnt = que->read_addr_cnt;
  467. for (loop = 0; loop < que->no_ops; loop++) {
  468. qlcnic_ind_wr(adapter, que->sel_addr, que_id);
  469. addr = que->read_addr;
  470. for (i = 0; i < cnt; i++) {
  471. data = qlcnic_ind_rd(adapter, addr);
  472. *buffer++ = cpu_to_le32(data);
  473. addr += que->read_addr_stride;
  474. }
  475. que_id += que->stride;
  476. }
  477. return que->no_ops * cnt * sizeof(u32);
  478. }
  479. static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
  480. struct qlcnic_dump_entry *entry, __le32 *buffer)
  481. {
  482. int i;
  483. u32 data;
  484. void __iomem *addr;
  485. struct __ocm *ocm = &entry->region.ocm;
  486. addr = adapter->ahw->pci_base0 + ocm->read_addr;
  487. for (i = 0; i < ocm->no_ops; i++) {
  488. data = readl(addr);
  489. *buffer++ = cpu_to_le32(data);
  490. addr += ocm->read_addr_stride;
  491. }
  492. return ocm->no_ops * sizeof(u32);
  493. }
/*
 * QLCNIC_DUMP_READ_ROM/BRD_CONFIG handler: read ->size bytes of flash
 * starting at ->addr through the 64K ROM window, one 32-bit word at a
 * time, under the shared flash hardware lock.
 *
 * NOTE(review): if the lock is still not granted after MAX_CTL_CHECK
 * attempts the code claims ownership and reads anyway — presumably a
 * deliberate best-effort policy for crash dumps; confirm before
 * changing.
 */
static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i, count = 0;
	u32 fl_addr, size, val, lck_val, addr;
	struct __mem *rom = &entry->region.mem;

	fl_addr = rom->addr;
	size = rom->size / 4;	/* number of 32-bit words */
lock_try:
	lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
	if (!lck_val && count < MAX_CTL_CHECK) {
		usleep_range(10000, 11000);
		count++;
		goto lock_try;
	}
	QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
			    adapter->ahw->pci_func);
	for (i = 0; i < size; i++) {
		/* map the 64K window containing fl_addr, then read the
		 * word at its offset within the window
		 */
		addr = fl_addr & 0xFFFF0000;
		qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
		addr = LSW(fl_addr) + FLASH_ROM_DATA;
		val = qlcnic_ind_rd(adapter, addr);
		fl_addr += 4;
		*buffer++ = cpu_to_le32(val);
	}
	QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
	return rom->size;
}
  522. static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
  523. struct qlcnic_dump_entry *entry, __le32 *buffer)
  524. {
  525. int i;
  526. u32 cnt, val, data, addr;
  527. struct __cache *l1 = &entry->region.cache;
  528. val = l1->init_tag_val;
  529. for (i = 0; i < l1->no_ops; i++) {
  530. qlcnic_ind_wr(adapter, l1->addr, val);
  531. qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
  532. addr = l1->read_addr;
  533. cnt = l1->read_addr_num;
  534. while (cnt) {
  535. data = qlcnic_ind_rd(adapter, addr);
  536. *buffer++ = cpu_to_le32(data);
  537. addr += l1->read_addr_stride;
  538. cnt--;
  539. }
  540. val += l1->stride;
  541. }
  542. return l1->no_ops * l1->read_addr_num * sizeof(u32);
  543. }
/*
 * L2 cache dump handler: like the L1 variant, but after programming
 * the control register it polls it until the poll_mask bits clear
 * (mask and timeout packed into the upper half of ->ctrl_val).
 * Returns bytes captured, or -EINVAL on poll timeout.
 *
 * NOTE(review): time_out is not reset between tag iterations, so the
 * timeout budget is shared across all ->no_ops polls — presumably
 * intentional (bounds total wait), but worth confirming.
 */
static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
				struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i;
	u32 cnt, val, data, addr;
	u8 poll_mask, poll_to, time_out = 0;
	struct __cache *l2 = &entry->region.cache;

	val = l2->init_tag_val;
	poll_mask = LSB(MSW(l2->ctrl_val));	/* bits to wait on */
	poll_to = MSB(MSW(l2->ctrl_val));	/* max poll iterations */

	for (i = 0; i < l2->no_ops; i++) {
		qlcnic_ind_wr(adapter, l2->addr, val);
		if (LSW(l2->ctrl_val))
			qlcnic_ind_wr(adapter, l2->ctrl_addr,
				      LSW(l2->ctrl_val));
		if (!poll_mask)
			goto skip_poll;
		do {
			data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
			if (!(data & poll_mask))
				break;
			usleep_range(1000, 2000);
			time_out++;
		} while (time_out <= poll_to);

		if (time_out > poll_to) {
			dev_err(&adapter->pdev->dev,
				"Timeout exceeded in %s, aborting dump\n",
				__func__);
			return -EINVAL;
		}
skip_poll:
		addr = l2->read_addr;
		cnt = l2->read_addr_num;
		while (cnt) {
			data = qlcnic_ind_rd(adapter, addr);
			*buffer++ = cpu_to_le32(data);
			addr += l2->read_addr_stride;
			cnt--;
		}
		val += l2->stride;
	}
	return l2->no_ops * l2->read_addr_num * sizeof(u32);
}
  587. static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
  588. struct __mem *mem, __le32 *buffer,
  589. int *ret)
  590. {
  591. u32 addr, data, test;
  592. int i, reg_read;
  593. reg_read = mem->size;
  594. addr = mem->addr;
  595. /* check for data size of multiple of 16 and 16 byte alignment */
  596. if ((addr & 0xf) || (reg_read%16)) {
  597. dev_info(&adapter->pdev->dev,
  598. "Unaligned memory addr:0x%x size:0x%x\n",
  599. addr, reg_read);
  600. *ret = -EINVAL;
  601. return 0;
  602. }
  603. mutex_lock(&adapter->ahw->mem_lock);
  604. while (reg_read != 0) {
  605. qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
  606. qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
  607. qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
  608. for (i = 0; i < MAX_CTL_CHECK; i++) {
  609. test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
  610. if (!(test & TA_CTL_BUSY))
  611. break;
  612. }
  613. if (i == MAX_CTL_CHECK) {
  614. if (printk_ratelimit()) {
  615. dev_err(&adapter->pdev->dev,
  616. "failed to read through agent\n");
  617. *ret = -EIO;
  618. goto out;
  619. }
  620. }
  621. for (i = 0; i < 4; i++) {
  622. data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
  623. *buffer++ = cpu_to_le32(data);
  624. }
  625. addr += 16;
  626. reg_read -= 16;
  627. ret += 16;
  628. }
  629. out:
  630. mutex_unlock(&adapter->ahw->mem_lock);
  631. return mem->size;
  632. }
  633. /* DMA register base address */
  634. #define QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000))
  635. /* DMA register offsets w.r.t base address */
  636. #define QLC_DMA_CMD_BUFF_ADDR_LOW 0
  637. #define QLC_DMA_CMD_BUFF_ADDR_HI 4
  638. #define QLC_DMA_CMD_STATUS_CTRL 8
/*
 * Kick off a PEX DMA transfer whose descriptor has already been
 * written to card memory at mem->desc_card_addr, then poll the DMA
 * engine's status register (up to ~100ms) for completion.
 * Returns 0 on success, negative errno on register-write failure or
 * timeout. The DMA engine number comes from the template's saved
 * state (QLC_83XX_DMA_ENGINE_INDEX slot).
 */
static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
				struct __mem *mem)
{
	struct device *dev = &adapter->pdev->dev;
	u32 dma_no, dma_base_addr, temp_addr;
	int i, ret, dma_sts;
	void *tmpl_hdr;

	tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
	dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
					QLC_83XX_DMA_ENGINE_INDEX);
	dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);

	/* Program descriptor location (low/high) then the start command */
	temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
	ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
	if (ret)
		return ret;

	temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
	ret = qlcnic_ind_wr(adapter, temp_addr, 0);
	if (ret)
		return ret;

	temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
	ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
	if (ret)
		return ret;

	/* Wait for DMA to complete */
	temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
	for (i = 0; i < 400; i++) {
		dma_sts = qlcnic_ind_rd(adapter, temp_addr);
		if (dma_sts & BIT_1)	/* still busy */
			usleep_range(250, 500);
		else
			break;
	}

	if (i >= 400) {
		dev_info(dev, "PEX DMA operation timed out");
		ret = -EIO;
	}

	return ret;
}
/*
 * Read mem->size bytes of card memory into @buffer using the PEX DMA
 * engine, in chunks of at most QLC_PEX_DMA_READ_SIZE via the
 * pre-allocated coherent bounce buffer (fw_dump->dma_buffer).
 * Returns the number of bytes copied; on failure *@ret is set to a
 * negative errno and the partial byte count is returned.
 */
static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
				     struct __mem *mem,
				     __le32 *buffer, int *ret)
{
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	u32 temp, dma_base_addr, size = 0, read_size = 0;
	struct qlcnic_pex_dma_descriptor *dma_descr;
	struct device *dev = &adapter->pdev->dev;
	dma_addr_t dma_phys_addr;
	void *dma_buffer;
	void *tmpl_hdr;

	tmpl_hdr = fw_dump->tmpl_hdr;

	/* Check if DMA engine is available */
	temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
				      QLC_83XX_DMA_ENGINE_INDEX);
	dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
	temp = qlcnic_ind_rd(adapter,
			     dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);

	if (!(temp & BIT_31)) {
		dev_info(dev, "%s: DMA engine is not available\n", __func__);
		*ret = -EIO;
		return 0;
	}

	/* Create DMA descriptor */
	dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
			    GFP_KERNEL);
	if (!dma_descr) {
		*ret = -ENOMEM;
		return 0;
	}

	/* dma_desc_cmd  0:15  = 0
	 * dma_desc_cmd 16:19  = mem->dma_desc_cmd 0:3
	 * dma_desc_cmd 20:23  = pci function number
	 * dma_desc_cmd 24:31  = mem->dma_desc_cmd 8:15
	 */
	dma_phys_addr = fw_dump->phys_addr;
	dma_buffer = fw_dump->dma_buffer;
	temp = 0;	/* NOTE(review): dead store, overwritten below */
	temp = mem->dma_desc_cmd & 0xff0f;
	temp |= (adapter->ahw->pci_func & 0xf) << 4;
	dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
	dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
	dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
	dma_descr->src_addr_high = 0;

	/* Collect memory dump using multiple DMA operations if required */
	while (read_size < mem->size) {
		if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
			size = QLC_PEX_DMA_READ_SIZE;
		else
			size = mem->size - read_size;

		dma_descr->src_addr_low = mem->addr + read_size;
		dma_descr->read_data_size = size;

		/* Write DMA descriptor to MS memory*/
		temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
		*ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
					      (u32 *)dma_descr, temp);
		if (*ret) {
			dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
				 mem->desc_card_addr);
			goto free_dma_descr;
		}

		*ret = qlcnic_start_pex_dma(adapter, mem);
		if (*ret) {
			dev_info(dev, "Failed to start PEX DMA operation\n");
			goto free_dma_descr;
		}

		memcpy(buffer, dma_buffer, size);
		buffer += size / 4;	/* buffer is __le32 *, advance in words */
		read_size += size;
	}

free_dma_descr:
	kfree(dma_descr);

	return read_size;
}
  751. static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
  752. struct qlcnic_dump_entry *entry, __le32 *buffer)
  753. {
  754. struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
  755. struct device *dev = &adapter->pdev->dev;
  756. struct __mem *mem = &entry->region.mem;
  757. u32 data_size;
  758. int ret = 0;
  759. if (fw_dump->use_pex_dma) {
  760. data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
  761. &ret);
  762. if (ret)
  763. dev_info(dev,
  764. "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
  765. entry->hdr.mask);
  766. else
  767. return data_size;
  768. }
  769. data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
  770. if (ret) {
  771. dev_info(dev,
  772. "Failed to read memory dump using test agent method: mask[0x%x]\n",
  773. entry->hdr.mask);
  774. return 0;
  775. } else {
  776. return data_size;
  777. }
  778. }
  779. static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
  780. struct qlcnic_dump_entry *entry, __le32 *buffer)
  781. {
  782. entry->hdr.flags |= QLCNIC_DUMP_SKIP;
  783. return 0;
  784. }
  785. static int qlcnic_valid_dump_entry(struct device *dev,
  786. struct qlcnic_dump_entry *entry, u32 size)
  787. {
  788. int ret = 1;
  789. if (size != entry->hdr.cap_size) {
  790. dev_err(dev,
  791. "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
  792. entry->hdr.type, entry->hdr.mask, size,
  793. entry->hdr.cap_size);
  794. ret = 0;
  795. }
  796. return ret;
  797. }
/*
 * QLCNIC_READ_POLLRDMWR handler: write val1 to addr1, poll addr1 for
 * poll_mask, read-modify-write addr2 with mod_mask, write val2 to
 * addr1 and poll again, then capture (addr2, data). Returns 8 bytes
 * captured, or 0 if the first poll times out.
 *
 * NOTE(review): the second poll's timeout is not acted upon — the
 * capture proceeds regardless. Looks intentional (the data was read
 * before the second poll), but worth confirming against the template
 * spec.
 */
static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
				 struct qlcnic_dump_entry *entry,
				 __le32 *buffer)
{
	struct __pollrdmwr *poll = &entry->region.pollrdmwr;
	u32 data, wait_count, poll_wait, temp;

	poll_wait = poll->poll_wait;

	qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
	wait_count = 0;

	while (wait_count < poll_wait) {
		data = qlcnic_ind_rd(adapter, poll->addr1);
		if ((data & poll->poll_mask) != 0)
			break;
		wait_count++;
	}

	if (wait_count == poll_wait) {
		dev_err(&adapter->pdev->dev,
			"Timeout exceeded in %s, aborting dump\n",
			__func__);
		return 0;
	}

	data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
	qlcnic_ind_wr(adapter, poll->addr2, data);
	qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
	wait_count = 0;

	while (wait_count < poll_wait) {
		temp = qlcnic_ind_rd(adapter, poll->addr1);
		if ((temp & poll->poll_mask) != 0)
			break;
		wait_count++;
	}

	*buffer++ = cpu_to_le32(poll->addr2);
	*buffer++ = cpu_to_le32(data);

	return 2 * sizeof(u32);
}
/*
 * QLCNIC_DUMP_POLL_RD handler: for each of ->no_ops select values,
 * program ->sel_addr, busy-poll it for ->poll_mask up to ->poll_wait
 * iterations, then capture the (select, data) pair from ->read_addr.
 * Returns bytes captured, or 0 if any poll times out.
 */
static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
			      struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	struct __pollrd *pollrd = &entry->region.pollrd;
	u32 data, wait_count, poll_wait, sel_val;
	int i;

	poll_wait = pollrd->poll_wait;
	sel_val = pollrd->sel_val;

	for (i = 0; i < pollrd->no_ops; i++) {
		qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
		wait_count = 0;
		while (wait_count < poll_wait) {
			data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
			if ((data & pollrd->poll_mask) != 0)
				break;
			wait_count++;
		}

		if (wait_count == poll_wait) {
			dev_err(&adapter->pdev->dev,
				"Timeout exceeded in %s, aborting dump\n",
				__func__);
			return 0;
		}

		data = qlcnic_ind_rd(adapter, pollrd->read_addr);
		*buffer++ = cpu_to_le32(sel_val);
		*buffer++ = cpu_to_le32(data);
		sel_val += pollrd->sel_val_stride;
	}

	return pollrd->no_ops * (2 * sizeof(u32));
}
  863. static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
  864. struct qlcnic_dump_entry *entry, __le32 *buffer)
  865. {
  866. struct __mux2 *mux2 = &entry->region.mux2;
  867. u32 data;
  868. u32 t_sel_val, sel_val1, sel_val2;
  869. int i;
  870. sel_val1 = mux2->sel_val1;
  871. sel_val2 = mux2->sel_val2;
  872. for (i = 0; i < mux2->no_ops; i++) {
  873. qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
  874. t_sel_val = sel_val1 & mux2->sel_val_mask;
  875. qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
  876. data = qlcnic_ind_rd(adapter, mux2->read_addr);
  877. *buffer++ = cpu_to_le32(t_sel_val);
  878. *buffer++ = cpu_to_le32(data);
  879. qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
  880. t_sel_val = sel_val2 & mux2->sel_val_mask;
  881. qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
  882. data = qlcnic_ind_rd(adapter, mux2->read_addr);
  883. *buffer++ = cpu_to_le32(t_sel_val);
  884. *buffer++ = cpu_to_le32(data);
  885. sel_val1 += mux2->sel_val_stride;
  886. sel_val2 += mux2->sel_val_stride;
  887. }
  888. return mux2->no_ops * (4 * sizeof(u32));
  889. }
  890. static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
  891. struct qlcnic_dump_entry *entry, __le32 *buffer)
  892. {
  893. u32 fl_addr, size;
  894. struct __mem *rom = &entry->region.mem;
  895. fl_addr = rom->addr;
  896. size = rom->size / 4;
  897. if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
  898. (u8 *)buffer, size))
  899. return rom->size;
  900. return 0;
  901. }
/* Dispatch table mapping 82xx minidump template opcodes to their
 * capture handlers. qlcnic_dump_fw() scans this table linearly for
 * each template entry; unknown opcodes are skipped there.
 */
static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
/* Dispatch table for 83xx minidump template opcodes. Compared with the
 * 82xx table it adds POLL_RD, MUX2 and POLLRDMWR handlers and routes
 * ROM/board-config reads through the 83xx flash interface.
 */
static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
	{QLCNIC_READ_MUX2, qlcnic_read_mux2},
	{QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
	{QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
  949. static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
  950. {
  951. uint64_t sum = 0;
  952. int count = temp_size / sizeof(uint32_t);
  953. while (count-- > 0)
  954. sum += *temp_buffer++;
  955. while (sum >> 32)
  956. sum = (sum & 0xFFFFFFFF) + (sum >> 32);
  957. return ~sum;
  958. }
  959. static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
  960. u8 *buffer, u32 size)
  961. {
  962. int ret = 0;
  963. if (qlcnic_82xx_check(adapter))
  964. return -EIO;
  965. if (qlcnic_83xx_lock_flash(adapter))
  966. return -EIO;
  967. ret = qlcnic_83xx_lockless_flash_read32(adapter,
  968. QLC_83XX_MINIDUMP_FLASH,
  969. buffer, size / sizeof(u32));
  970. qlcnic_83xx_unlock_flash(adapter);
  971. return ret;
  972. }
  973. static int
  974. qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
  975. struct qlcnic_cmd_args *cmd)
  976. {
  977. struct qlcnic_83xx_dump_template_hdr tmp_hdr;
  978. u32 size = sizeof(tmp_hdr) / sizeof(u32);
  979. int ret = 0;
  980. if (qlcnic_82xx_check(adapter))
  981. return -EIO;
  982. if (qlcnic_83xx_lock_flash(adapter))
  983. return -EIO;
  984. ret = qlcnic_83xx_lockless_flash_read32(adapter,
  985. QLC_83XX_MINIDUMP_FLASH,
  986. (u8 *)&tmp_hdr, size);
  987. qlcnic_83xx_unlock_flash(adapter);
  988. cmd->rsp.arg[2] = tmp_hdr.size;
  989. cmd->rsp.arg[3] = tmp_hdr.version;
  990. return ret;
  991. }
  992. static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
  993. u32 *version, u32 *temp_size,
  994. u8 *use_flash_temp)
  995. {
  996. int err = 0;
  997. struct qlcnic_cmd_args cmd;
  998. if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
  999. return -ENOMEM;
  1000. err = qlcnic_issue_cmd(adapter, &cmd);
  1001. if (err != QLCNIC_RCODE_SUCCESS) {
  1002. if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
  1003. qlcnic_free_mbx_args(&cmd);
  1004. return -EIO;
  1005. }
  1006. *use_flash_temp = 1;
  1007. }
  1008. *temp_size = cmd.rsp.arg[2];
  1009. *version = cmd.rsp.arg[3];
  1010. qlcnic_free_mbx_args(&cmd);
  1011. if (!(*temp_size))
  1012. return -EIO;
  1013. return 0;
  1014. }
/* Fetch the minidump template from firmware via the GET_TEMP_HDR
 * mailbox command.
 *
 * A DMA-coherent bounce buffer of @temp_size bytes is allocated and its
 * bus address handed to firmware in the request args; on success the
 * buffer's little-endian words are converted to CPU order into @buffer.
 *
 * Returns 0 (QLCNIC_RCODE_SUCCESS) on success, -ENOMEM if the DMA
 * buffer or mailbox args cannot be allocated, otherwise the firmware
 * command's error code.
 */
static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
					     u32 *buffer, u32 temp_size)
{
	int err = 0, i;
	void *tmp_addr;
	__le32 *tmp_buf;
	struct qlcnic_cmd_args cmd;
	dma_addr_t tmp_addr_t = 0;

	tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
				      &tmp_addr_t, GFP_KERNEL);
	if (!tmp_addr)
		return -ENOMEM;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
		err = -ENOMEM;
		goto free_mem;
	}

	/* Bus address is passed to firmware split into low/high words */
	cmd.req.arg[1] = LSD(tmp_addr_t);
	cmd.req.arg[2] = MSD(tmp_addr_t);
	cmd.req.arg[3] = temp_size;

	err = qlcnic_issue_cmd(adapter, &cmd);

	tmp_buf = tmp_addr;
	if (err == QLCNIC_RCODE_SUCCESS) {
		/* Template arrives little-endian; convert word by word */
		for (i = 0; i < temp_size / sizeof(u32); i++)
			*buffer++ = __le32_to_cpu(*tmp_buf++);
	}

	qlcnic_free_mbx_args(&cmd);

free_mem:
	dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);

	return err;
}
/* Obtain and validate the minidump template, then enable FW dump state.
 *
 * The template is fetched from firmware via mailbox, falling back to
 * flash either when the size query already flagged flash use or when
 * the mailbox fetch fails (note the goto that jumps into the error
 * branch below). The template is checksum-validated, its header values
 * are cached, and — if PEX DMA is in use — a DMA buffer for dump
 * capture is allocated (failure there just disables use_pex_dma).
 *
 * Returns 0 on success; -EIO on size/fetch/checksum failure, -ENOMEM
 * if the template buffer cannot be allocated. On any failure
 * fw_dump->tmpl_hdr is left NULL.
 */
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw;
	struct qlcnic_fw_dump *fw_dump;
	u32 version, csum, *tmp_buf;
	u8 use_flash_temp = 0;
	u32 temp_size = 0;
	void *temp_buffer;
	int err;

	ahw = adapter->ahw;
	fw_dump = &ahw->fw_dump;
	err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
					       &use_flash_temp);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Can't get template size %d\n", err);
		return -EIO;
	}

	fw_dump->tmpl_hdr = vzalloc(temp_size);
	if (!fw_dump->tmpl_hdr)
		return -ENOMEM;

	tmp_buf = (u32 *)fw_dump->tmpl_hdr;
	if (use_flash_temp)
		goto flash_temp;

	err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);

	if (err) {
flash_temp:
		/* Mailbox fetch failed or flash was requested up front */
		err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
							temp_size);

		if (err) {
			dev_err(&adapter->pdev->dev,
				"Failed to get minidump template header %d\n",
				err);
			vfree(fw_dump->tmpl_hdr);
			fw_dump->tmpl_hdr = NULL;
			return -EIO;
		}
	}

	/* A valid template checksums to 0 */
	csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);

	if (csum) {
		dev_err(&adapter->pdev->dev,
			"Template header checksum validation failed\n");
		vfree(fw_dump->tmpl_hdr);
		fw_dump->tmpl_hdr = NULL;
		return -EIO;
	}

	qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);

	if (fw_dump->use_pex_dma) {
		/* Best effort: fall back to non-DMA capture on failure */
		fw_dump->dma_buffer = NULL;
		temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
						 QLC_PEX_DMA_READ_SIZE,
						 &fw_dump->phys_addr,
						 GFP_KERNEL);
		if (!temp_buffer)
			fw_dump->use_pex_dma = false;
		else
			fw_dump->dma_buffer = temp_buffer;
	}

	dev_info(&adapter->pdev->dev,
		 "Default minidump capture mask 0x%x\n",
		 fw_dump->cap_mask);

	qlcnic_enable_fw_dump_state(adapter);

	return 0;
}
/* Capture a firmware dump by walking the minidump template.
 *
 * Preconditions: a template header must be present, dump state must be
 * enabled, and any previous dump must have been cleared. The dump data
 * area is sized from the capture mask, then each template entry whose
 * mask is selected is dispatched to its opcode handler (82xx or 83xx
 * table); entries that are masked out, unknown, or fail validation are
 * flagged QLCNIC_DUMP_SKIP and skipped. A udev event announces the
 * completed dump.
 *
 * Returns 0 on success, -EIO on precondition failure or zero-sized
 * dump, -ENOMEM if the dump buffer cannot be allocated.
 */
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	static const struct qlcnic_dump_operations *fw_dump_ops;
	struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
	u32 entry_offset, dump, no_entries, buf_offset = 0;
	int i, k, ops_cnt, ops_index, dump_size = 0;
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_hardware_context *ahw;
	struct qlcnic_dump_entry *entry;
	void *tmpl_hdr;
	u32 ocm_window;
	__le32 *buffer;
	char mesg[64];
	char *msg[] = {mesg, NULL};

	ahw = adapter->ahw;
	tmpl_hdr = fw_dump->tmpl_hdr;

	/* Return if we don't have firmware dump template header */
	if (!tmpl_hdr)
		return -EIO;

	if (!qlcnic_check_fw_dump_state(adapter)) {
		dev_info(&adapter->pdev->dev, "Dump not enabled\n");
		return -EIO;
	}

	if (fw_dump->clr) {
		dev_info(&adapter->pdev->dev,
			 "Previous dump not cleared, not capturing dump\n");
		return -EIO;
	}

	netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
	/* Calculate the size for dump data area only */
	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
		if (i & fw_dump->cap_mask)
			dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);

	if (!dump_size)
		return -EIO;

	fw_dump->data = vzalloc(dump_size);
	if (!fw_dump->data)
		return -ENOMEM;

	buffer = fw_dump->data;
	fw_dump->size = dump_size;
	no_entries = fw_dump->num_entries;
	entry_offset = fw_dump->offset;
	/* Record driver and firmware versions in the template header */
	qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
	qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);

	if (qlcnic_82xx_check(adapter)) {
		ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
		fw_dump_ops = qlcnic_fw_dump_ops;
	} else {
		/* 83xx also stashes per-function state in the header */
		hdr_83xx = tmpl_hdr;
		ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
		fw_dump_ops = qlcnic_83xx_fw_dump_ops;
		ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
		hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
		hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
	}

	for (i = 0; i < no_entries; i++) {
		entry = tmpl_hdr + entry_offset;
		/* Skip entries not selected by the capture mask */
		if (!(entry->hdr.mask & fw_dump->cap_mask)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Find the handler for this entry */
		ops_index = 0;
		while (ops_index < ops_cnt) {
			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
				break;
			ops_index++;
		}

		if (ops_index == ops_cnt) {
			dev_info(dev, "Skipping unknown entry opcode %d\n",
				 entry->hdr.type);
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Collect dump for this entry */
		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
		if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		buf_offset += entry->hdr.cap_size;
		entry_offset += entry->hdr.offset;
		buffer = fw_dump->data + buf_offset;
	}

	/* Mark the dump as present so a second capture is refused above */
	fw_dump->clr = 1;
	snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
	netdev_info(adapter->netdev,
		    "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
		    fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
		    fw_dump->tmpl_hdr);
	/* Send a udev event to notify availability of FW dump */
	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);

	return 0;
}
  1207. static inline bool
  1208. qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
  1209. {
  1210. /* For special adapters (with 0x8830 device ID), where iSCSI firmware
  1211. * dump needs to be captured as part of regular firmware dump
  1212. * collection process, firmware exports it's capability through
  1213. * capability registers
  1214. */
  1215. return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
  1216. (adapter->ahw->extra_capability[0] &
  1217. QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
  1218. }
/* Refresh the minidump template on 83xx adapters.
 *
 * A new template is fetched when none is cached yet or when the
 * firmware version has increased since the last fetch (the old
 * template is freed first). If the adapter supports extended iSCSI
 * dump, that capability is negotiated before the fetch and — on
 * success — the capture mask in both the template header and the
 * cached fw_dump state is raised to 0x1f as the firmware requires.
 * Fetch failures leave the function silently (no template installed).
 */
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
{
	u32 prev_version, current_version;
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
	struct pci_dev *pdev = adapter->pdev;
	bool extended = false;
	int ret;

	prev_version = adapter->fw_version;
	current_version = qlcnic_83xx_get_fw_version(adapter);

	if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
		/* vfree(NULL) is a no-op on the first fetch */
		vfree(fw_dump->tmpl_hdr);

		if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
			extended = !qlcnic_83xx_extend_md_capab(adapter);

		ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
		if (ret)
			return;

		dev_info(&pdev->dev, "Supports FW dump capability\n");

		/* Once we have minidump template with extended iSCSI dump
		 * capability, update the minidump capture mask to 0x1f as
		 * per FW requirement
		 */
		if (extended) {
			struct qlcnic_83xx_dump_template_hdr *hdr;

			hdr = fw_dump->tmpl_hdr;
			hdr->drv_cap_mask = 0x1f;
			fw_dump->cap_mask = 0x1f;
			dev_info(&pdev->dev,
				 "Extended iSCSI dump capability and updated capture mask to 0x1f\n");
		}
	}
}