/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_tmpl.h"

/* note default template is in big endian */
static const uint32_t ql27xx_fwdt_default_template[] = {
	0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
	0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x04010000, 0x14000000, 0x00000000,
	0x02000000, 0x44000000, 0x09010000, 0x10000000,
	0x00000000, 0x02000000, 0x01010000, 0x1c000000,
	0x00000000, 0x02000000, 0x00600000, 0x00000000,
	0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
	0x02000000, 0x00600000, 0x00000000, 0xcc000000,
	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
	0x10600000, 0x00000000, 0xd4000000, 0x01010000,
	0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
	0x00000060, 0xf0000000, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00700000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10700000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x40700000, 0x041000c0,
	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
	0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
	0x18000000, 0x00000000, 0x02000000, 0x007c0000,
	0x040300c4, 0x00010000, 0x18000000, 0x00000000,
	0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
	0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
	0x00000000, 0xc0000000, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x007c0000, 0x04200000,
	0x0b010000, 0x18000000, 0x00000000, 0x02000000,
	0x0c000000, 0x00000000, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x0a000000, 0x04200080, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00300000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10300000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20300000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
	0x00000000, 0x02000000, 0x06010000, 0x1c000000,
	0x00000000, 0x02000000, 0x01000000, 0x00000200,
	0xff230200, 0x06010000, 0x1c000000, 0x00000000,
	0x02000000, 0x02000000, 0x00001000, 0x00000000,
	0x07010000, 0x18000000, 0x00000000, 0x02000000,
	0x00000000, 0x01000000, 0x07010000, 0x18000000,
	0x00000000, 0x02000000, 0x00000000, 0x02000000,
	0x07010000, 0x18000000, 0x00000000, 0x02000000,
	0x00000000, 0x03000000, 0x0d010000, 0x14000000,
	0x00000000, 0x02000000, 0x00000000, 0xff000000,
	0x10000000, 0x00000000, 0x00000080,
};

static inline void __iomem *
qla27xx_isp_reg(struct scsi_qla_host *vha)
{
	return &vha->hw->iobase->isp24;
}
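
/*
 * The insert/read helpers below follow a two-pass convention used
 * throughout this file: when @buf is NULL, nothing is written (and no
 * hardware is touched) and only *@len is advanced, so the same template
 * walk can be used first to size the dump buffer and then to fill it.
 */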

static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le16 *)buf = cpu_to_le16(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insert32(uint32_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le32 *)buf = cpu_to_le32(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
{
	if (buf && mem && size) {
		buf += *len;
		memcpy(buf, mem, size);
	}
	*len += size;
}

static inline void
qla27xx_read8(void __iomem *window, void *buf, ulong *len)
{
	uint8_t value = ~0;

	if (buf) {
		value = RD_REG_BYTE(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read16(void __iomem *window, void *buf, ulong *len)
{
	uint16_t value = ~0;

	if (buf) {
		value = RD_REG_WORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read32(void __iomem *window, void *buf, ulong *len)
{
	uint32_t value = ~0;

	if (buf) {
		value = RD_REG_DWORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
{
	return
	    (width == 1) ? qla27xx_read8 :
	    (width == 2) ? qla27xx_read16 :
			   qla27xx_read32;
}

static inline void
qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
	uint offset, void *buf, ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;

	qla27xx_read32(window, buf, len);
}

static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
	uint offset, uint32_t data, void *buf)
{
	__iomem void *window = (void __iomem *)reg + offset;

	if (buf) {
		WRT_REG_DWORD(window, data);
	}
}
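
/*
 * Windowed I/O-base read: the base address is written to IOBASE_ADDR
 * (qla27xx_write_reg() only touches hardware when @buf is non-NULL, so
 * sizing passes stay off the bus), then @count registers of @width bytes
 * are read from the window at @offset, each recorded as an address/value
 * pair.
 */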

static inline void
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
	uint32_t addr, uint offset, uint count, uint width, void *buf,
	ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;
	void (*readn)(void __iomem*, void *, ulong *) =
	    qla27xx_read_vector(width);

	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
	while (count--) {
		qla27xx_insert32(addr, buf, len);
		readn(window, buf, len);
		window += width;
		addr++;
	}
}

static inline void
qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
	if (buf)
		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, NULL, 0xd011,
	    "Skipping entry %d\n", ent->hdr.entry_type);
}
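
/*
 * Entry handlers: each qla27xx_fwdt_entry_tNNN() routine processes one
 * template entry type, appending its captured data (or, on a sizing pass,
 * only its length) to the dump.  A handler returns true only to terminate
 * the template walk, which the end-of-template entry (t255) does.
 */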

static int
qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd100,
	    "%s: nop [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	return false;
}

static int
qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
	    "%s: end [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	/* terminate */
	return true;
}

static int
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd200,
	    "%s: rdio t1 [%lx]\n", __func__, *len);
	qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
	    ent->t256.reg_count, ent->t256.reg_width, buf, len);

	return false;
}

static int
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd201,
	    "%s: wrio t1 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
	qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);

	return false;
}

static int
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd202,
	    "%s: rdio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
	qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
	    ent->t258.reg_count, ent->t258.reg_width, buf, len);

	return false;
}

static int
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd203,
	    "%s: wrio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
	qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
	qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);

	return false;
}

static int
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd204,
	    "%s: rdpci [%lx]\n", __func__, *len);
	qla27xx_insert32(ent->t260.pci_offset, buf, len);
	qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);

	return false;
}

static int
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd205,
	    "%s: wrpci [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);

	return false;
}
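
/*
 * T262 dumps a firmware RAM region.  For the external, shared and DDR RAM
 * areas the effective start/end addresses come from the adapter state and
 * are written back into the entry header on a capture pass, so the dump
 * consumer sees the range that was actually read.
 */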

static int
qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords;
	ulong start;
	ulong end;

	ql_dbg(ql_dbg_misc, vha, 0xd206,
	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
	start = ent->t262.start_addr;
	end = ent->t262.end_addr;

	if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
		;
	} else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
		end = vha->hw->fw_memory_size;
		if (buf)
			ent->t262.end_addr = end;
	} else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
		start = vha->hw->fw_shared_ram_start;
		end = vha->hw->fw_shared_ram_end;
		if (buf) {
			ent->t262.start_addr = start;
			ent->t262.end_addr = end;
		}
	} else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
		start = vha->hw->fw_ddr_ram_start;
		end = vha->hw->fw_ddr_ram_end;
		if (buf) {
			ent->t262.start_addr = start;
			ent->t262.end_addr = end;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd022,
		    "%s: unknown area %x\n", __func__, ent->t262.ram_area);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	if (end < start || start == 0 || end == 0) {
		ql_dbg(ql_dbg_misc, vha, 0xd023,
		    "%s: unusable range (start=%x end=%x)\n", __func__,
		    ent->t262.start_addr, ent->t262.end_addr);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	dwords = end - start + 1;
	if (buf) {
		buf += *len;
		qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);
done:
	return false;
}

static int
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint count = 0;
	uint i;
	uint length;

	ql_dbg(ql_dbg_misc, vha, 0xd207,
	    "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
	if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (!test_bit(i, vha->hw->req_qid_map))
				continue;

			if (req || !buf) {
				length = req ?
				    req->length : REQUEST_ENTRY_CNT_24XX;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(req ? req->ring : NULL,
				    length * sizeof(*req->ring), buf, len);
				count++;
			}
		}
	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (!test_bit(i, vha->hw->rsp_qid_map))
				continue;

			if (rsp || !buf) {
				length = rsp ?
				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
				    length * sizeof(*rsp->ring), buf, len);
				count++;
			}
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd026,
		    "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf)
		ent->t263.num_queues = count;

	return false;
}

static int
qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd208,
	    "%s: getfce [%lx]\n", __func__, *len);
	if (vha->hw->fce) {
		if (buf) {
			ent->t264.fce_trace_size = FCE_SIZE;
			ent->t264.write_pointer = vha->hw->fce_wr;
			ent->t264.base_pointer = vha->hw->fce_dma;
			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
		}
		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd027,
		    "%s: missing fce\n", __func__);
		qla27xx_skip_entry(ent, buf);
	}

	return false;
}

static int
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd209,
	    "%s: pause risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_pause_risc(reg, vha->hw);

	return false;
}

static int
qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20a,
	    "%s: reset risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_soft_reset(vha->hw);

	return false;
}

static int
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd20b,
	    "%s: dis intr [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);

	return false;
}

static int
qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20c,
	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
	if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
		if (vha->hw->eft) {
			if (buf) {
				ent->t268.buf_size = EFT_SIZE;
				ent->t268.start_addr = vha->hw->eft_dma;
			}
			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing eft\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02b,
		    "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
		qla27xx_skip_entry(ent, buf);
	}

	return false;
}

static int
qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20d,
	    "%s: scratch [%lx]\n", __func__, *len);
	qla27xx_insert32(0xaaaaaaaa, buf, len);
	qla27xx_insert32(0xbbbbbbbb, buf, len);
	qla27xx_insert32(0xcccccccc, buf, len);
	qla27xx_insert32(0xdddddddd, buf, len);
	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
	if (buf)
		ent->t269.scratch_size = 5 * sizeof(uint32_t);

	return false;
}
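
/*
 * T270/T271 access remote registers indirectly: the I/O base window is set
 * to 0x40, the target address is written to offset 0xc0 (OR'd with
 * 0x80000000 in the read path), and the data moves through offset 0xc4.
 */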

static int
qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong dwords = ent->t270.count;
	ulong addr = ent->t270.addr;

	ql_dbg(ql_dbg_misc, vha, 0xd20e,
	    "%s: rdremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
	while (dwords--) {
		qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
		qla27xx_insert32(addr, buf, len);
		qla27xx_read_reg(reg, 0xc4, buf, len);
		addr += sizeof(uint32_t);
	}

	return false;
}

static int
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong addr = ent->t271.addr;
	ulong data = ent->t271.data;

	ql_dbg(ql_dbg_misc, vha, 0xd20f,
	    "%s: wrremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
	qla27xx_write_reg(reg, 0xc4, data, buf);
	qla27xx_write_reg(reg, 0xc0, addr, buf);

	return false;
}

static int
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = ent->t272.count;
	ulong start = ent->t272.addr;

	ql_dbg(ql_dbg_misc, vha, 0xd210,
	    "%s: rdremram [%lx]\n", __func__, *len);
	if (buf) {
		ql_dbg(ql_dbg_misc, vha, 0xd02c,
		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
		buf += *len;
		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);

	return false;
}

static int
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = ent->t273.count;
	ulong addr = ent->t273.addr;
	uint32_t value;

	ql_dbg(ql_dbg_misc, vha, 0xd211,
	    "%s: pcicfg [%lx]\n", __func__, *len);
	while (dwords--) {
		value = ~0;
		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
			ql_dbg(ql_dbg_misc, vha, 0xd02d,
			    "%s: failed pcicfg read at %lx\n", __func__, addr);
		qla27xx_insert32(addr, buf, len);
		qla27xx_insert32(value, buf, len);
		addr += sizeof(uint32_t);
	}

	return false;
}

static int
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint count = 0;
	uint i;

	ql_dbg(ql_dbg_misc, vha, 0xd212,
	    "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
	if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (!test_bit(i, vha->hw->req_qid_map))
				continue;

			if (req || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(req && req->out_ptr ?
				    *req->out_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (!test_bit(i, vha->hw->rsp_qid_map))
				continue;

			if (rsp || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(rsp && rsp->in_ptr ?
				    *rsp->in_ptr : 0, buf, len);
				count++;
			}
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02f,
		    "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf)
		ent->t274.num_queues = count;

	if (!count)
		qla27xx_skip_entry(ent, buf);

	return false;
}

static int
qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong offset = offsetof(typeof(*ent), t275.buffer);

	ql_dbg(ql_dbg_misc, vha, 0xd213,
	    "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
	if (!ent->t275.length) {
		ql_dbg(ql_dbg_misc, vha, 0xd020,
		    "%s: buffer zero length\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}
	if (offset + ent->t275.length > ent->hdr.entry_size) {
		ql_dbg(ql_dbg_misc, vha, 0xd030,
		    "%s: buffer overflow\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
done:
	return false;
}

static int
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
	    "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len);
	qla27xx_skip_entry(ent, buf);

	return false;
}

struct qla27xx_fwdt_entry_call {
	uint type;
	int (*call)(
	    struct scsi_qla_host *,
	    struct qla27xx_fwdt_entry *,
	    void *,
	    ulong *);
};
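
/*
 * The dispatch table is kept sorted by ascending entry type and is
 * terminated by a (uint)-1 catch-all entry; qla27xx_find_entry() relies on
 * this ordering for its linear scan.
 */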

static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
	{ ENTRY_TYPE_NOP,		qla27xx_fwdt_entry_t0 },
	{ ENTRY_TYPE_TMP_END,		qla27xx_fwdt_entry_t255 },
	{ ENTRY_TYPE_RD_IOB_T1,		qla27xx_fwdt_entry_t256 },
	{ ENTRY_TYPE_WR_IOB_T1,		qla27xx_fwdt_entry_t257 },
	{ ENTRY_TYPE_RD_IOB_T2,		qla27xx_fwdt_entry_t258 },
	{ ENTRY_TYPE_WR_IOB_T2,		qla27xx_fwdt_entry_t259 },
	{ ENTRY_TYPE_RD_PCI,		qla27xx_fwdt_entry_t260 },
	{ ENTRY_TYPE_WR_PCI,		qla27xx_fwdt_entry_t261 },
	{ ENTRY_TYPE_RD_RAM,		qla27xx_fwdt_entry_t262 },
	{ ENTRY_TYPE_GET_QUEUE,		qla27xx_fwdt_entry_t263 },
	{ ENTRY_TYPE_GET_FCE,		qla27xx_fwdt_entry_t264 },
	{ ENTRY_TYPE_PSE_RISC,		qla27xx_fwdt_entry_t265 },
	{ ENTRY_TYPE_RST_RISC,		qla27xx_fwdt_entry_t266 },
	{ ENTRY_TYPE_DIS_INTR,		qla27xx_fwdt_entry_t267 },
	{ ENTRY_TYPE_GET_HBUF,		qla27xx_fwdt_entry_t268 },
	{ ENTRY_TYPE_SCRATCH,		qla27xx_fwdt_entry_t269 },
	{ ENTRY_TYPE_RDREMREG,		qla27xx_fwdt_entry_t270 },
	{ ENTRY_TYPE_WRREMREG,		qla27xx_fwdt_entry_t271 },
	{ ENTRY_TYPE_RDREMRAM,		qla27xx_fwdt_entry_t272 },
	{ ENTRY_TYPE_PCICFG,		qla27xx_fwdt_entry_t273 },
	{ ENTRY_TYPE_GET_SHADOW,	qla27xx_fwdt_entry_t274 },
	{ ENTRY_TYPE_WRITE_BUF,		qla27xx_fwdt_entry_t275 },
	{ -1,				qla27xx_fwdt_entry_other }
};

static inline int (*qla27xx_find_entry(uint type))
	(struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
{
	struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;

	while (list->type < type)
		list++;

	if (list->type == type)
		return list->call;
	return qla27xx_fwdt_entry_other;
}

static inline void *
qla27xx_next_entry(void *p)
{
	struct qla27xx_fwdt_entry *ent = p;

	return p + ent->hdr.entry_size;
}
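
/*
 * Walk every entry in the template, dispatching each to its handler until
 * the entry count is exhausted or a handler (the end-of-template entry)
 * asks to stop.  With a non-NULL @buf this is the capture pass; with a
 * NULL @buf only *@len is accumulated.
 */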

static void
qla27xx_walk_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
{
	struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
	ulong count = tmp->entry_count;

	ql_dbg(ql_dbg_misc, vha, 0xd01a,
	    "%s: entry count %lx\n", __func__, count);
	while (count--) {
		if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
			break;
		ent = qla27xx_next_entry(ent);
	}

	if (count)
		ql_dbg(ql_dbg_misc, vha, 0xd018,
		    "%s: residual count (%lx)\n", __func__, count);

	if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END)
		ql_dbg(ql_dbg_misc, vha, 0xd019,
		    "%s: missing end (%lx)\n", __func__, count);

	ql_dbg(ql_dbg_misc, vha, 0xd01b,
	    "%s: len=%lx\n", __func__, *len);

	if (buf) {
		ql_log(ql_log_warn, vha, 0xd015,
		    "Firmware dump saved to temp buffer (%ld/%p)\n",
		    vha->host_no, vha->hw->fw_dump);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}
}

static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
	tmp->capture_timestamp = jiffies;
}

static void
qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };

	sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
	    v+0, v+1, v+2, v+3, v+4, v+5);
	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
	tmp->driver_info[1] = v[5] << 8 | v[4];
	tmp->driver_info[2] = 0x12345678;
}

static void
qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
	struct scsi_qla_host *vha)
{
	tmp->firmware_version[0] = vha->hw->fw_major_version;
	tmp->firmware_version[1] = vha->hw->fw_minor_version;
	tmp->firmware_version[2] = vha->hw->fw_subminor_version;
	tmp->firmware_version[3] =
	    vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
	tmp->firmware_version[4] =
	    vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
}

static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(tmp, vha);
}
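
/*
 * Template checksum: sum all 32-bit words into a 64-bit accumulator, fold
 * the carries back into the low 32 bits and return the one's complement.
 * A well-formed template carries a checksum word chosen so that this
 * function returns zero over the whole template.
 */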

static inline uint32_t
qla27xx_template_checksum(void *p, ulong size)
{
	uint32_t *buf = p;
	uint64_t sum = 0;

	size /= sizeof(*buf);

	while (size--)
		sum += *buf++;

	sum = (sum & 0xffffffff) + (sum >> 32);

	return ~sum;
}

static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
	return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
}

static inline int
qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
{
	return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
}

static void
qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
{
	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
	ulong len;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		tmp = memcpy(vha->hw->fw_dump, tmp, len);
		ql27xx_edit_template(vha, tmp);
		qla27xx_walk_template(vha, tmp, tmp, &len);
		vha->hw->fw_dump_len = len;
		vha->hw->fw_dumped = 1;
	}
}
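
/*
 * Sizing pass: walking the template with a NULL buffer accumulates only
 * the lengths each entry would produce, so the result (template size plus
 * captured data) is the dump buffer size that must be allocated.
 */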

ulong
qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
{
	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		qla27xx_walk_template(vha, tmp, NULL, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_template_size(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	return tmp->template_size;
}

ulong
qla27xx_fwdt_template_default_size(void)
{
	return sizeof(ql27xx_fwdt_default_template);
}

const void *
qla27xx_fwdt_template_default(void)
{
	return ql27xx_fwdt_default_template;
}

int
qla27xx_fwdt_template_valid(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	if (!qla27xx_verify_template_header(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01c,
		    "%s: template type %x\n", __func__, tmp->template_type);
		return false;
	}

	if (!qla27xx_verify_template_checksum(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01d,
		    "%s: failed template checksum\n", __func__);
		return false;
	}

	return true;
}

void
qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
	ulong flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif

	if (!vha->hw->fw_dump)
		ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
	else if (!vha->hw->fw_dump_template)
		ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
	else if (vha->hw->fw_dumped)
		ql_log(ql_log_warn, vha, 0xd300,
		    "Firmware has been previously dumped (%p),"
		    " -- ignoring request\n", vha->hw->fw_dump);
	else
		qla27xx_execute_fwdt_template(vha);

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
#endif
}