bnx2x_init_ops.h 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867
/* bnx2x_init_ops.h: Broadcom Everest network driver.
 * Static functions needed during the initialization.
 * This file is "included" in bnx2x_main.c.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov <vladz@broadcom.com>
 */
  14. #ifndef BNX2X_INIT_OPS_H
  15. #define BNX2X_INIT_OPS_H
  16. static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
  17. static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
  18. static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
  19. u32 addr, u32 len);
  20. static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr, const u32 *data,
  21. u32 len)
  22. {
  23. u32 i;
  24. for (i = 0; i < len; i++)
  25. REG_WR(bp, addr + i*4, data[i]);
  26. }
  27. static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr, const u32 *data,
  28. u32 len)
  29. {
  30. u32 i;
  31. for (i = 0; i < len; i++)
  32. REG_WR_IND(bp, addr + i*4, data[i]);
  33. }
  34. static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len)
  35. {
  36. if (bp->dmae_ready)
  37. bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
  38. else
  39. bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
  40. }
  41. static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
  42. {
  43. u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
  44. u32 buf_len32 = buf_len/4;
  45. u32 i;
  46. memset(GUNZIP_BUF(bp), (u8)fill, buf_len);
  47. for (i = 0; i < len; i += buf_len32) {
  48. u32 cur_len = min(buf_len32, len - i);
  49. bnx2x_write_big_buf(bp, addr + i*4, cur_len);
  50. }
  51. }
  52. static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr, const u32 *data,
  53. u32 len64)
  54. {
  55. u32 buf_len32 = FW_BUF_SIZE/4;
  56. u32 len = len64*2;
  57. u64 data64 = 0;
  58. u32 i;
  59. /* 64 bit value is in a blob: first low DWORD, then high DWORD */
  60. data64 = HILO_U64((*(data + 1)), (*data));
  61. len64 = min((u32)(FW_BUF_SIZE/8), len64);
  62. for (i = 0; i < len64; i++) {
  63. u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i;
  64. *pdata = data64;
  65. }
  66. for (i = 0; i < len; i += buf_len32) {
  67. u32 cur_len = min(buf_len32, len - i);
  68. bnx2x_write_big_buf(bp, addr + i*4, cur_len);
  69. }
  70. }
  71. /*********************************************************
  72. There are different blobs for each PRAM section.
  73. In addition, each blob write operation is divided into a few operations
  74. in order to decrease the amount of phys. contiguous buffer needed.
  75. Thus, when we select a blob the address may be with some offset
  76. from the beginning of PRAM section.
  77. The same holds for the INT_TABLE sections.
  78. **********************************************************/
  79. #define IF_IS_INT_TABLE_ADDR(base, addr) \
  80. if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
  81. #define IF_IS_PRAM_ADDR(base, addr) \
  82. if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
  83. static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr, const u8 *data)
  84. {
  85. IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
  86. data = INIT_TSEM_INT_TABLE_DATA(bp);
  87. else
  88. IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
  89. data = INIT_CSEM_INT_TABLE_DATA(bp);
  90. else
  91. IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
  92. data = INIT_USEM_INT_TABLE_DATA(bp);
  93. else
  94. IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
  95. data = INIT_XSEM_INT_TABLE_DATA(bp);
  96. else
  97. IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
  98. data = INIT_TSEM_PRAM_DATA(bp);
  99. else
  100. IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
  101. data = INIT_CSEM_PRAM_DATA(bp);
  102. else
  103. IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
  104. data = INIT_USEM_PRAM_DATA(bp);
  105. else
  106. IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
  107. data = INIT_XSEM_PRAM_DATA(bp);
  108. return data;
  109. }
  110. static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
  111. {
  112. if (bp->dmae_ready)
  113. bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
  114. else
  115. bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
  116. }
  117. static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
  118. u32 len)
  119. {
  120. const u32 *old_data = data;
  121. data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data);
  122. if (bp->dmae_ready) {
  123. if (old_data != data)
  124. VIRT_WR_DMAE_LEN(bp, data, addr, len, 1);
  125. else
  126. VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
  127. } else
  128. bnx2x_init_ind_wr(bp, addr, data, len);
  129. }
  130. static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo, u32 val_hi)
  131. {
  132. u32 wb_write[2];
  133. wb_write[0] = val_lo;
  134. wb_write[1] = val_hi;
  135. REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
  136. }
  137. static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len, u32 blob_off)
  138. {
  139. const u8 *data = NULL;
  140. int rc;
  141. u32 i;
  142. data = bnx2x_sel_blob(bp, addr, data) + blob_off*4;
  143. rc = bnx2x_gunzip(bp, data, len);
  144. if (rc)
  145. return;
  146. /* gunzip_outlen is in dwords */
  147. len = GUNZIP_OUTLEN(bp);
  148. for (i = 0; i < len; i++)
  149. ((u32 *)GUNZIP_BUF(bp))[i] =
  150. cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
  151. bnx2x_write_big_buf_wb(bp, addr, len);
  152. }
  153. static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
  154. {
  155. u16 op_start =
  156. INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_START)];
  157. u16 op_end =
  158. INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage, STAGE_END)];
  159. union init_op *op;
  160. int hw_wr;
  161. u32 i, op_type, addr, len;
  162. const u32 *data, *data_base;
  163. /* If empty block */
  164. if (op_start == op_end)
  165. return;
  166. if (CHIP_REV_IS_FPGA(bp))
  167. hw_wr = OP_WR_FPGA;
  168. else if (CHIP_REV_IS_EMUL(bp))
  169. hw_wr = OP_WR_EMUL;
  170. else
  171. hw_wr = OP_WR_ASIC;
  172. data_base = INIT_DATA(bp);
  173. for (i = op_start; i < op_end; i++) {
  174. op = (union init_op *)&(INIT_OPS(bp)[i]);
  175. op_type = op->str_wr.op;
  176. addr = op->str_wr.offset;
  177. len = op->str_wr.data_len;
  178. data = data_base + op->str_wr.data_off;
  179. /* HW/EMUL specific */
  180. if ((op_type > OP_WB) && (op_type == hw_wr))
  181. op_type = OP_WR;
  182. switch (op_type) {
  183. case OP_RD:
  184. REG_RD(bp, addr);
  185. break;
  186. case OP_WR:
  187. REG_WR(bp, addr, op->write.val);
  188. break;
  189. case OP_SW:
  190. bnx2x_init_str_wr(bp, addr, data, len);
  191. break;
  192. case OP_WB:
  193. bnx2x_init_wr_wb(bp, addr, data, len);
  194. break;
  195. case OP_SI:
  196. bnx2x_init_ind_wr(bp, addr, data, len);
  197. break;
  198. case OP_ZR:
  199. bnx2x_init_fill(bp, addr, 0, op->zero.len);
  200. break;
  201. case OP_ZP:
  202. bnx2x_init_wr_zp(bp, addr, len,
  203. op->str_wr.data_off);
  204. break;
  205. case OP_WR_64:
  206. bnx2x_init_wr_64(bp, addr, data, len);
  207. break;
  208. default:
  209. /* happens whenever an op is of a diff HW */
  210. break;
  211. }
  212. }
  213. }
  214. /****************************************************************************
  215. * PXP Arbiter
  216. ****************************************************************************/
  217. /*
  218. * This code configures the PCI read/write arbiter
  219. * which implements a weighted round robin
  220. * between the virtual queues in the chip.
  221. *
  222. * The values were derived for each PCI max payload and max request size.
  223. * since max payload and max request size are only known at run time,
  224. * this is done as a separate init stage.
  225. */
  226. #define NUM_WR_Q 13
  227. #define NUM_RD_Q 29
  228. #define MAX_RD_ORD 3
  229. #define MAX_WR_ORD 2
  230. /* configuration for one arbiter queue */
  231. struct arb_line {
  232. int l;
  233. int add;
  234. int ubound;
  235. };
  236. /* derived configuration for each read queue for each max request size */
  237. static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
  238. /* 1 */ { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
  239. { {4, 8, 4}, {4, 8, 4}, {4, 8, 4}, {4, 8, 4} },
  240. { {4, 3, 3}, {4, 3, 3}, {4, 3, 3}, {4, 3, 3} },
  241. { {8, 3, 6}, {16, 3, 11}, {16, 3, 11}, {16, 3, 11} },
  242. { {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
  243. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
  244. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
  245. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
  246. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {64, 3, 41} },
  247. /* 10 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  248. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  249. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  250. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  251. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  252. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  253. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  254. { {8, 64, 6}, {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
  255. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  256. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  257. /* 20 */{ {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  258. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  259. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  260. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  261. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  262. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  263. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  264. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  265. { {8, 3, 6}, {16, 3, 11}, {32, 3, 21}, {32, 3, 21} },
  266. { {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
  267. };
  268. /* derived configuration for each write queue for each max request size */
  269. static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
  270. /* 1 */ { {4, 6, 3}, {4, 6, 3}, {4, 6, 3} },
  271. { {4, 2, 3}, {4, 2, 3}, {4, 2, 3} },
  272. { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
  273. { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
  274. { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
  275. { {8, 2, 6}, {16, 2, 11}, {32, 2, 21} },
  276. { {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
  277. { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
  278. { {8, 2, 6}, {16, 2, 11}, {16, 2, 11} },
  279. /* 10 */{ {8, 9, 6}, {16, 9, 11}, {32, 9, 21} },
  280. { {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
  281. { {8, 9, 6}, {16, 9, 11}, {16, 9, 11} },
  282. { {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
  283. };
  284. /* register addresses for read queues */
  285. static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
  286. /* 1 */ {PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
  287. PXP2_REG_RQ_BW_RD_UBOUND0},
  288. {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
  289. PXP2_REG_PSWRQ_BW_UB1},
  290. {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
  291. PXP2_REG_PSWRQ_BW_UB2},
  292. {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
  293. PXP2_REG_PSWRQ_BW_UB3},
  294. {PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
  295. PXP2_REG_RQ_BW_RD_UBOUND4},
  296. {PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
  297. PXP2_REG_RQ_BW_RD_UBOUND5},
  298. {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
  299. PXP2_REG_PSWRQ_BW_UB6},
  300. {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
  301. PXP2_REG_PSWRQ_BW_UB7},
  302. {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
  303. PXP2_REG_PSWRQ_BW_UB8},
  304. /* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
  305. PXP2_REG_PSWRQ_BW_UB9},
  306. {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
  307. PXP2_REG_PSWRQ_BW_UB10},
  308. {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
  309. PXP2_REG_PSWRQ_BW_UB11},
  310. {PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
  311. PXP2_REG_RQ_BW_RD_UBOUND12},
  312. {PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
  313. PXP2_REG_RQ_BW_RD_UBOUND13},
  314. {PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
  315. PXP2_REG_RQ_BW_RD_UBOUND14},
  316. {PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
  317. PXP2_REG_RQ_BW_RD_UBOUND15},
  318. {PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
  319. PXP2_REG_RQ_BW_RD_UBOUND16},
  320. {PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
  321. PXP2_REG_RQ_BW_RD_UBOUND17},
  322. {PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
  323. PXP2_REG_RQ_BW_RD_UBOUND18},
  324. /* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
  325. PXP2_REG_RQ_BW_RD_UBOUND19},
  326. {PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
  327. PXP2_REG_RQ_BW_RD_UBOUND20},
  328. {PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
  329. PXP2_REG_RQ_BW_RD_UBOUND22},
  330. {PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
  331. PXP2_REG_RQ_BW_RD_UBOUND23},
  332. {PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
  333. PXP2_REG_RQ_BW_RD_UBOUND24},
  334. {PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
  335. PXP2_REG_RQ_BW_RD_UBOUND25},
  336. {PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
  337. PXP2_REG_RQ_BW_RD_UBOUND26},
  338. {PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
  339. PXP2_REG_RQ_BW_RD_UBOUND27},
  340. {PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
  341. PXP2_REG_PSWRQ_BW_UB28}
  342. };
  343. /* register addresses for write queues */
  344. static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
  345. /* 1 */ {PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
  346. PXP2_REG_PSWRQ_BW_UB1},
  347. {PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
  348. PXP2_REG_PSWRQ_BW_UB2},
  349. {PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
  350. PXP2_REG_PSWRQ_BW_UB3},
  351. {PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
  352. PXP2_REG_PSWRQ_BW_UB6},
  353. {PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
  354. PXP2_REG_PSWRQ_BW_UB7},
  355. {PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
  356. PXP2_REG_PSWRQ_BW_UB8},
  357. {PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
  358. PXP2_REG_PSWRQ_BW_UB9},
  359. {PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
  360. PXP2_REG_PSWRQ_BW_UB10},
  361. {PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
  362. PXP2_REG_PSWRQ_BW_UB11},
  363. /* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
  364. PXP2_REG_PSWRQ_BW_UB28},
  365. {PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
  366. PXP2_REG_RQ_BW_WR_UBOUND29},
  367. {PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
  368. PXP2_REG_RQ_BW_WR_UBOUND30}
  369. };
  370. static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order, int w_order)
  371. {
  372. u32 val, i;
  373. if (r_order > MAX_RD_ORD) {
  374. DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
  375. r_order, MAX_RD_ORD);
  376. r_order = MAX_RD_ORD;
  377. }
  378. if (w_order > MAX_WR_ORD) {
  379. DP(NETIF_MSG_HW, "write order of %d order adjusted to %d\n",
  380. w_order, MAX_WR_ORD);
  381. w_order = MAX_WR_ORD;
  382. }
  383. if (CHIP_REV_IS_FPGA(bp)) {
  384. DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
  385. w_order = 0;
  386. }
  387. DP(NETIF_MSG_HW, "read order %d write order %d\n", r_order, w_order);
  388. for (i = 0; i < NUM_RD_Q-1; i++) {
  389. REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
  390. REG_WR(bp, read_arb_addr[i].add,
  391. read_arb_data[i][r_order].add);
  392. REG_WR(bp, read_arb_addr[i].ubound,
  393. read_arb_data[i][r_order].ubound);
  394. }
  395. for (i = 0; i < NUM_WR_Q-1; i++) {
  396. if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
  397. (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
  398. REG_WR(bp, write_arb_addr[i].l,
  399. write_arb_data[i][w_order].l);
  400. REG_WR(bp, write_arb_addr[i].add,
  401. write_arb_data[i][w_order].add);
  402. REG_WR(bp, write_arb_addr[i].ubound,
  403. write_arb_data[i][w_order].ubound);
  404. } else {
  405. val = REG_RD(bp, write_arb_addr[i].l);
  406. REG_WR(bp, write_arb_addr[i].l,
  407. val | (write_arb_data[i][w_order].l << 10));
  408. val = REG_RD(bp, write_arb_addr[i].add);
  409. REG_WR(bp, write_arb_addr[i].add,
  410. val | (write_arb_data[i][w_order].add << 10));
  411. val = REG_RD(bp, write_arb_addr[i].ubound);
  412. REG_WR(bp, write_arb_addr[i].ubound,
  413. val | (write_arb_data[i][w_order].ubound << 7));
  414. }
  415. }
  416. val = write_arb_data[NUM_WR_Q-1][w_order].add;
  417. val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
  418. val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
  419. REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
  420. val = read_arb_data[NUM_RD_Q-1][r_order].add;
  421. val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
  422. val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
  423. REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
  424. REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
  425. REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
  426. REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
  427. REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
  428. if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
  429. REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
  430. if (CHIP_IS_E2(bp))
  431. REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
  432. else
  433. REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
  434. if (CHIP_IS_E1H(bp) || CHIP_IS_E2(bp)) {
  435. /* MPS w_order optimal TH presently TH
  436. * 128 0 0 2
  437. * 256 1 1 3
  438. * >=512 2 2 3
  439. */
  440. /* DMAE is special */
  441. if (CHIP_IS_E2(bp)) {
  442. /* E2 can use optimal TH */
  443. val = w_order;
  444. REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
  445. } else {
  446. val = ((w_order == 0) ? 2 : 3);
  447. REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
  448. }
  449. REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
  450. REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
  451. REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
  452. REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
  453. REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
  454. REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
  455. REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
  456. REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
  457. REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
  458. REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
  459. }
  460. /* Validate number of tags suppoted by device */
  461. #define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
  462. val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
  463. val &= 0xFF;
  464. if (val <= 0x20)
  465. REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
  466. }
  467. /****************************************************************************
  468. * ILT management
  469. ****************************************************************************/
  470. /*
  471. * This codes hides the low level HW interaction for ILT management and
  472. * configuration. The API consists of a shadow ILT table which is set by the
  473. * driver and a set of routines to use it to configure the HW.
  474. *
  475. */
  476. /* ILT HW init operations */
  477. /* ILT memory management operations */
  478. #define ILT_MEMOP_ALLOC 0
  479. #define ILT_MEMOP_FREE 1
  480. /* the phys address is shifted right 12 bits and has an added
  481. * 1=valid bit added to the 53rd bit
  482. * then since this is a wide register(TM)
  483. * we split it into two 32 bit writes
  484. */
  485. #define ILT_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
  486. #define ILT_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
  487. #define ILT_RANGE(f, l) (((l) << 10) | f)
  488. static int bnx2x_ilt_line_mem_op(struct bnx2x *bp, struct ilt_line *line,
  489. u32 size, u8 memop)
  490. {
  491. if (memop == ILT_MEMOP_FREE) {
  492. BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
  493. return 0;
  494. }
  495. BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
  496. if (!line->page)
  497. return -1;
  498. line->size = size;
  499. return 0;
  500. }
  501. static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num, u8 memop)
  502. {
  503. int i, rc;
  504. struct bnx2x_ilt *ilt = BP_ILT(bp);
  505. struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
  506. if (!ilt || !ilt->lines)
  507. return -1;
  508. if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
  509. return 0;
  510. for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
  511. rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i],
  512. ilt_cli->page_size, memop);
  513. }
  514. return rc;
  515. }
  516. static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
  517. {
  518. int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
  519. if (!rc)
  520. rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
  521. if (!rc)
  522. rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
  523. if (!rc)
  524. rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
  525. return rc;
  526. }
  527. static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
  528. dma_addr_t page_mapping)
  529. {
  530. u32 reg;
  531. if (CHIP_IS_E1(bp))
  532. reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
  533. else
  534. reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
  535. bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
  536. }
  537. static void bnx2x_ilt_line_init_op(struct bnx2x *bp, struct bnx2x_ilt *ilt,
  538. int idx, u8 initop)
  539. {
  540. dma_addr_t null_mapping;
  541. int abs_idx = ilt->start_line + idx;
  542. switch (initop) {
  543. case INITOP_INIT:
  544. /* set in the init-value array */
  545. case INITOP_SET:
  546. bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
  547. break;
  548. case INITOP_CLEAR:
  549. null_mapping = 0;
  550. bnx2x_ilt_line_wr(bp, abs_idx, null_mapping);
  551. break;
  552. }
  553. }
  554. static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
  555. struct ilt_client_info *ilt_cli,
  556. u32 ilt_start, u8 initop)
  557. {
  558. u32 start_reg = 0;
  559. u32 end_reg = 0;
  560. /* The boundary is either SET or INIT,
  561. CLEAR => SET and for now SET ~~ INIT */
  562. /* find the appropriate regs */
  563. if (CHIP_IS_E1(bp)) {
  564. switch (ilt_cli->client_num) {
  565. case ILT_CLIENT_CDU:
  566. start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
  567. break;
  568. case ILT_CLIENT_QM:
  569. start_reg = PXP2_REG_PSWRQ_QM0_L2P;
  570. break;
  571. case ILT_CLIENT_SRC:
  572. start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
  573. break;
  574. case ILT_CLIENT_TM:
  575. start_reg = PXP2_REG_PSWRQ_TM0_L2P;
  576. break;
  577. }
  578. REG_WR(bp, start_reg + BP_FUNC(bp)*4,
  579. ILT_RANGE((ilt_start + ilt_cli->start),
  580. (ilt_start + ilt_cli->end)));
  581. } else {
  582. switch (ilt_cli->client_num) {
  583. case ILT_CLIENT_CDU:
  584. start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
  585. end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
  586. break;
  587. case ILT_CLIENT_QM:
  588. start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
  589. end_reg = PXP2_REG_RQ_QM_LAST_ILT;
  590. break;
  591. case ILT_CLIENT_SRC:
  592. start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
  593. end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
  594. break;
  595. case ILT_CLIENT_TM:
  596. start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
  597. end_reg = PXP2_REG_RQ_TM_LAST_ILT;
  598. break;
  599. }
  600. REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
  601. REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
  602. }
  603. }
  604. static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
  605. struct bnx2x_ilt *ilt,
  606. struct ilt_client_info *ilt_cli,
  607. u8 initop)
  608. {
  609. int i;
  610. if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
  611. return;
  612. for (i = ilt_cli->start; i <= ilt_cli->end; i++)
  613. bnx2x_ilt_line_init_op(bp, ilt, i, initop);
  614. /* init/clear the ILT boundries */
  615. bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
  616. }
  617. static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
  618. struct ilt_client_info *ilt_cli, u8 initop)
  619. {
  620. struct bnx2x_ilt *ilt = BP_ILT(bp);
  621. bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop);
  622. }
  623. static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
  624. int cli_num, u8 initop)
  625. {
  626. struct bnx2x_ilt *ilt = BP_ILT(bp);
  627. struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
  628. bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
  629. }
  630. static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
  631. {
  632. bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
  633. bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
  634. bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
  635. bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
  636. }
  637. static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
  638. u32 psz_reg, u8 initop)
  639. {
  640. struct bnx2x_ilt *ilt = BP_ILT(bp);
  641. struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
  642. if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
  643. return;
  644. switch (initop) {
  645. case INITOP_INIT:
  646. /* set in the init-value array */
  647. case INITOP_SET:
  648. REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
  649. break;
  650. case INITOP_CLEAR:
  651. break;
  652. }
  653. }
  654. /*
  655. * called during init common stage, ilt clients should be initialized
  656. * prioir to calling this function
  657. */
  658. static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
  659. {
  660. bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
  661. PXP2_REG_RQ_CDU_P_SIZE, initop);
  662. bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM,
  663. PXP2_REG_RQ_QM_P_SIZE, initop);
  664. bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC,
  665. PXP2_REG_RQ_SRC_P_SIZE, initop);
  666. bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM,
  667. PXP2_REG_RQ_TM_P_SIZE, initop);
  668. }
  669. /****************************************************************************
  670. * QM initializations
  671. ****************************************************************************/
  672. #define QM_QUEUES_PER_FUNC 16 /* E1 has 32, but only 16 are used */
  673. #define QM_INIT_MIN_CID_COUNT 31
  674. #define QM_INIT(cid_cnt) (cid_cnt > QM_INIT_MIN_CID_COUNT)
  675. /* called during init port stage */
  676. static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
  677. u8 initop)
  678. {
  679. int port = BP_PORT(bp);
  680. if (QM_INIT(qm_cid_count)) {
  681. switch (initop) {
  682. case INITOP_INIT:
  683. /* set in the init-value array */
  684. case INITOP_SET:
  685. REG_WR(bp, QM_REG_CONNNUM_0 + port*4,
  686. qm_cid_count/16 - 1);
  687. break;
  688. case INITOP_CLEAR:
  689. break;
  690. }
  691. }
  692. }
  693. static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
  694. {
  695. int i;
  696. u32 wb_data[2];
  697. wb_data[0] = wb_data[1] = 0;
  698. for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
  699. REG_WR(bp, QM_REG_BASEADDR + i*4,
  700. qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
  701. bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8,
  702. wb_data, 2);
  703. if (CHIP_IS_E1H(bp)) {
  704. REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4,
  705. qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
  706. bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
  707. wb_data, 2);
  708. }
  709. }
  710. }
  711. /* called during init common stage */
  712. static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
  713. u8 initop)
  714. {
  715. if (!QM_INIT(qm_cid_count))
  716. return;
  717. switch (initop) {
  718. case INITOP_INIT:
  719. /* set in the init-value array */
  720. case INITOP_SET:
  721. bnx2x_qm_set_ptr_table(bp, qm_cid_count);
  722. break;
  723. case INITOP_CLEAR:
  724. break;
  725. }
  726. }
  727. /****************************************************************************
  728. * SRC initializations
  729. ****************************************************************************/
  730. #ifdef BCM_CNIC
  731. /* called during init func stage */
  732. static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
  733. dma_addr_t t2_mapping, int src_cid_count)
  734. {
  735. int i;
  736. int port = BP_PORT(bp);
  737. /* Initialize T2 */
  738. for (i = 0; i < src_cid_count-1; i++)
  739. t2[i].next = (u64)(t2_mapping + (i+1)*sizeof(struct src_ent));
  740. /* tell the searcher where the T2 table is */
  741. REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
  742. bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16,
  743. U64_LO(t2_mapping), U64_HI(t2_mapping));
  744. bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16,
  745. U64_LO((u64)t2_mapping +
  746. (src_cid_count-1) * sizeof(struct src_ent)),
  747. U64_HI((u64)t2_mapping +
  748. (src_cid_count-1) * sizeof(struct src_ent)));
  749. }
  750. #endif
  751. #endif /* BNX2X_INIT_OPS_H */