t4vf_hw.c

  1. /*
  2. * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
  3. * driver for Linux.
  4. *
  5. * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
  6. *
  7. * This software is available to you under a choice of one of two
  8. * licenses. You may choose to be licensed under the terms of the GNU
  9. * General Public License (GPL) Version 2, available from the file
  10. * COPYING in the main directory of this source tree, or the
  11. * OpenIB.org BSD license below:
  12. *
  13. * Redistribution and use in source and binary forms, with or
  14. * without modification, are permitted provided that the following
  15. * conditions are met:
  16. *
  17. * - Redistributions of source code must retain the above
  18. * copyright notice, this list of conditions and the following
  19. * disclaimer.
  20. *
  21. * - Redistributions in binary form must reproduce the above
  22. * copyright notice, this list of conditions and the following
  23. * disclaimer in the documentation and/or other materials
  24. * provided with the distribution.
  25. *
  26. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  27. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  28. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  29. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  30. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  31. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  32. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  33. * SOFTWARE.
  34. */
  35. #include <linux/pci.h>
  36. #include "t4vf_common.h"
  37. #include "t4vf_defs.h"
  38. #include "../cxgb4/t4_regs.h"
  39. #include "../cxgb4/t4_values.h"
  40. #include "../cxgb4/t4fw_api.h"
  41. /*
  42. * Wait for the device to become ready (signified by our "who am I" register
  43. * returning a value other than all 1's). Return an error if it doesn't
  44. * become ready ...
  45. */
  46. int t4vf_wait_dev_ready(struct adapter *adapter)
  47. {
  48. const u32 whoami = T4VF_PL_BASE_ADDR + PL_VF_WHOAMI;
  49. const u32 notready1 = 0xffffffff;
  50. const u32 notready2 = 0xeeeeeeee;
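/* Note: 0xffffffff is what a read of a dead/unresponsive PCI function
 * returns; the 0xeeeeeeee pattern appears to match the PF driver's
 * CIM_PF_NOACCESS value, i.e. the chip isn't serving register reads yet
 * (for example while a Function Level Reset is still completing).
 */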
  51. u32 val;
  52. val = t4_read_reg(adapter, whoami);
  53. if (val != notready1 && val != notready2)
  54. return 0;
  55. msleep(500);
  56. val = t4_read_reg(adapter, whoami);
  57. if (val != notready1 && val != notready2)
  58. return 0;
  59. else
  60. return -EIO;
  61. }
  62. /*
  63. * Get the reply to a mailbox command and store it in @rpl in big-endian order
  64. * (since the firmware data structures are specified in a big-endian layout).
  65. */
  66. static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
  67. u32 mbox_data)
  68. {
  69. for ( ; size; size -= 8, mbox_data += 8)
  70. *rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
  71. }
  72. /**
  73. * t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log
  74. * @adapter: the adapter
  75. * @cmd: the Firmware Mailbox Command or Reply
  76. * @size: command length in bytes
  77. * @access: the time (ms) needed to access the Firmware Mailbox
  78. * @execute: the time (ms) the command spent being executed
  79. */
  80. static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd,
  81. int size, int access, int execute)
  82. {
  83. struct mbox_cmd_log *log = adapter->mbox_log;
  84. struct mbox_cmd *entry;
  85. int i;
  86. entry = mbox_cmd_log_entry(log, log->cursor++);
  87. if (log->cursor == log->size)
  88. log->cursor = 0;
  89. for (i = 0; i < size / 8; i++)
  90. entry->cmd[i] = be64_to_cpu(cmd[i]);
  91. while (i < MBOX_LEN / 8)
  92. entry->cmd[i++] = 0;
  93. entry->timestamp = jiffies;
  94. entry->seqno = log->seqno++;
  95. entry->access = access;
  96. entry->execute = execute;
  97. }
  98. /**
  99. * t4vf_wr_mbox_core - send a command to FW through the mailbox
  100. * @adapter: the adapter
  101. * @cmd: the command to write
  102. * @size: command length in bytes
  103. * @rpl: where to optionally store the reply
  104. * @sleep_ok: if true we may sleep while awaiting command completion
  105. *
  106. * Sends the given command to FW through the mailbox and waits for the
  107. * FW to execute the command. If @rpl is not %NULL it is used to store
  108. * the FW's reply to the command. The command and its optional reply
  109. * are of the same length. FW can take up to 500 ms to respond.
  110. * @sleep_ok determines whether we may sleep while awaiting the response.
  111. * If sleeping is allowed we use progressive backoff, otherwise we spin.
  112. *
  113. * The return value is 0 on success or a negative errno on failure. A
  114. * failure can happen either because we are not able to execute the
  115. * command or FW executes it but signals an error. In the latter case
  116. * the return value is the error code indicated by FW (negated).
  117. */
  118. int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
  119. void *rpl, bool sleep_ok)
  120. {
  121. static const int delay[] = {
  122. 1, 1, 3, 5, 10, 10, 20, 50, 100
  123. };
  124. u16 access = 0, execute = 0;
  125. u32 v, mbox_data;
  126. int i, ms, delay_idx, ret;
  127. const __be64 *p;
  128. u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
  129. u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
  130. __be64 cmd_rpl[MBOX_LEN / 8];
  131. struct mbox_list entry;
  132. /* In T6, mailbox size is changed to 128 bytes to avoid
  133. * invalidating the entire prefetch buffer.
  134. */
  135. if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
  136. mbox_data = T4VF_MBDATA_BASE_ADDR;
  137. else
  138. mbox_data = T6VF_MBDATA_BASE_ADDR;
  139. /*
  140. * Commands must be multiples of 16 bytes in length and may not be
  141. * larger than the size of the Mailbox Data register array.
  142. */
  143. if ((size % 16) != 0 ||
  144. size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
  145. return -EINVAL;
  146. /* Queue ourselves onto the mailbox access list. When our entry is at
  147. * the front of the list, we have rights to access the mailbox. So we
  148. * wait [for a while] till we're at the front [or bail out with an
  149. * EBUSY] ...
  150. */
  151. spin_lock(&adapter->mbox_lock);
  152. list_add_tail(&entry.list, &adapter->mlist.list);
  153. spin_unlock(&adapter->mbox_lock);
  154. delay_idx = 0;
  155. ms = delay[0];
  156. for (i = 0; ; i += ms) {
  157. /* If we've waited too long, return a busy indication. This
  158. * really ought to be based on our initial position in the
  159. * mailbox access list but this is a start. We very rarely
  160. * contend on access to the mailbox ...
  161. */
  162. if (i > FW_CMD_MAX_TIMEOUT) {
  163. spin_lock(&adapter->mbox_lock);
  164. list_del(&entry.list);
  165. spin_unlock(&adapter->mbox_lock);
  166. ret = -EBUSY;
  167. t4vf_record_mbox(adapter, cmd, size, access, ret);
  168. return ret;
  169. }
  170. /* If we're at the head, break out and start the mailbox
  171. * protocol.
  172. */
  173. if (list_first_entry(&adapter->mlist.list, struct mbox_list,
  174. list) == &entry)
  175. break;
  176. /* Delay for a bit before checking again ... */
  177. if (sleep_ok) {
  178. ms = delay[delay_idx]; /* last element may repeat */
  179. if (delay_idx < ARRAY_SIZE(delay) - 1)
  180. delay_idx++;
  181. msleep(ms);
  182. } else {
  183. mdelay(ms);
  184. }
  185. }
  186. /*
  187. * Loop trying to get ownership of the mailbox. Return an error
  188. * if we can't gain ownership.
  189. */
  190. v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
  191. for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
  192. v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
  193. if (v != MBOX_OWNER_DRV) {
  194. spin_lock(&adapter->mbox_lock);
  195. list_del(&entry.list);
  196. spin_unlock(&adapter->mbox_lock);
  197. ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
  198. t4vf_record_mbox(adapter, cmd, size, access, ret);
  199. return ret;
  200. }
  201. /*
  202. * Write the command array into the Mailbox Data register array and
  203. * transfer ownership of the mailbox to the firmware.
  204. *
  205. * For the VFs, the Mailbox Data "registers" are actually backed by
  206. * T4's "MA" interface rather than PL Registers (as is the case for
  207. * the PFs). Because these are in different coherency domains, the
  208. * write to the VF's PL-register-backed Mailbox Control can race in
  209. * front of the writes to the MA-backed VF Mailbox Data "registers".
  210. * So we need to do a read-back on at least one byte of the VF Mailbox
  211. * Data registers before doing the write to the VF Mailbox Control
  212. * register.
  213. */
  214. if (cmd_op != FW_VI_STATS_CMD)
  215. t4vf_record_mbox(adapter, cmd, size, access, 0);
  216. for (i = 0, p = cmd; i < size; i += 8)
  217. t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
  218. t4_read_reg(adapter, mbox_data); /* flush write */
  219. t4_write_reg(adapter, mbox_ctl,
  220. MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
  221. t4_read_reg(adapter, mbox_ctl); /* flush write */
  222. /*
  223. * Spin waiting for firmware to acknowledge processing our command.
  224. */
  225. delay_idx = 0;
  226. ms = delay[0];
  227. for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
  228. if (sleep_ok) {
  229. ms = delay[delay_idx];
  230. if (delay_idx < ARRAY_SIZE(delay) - 1)
  231. delay_idx++;
  232. msleep(ms);
  233. } else
  234. mdelay(ms);
  235. /*
  236. * If we're the owner, see if this is the reply we wanted.
  237. */
  238. v = t4_read_reg(adapter, mbox_ctl);
  239. if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
  240. /*
  241. * If the Message Valid bit isn't on, revoke ownership
  242. * of the mailbox and continue waiting for our reply.
  243. */
  244. if ((v & MBMSGVALID_F) == 0) {
  245. t4_write_reg(adapter, mbox_ctl,
  246. MBOWNER_V(MBOX_OWNER_NONE));
  247. continue;
  248. }
  249. /*
  250. * We now have our reply. Extract the command return
  251. * value, copy the reply back to our caller's buffer
  252. * (if specified) and revoke ownership of the mailbox.
  253. * We return the (negated) firmware command return
  254. * code (this depends on FW_SUCCESS == 0).
  255. */
  256. get_mbox_rpl(adapter, cmd_rpl, size, mbox_data);
  257. /* return value in low-order little-endian word */
  258. v = be64_to_cpu(cmd_rpl[0]);
  259. if (rpl) {
  260. /* request bit in high-order BE word */
  261. WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
  262. & FW_CMD_REQUEST_F) == 0);
  263. memcpy(rpl, cmd_rpl, size);
  264. WARN_ON((be32_to_cpu(*(__be32 *)rpl)
  265. & FW_CMD_REQUEST_F) != 0);
  266. }
  267. t4_write_reg(adapter, mbox_ctl,
  268. MBOWNER_V(MBOX_OWNER_NONE));
  269. execute = i + ms;
  270. if (cmd_op != FW_VI_STATS_CMD)
  271. t4vf_record_mbox(adapter, cmd_rpl, size, access,
  272. execute);
  273. spin_lock(&adapter->mbox_lock);
  274. list_del(&entry.list);
  275. spin_unlock(&adapter->mbox_lock);
  276. return -FW_CMD_RETVAL_G(v);
  277. }
  278. }
  279. /* We timed out. Return the error ... */
  280. ret = -ETIMEDOUT;
  281. t4vf_record_mbox(adapter, cmd, size, access, ret);
  282. spin_lock(&adapter->mbox_lock);
  283. list_del(&entry.list);
  284. spin_unlock(&adapter->mbox_lock);
  285. return ret;
  286. }
  287. #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
  288. FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
  289. FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
  290. FW_PORT_CAP_ANEG)
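/* ADVERT_MASK is the subset of the Port Capabilities (the supported link
 * speeds plus Auto-Negotiation) which init_link_config() below mirrors into
 * the link's advertised capabilities.
 */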
  291. /**
  292. * init_link_config - initialize a link's SW state
  293. * @lc: structure holding the link state
  294. * @caps: link capabilities
  295. *
  296. * Initializes the SW state maintained for each link, including the link's
  297. * capabilities and default speed/flow-control/autonegotiation settings.
  298. */
  299. static void init_link_config(struct link_config *lc, unsigned int caps)
  300. {
  301. lc->supported = caps;
  302. lc->lp_advertising = 0;
  303. lc->requested_speed = 0;
  304. lc->speed = 0;
  305. lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
  306. if (lc->supported & FW_PORT_CAP_ANEG) {
  307. lc->advertising = lc->supported & ADVERT_MASK;
  308. lc->autoneg = AUTONEG_ENABLE;
  309. lc->requested_fc |= PAUSE_AUTONEG;
  310. } else {
  311. lc->advertising = 0;
  312. lc->autoneg = AUTONEG_DISABLE;
  313. }
  314. }
  315. /**
  316. * t4vf_port_init - initialize port hardware/software state
  317. * @adapter: the adapter
  318. * @pidx: the adapter port index
  319. */
  320. int t4vf_port_init(struct adapter *adapter, int pidx)
  321. {
  322. struct port_info *pi = adap2pinfo(adapter, pidx);
  323. struct fw_vi_cmd vi_cmd, vi_rpl;
  324. struct fw_port_cmd port_cmd, port_rpl;
  325. int v;
  326. /*
  327. * Execute a VI Read command to get our Virtual Interface information
  328. * like MAC address, etc.
  329. */
  330. memset(&vi_cmd, 0, sizeof(vi_cmd));
  331. vi_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
  332. FW_CMD_REQUEST_F |
  333. FW_CMD_READ_F);
  334. vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
  335. vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid));
  336. v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
  337. if (v)
  338. return v;
  339. BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd));
  340. pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd));
  341. t4_os_set_hw_addr(adapter, pidx, vi_rpl.mac);
  342. /*
  343. * If we don't have read access to our port information, we're done
  344. * now. Otherwise, execute a PORT Read command to get it ...
  345. */
  346. if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
  347. return 0;
  348. memset(&port_cmd, 0, sizeof(port_cmd));
  349. port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
  350. FW_CMD_REQUEST_F |
  351. FW_CMD_READ_F |
  352. FW_PORT_CMD_PORTID_V(pi->port_id));
  353. port_cmd.action_to_len16 =
  354. cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
  355. FW_LEN16(port_cmd));
  356. v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl);
  357. if (v)
  358. return v;
  359. v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
  360. pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
  361. FW_PORT_CMD_MDIOADDR_G(v) : -1;
  362. pi->port_type = FW_PORT_CMD_PTYPE_G(v);
  363. pi->mod_type = FW_PORT_MOD_TYPE_NA;
  364. init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));
  365. return 0;
  366. }
  367. /**
  368. * t4vf_fw_reset - issue a reset to FW
  369. * @adapter: the adapter
  370. *
  371. * Issues a reset command to FW. For a Physical Function this would
  372. * result in the Firmware resetting all of its state. For a Virtual
  373. * Function this just resets the state associated with the VF.
  374. */
  375. int t4vf_fw_reset(struct adapter *adapter)
  376. {
  377. struct fw_reset_cmd cmd;
  378. memset(&cmd, 0, sizeof(cmd));
  379. cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RESET_CMD) |
  380. FW_CMD_WRITE_F);
  381. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  382. return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
  383. }
  384. /**
  385. * t4vf_query_params - query FW or device parameters
  386. * @adapter: the adapter
  387. * @nparams: the number of parameters
  388. * @params: the parameter names
  389. * @vals: the parameter values
  390. *
  391. * Reads the values of firmware or device parameters. Up to 7 parameters
  392. * can be queried at once.
  393. */
  394. static int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
  395. const u32 *params, u32 *vals)
  396. {
  397. int i, ret;
  398. struct fw_params_cmd cmd, rpl;
  399. struct fw_params_param *p;
  400. size_t len16;
  401. if (nparams > 7)
  402. return -EINVAL;
  403. memset(&cmd, 0, sizeof(cmd));
  404. cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
  405. FW_CMD_REQUEST_F |
  406. FW_CMD_READ_F);
  407. len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
  408. param[nparams].mnem), 16);
  409. cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
  410. for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
  411. p->mnem = htonl(*params++);
  412. ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
  413. if (ret == 0)
  414. for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
  415. *vals++ = be32_to_cpu(p->val);
  416. return ret;
  417. }
  418. /**
  419. * t4vf_set_params - sets FW or device parameters
  420. * @adapter: the adapter
  421. * @nparams: the number of parameters
  422. * @params: the parameter names
  423. * @vals: the parameter values
  424. *
  425. * Sets the values of firmware or device parameters. Up to 7 parameters
  426. * can be specified at once.
  427. */
  428. int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
  429. const u32 *params, const u32 *vals)
  430. {
  431. int i;
  432. struct fw_params_cmd cmd;
  433. struct fw_params_param *p;
  434. size_t len16;
  435. if (nparams > 7)
  436. return -EINVAL;
  437. memset(&cmd, 0, sizeof(cmd));
  438. cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
  439. FW_CMD_REQUEST_F |
  440. FW_CMD_WRITE_F);
  441. len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
  442. param[nparams]), 16);
  443. cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
  444. for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
  445. p->mnem = cpu_to_be32(*params++);
  446. p->val = cpu_to_be32(*vals++);
  447. }
  448. return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
  449. }
  450. /**
  451. * t4vf_fl_pkt_align - return the fl packet alignment
  452. * @adapter: the adapter
  453. *
  454. * T4 has a single field to specify the packing and padding boundary.
  455. * T5 onwards has separate fields for these, and hence the alignment for
  456. * the next packet offset is the maximum of the two. And T6 changes the
  457. * Ingress Padding Boundary Shift, so it's all a mess and it's best
  458. * if we put this in low-level Common Code ...
  459. *
  460. */
  461. int t4vf_fl_pkt_align(struct adapter *adapter)
  462. {
  463. u32 sge_control, sge_control2;
  464. unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
  465. sge_control = adapter->params.sge.sge_control;
  466. /* T4 uses a single control field to specify both the PCIe Padding and
  467. * Packing Boundary. T5 introduced the ability to specify these
  468. * separately. The actual Ingress Packet Data alignment boundary
  469. * within Packed Buffer Mode is the maximum of these two
  470. * specifications. (Note that it makes no real practical sense to
  471. * have the Padding Boundary be larger than the Packing Boundary but you
  472. * could set the chip up that way and, in fact, legacy T4 code would
  473. * end up doing this because it would initialize the Padding Boundary and
  474. * leave the Packing Boundary initialized to 0 (16 bytes).)
  475. * Padding Boundary values on T6 start from 8B,
  476. * whereas it is 32B for T4 and T5.
  477. */
  478. if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
  479. ingpad_shift = INGPADBOUNDARY_SHIFT_X;
  480. else
  481. ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
  482. ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
  483. fl_align = ingpadboundary;
  484. if (!is_t4(adapter->params.chip)) {
  485. /* T5 has a different interpretation of one of the PCIe Packing
  486. * Boundary values.
  487. */
  488. sge_control2 = adapter->params.sge.sge_control2;
  489. ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
  490. if (ingpackboundary == INGPACKBOUNDARY_16B_X)
  491. ingpackboundary = 16;
  492. else
  493. ingpackboundary = 1 << (ingpackboundary +
  494. INGPACKBOUNDARY_SHIFT_X);
  495. fl_align = max(ingpadboundary, ingpackboundary);
  496. }
  497. return fl_align;
  498. }
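/* A worked example with illustrative register values: on T5, an
 * INGPADBOUNDARY field of 0 selects the minimum 32-byte Padding Boundary
 * noted above; if SGE_CONTROL2 reports INGPACKBOUNDARY_16B_X, the Packing
 * Boundary is 16 bytes, so the Free List packet alignment returned here is
 * max(32, 16) = 32 bytes.
 */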
  499. /**
  500. * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
  501. * @adapter: the adapter
  502. * @qid: the Queue ID
  503. * @qtype: the Ingress or Egress type for @qid
  504. * @pbar2_qoffset: BAR2 Queue Offset
  505. * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
  506. *
  507. * Returns the BAR2 SGE Queue Registers information associated with the
  508. * indicated Absolute Queue ID. These are passed back in return value
  509. * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
  510. * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
  511. *
  512. * This may return an error which indicates that BAR2 SGE Queue
  513. * registers aren't available. If an error is not returned, then the
  514. * following values are returned:
  515. *
  516. * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
  517. * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
  518. *
  519. * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
  520. * require the "Inferred Queue ID" ability may be used. E.g. the
  521. * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
  522. * then these "Inferred Queue ID" registers may not be used.
  523. */
  524. int t4vf_bar2_sge_qregs(struct adapter *adapter,
  525. unsigned int qid,
  526. enum t4_bar2_qtype qtype,
  527. u64 *pbar2_qoffset,
  528. unsigned int *pbar2_qid)
  529. {
  530. unsigned int page_shift, page_size, qpp_shift, qpp_mask;
  531. u64 bar2_page_offset, bar2_qoffset;
  532. unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
  533. /* T4 doesn't support BAR2 SGE Queue registers.
  534. */
  535. if (is_t4(adapter->params.chip))
  536. return -EINVAL;
  537. /* Get our SGE Page Size parameters.
  538. */
  539. page_shift = adapter->params.sge.sge_vf_hps + 10;
  540. page_size = 1 << page_shift;
  541. /* Get the right Queues per Page parameters for our Queue.
  542. */
  543. qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
  544. ? adapter->params.sge.sge_vf_eq_qpp
  545. : adapter->params.sge.sge_vf_iq_qpp);
  546. qpp_mask = (1 << qpp_shift) - 1;
  547. /* Calculate the basics of the BAR2 SGE Queue register area:
  548. * o The BAR2 page the Queue registers will be in.
  549. * o The BAR2 Queue ID.
  550. * o The BAR2 Queue ID Offset into the BAR2 page.
  551. */
  552. bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
  553. bar2_qid = qid & qpp_mask;
  554. bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
  555. /* If the BAR2 Queue ID Offset is less than the Page Size, then the
  556. * hardware will infer the Absolute Queue ID simply from the writes to
  557. * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
  558. * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
  559. * write to the first BAR2 SGE Queue Area within the BAR2 Page with
  560. * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
  561. * from the BAR2 Page and BAR2 Queue ID.
  562. *
  563. * One important consequence of this is that some BAR2 SGE registers
  564. * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
  565. * there. But other registers synthesize the SGE Queue ID purely
  566. * from the writes to the registers -- the Write Combined Doorbell
  567. * Buffer is a good example. These BAR2 SGE Registers are only
  568. * available for those BAR2 SGE Register areas where the SGE Absolute
  569. * Queue ID can be inferred from simple writes.
  570. */
  571. bar2_qoffset = bar2_page_offset;
  572. bar2_qinferred = (bar2_qid_offset < page_size);
  573. if (bar2_qinferred) {
  574. bar2_qoffset += bar2_qid_offset;
  575. bar2_qid = 0;
  576. }
  577. *pbar2_qoffset = bar2_qoffset;
  578. *pbar2_qid = bar2_qid;
  579. return 0;
  580. }
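/* A worked example with a hypothetical configuration: with a 4KB SGE VF Page
 * Size (page_shift == 12), 4 Egress Queues per Page (qpp_shift == 2) and a
 * 128-byte SGE_UDB_SIZE, Egress Queue 5 falls in BAR2 page 1 with BAR2 Queue
 * ID 1 and Queue ID Offset 128.  Since 128 < 4096 the Queue ID can be
 * inferred, so the function returns *pbar2_qoffset == 0x1080 and
 * *pbar2_qid == 0.
 */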
  581. unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
  582. {
  583. u32 whoami;
  584. whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + PL_VF_WHOAMI_A);
  585. return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
  586. SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami));
  587. }
  588. /**
  589. * t4vf_get_sge_params - retrieve adapter Scatter Gather Engine parameters
  590. * @adapter: the adapter
  591. *
  592. * Retrieves various core SGE parameters in the form of hardware SGE
  593. * register values. The caller is responsible for decoding these as
  594. * needed. The SGE parameters are stored in @adapter->params.sge.
  595. */
  596. int t4vf_get_sge_params(struct adapter *adapter)
  597. {
  598. struct sge_params *sge_params = &adapter->params.sge;
  599. u32 params[7], vals[7];
  600. int v;
  601. params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  602. FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL_A));
  603. params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  604. FW_PARAMS_PARAM_XYZ_V(SGE_HOST_PAGE_SIZE_A));
  605. params[2] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  606. FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE0_A));
  607. params[3] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  608. FW_PARAMS_PARAM_XYZ_V(SGE_FL_BUFFER_SIZE1_A));
  609. params[4] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  610. FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_0_AND_1_A));
  611. params[5] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  612. FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_2_AND_3_A));
  613. params[6] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  614. FW_PARAMS_PARAM_XYZ_V(SGE_TIMER_VALUE_4_AND_5_A));
  615. v = t4vf_query_params(adapter, 7, params, vals);
  616. if (v)
  617. return v;
  618. sge_params->sge_control = vals[0];
  619. sge_params->sge_host_page_size = vals[1];
  620. sge_params->sge_fl_buffer_size[0] = vals[2];
  621. sge_params->sge_fl_buffer_size[1] = vals[3];
  622. sge_params->sge_timer_value_0_and_1 = vals[4];
  623. sge_params->sge_timer_value_2_and_3 = vals[5];
  624. sge_params->sge_timer_value_4_and_5 = vals[6];
  625. /* T4 uses a single control field to specify both the PCIe Padding and
  626. * Packing Boundary. T5 introduced the ability to specify these
  627. * separately with the Padding Boundary in SGE_CONTROL and the Packing
  628. * Boundary in SGE_CONTROL2. So for T5 and later we need to grab
  629. * SGE_CONTROL2 in order to determine how ingress packet data will be
  630. * laid out in Packed Buffer Mode. Unfortunately, older versions of
  631. * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
  632. * failure grabbing it we throw an error since we can't figure out the
  633. * right value.
  634. */
  635. if (!is_t4(adapter->params.chip)) {
  636. params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  637. FW_PARAMS_PARAM_XYZ_V(SGE_CONTROL2_A));
  638. v = t4vf_query_params(adapter, 1, params, vals);
  639. if (v != FW_SUCCESS) {
  640. dev_err(adapter->pdev_dev,
  641. "Unable to get SGE Control2; "
  642. "probably old firmware.\n");
  643. return v;
  644. }
  645. sge_params->sge_control2 = vals[0];
  646. }
  647. params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  648. FW_PARAMS_PARAM_XYZ_V(SGE_INGRESS_RX_THRESHOLD_A));
  649. params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  650. FW_PARAMS_PARAM_XYZ_V(SGE_CONM_CTRL_A));
  651. v = t4vf_query_params(adapter, 2, params, vals);
  652. if (v)
  653. return v;
  654. sge_params->sge_ingress_rx_threshold = vals[0];
  655. sge_params->sge_congestion_control = vals[1];
  656. /* For T5 and later we want to use the new BAR2 Doorbells.
  657. * Unfortunately, older firmware didn't allow these registers to be
  658. * read.
  659. */
  660. if (!is_t4(adapter->params.chip)) {
  661. unsigned int pf, s_hps, s_qpp;
  662. params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  663. FW_PARAMS_PARAM_XYZ_V(
  664. SGE_EGRESS_QUEUES_PER_PAGE_VF_A));
  665. params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_REG) |
  666. FW_PARAMS_PARAM_XYZ_V(
  667. SGE_INGRESS_QUEUES_PER_PAGE_VF_A));
  668. v = t4vf_query_params(adapter, 2, params, vals);
  669. if (v != FW_SUCCESS) {
  670. dev_warn(adapter->pdev_dev,
  671. "Unable to get VF SGE Queues/Page; "
  672. "probably old firmware.\n");
  673. return v;
  674. }
  675. sge_params->sge_egress_queues_per_page = vals[0];
  676. sge_params->sge_ingress_queues_per_page = vals[1];
  677. /* We need the Queues/Page for our VF. This is based on the
  678. * PF from which we're instantiated and is indexed in the
  679. * register we just read. Do it once here so other code in
  680. * the driver can just use it.
  681. */
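/* Each PF owns a fixed-width bit-field within these registers: the
 * (...PF1_S - ...PF0_S) difference below is that per-PF field width, so we
 * shift by pf * width and mask with the PF0 mask to extract our PF's value.
 */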
  682. pf = t4vf_get_pf_from_vf(adapter);
  683. s_hps = (HOSTPAGESIZEPF0_S +
  684. (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * pf);
  685. sge_params->sge_vf_hps =
  686. ((sge_params->sge_host_page_size >> s_hps)
  687. & HOSTPAGESIZEPF0_M);
  688. s_qpp = (QUEUESPERPAGEPF0_S +
  689. (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * pf);
  690. sge_params->sge_vf_eq_qpp =
  691. ((sge_params->sge_egress_queues_per_page >> s_qpp)
  692. & QUEUESPERPAGEPF0_M);
  693. sge_params->sge_vf_iq_qpp =
  694. ((sge_params->sge_ingress_queues_per_page >> s_qpp)
  695. & QUEUESPERPAGEPF0_M);
  696. }
  697. return 0;
  698. }
  699. /**
  700. * t4vf_get_vpd_params - retrieve device VPD parameters
  701. * @adapter: the adapter
  702. *
  703. * Retrieves various device Vital Product Data parameters. The parameters
  704. * are stored in @adapter->params.vpd.
  705. */
  706. int t4vf_get_vpd_params(struct adapter *adapter)
  707. {
  708. struct vpd_params *vpd_params = &adapter->params.vpd;
  709. u32 params[7], vals[7];
  710. int v;
  711. params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  712. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
  713. v = t4vf_query_params(adapter, 1, params, vals);
  714. if (v)
  715. return v;
  716. vpd_params->cclk = vals[0];
  717. return 0;
  718. }
  719. /**
  720. * t4vf_get_dev_params - retrieve device parameters
  721. * @adapter: the adapter
  722. *
  723. * Retrieves various device parameters. The parameters are stored in
  724. * @adapter->params.dev.
  725. */
  726. int t4vf_get_dev_params(struct adapter *adapter)
  727. {
  728. struct dev_params *dev_params = &adapter->params.dev;
  729. u32 params[7], vals[7];
  730. int v;
  731. params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  732. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWREV));
  733. params[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  734. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_TPREV));
  735. v = t4vf_query_params(adapter, 2, params, vals);
  736. if (v)
  737. return v;
  738. dev_params->fwrev = vals[0];
  739. dev_params->tprev = vals[1];
  740. return 0;
  741. }
  742. /**
  743. * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
  744. * @adapter: the adapter
  745. *
  746. * Retrieves global RSS mode and parameters with which we have to live
  747. * and stores them in the @adapter's RSS parameters.
  748. */
  749. int t4vf_get_rss_glb_config(struct adapter *adapter)
  750. {
  751. struct rss_params *rss = &adapter->params.rss;
  752. struct fw_rss_glb_config_cmd cmd, rpl;
  753. int v;
  754. /*
  755. * Execute an RSS Global Configuration read command to retrieve
  756. * our RSS configuration.
  757. */
  758. memset(&cmd, 0, sizeof(cmd));
  759. cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
  760. FW_CMD_REQUEST_F |
  761. FW_CMD_READ_F);
  762. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  763. v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
  764. if (v)
  765. return v;
  766. /*
  767. * Translate the big-endian RSS Global Configuration into our
  768. * cpu-endian format based on the RSS mode. We also do first level
  769. * filtering at this point to weed out modes which don't support
  770. * VF Drivers ...
  771. */
  772. rss->mode = FW_RSS_GLB_CONFIG_CMD_MODE_G(
  773. be32_to_cpu(rpl.u.manual.mode_pkd));
  774. switch (rss->mode) {
  775. case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
  776. u32 word = be32_to_cpu(
  777. rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
  778. rss->u.basicvirtual.synmapen =
  779. ((word & FW_RSS_GLB_CONFIG_CMD_SYNMAPEN_F) != 0);
  780. rss->u.basicvirtual.syn4tupenipv6 =
  781. ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6_F) != 0);
  782. rss->u.basicvirtual.syn2tupenipv6 =
  783. ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6_F) != 0);
  784. rss->u.basicvirtual.syn4tupenipv4 =
  785. ((word & FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4_F) != 0);
  786. rss->u.basicvirtual.syn2tupenipv4 =
  787. ((word & FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4_F) != 0);
  788. rss->u.basicvirtual.ofdmapen =
  789. ((word & FW_RSS_GLB_CONFIG_CMD_OFDMAPEN_F) != 0);
  790. rss->u.basicvirtual.tnlmapen =
  791. ((word & FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F) != 0);
  792. rss->u.basicvirtual.tnlalllookup =
  793. ((word & FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F) != 0);
  794. rss->u.basicvirtual.hashtoeplitz =
  795. ((word & FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ_F) != 0);
  796. /* we need at least Tunnel Map Enable to be set */
  797. if (!rss->u.basicvirtual.tnlmapen)
  798. return -EINVAL;
  799. break;
  800. }
  801. default:
  802. /* all unknown/unsupported RSS modes result in an error */
  803. return -EINVAL;
  804. }
  805. return 0;
  806. }
  807. /**
  808. * t4vf_get_vfres - retrieve VF resource limits
  809. * @adapter: the adapter
  810. *
  811. * Retrieves configured resource limits and capabilities for a virtual
  812. * function. The results are stored in @adapter->vfres.
  813. */
  814. int t4vf_get_vfres(struct adapter *adapter)
  815. {
  816. struct vf_resources *vfres = &adapter->params.vfres;
  817. struct fw_pfvf_cmd cmd, rpl;
  818. int v;
  819. u32 word;
  820. /*
  821. * Execute PFVF Read command to get VF resource limits; bail out early
  822. * with error on command failure.
  823. */
  824. memset(&cmd, 0, sizeof(cmd));
  825. cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
  826. FW_CMD_REQUEST_F |
  827. FW_CMD_READ_F);
  828. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  829. v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
  830. if (v)
  831. return v;
  832. /*
  833. * Extract VF resource limits and return success.
  834. */
  835. word = be32_to_cpu(rpl.niqflint_niq);
  836. vfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
  837. vfres->niq = FW_PFVF_CMD_NIQ_G(word);
  838. word = be32_to_cpu(rpl.type_to_neq);
  839. vfres->neq = FW_PFVF_CMD_NEQ_G(word);
  840. vfres->pmask = FW_PFVF_CMD_PMASK_G(word);
  841. word = be32_to_cpu(rpl.tc_to_nexactf);
  842. vfres->tc = FW_PFVF_CMD_TC_G(word);
  843. vfres->nvi = FW_PFVF_CMD_NVI_G(word);
  844. vfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
  845. word = be32_to_cpu(rpl.r_caps_to_nethctrl);
  846. vfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
  847. vfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
  848. vfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
  849. return 0;
  850. }
  851. /**
  852. * t4vf_read_rss_vi_config - read a VI's RSS configuration
  853. * @adapter: the adapter
  854. * @viid: Virtual Interface ID
  855. * @config: pointer to host-native VI RSS Configuration buffer
  856. *
  857. * Reads the Virtual Interface's RSS configuration information and
  858. * translates it into CPU-native format.
  859. */
  860. int t4vf_read_rss_vi_config(struct adapter *adapter, unsigned int viid,
  861. union rss_vi_config *config)
  862. {
  863. struct fw_rss_vi_config_cmd cmd, rpl;
  864. int v;
  865. memset(&cmd, 0, sizeof(cmd));
  866. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
  867. FW_CMD_REQUEST_F |
  868. FW_CMD_READ_F |
  869. FW_RSS_VI_CONFIG_CMD_VIID(viid));
  870. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  871. v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
  872. if (v)
  873. return v;
  874. switch (adapter->params.rss.mode) {
  875. case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
  876. u32 word = be32_to_cpu(rpl.u.basicvirtual.defaultq_to_udpen);
  877. config->basicvirtual.ip6fourtupen =
  878. ((word & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) != 0);
  879. config->basicvirtual.ip6twotupen =
  880. ((word & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F) != 0);
  881. config->basicvirtual.ip4fourtupen =
  882. ((word & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) != 0);
  883. config->basicvirtual.ip4twotupen =
  884. ((word & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F) != 0);
  885. config->basicvirtual.udpen =
  886. ((word & FW_RSS_VI_CONFIG_CMD_UDPEN_F) != 0);
  887. config->basicvirtual.defaultq =
  888. FW_RSS_VI_CONFIG_CMD_DEFAULTQ_G(word);
  889. break;
  890. }
  891. default:
  892. return -EINVAL;
  893. }
  894. return 0;
  895. }
  896. /**
  897. * t4vf_write_rss_vi_config - write a VI's RSS configuration
  898. * @adapter: the adapter
  899. * @viid: Virtual Interface ID
  900. * @config: pointer to host-native VI RSS Configuration buffer
  901. *
  902. * Write the Virtual Interface's RSS configuration information
  903. * (translating it into firmware-native format before writing).
  904. */
  905. int t4vf_write_rss_vi_config(struct adapter *adapter, unsigned int viid,
  906. union rss_vi_config *config)
  907. {
  908. struct fw_rss_vi_config_cmd cmd, rpl;
  909. memset(&cmd, 0, sizeof(cmd));
  910. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
  911. FW_CMD_REQUEST_F |
  912. FW_CMD_WRITE_F |
  913. FW_RSS_VI_CONFIG_CMD_VIID(viid));
  914. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  915. switch (adapter->params.rss.mode) {
  916. case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
  917. u32 word = 0;
  918. if (config->basicvirtual.ip6fourtupen)
  919. word |= FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F;
  920. if (config->basicvirtual.ip6twotupen)
  921. word |= FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F;
  922. if (config->basicvirtual.ip4fourtupen)
  923. word |= FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F;
  924. if (config->basicvirtual.ip4twotupen)
  925. word |= FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F;
  926. if (config->basicvirtual.udpen)
  927. word |= FW_RSS_VI_CONFIG_CMD_UDPEN_F;
  928. word |= FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(
  929. config->basicvirtual.defaultq);
  930. cmd.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(word);
  931. break;
  932. }
  933. default:
  934. return -EINVAL;
  935. }
  936. return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
  937. }
  938. /**
  939. * t4vf_config_rss_range - configure a portion of the RSS mapping table
  940. * @adapter: the adapter
  941. * @viid: Virtual Interface of RSS Table Slice
  942. * @start: starting entry in the table to write
  943. * @n: how many table entries to write
  944. * @rspq: values for the "Response Queue" (Ingress Queue) lookup table
  945. * @nrspq: number of values in @rspq
  946. *
  947. * Programs the selected part of the VI's RSS mapping table with the
  948. * provided values. If @nrspq < @n the supplied values are used repeatedly
  949. * until the full table range is populated.
  950. *
  951. * The caller must ensure the values in @rspq are in the range 0..1023.
  952. */
  953. int t4vf_config_rss_range(struct adapter *adapter, unsigned int viid,
  954. int start, int n, const u16 *rspq, int nrspq)
  955. {
  956. const u16 *rsp = rspq;
  957. const u16 *rsp_end = rspq+nrspq;
  958. struct fw_rss_ind_tbl_cmd cmd;
  959. /*
  960. * Initialize firmware command template to write the RSS table.
  961. */
  962. memset(&cmd, 0, sizeof(cmd));
  963. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
  964. FW_CMD_REQUEST_F |
  965. FW_CMD_WRITE_F |
  966. FW_RSS_IND_TBL_CMD_VIID_V(viid));
  967. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  968. /*
  969. * Each firmware RSS command can accommodate up to 32 RSS Ingress
  970. * Queue Identifiers. These Ingress Queue IDs are packed three to
  971. * a 32-bit word as 10-bit values with the upper remaining 2 bits
  972. * reserved.
  973. */
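/* For example, with @rspq = {10, 11}, @nrspq = 2 and @n = 5, the entries
 * written are 10, 11, 10, 11, 10 (wrapping around @rspq), packed into two
 * 32-bit words as (IQ0=10, IQ1=11, IQ2=10) and (IQ0=11, IQ1=10, IQ2=0),
 * the last slot being zero padding beyond the 5 valid entries in niqid.
 */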
  974. while (n > 0) {
  975. __be32 *qp = &cmd.iq0_to_iq2;
  976. int nq = min(n, 32);
  977. int ret;
  978. /*
  979. * Set up the firmware RSS command header to send the next
  980. * "nq" Ingress Queue IDs to the firmware.
  981. */
  982. cmd.niqid = cpu_to_be16(nq);
  983. cmd.startidx = cpu_to_be16(start);
  984. /*
  985. * "nq" more done for the start of the next loop.
  986. */
  987. start += nq;
  988. n -= nq;
  989. /*
  990. * While there are still Ingress Queue IDs to stuff into the
  991. * current firmware RSS command, retrieve them from the
  992. * Ingress Queue ID array and insert them into the command.
  993. */
  994. while (nq > 0) {
  995. /*
  996. * Grab up to the next 3 Ingress Queue IDs (wrapping
  997. * around the Ingress Queue ID array if necessary) and
  998. * insert them into the firmware RSS command at the
  999. * current 3-tuple position within the command.
  1000. */
  1001. u16 qbuf[3];
  1002. u16 *qbp = qbuf;
  1003. int nqbuf = min(3, nq);
  1004. nq -= nqbuf;
  1005. qbuf[0] = qbuf[1] = qbuf[2] = 0;
  1006. while (nqbuf) {
  1007. nqbuf--;
  1008. *qbp++ = *rsp++;
  1009. if (rsp >= rsp_end)
  1010. rsp = rspq;
  1011. }
  1012. *qp++ = cpu_to_be32(FW_RSS_IND_TBL_CMD_IQ0_V(qbuf[0]) |
  1013. FW_RSS_IND_TBL_CMD_IQ1_V(qbuf[1]) |
  1014. FW_RSS_IND_TBL_CMD_IQ2_V(qbuf[2]));
  1015. }
  1016. /*
  1017. * Send this portion of the RSS table update to the firmware;
  1018. * bail out on any errors.
  1019. */
  1020. ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
  1021. if (ret)
  1022. return ret;
  1023. }
  1024. return 0;
  1025. }
  1026. /**
  1027. * t4vf_alloc_vi - allocate a virtual interface on a port
  1028. * @adapter: the adapter
  1029. * @port_id: physical port associated with the VI
  1030. *
  1031. * Allocate a new Virtual Interface and bind it to the indicated
  1032. * physical port. Return the new Virtual Interface Identifier on
  1033. * success, or a [negative] error number on failure.
  1034. */
  1035. int t4vf_alloc_vi(struct adapter *adapter, int port_id)
  1036. {
  1037. struct fw_vi_cmd cmd, rpl;
  1038. int v;
  1039. /*
  1040. * Execute a VI command to allocate Virtual Interface and return its
  1041. * VIID.
  1042. */
  1043. memset(&cmd, 0, sizeof(cmd));
  1044. cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
  1045. FW_CMD_REQUEST_F |
  1046. FW_CMD_WRITE_F |
  1047. FW_CMD_EXEC_F);
  1048. cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
  1049. FW_VI_CMD_ALLOC_F);
  1050. cmd.portid_pkd = FW_VI_CMD_PORTID_V(port_id);
  1051. v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
  1052. if (v)
  1053. return v;
  1054. return FW_VI_CMD_VIID_G(be16_to_cpu(rpl.type_viid));
  1055. }
  1056. /**
  1057. * t4vf_free_vi -- free a virtual interface
  1058. * @adapter: the adapter
  1059. * @viid: the virtual interface identifier
  1060. *
  1061. * Free a previously allocated Virtual Interface. Return an error on
  1062. * failure.
  1063. */
  1064. int t4vf_free_vi(struct adapter *adapter, int viid)
  1065. {
  1066. struct fw_vi_cmd cmd;
  1067. /*
  1068. * Execute a VI command to free the Virtual Interface.
  1069. */
  1070. memset(&cmd, 0, sizeof(cmd));
  1071. cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
  1072. FW_CMD_REQUEST_F |
  1073. FW_CMD_EXEC_F);
  1074. cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
  1075. FW_VI_CMD_FREE_F);
  1076. cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
  1077. return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
  1078. }
  1079. /**
  1080. * t4vf_enable_vi - enable/disable a virtual interface
  1081. * @adapter: the adapter
  1082. * @viid: the Virtual Interface ID
  1083. * @rx_en: 1=enable Rx, 0=disable Rx
  1084. * @tx_en: 1=enable Tx, 0=disable Tx
  1085. *
  1086. * Enables/disables a virtual interface.
  1087. */
  1088. int t4vf_enable_vi(struct adapter *adapter, unsigned int viid,
  1089. bool rx_en, bool tx_en)
  1090. {
  1091. struct fw_vi_enable_cmd cmd;
  1092. memset(&cmd, 0, sizeof(cmd));
  1093. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
  1094. FW_CMD_REQUEST_F |
  1095. FW_CMD_EXEC_F |
  1096. FW_VI_ENABLE_CMD_VIID_V(viid));
  1097. cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
  1098. FW_VI_ENABLE_CMD_EEN_V(tx_en) |
  1099. FW_LEN16(cmd));
  1100. return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
  1101. }
  1102. /**
  1103. * t4vf_identify_port - identify a VI's port by blinking its LED
  1104. * @adapter: the adapter
  1105. * @viid: the Virtual Interface ID
  1106. * @nblinks: how many times to blink LED at 2.5 Hz
  1107. *
  1108. * Identifies a VI's port by blinking its LED.
  1109. */
  1110. int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
  1111. unsigned int nblinks)
  1112. {
  1113. struct fw_vi_enable_cmd cmd;
  1114. memset(&cmd, 0, sizeof(cmd));
  1115. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
  1116. FW_CMD_REQUEST_F |
  1117. FW_CMD_EXEC_F |
  1118. FW_VI_ENABLE_CMD_VIID_V(viid));
  1119. cmd.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F |
  1120. FW_LEN16(cmd));
  1121. cmd.blinkdur = cpu_to_be16(nblinks);
  1122. return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
  1123. }
  1124. /**
  1125. * t4vf_set_rxmode - set Rx properties of a virtual interface
  1126. * @adapter: the adapter
  1127. * @viid: the VI id
  1128. * @mtu: the new MTU or -1 for no change
  1129. * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
  1130. * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
  1131. * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
  1132. * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
  1133. * -1 no change
  1134. *
  1135. * Sets Rx properties of a virtual interface.
  1136. */
  1137. int t4vf_set_rxmode(struct adapter *adapter, unsigned int viid,
  1138. int mtu, int promisc, int all_multi, int bcast, int vlanex,
  1139. bool sleep_ok)
  1140. {
  1141. struct fw_vi_rxmode_cmd cmd;
  1142. /* convert "no change" (-1) values to the firmware's all-ones "no change" field encoding */
  1143. if (mtu < 0)
  1144. mtu = FW_VI_RXMODE_CMD_MTU_M;
  1145. if (promisc < 0)
  1146. promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
  1147. if (all_multi < 0)
  1148. all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
  1149. if (bcast < 0)
  1150. bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
  1151. if (vlanex < 0)
  1152. vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
  1153. memset(&cmd, 0, sizeof(cmd));
  1154. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
  1155. FW_CMD_REQUEST_F |
  1156. FW_CMD_WRITE_F |
  1157. FW_VI_RXMODE_CMD_VIID_V(viid));
  1158. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  1159. cmd.mtu_to_vlanexen =
  1160. cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
  1161. FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
  1162. FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
  1163. FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
  1164. FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
  1165. return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
  1166. }
  1167. /**
  1168. * t4vf_alloc_mac_filt - allocates exact-match filters for MAC addresses
  1169. * @adapter: the adapter
  1170. * @viid: the Virtual Interface Identifier
  1171. * @free: if true any existing filters for this VI id are first removed
  1172. * @naddr: the number of MAC addresses to allocate filters for (up to 7)
  1173. * @addr: the MAC address(es)
  1174. * @idx: where to store the index of each allocated filter
  1175. * @hash: pointer to hash address filter bitmap
  1176. * @sleep_ok: call is allowed to sleep
  1177. *
  1178. * Allocates an exact-match filter for each of the supplied addresses and
  1179. * sets it to the corresponding address. If @idx is not %NULL it should
  1180. * have at least @naddr entries, each of which will be set to the index of
  1181. * the filter allocated for the corresponding MAC address. If a filter
  1182. * could not be allocated for an address, its index is set to 0xffff.
  1183. * If @hash is not %NULL addresses that fail to allocate an exact filter
  1184. * are hashed and update the hash filter bitmap pointed at by @hash.
  1185. *
  1186. * Returns a negative error number or the number of filters allocated.
  1187. */
  1188. int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
  1189. unsigned int naddr, const u8 **addr, u16 *idx,
  1190. u64 *hash, bool sleep_ok)
  1191. {
  1192. int offset, ret = 0;
  1193. unsigned nfilters = 0;
  1194. unsigned int rem = naddr;
  1195. struct fw_vi_mac_cmd cmd, rpl;
  1196. unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
  1197. if (naddr > max_naddr)
  1198. return -EINVAL;
  1199. for (offset = 0; offset < naddr; /**/) {
  1200. unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
  1201. ? rem
  1202. : ARRAY_SIZE(cmd.u.exact));
  1203. size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
  1204. u.exact[fw_naddr]), 16);
  1205. struct fw_vi_mac_exact *p;
  1206. int i;
  1207. memset(&cmd, 0, sizeof(cmd));
  1208. cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
  1209. FW_CMD_REQUEST_F |
  1210. FW_CMD_WRITE_F |
  1211. (free ? FW_CMD_EXEC_F : 0) |
  1212. FW_VI_MAC_CMD_VIID_V(viid));
  1213. cmd.freemacs_to_len16 =
  1214. cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
  1215. FW_CMD_LEN16_V(len16));
  1216. for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
  1217. p->valid_to_idx = cpu_to_be16(
  1218. FW_VI_MAC_CMD_VALID_F |
  1219. FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
  1220. memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
  1221. }
  1222. ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
  1223. sleep_ok);
  1224. if (ret && ret != -ENOMEM)
  1225. break;
  1226. for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
  1227. u16 index = FW_VI_MAC_CMD_IDX_G(
  1228. be16_to_cpu(p->valid_to_idx));
  1229. if (idx)
  1230. idx[offset+i] =
  1231. (index >= max_naddr
  1232. ? 0xffff
  1233. : index);
  1234. if (index < max_naddr)
  1235. nfilters++;
  1236. else if (hash)
  1237. *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
  1238. }
  1239. free = false;
  1240. offset += fw_naddr;
  1241. rem -= fw_naddr;
  1242. }
  1243. /*
  1244. * If there were no errors or we merely ran out of room in our MAC
  1245. * address arena, return the number of filters actually written.
  1246. */
  1247. if (ret == 0 || ret == -ENOMEM)
  1248. ret = nfilters;
  1249. return ret;
  1250. }
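/* Illustrative usage (the names here are hypothetical): to program two
 * unicast MAC addresses on a Virtual Interface and collect their filter
 * indices:
 *
 *	const u8 *macs[2] = { mac0, mac1 };
 *	u16 filt_idx[2];
 *	u64 mhash = 0;
 *	int n = t4vf_alloc_mac_filt(adapter, pi->viid, true, 2, macs,
 *				    filt_idx, &mhash, true);
 *
 * A negative return is an error; otherwise n filters were written, any
 * address which didn't fit has filt_idx[i] == 0xffff, and its hash bit is
 * set in mhash.
 */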

/**
 *	t4vf_free_mac_filt - frees exact-match filters of given MAC addresses
 *	@adapter: the adapter
 *	@viid: the VI id
 *	@naddr: the number of MAC addresses to free filters for (up to the
 *	  MPS TCAM size)
 *	@addr: the MAC address(es)
 *	@sleep_ok: call is allowed to sleep
 *
 *	Frees the exact-match filter for each of the supplied addresses.
 *
 *	Returns a negative error number or the number of filters freed.
 */
int t4vf_free_mac_filt(struct adapter *adapter, unsigned int viid,
		       unsigned int naddr, const u8 **addr, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd cmd;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	for (offset = 0; offset < (int)naddr; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ?
					 rem : ARRAY_SIZE(cmd.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_WRITE_F |
					     FW_CMD_EXEC_V(0) |
					     FW_VI_MAC_CMD_VIID_V(viid));
		cmd.freemacs_to_len16 =
			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
				    FW_CMD_LEN16_V(len16));

		for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
				FW_VI_MAC_CMD_VALID_F |
				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd,
					sleep_ok);
		if (ret)
			break;

		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
				be16_to_cpu(p->valid_to_idx));

			if (index < max_naddr)
				nfilters++;
		}

		offset += fw_naddr;
		rem -= fw_naddr;
	}

	if (ret == 0)
		ret = nfilters;
	return ret;
}
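
/*
 * Usage sketch (illustrative only): tearing down secondary unicast addresses
 * that were previously installed with t4vf_alloc_mac_filt().  "maclist" and
 * "n" are assumptions for the example, not driver state:
 *
 *	ret = t4vf_free_mac_filt(adapter, viid, n, maclist, true);
 *	if (ret < 0)
 *		dev_warn(adapter->pdev_dev,
 *			 "could not free MAC filters, err %d\n", ret);
 */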

/**
 *	t4vf_change_mac - modifies the exact-match filter for a MAC address
 *	@adapter: the adapter
 *	@viid: the Virtual Interface ID
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: if idx < 0, the new MAC allocation should be persistent
 *
 *	Modifies an exact-match filter and sets it to the new MAC address.
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the
 *	one being used by the old address value and allocate a new filter for
 *	the new address value.  @idx can be -1 if the address is a new
 *	addition.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.
 */
int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
		    int idx, const u8 *addr, bool persist)
{
	int ret;
	struct fw_vi_mac_cmd cmd, rpl;
	struct fw_vi_mac_exact *p = &cmd.u.exact[0];
	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
					     u.exact[1]), 16);
	unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;

	/*
	 * If this is a new allocation, determine whether it should be
	 * persistent (across a "freemacs" operation) or not.
	 */
	if (idx < 0)
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_VI_MAC_CMD_VIID_V(viid));
	cmd.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
				      FW_VI_MAC_CMD_IDX_V(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
	if (ret == 0) {
		p = &rpl.u.exact[0];
		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}
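
/*
 * Usage sketch (illustrative only): updating the primary unicast MAC for a
 * port.  "pi->xact_addr_filt" is how the cxgb4vf driver typically remembers
 * the currently allocated filter index; treat the field name as an
 * assumption here:
 *
 *	ret = t4vf_change_mac(adapter, pi->viid, pi->xact_addr_filt,
 *			      new_addr, true);
 *	if (ret >= 0)
 *		pi->xact_addr_filt = ret;
 */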

/**
 *	t4vf_set_addr_hash - program the MAC inexact-match hash filter
 *	@adapter: the adapter
 *	@viid: the Virtual Interface Identifier
 *	@ucast: whether the hash filter should also match unicast addresses
 *	@vec: the value to be written to the hash filter
 *	@sleep_ok: call is allowed to sleep
 *
 *	Sets the 64-bit inexact-match hash filter for a virtual interface.
 */
int t4vf_set_addr_hash(struct adapter *adapter, unsigned int viid,
		       bool ucast, u64 vec, bool sleep_ok)
{
	struct fw_vi_mac_cmd cmd;
	size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
					     u.exact[0]), 16);

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				     FW_CMD_REQUEST_F |
				     FW_CMD_WRITE_F |
				     FW_VI_ENABLE_CMD_VIID_V(viid));
	cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
					    FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
					    FW_CMD_LEN16_V(len16));
	cmd.u.hash.hashvec = cpu_to_be64(vec);
	return t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), NULL, sleep_ok);
}
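
/*
 * Usage sketch (illustrative only): building an inexact-match vector for a
 * netdev's multicast list with hash_mac_addr() (the helper this file already
 * uses for exact-filter overflow) and programming it; "netdev" is an
 * assumption for the example:
 *
 *	struct netdev_hw_addr *ha;
 *	u64 mhash = 0;
 *
 *	netdev_for_each_mc_addr(ha, netdev)
 *		mhash |= 1ULL << hash_mac_addr(ha->addr);
 *	ret = t4vf_set_addr_hash(adapter, viid, false, mhash, true);
 */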

/**
 *	t4vf_get_port_stats - collect "port" statistics
 *	@adapter: the adapter
 *	@pidx: the port index
 *	@s: the stats structure to fill
 *
 *	Collect statistics for the "port"'s Virtual Interface.
 */
int t4vf_get_port_stats(struct adapter *adapter, int pidx,
			struct t4vf_port_stats *s)
{
	struct port_info *pi = adap2pinfo(adapter, pidx);
	struct fw_vi_stats_vf fwstats;
	unsigned int rem = VI_VF_NUM_STATS;
	__be64 *fwsp = (__be64 *)&fwstats;

	/*
	 * Grab the Virtual Interface statistics a chunk at a time via mailbox
	 * commands.  We could use a Work Request and get all of them at once
	 * but that's an asynchronous interface which is awkward to use.
	 */
	while (rem) {
		unsigned int ix = VI_VF_NUM_STATS - rem;
		unsigned int nstats = min(6U, rem);
		struct fw_vi_stats_cmd cmd, rpl;
		size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
			      sizeof(struct fw_vi_stats_ctl));
		size_t len16 = DIV_ROUND_UP(len, 16);
		int ret;

		memset(&cmd, 0, sizeof(cmd));
		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_STATS_CMD) |
					     FW_VI_STATS_CMD_VIID_V(pi->viid) |
					     FW_CMD_REQUEST_F |
					     FW_CMD_READ_F);
		cmd.retval_len16 = cpu_to_be32(FW_CMD_LEN16_V(len16));
		cmd.u.ctl.nstats_ix =
			cpu_to_be16(FW_VI_STATS_CMD_IX_V(ix) |
				    FW_VI_STATS_CMD_NSTATS_V(nstats));
		ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
		if (ret)
			return ret;

		memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);

		rem -= nstats;
		fwsp += nstats;
	}

	/*
	 * Translate firmware statistics into host native statistics.
	 */
	s->tx_bcast_bytes = be64_to_cpu(fwstats.tx_bcast_bytes);
	s->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
	s->tx_mcast_bytes = be64_to_cpu(fwstats.tx_mcast_bytes);
	s->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
	s->tx_ucast_bytes = be64_to_cpu(fwstats.tx_ucast_bytes);
	s->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
	s->tx_drop_frames = be64_to_cpu(fwstats.tx_drop_frames);
	s->tx_offload_bytes = be64_to_cpu(fwstats.tx_offload_bytes);
	s->tx_offload_frames = be64_to_cpu(fwstats.tx_offload_frames);

	s->rx_bcast_bytes = be64_to_cpu(fwstats.rx_bcast_bytes);
	s->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
	s->rx_mcast_bytes = be64_to_cpu(fwstats.rx_mcast_bytes);
	s->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
	s->rx_ucast_bytes = be64_to_cpu(fwstats.rx_ucast_bytes);
	s->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
	s->rx_err_frames = be64_to_cpu(fwstats.rx_err_frames);

	return 0;
}
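
/*
 * Usage sketch (illustrative only): a net_device stats callback can be
 * layered on top of this; "pi", "ns" and the field names below are
 * assumptions for the example, not driver guarantees:
 *
 *	struct t4vf_port_stats stats;
 *
 *	if (t4vf_get_port_stats(adapter, pi->pidx, &stats) == 0) {
 *		ns->tx_bytes = stats.tx_bcast_bytes + stats.tx_mcast_bytes +
 *			       stats.tx_ucast_bytes;
 *		ns->rx_errors = stats.rx_err_frames;
 *	}
 */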

/**
 *	t4vf_iq_free - free an ingress queue and its free lists
 *	@adapter: the adapter
 *	@iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 *	@iqid: ingress queue ID
 *	@fl0id: FL0 queue ID or 0xffff if no attached FL0
 *	@fl1id: FL1 queue ID or 0xffff if no attached FL1
 *
 *	Frees an ingress queue and its associated free lists, if any.
 */
int t4vf_iq_free(struct adapter *adapter, unsigned int iqtype,
		 unsigned int iqid, unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F |
					 FW_LEN16(cmd));
	cmd.type_to_iqandstindex =
		cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));

	cmd.iqid = cpu_to_be16(iqid);
	cmd.fl0id = cpu_to_be16(fl0id);
	cmd.fl1id = cpu_to_be16(fl1id);
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}

/**
 *	t4vf_eth_eq_free - free an Ethernet egress queue
 *	@adapter: the adapter
 *	@eqid: egress queue ID
 *
 *	Frees an Ethernet egress queue.
 */
int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
{
	struct fw_eq_eth_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_EXEC_F);
	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F |
					 FW_LEN16(cmd));
	cmd.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
}
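
/*
 * Usage sketch (illustrative only): tearing down one Ethernet queue set,
 * i.e. an egress queue plus its response queue and free list.  The
 * "txq"/"rxq" fields are assumptions standing in for the driver's SGE
 * bookkeeping, and 0xffff marks the absent second free list:
 *
 *	t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
 *	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
 *		     rxq->rspq.cntxt_id, rxq->fl.cntxt_id, 0xffff);
 */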

/**
 *	t4vf_handle_fw_rpl - process a firmware reply message
 *	@adapter: the adapter
 *	@rpl: start of the firmware message
 *
 *	Processes a firmware message, such as link state change messages.
 */
int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
{
	const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
	u8 opcode = FW_CMD_OP_G(be32_to_cpu(cmd_hdr->hi));

	switch (opcode) {
	case FW_PORT_CMD: {
		/*
		 * Link/module state change message.
		 */
		const struct fw_port_cmd *port_cmd =
			(const struct fw_port_cmd *)rpl;
		u32 stat, mod;
		int action, port_id, link_ok, speed, fc, pidx;

		/*
		 * Extract various fields from port status change message.
		 */
		action = FW_PORT_CMD_ACTION_G(
			be32_to_cpu(port_cmd->action_to_len16));
		if (action != FW_PORT_ACTION_GET_PORT_INFO) {
			dev_err(adapter->pdev_dev,
				"Unknown firmware PORT reply action %x\n",
				action);
			break;
		}

		port_id = FW_PORT_CMD_PORTID_G(
			be32_to_cpu(port_cmd->op_to_portid));

		stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
		link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
		speed = 0;
		fc = 0;
		if (stat & FW_PORT_CMD_RXPAUSE_F)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE_F)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
			speed = 25000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
			speed = 40000;
		else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
			speed = 100000;

		/*
		 * Scan all of our "ports" (Virtual Interfaces) looking for
		 * those bound to the physical port which has changed.  If
		 * our recorded state doesn't match the current state,
		 * signal that change to the OS code.
		 */
		for_each_port(adapter, pidx) {
			struct port_info *pi = adap2pinfo(adapter, pidx);
			struct link_config *lc;

			if (pi->port_id != port_id)
				continue;

			lc = &pi->link_cfg;

			mod = FW_PORT_CMD_MODTYPE_G(stat);
			if (mod != pi->mod_type) {
				pi->mod_type = mod;
				t4vf_os_portmod_changed(adapter, pidx);
			}

			if (link_ok != lc->link_ok || speed != lc->speed ||
			    fc != lc->fc) {
				/* something changed */
				lc->link_ok = link_ok;
				lc->speed = speed;
				lc->fc = fc;
				lc->supported =
					be16_to_cpu(port_cmd->u.info.pcap);
				lc->lp_advertising =
					be16_to_cpu(port_cmd->u.info.lpacap);
				t4vf_os_link_changed(adapter, pidx, link_ok);
			}
		}
		break;
	}

	default:
		dev_err(adapter->pdev_dev, "Unknown firmware reply %X\n",
			opcode);
	}
	return 0;
}
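
/*
 * Usage sketch (illustrative only): the firmware event queue handler is the
 * usual caller, forwarding asynchronous firmware replies that arrive as
 * CPL_FW6_MSG messages; the surrounding names are assumptions about the SGE
 * code, not guarantees:
 *
 *	if (opcode == CPL_FW6_MSG) {
 *		const struct cpl_fw6_msg *msg = (const void *)rsp;
 *
 *		t4vf_handle_fw_rpl(rspq->adapter, msg->data);
 *	}
 */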

/**
 *	t4vf_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *
 *	Wait for the device to become ready and initialize the basic
 *	chip-specific adapter parameters (chip code/revision, SGE free list
 *	doorbell flags and MPS TCAM size) that the rest of the driver
 *	depends on.
 */
int t4vf_prep_adapter(struct adapter *adapter)
{
	int err;
	unsigned int chipid;

	/* Wait for the device to become ready before proceeding ...
	 */
	err = t4vf_wait_dev_ready(adapter);
	if (err)
		return err;

	/* Default port and clock for debugging in case we can't reach
	 * firmware.
	 */
	adapter->params.nports = 1;
	adapter->params.vfres.pmask = 1;
	adapter->params.vpd.cclk = 50000;

	adapter->params.chip = 0;
	switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
		adapter->params.arch.sge_fl_db = DBPRIO_F;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_CLS_SRAM_L_INSTANCES;
		break;
	case CHELSIO_T5:
		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		break;
	case CHELSIO_T6:
		chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		break;
	}

	return 0;
}
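
/*
 * Usage sketch (illustrative only): this is intended to be called early in
 * PCI probe, before any other mailbox traffic; the error label is an
 * assumption for the example:
 *
 *	err = t4vf_prep_adapter(adapter);
 *	if (err)
 *		goto err_free_adapter;
 */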

/**
 *	t4vf_get_vf_mac_acl - get the MAC address to be set to the VI of this VF
 *	@adapter: the adapter
 *	@pf: the PF associated with this VF
 *	@naddr: the number of ACL MAC addresses returned in @addr
 *	@addr: placeholder for the returned MAC address(es)
 *
 *	Find the MAC address to be set to the VF's VI.  The requested MAC
 *	address is from the host OS via a callback in the PF driver.
 */
int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
			unsigned int *naddr, u8 *addr)
{
	struct fw_acl_mac_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_READ_F);
	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
	if (ret)
		return ret;

	if (cmd.nmac < *naddr)
		*naddr = cmd.nmac;

	switch (pf) {
	case 3:
		memcpy(addr, cmd.macaddr3, sizeof(cmd.macaddr3));
		break;
	case 2:
		memcpy(addr, cmd.macaddr2, sizeof(cmd.macaddr2));
		break;
	case 1:
		memcpy(addr, cmd.macaddr1, sizeof(cmd.macaddr1));
		break;
	case 0:
		memcpy(addr, cmd.macaddr0, sizeof(cmd.macaddr0));
		break;
	}

	return ret;
}
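
/*
 * Usage sketch (illustrative only): during VI configuration the driver can
 * prefer an administratively provisioned MAC over a firmware-generated one;
 * "pf" is the PF number owning this VF, and the netdev handling below is an
 * assumption for the example:
 *
 *	unsigned int naddr = 1;
 *	u8 mac[ETH_ALEN];
 *
 *	err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac);
 *	if (!err && naddr && is_valid_ether_addr(mac))
 *		eth_hw_addr_set(netdev, mac);
 */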