mcg.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038
  1. /*
  2. * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
  3. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenIB.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. */
  33. #include <linux/string.h>
  34. #include <linux/etherdevice.h>
  35. #include <linux/mlx4/cmd.h>
  36. #include <linux/export.h>
  37. #include "mlx4.h"
#define MGM_QPN_MASK       0x00FFFFFF	/* low 24 bits of an MGM qp word hold the QPN */
#define MGM_BLCK_LB_BIT    30		/* bit in a qp word that blocks multicast loopback */

static const u8 zero_gid[16];	/* automatically initialized to 0 */

/* Hardware layout of one multicast group (MGM/AMGM) table entry. */
struct mlx4_mgm {
	__be32			next_gid_index;	/* next entry in AMGM hash chain, shifted left by 6 */
	__be32			members_count;	/* bits [31:30] protocol, bits [23:0] attached-QP count */
	u32			reserved[2];
	u8			gid[16];	/* multicast GID steered by this entry */
	__be32			qp[MLX4_MAX_QP_PER_MGM];	/* attached QP words (QPN + flags) */
};
  48. int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
  49. {
  50. return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
  51. }
  52. int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
  53. {
  54. return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
  55. }
/* Read the MGM/AMGM entry at @index from the device into @mailbox. */
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
			   struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
/* Write the MGM/AMGM entry in @mailbox to the device at @index. */
static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
			    struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
  68. static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
  69. struct mlx4_cmd_mailbox *mailbox)
  70. {
  71. u32 in_mod;
  72. in_mod = (u32) port << 16 | steer << 1;
  73. return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
  74. MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
  75. MLX4_CMD_NATIVE);
  76. }
  77. static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
  78. u16 *hash, u8 op_mod)
  79. {
  80. u64 imm;
  81. int err;
  82. err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
  83. MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
  84. MLX4_CMD_NATIVE);
  85. if (!err)
  86. *hash = imm;
  87. return err;
  88. }
  89. static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
  90. enum mlx4_steer_type steer,
  91. u32 qpn)
  92. {
  93. struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
  94. struct mlx4_promisc_qp *pqp;
  95. list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
  96. if (pqp->qpn == qpn)
  97. return pqp;
  98. }
  99. /* not found */
  100. return NULL;
  101. }
/*
 * Add new entry to steering data structure.
 * All promisc QPs should be added as well
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	/* track the new MGM index in the software steering list */
	new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

	/* If the given qpn is also a promisc qp,
	 * it should be inserted to duplicates list
	 */
	pqp = get_promisc_qp(dev, 0, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}

	/* if no promisc qps for this vep, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;

	/* now need to add all the promisc qps to the new
	 * steering entry, as they should also receive the packets
	 * destined to this address */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;

	/* members_count: low 24 bits = count, top 2 bits = protocol */
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* don't add already existing qpn */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* out of space */
			err = -ENOMEM;
			goto out_mailbox;
		}

		/* add the qpn */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}
	/* update the qps count and update the entry with all the promisc qps*/
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	/* unwind both software list insertions on failure */
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}
  184. /* update the data structures with existing steering entry */
  185. static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
  186. enum mlx4_steer_type steer,
  187. unsigned int index, u32 qpn)
  188. {
  189. struct mlx4_steer *s_steer;
  190. struct mlx4_steer_index *tmp_entry, *entry = NULL;
  191. struct mlx4_promisc_qp *pqp;
  192. struct mlx4_promisc_qp *dqp;
  193. s_steer = &mlx4_priv(dev)->steer[port - 1];
  194. pqp = get_promisc_qp(dev, 0, steer, qpn);
  195. if (!pqp)
  196. return 0; /* nothing to do */
  197. list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
  198. if (tmp_entry->index == index) {
  199. entry = tmp_entry;
  200. break;
  201. }
  202. }
  203. if (unlikely(!entry)) {
  204. mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
  205. return -EINVAL;
  206. }
  207. /* the given qpn is listed as a promisc qpn
  208. * we need to add it as a duplicate to this entry
  209. * for future references */
  210. list_for_each_entry(dqp, &entry->duplicates, list) {
  211. if (qpn == pqp->qpn)
  212. return 0; /* qp is already duplicated */
  213. }
  214. /* add the qp as a duplicate on this index */
  215. dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
  216. if (!dqp)
  217. return -ENOMEM;
  218. dqp->qpn = qpn;
  219. list_add_tail(&dqp->list, &entry->duplicates);
  220. return 0;
  221. }
  222. /* Check whether a qpn is a duplicate on steering entry
  223. * If so, it should not be removed from mgm */
  224. static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
  225. enum mlx4_steer_type steer,
  226. unsigned int index, u32 qpn)
  227. {
  228. struct mlx4_steer *s_steer;
  229. struct mlx4_steer_index *tmp_entry, *entry = NULL;
  230. struct mlx4_promisc_qp *dqp, *tmp_dqp;
  231. s_steer = &mlx4_priv(dev)->steer[port - 1];
  232. /* if qp is not promisc, it cannot be duplicated */
  233. if (!get_promisc_qp(dev, 0, steer, qpn))
  234. return false;
  235. /* The qp is promisc qp so it is a duplicate on this index
  236. * Find the index entry, and remove the duplicate */
  237. list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
  238. if (tmp_entry->index == index) {
  239. entry = tmp_entry;
  240. break;
  241. }
  242. }
  243. if (unlikely(!entry)) {
  244. mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
  245. return false;
  246. }
  247. list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
  248. if (dqp->qpn == qpn) {
  249. list_del(&dqp->list);
  250. kfree(dqp);
  251. }
  252. }
  253. return true;
  254. }
/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 qpn;
	u32 members_count;
	bool ret = false;
	int i;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	/* removable only if every member besides @tqpn is promiscuous */
	for (i = 0; i < members_count; i++) {
		qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
		if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
			/* the qp is not promisc, the entry can't be removed */
			goto out;
		}
	}
	/* All the qps currently registered for this entry are promiscuous,
	 * Checking for duplicates */
	ret = true;
	list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates)) {
				/* no duplicates: drop the software bookkeeping node too */
				list_del(&entry->list);
				kfree(entry);
			} else {
				/* This entry contains duplicates so it shouldn't be removed */
				ret = false;
				goto out;
			}
		}
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
  302. static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
  303. enum mlx4_steer_type steer, u32 qpn)
  304. {
  305. struct mlx4_steer *s_steer;
  306. struct mlx4_cmd_mailbox *mailbox;
  307. struct mlx4_mgm *mgm;
  308. struct mlx4_steer_index *entry;
  309. struct mlx4_promisc_qp *pqp;
  310. struct mlx4_promisc_qp *dqp;
  311. u32 members_count;
  312. u32 prot;
  313. int i;
  314. bool found;
  315. int last_index;
  316. int err;
  317. struct mlx4_priv *priv = mlx4_priv(dev);
  318. s_steer = &mlx4_priv(dev)->steer[port - 1];
  319. mutex_lock(&priv->mcg_table.mutex);
  320. if (get_promisc_qp(dev, 0, steer, qpn)) {
  321. err = 0; /* Noting to do, already exists */
  322. goto out_mutex;
  323. }
  324. pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
  325. if (!pqp) {
  326. err = -ENOMEM;
  327. goto out_mutex;
  328. }
  329. pqp->qpn = qpn;
  330. mailbox = mlx4_alloc_cmd_mailbox(dev);
  331. if (IS_ERR(mailbox)) {
  332. err = -ENOMEM;
  333. goto out_alloc;
  334. }
  335. mgm = mailbox->buf;
  336. /* the promisc qp needs to be added for each one of the steering
  337. * entries, if it already exists, needs to be added as a duplicate
  338. * for this entry */
  339. list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
  340. err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
  341. if (err)
  342. goto out_mailbox;
  343. members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
  344. prot = be32_to_cpu(mgm->members_count) >> 30;
  345. found = false;
  346. for (i = 0; i < members_count; i++) {
  347. if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
  348. /* Entry already exists, add to duplicates */
  349. dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
  350. if (!dqp)
  351. goto out_mailbox;
  352. dqp->qpn = qpn;
  353. list_add_tail(&dqp->list, &entry->duplicates);
  354. found = true;
  355. }
  356. }
  357. if (!found) {
  358. /* Need to add the qpn to mgm */
  359. if (members_count == dev->caps.num_qp_per_mgm) {
  360. /* entry is full */
  361. err = -ENOMEM;
  362. goto out_mailbox;
  363. }
  364. mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
  365. mgm->members_count = cpu_to_be32(members_count | (prot << 30));
  366. err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
  367. if (err)
  368. goto out_mailbox;
  369. }
  370. last_index = entry->index;
  371. }
  372. /* add the new qpn to list of promisc qps */
  373. list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
  374. /* now need to add all the promisc qps to default entry */
  375. memset(mgm, 0, sizeof *mgm);
  376. members_count = 0;
  377. list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
  378. mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
  379. mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
  380. err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
  381. if (err)
  382. goto out_list;
  383. mlx4_free_cmd_mailbox(dev, mailbox);
  384. mutex_unlock(&priv->mcg_table.mutex);
  385. return 0;
  386. out_list:
  387. list_del(&pqp->list);
  388. out_mailbox:
  389. mlx4_free_cmd_mailbox(dev, mailbox);
  390. out_alloc:
  391. kfree(pqp);
  392. out_mutex:
  393. mutex_unlock(&priv->mcg_table.mutex);
  394. return err;
  395. }
  396. static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
  397. enum mlx4_steer_type steer, u32 qpn)
  398. {
  399. struct mlx4_priv *priv = mlx4_priv(dev);
  400. struct mlx4_steer *s_steer;
  401. struct mlx4_cmd_mailbox *mailbox;
  402. struct mlx4_mgm *mgm;
  403. struct mlx4_steer_index *entry;
  404. struct mlx4_promisc_qp *pqp;
  405. struct mlx4_promisc_qp *dqp;
  406. u32 members_count;
  407. bool found;
  408. bool back_to_list = false;
  409. int loc, i;
  410. int err;
  411. s_steer = &mlx4_priv(dev)->steer[port - 1];
  412. mutex_lock(&priv->mcg_table.mutex);
  413. pqp = get_promisc_qp(dev, 0, steer, qpn);
  414. if (unlikely(!pqp)) {
  415. mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
  416. /* nothing to do */
  417. err = 0;
  418. goto out_mutex;
  419. }
  420. /*remove from list of promisc qps */
  421. list_del(&pqp->list);
  422. /* set the default entry not to include the removed one */
  423. mailbox = mlx4_alloc_cmd_mailbox(dev);
  424. if (IS_ERR(mailbox)) {
  425. err = -ENOMEM;
  426. back_to_list = true;
  427. goto out_list;
  428. }
  429. mgm = mailbox->buf;
  430. memset(mgm, 0, sizeof *mgm);
  431. members_count = 0;
  432. list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
  433. mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
  434. mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
  435. err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
  436. if (err)
  437. goto out_mailbox;
  438. /* remove the qp from all the steering entries*/
  439. list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
  440. found = false;
  441. list_for_each_entry(dqp, &entry->duplicates, list) {
  442. if (dqp->qpn == qpn) {
  443. found = true;
  444. break;
  445. }
  446. }
  447. if (found) {
  448. /* a duplicate, no need to change the mgm,
  449. * only update the duplicates list */
  450. list_del(&dqp->list);
  451. kfree(dqp);
  452. } else {
  453. err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
  454. if (err)
  455. goto out_mailbox;
  456. members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
  457. for (loc = -1, i = 0; i < members_count; ++i)
  458. if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
  459. loc = i;
  460. mgm->members_count = cpu_to_be32(--members_count |
  461. (MLX4_PROT_ETH << 30));
  462. mgm->qp[loc] = mgm->qp[i - 1];
  463. mgm->qp[i - 1] = 0;
  464. err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
  465. if (err)
  466. goto out_mailbox;
  467. }
  468. }
  469. out_mailbox:
  470. mlx4_free_cmd_mailbox(dev, mailbox);
  471. out_list:
  472. if (back_to_list)
  473. list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
  474. else
  475. kfree(pqp);
  476. out_mutex:
  477. mutex_unlock(&priv->mcg_table.mutex);
  478. return err;
  479. }
/*
 * Caller must hold MCG table semaphore. gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * if GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	u16 hash;
	/* for Ethernet use op_mod 1 when VEP multicast steering is enabled */
	u8 op_mod = (prot == MLX4_PROT_ETH) ?
		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	if (0)	/* flip to 1 for hash debugging */
		mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);

	*index = hash;
	*prev = -1;

	/* walk the hash chain starting at the MGM slot for this hash */
	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		/* a zero members_count marks an unused (end-of-chain) entry */
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			if (*index != hash) {
				mlx4_err(dev, "Found zero MGID in AMGM.\n");
				err = -EINVAL;
			}
			return err;
		}

		/* match on both GID and protocol (top 2 bits of members_count) */
		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	*index = -1;
	return err;
}
/* Attach @qp to the group identified by @gid: find (or allocate, linking
 * into the AMGM hash chain) the MGM entry, append the QPN, and for
 * Ethernet update the promiscuous-steering bookkeeping.
 * Returns 0, -ENOMEM when the entry/AMGM space is exhausted, or a
 * firmware command error. */
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int index, prev;
	int link = 0;
	int i;
	int err;
	u8 port = gid[5];	/* port number is carried in byte 5 of the GID */
	u8 new_entry = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index != -1) {
		/* hash slot exists; zero members_count means it is fresh */
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		/* GID not found: allocate an AMGM entry, link it below */
		link = 1;

		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		index += dev->caps.num_mgms;

		new_entry = 1;
		memset(mgm, 0, sizeof *mgm);
		memcpy(mgm->gid, gid, 16);
	}

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full.\n", index);
		err = -ENOMEM;
		goto out;
	}

	/* attaching the same QP twice is a successful no-op */
	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	if (block_mcast_loopback)
		mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
						       (1U << MGM_BLCK_LB_BIT));
	else
		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	if (!link)
		goto out;

	/* chain the new AMGM entry after the previous end of the chain */
	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

out:
	if (prot == MLX4_PROT_ETH) {
		/* manage the steering entry for promisc mode */
		if (new_entry)
			new_steering_entry(dev, port, steer, index, qp->qpn);
		else
			existing_steering_entry(dev, port, steer,
						index, qp->qpn);
	}
	if (err && link && index != -1) {
		/* roll back the AMGM allocation on failure */
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
/* Detach @qp from the group identified by @gid: remove the QPN from the
 * MGM entry; when the entry becomes empty, unlink it from the hash chain
 * (MGM slot is zeroed, AMGM slot is returned to the bitmap). */
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc;
	int err;
	u8 port = gid[5];	/* port number is carried in byte 5 of the GID */
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* if this pq is also a promisc qp, it shouldn't be removed */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn))
		goto out;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (loc = -1, i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
			loc = i;

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	/* copy the last member over the removed slot and shrink the count */
	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
	mgm->qp[loc] = mgm->qp[i - 1];
	mgm->qp[i - 1] = 0;

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	/* entry still has members (or must stay for steering): just rewrite */
	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			/* pull the first AMGM chain entry into the MGM slot */
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		/* bridge the chain over the removed entry */
		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
  725. static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
  726. u8 gid[16], u8 attach, u8 block_loopback,
  727. enum mlx4_protocol prot)
  728. {
  729. struct mlx4_cmd_mailbox *mailbox;
  730. int err = 0;
  731. int qpn;
  732. if (!mlx4_is_mfunc(dev))
  733. return -EBADF;
  734. mailbox = mlx4_alloc_cmd_mailbox(dev);
  735. if (IS_ERR(mailbox))
  736. return PTR_ERR(mailbox);
  737. memcpy(mailbox->buf, gid, 16);
  738. qpn = qp->qpn;
  739. qpn |= (prot << 28);
  740. if (attach && block_loopback)
  741. qpn |= (1 << 31);
  742. err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
  743. MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
  744. MLX4_CMD_WRAPPED);
  745. mlx4_free_cmd_mailbox(dev, mailbox);
  746. return err;
  747. }
  748. int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
  749. int block_mcast_loopback, enum mlx4_protocol prot)
  750. {
  751. if (prot == MLX4_PROT_ETH &&
  752. !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  753. return 0;
  754. if (prot == MLX4_PROT_ETH)
  755. gid[7] |= (MLX4_MC_STEER << 1);
  756. if (mlx4_is_mfunc(dev))
  757. return mlx4_QP_ATTACH(dev, qp, gid, 1,
  758. block_mcast_loopback, prot);
  759. return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
  760. prot, MLX4_MC_STEER);
  761. }
  762. EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
  763. int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
  764. enum mlx4_protocol prot)
  765. {
  766. if (prot == MLX4_PROT_ETH &&
  767. !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  768. return 0;
  769. if (prot == MLX4_PROT_ETH)
  770. gid[7] |= (MLX4_MC_STEER << 1);
  771. if (mlx4_is_mfunc(dev))
  772. return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
  773. return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_MC_STEER);
  774. }
  775. EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
  776. int mlx4_unicast_attach(struct mlx4_dev *dev,
  777. struct mlx4_qp *qp, u8 gid[16],
  778. int block_mcast_loopback, enum mlx4_protocol prot)
  779. {
  780. if (prot == MLX4_PROT_ETH &&
  781. !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
  782. return 0;
  783. if (prot == MLX4_PROT_ETH)
  784. gid[7] |= (MLX4_UC_STEER << 1);
  785. if (mlx4_is_mfunc(dev))
  786. return mlx4_QP_ATTACH(dev, qp, gid, 1,
  787. block_mcast_loopback, prot);
  788. return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
  789. prot, MLX4_UC_STEER);
  790. }
  791. EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
  792. int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
  793. u8 gid[16], enum mlx4_protocol prot)
  794. {
  795. if (prot == MLX4_PROT_ETH &&
  796. !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
  797. return 0;
  798. if (prot == MLX4_PROT_ETH)
  799. gid[7] |= (MLX4_UC_STEER << 1);
  800. if (mlx4_is_mfunc(dev))
  801. return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
  802. return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
  803. }
  804. EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
  805. int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
  806. struct mlx4_vhcr *vhcr,
  807. struct mlx4_cmd_mailbox *inbox,
  808. struct mlx4_cmd_mailbox *outbox,
  809. struct mlx4_cmd_info *cmd)
  810. {
  811. u32 qpn = (u32) vhcr->in_param & 0xffffffff;
  812. u8 port = vhcr->in_param >> 62;
  813. enum mlx4_steer_type steer = vhcr->in_modifier;
  814. /* Promiscuous unicast is not allowed in mfunc */
  815. if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
  816. return 0;
  817. if (vhcr->op_modifier)
  818. return add_promisc_qp(dev, port, steer, qpn);
  819. else
  820. return remove_promisc_qp(dev, port, steer, qpn);
  821. }
  822. static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
  823. enum mlx4_steer_type steer, u8 add, u8 port)
  824. {
  825. return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
  826. MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
  827. MLX4_CMD_WRAPPED);
  828. }
  829. int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
  830. {
  831. if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  832. return 0;
  833. if (mlx4_is_mfunc(dev))
  834. return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
  835. return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
  836. }
  837. EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
  838. int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
  839. {
  840. if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  841. return 0;
  842. if (mlx4_is_mfunc(dev))
  843. return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
  844. return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
  845. }
  846. EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
  847. int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
  848. {
  849. if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
  850. return 0;
  851. if (mlx4_is_mfunc(dev))
  852. return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
  853. return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
  854. }
  855. EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
  856. int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
  857. {
  858. if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
  859. return 0;
  860. if (mlx4_is_mfunc(dev))
  861. return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
  862. return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
  863. }
  864. EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
  865. int mlx4_init_mcg_table(struct mlx4_dev *dev)
  866. {
  867. struct mlx4_priv *priv = mlx4_priv(dev);
  868. int err;
  869. err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
  870. dev->caps.num_amgms - 1, 0, 0);
  871. if (err)
  872. return err;
  873. mutex_init(&priv->mcg_table.mutex);
  874. return 0;
  875. }
  876. void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
  877. {
  878. mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
  879. }