i40iw_main.c

/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/addrconf.h>
#include "i40iw.h"
#include "i40iw_register.h"
#include <net/netevent.h>

#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
#define CLIENT_IW_INTERFACE_VERSION_MINOR 01
#define CLIENT_IW_INTERFACE_VERSION_BUILD 00

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 5
#define DRV_VERSION_BUILD 123
#define DRV_VERSION	__stringify(DRV_VERSION_MAJOR) "."		\
	__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");

static int resource_profile;
module_param(resource_profile, int, 0644);
MODULE_PARM_DESC(resource_profile,
		 "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");

static int max_rdma_vfs = 32;
module_param(max_rdma_vfs, int, 0644);
MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32 32=default");

static int mpa_version = 2;
module_param(mpa_version, int, 0644);
MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");

MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
static struct i40e_client i40iw_client;
static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";

static LIST_HEAD(i40iw_handlers);
static spinlock_t i40iw_handler_lock;

static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
						  u32 vf_id, u8 *msg, u16 len);

static struct notifier_block i40iw_inetaddr_notifier = {
	.notifier_call = i40iw_inetaddr_event
};

static struct notifier_block i40iw_inetaddr6_notifier = {
	.notifier_call = i40iw_inet6addr_event
};

static struct notifier_block i40iw_net_notifier = {
	.notifier_call = i40iw_net_event
};
/**
 * i40iw_find_i40e_handler - find a handler given a client info
 * @ldev: pointer to a client info
 */
static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
{
	struct i40iw_handler *hdl;
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_for_each_entry(hdl, &i40iw_handlers, list) {
		if (hdl->ldev.netdev == ldev->netdev) {
			spin_unlock_irqrestore(&i40iw_handler_lock, flags);
			return hdl;
		}
	}
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return NULL;
}
/**
 * i40iw_find_netdev - find a handler given a netdev
 * @netdev: pointer to net_device
 */
struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
{
	struct i40iw_handler *hdl;
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_for_each_entry(hdl, &i40iw_handlers, list) {
		if (hdl->ldev.netdev == netdev) {
			spin_unlock_irqrestore(&i40iw_handler_lock, flags);
			return hdl;
		}
	}
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return NULL;
}
/**
 * i40iw_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 */
static void i40iw_add_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_add(&hdl->list, &i40iw_handlers);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
}

/**
 * i40iw_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 */
static int i40iw_del_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return 0;
}
/**
 * i40iw_enable_intr - set up device interrupts
 * @dev: hardware control device structure
 * @msix_id: id of the interrupt to be enabled
 */
static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
{
	u32 val;

	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	      (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
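	/*
	 * MSI-X vector 0 is programmed through its own DYN_CTL0 register;
	 * the DYN_CTLN register array covers vectors 1 onward, hence the
	 * msix_id - 1 indexing below.
	 */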
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
}
/**
 * i40iw_dpc - tasklet for aeq and ceq 0
 * @data: iwarp device
 */
static void i40iw_dpc(unsigned long data)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)data;

	if (iwdev->msix_shared)
		i40iw_process_ceq(iwdev, iwdev->ceqlist);
	i40iw_process_aeq(iwdev);
	i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
}

/**
 * i40iw_ceq_dpc - dpc handler for CEQ
 * @data: data points to CEQ
 */
static void i40iw_ceq_dpc(unsigned long data)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
	struct i40iw_device *iwdev = iwceq->iwdev;

	i40iw_process_ceq(iwdev, iwceq);
	i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
}

/**
 * i40iw_irq_handler - interrupt handler for aeq and ceq0
 * @irq: Interrupt request number
 * @data: iwarp device
 */
static irqreturn_t i40iw_irq_handler(int irq, void *data)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)data;

	tasklet_schedule(&iwdev->dpc_tasklet);
	return IRQ_HANDLED;
}
/**
 * i40iw_destroy_cqp - destroy control qp
 * @iwdev: iwarp device
 * @free_hwcqp: true if the hardware cqp should be destroyed
 *
 * Issue destroy cqp request and
 * free the resources associated with the cqp
 */
static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;

	if (free_hwcqp)
		dev->cqp_ops->cqp_destroy(dev->cqp);

	i40iw_cleanup_pending_cqp_op(iwdev);

	i40iw_free_dma_mem(dev->hw, &cqp->sq);
	kfree(cqp->scratch_array);
	iwdev->cqp.scratch_array = NULL;

	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;
}
/**
 * i40iw_disable_irq - disable device interrupts
 * @dev: hardware control device structure
 * @msix_vec: msix vector to disable irq
 * @dev_id: parameter to pass to free_irq (used during irq setup)
 *
 * The function is called when destroying aeq/ceq
 */
static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
			      struct i40iw_msix_vector *msix_vec,
			      void *dev_id)
{
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
	irq_set_affinity_hint(msix_vec->irq, NULL);
	free_irq(msix_vec->irq, dev_id);
}
/**
 * i40iw_destroy_aeq - destroy aeq
 * @iwdev: iwarp device
 *
 * Issue a destroy aeq request and
 * free the resources associated with the aeq
 * The function is called during driver unload
 */
static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status = I40IW_ERR_NOT_READY;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;

	if (!iwdev->msix_shared)
		i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
	if (iwdev->reset)
		goto exit;

	if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
		status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
	if (status)
		i40iw_pr_err("destroy aeq failed %d\n", status);

exit:
	i40iw_free_dma_mem(dev->hw, &aeq->mem);
}
/**
 * i40iw_destroy_ceq - destroy ceq
 * @iwdev: iwarp device
 * @iwceq: ceq to be destroyed
 *
 * Issue a destroy ceq request and
 * free the resources associated with the ceq
 */
static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
			      struct i40iw_ceq *iwceq)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	if (iwdev->reset)
		goto exit;

	status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
	if (status) {
		i40iw_pr_err("ceq destroy command failed %d\n", status);
		goto exit;
	}

	status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
	if (status)
		i40iw_pr_err("ceq destroy completion failed %d\n", status);
exit:
	i40iw_free_dma_mem(dev->hw, &iwceq->mem);
}
/**
 * i40iw_dele_ceqs - destroy all ceq's
 * @iwdev: iwarp device
 *
 * Go through all of the device ceq's and for each ceq
 * disable the ceq interrupt and destroy the ceq
 */
static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
{
	u32 i = 0;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ceq *iwceq = iwdev->ceqlist;
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;

	if (iwdev->msix_shared) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
		i40iw_destroy_ceq(iwdev, iwceq);
		iwceq++;
		i++;
	}

	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
		i40iw_destroy_ceq(iwdev, iwceq);
	}
}
/**
 * i40iw_destroy_ccq - destroy control cq
 * @iwdev: iwarp device
 *
 * Issue destroy ccq request and
 * free the resources associated with the ccq
 */
static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ccq *ccq = &iwdev->ccq;
	enum i40iw_status_code status = 0;

	if (!iwdev->reset)
		status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
	if (status)
		i40iw_pr_err("ccq destroy failed %d\n", status);
	i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
}
/* types of hmc objects */
static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
	I40IW_HMC_IW_QP,
	I40IW_HMC_IW_CQ,
	I40IW_HMC_IW_HTE,
	I40IW_HMC_IW_ARP,
	I40IW_HMC_IW_APBVT_ENTRY,
	I40IW_HMC_IW_MR,
	I40IW_HMC_IW_XF,
	I40IW_HMC_IW_XFFL,
	I40IW_HMC_IW_Q1,
	I40IW_HMC_IW_Q1FL,
	I40IW_HMC_IW_TIMER,
};
/**
 * i40iw_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: hardware control device structure
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: host memory info struct
 * @is_pf: true if the function is PF otherwise false
 * @reset: true if called before reset
 */
static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
					 enum i40iw_hmc_rsrc_type obj_type,
					 struct i40iw_hmc_info *hmc_info,
					 bool is_pf,
					 bool reset)
{
	struct i40iw_hmc_del_obj_info info;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.rsrc_type = obj_type;
	info.count = hmc_info->hmc_obj[obj_type].cnt;
	info.is_pf = is_pf;
	if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
		i40iw_pr_err("del obj of type %d failed\n", obj_type);
}
/**
 * i40iw_del_hmc_objects - remove all device hmc objects
 * @dev: hardware control device structure
 * @hmc_info: hmc_info to free
 * @is_pf: true if hmc_info belongs to PF, not vf nor allocated
 *	   by PF on behalf of VF
 * @reset: true if called before reset
 */
static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
				  struct i40iw_hmc_info *hmc_info,
				  bool is_pf,
				  bool reset)
{
	unsigned int i;

	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
		i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
}
/**
 * i40iw_ceq_handler - interrupt handler for ceq
 * @irq: interrupt request number
 * @data: ceq pointer
 */
static irqreturn_t i40iw_ceq_handler(int irq, void *data)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;

	if (iwceq->irq != irq)
		i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
	tasklet_schedule(&iwceq->dpc_tasklet);
	return IRQ_HANDLED;
}
/**
 * i40iw_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 */
static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_create_obj_info *info)
{
	return dev->hmc_ops->create_hmc_object(dev, info);
}
/**
 * i40iw_create_hmc_objs - create all hmc objects for the device
 * @iwdev: iwarp device
 * @is_pf: true if the function is PF otherwise false
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
						    bool is_pf)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_hmc_create_obj_info info;
	enum i40iw_status_code status;
	int i;

	memset(&info, 0, sizeof(info));
	info.hmc_info = dev->hmc_info;
	info.is_pf = is_pf;
	info.entry_type = iwdev->sd_type;
	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		info.rsrc_type = iw_hmc_obj_types[i];
		info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
		status = i40iw_create_hmc_obj_type(dev, &info);
		if (status) {
			i40iw_pr_err("create obj type %d status = %d\n",
				     iw_hmc_obj_types[i], status);
			break;
		}
	}
	if (!status)
		return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
								      dev->hmc_fn_id,
								      true, true));

	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		i40iw_close_hmc_objects_type(dev,
					     iw_hmc_obj_types[i],
					     dev->hmc_info,
					     is_pf,
					     false);
	}
	return status;
}
/**
 * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
 * @iwdev: iwarp device
 * @memptr: points to the memory addresses
 * @size: size of memory needed
 * @mask: mask for the aligned memory
 *
 * Get aligned memory of the requested size and
 * update the memptr to point to the new aligned memory
 * Return 0 if successful, otherwise return no memory error
 */
enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
					     struct i40iw_dma_mem *memptr,
					     u32 size,
					     u32 mask)
{
	unsigned long va, newva;
	unsigned long extra;

	va = (unsigned long)iwdev->obj_next.va;
	newva = va;
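	/*
	 * "mask" is alignment - 1 (e.g. a mask of 0xFFF requests 4K
	 * alignment), so mask + 1 is the boundary passed to ALIGN() and
	 * "extra" is the padding consumed to reach that boundary.
	 */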
	if (mask)
		newva = ALIGN(va, (mask + 1));
	extra = newva - va;
	memptr->va = (u8 *)va + extra;
	memptr->pa = iwdev->obj_next.pa + extra;
	memptr->size = size;
	if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
		return I40IW_ERR_NO_MEMORY;

	iwdev->obj_next.va = memptr->va + size;
	iwdev->obj_next.pa = memptr->pa + size;
	return 0;
}
/**
 * i40iw_create_cqp - create control qp
 * @iwdev: iwarp device
 *
 * Return 0, if the cqp and all the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
	struct i40iw_dma_mem mem;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp_init_info cqp_init_info;
	struct i40iw_cqp *cqp = &iwdev->cqp;
	u16 maj_err, min_err;
	int i;

	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
	if (!cqp->cqp_requests)
		return I40IW_ERR_NO_MEMORY;
	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
	if (!cqp->scratch_array) {
		kfree(cqp->cqp_requests);
		return I40IW_ERR_NO_MEMORY;
	}
	dev->cqp = &cqp->sc_cqp;
	dev->cqp->dev = dev;
	memset(&cqp_init_info, 0, sizeof(cqp_init_info));
	status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
					(sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
					I40IW_CQP_ALIGNMENT);
	if (status)
		goto exit;
	status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
				       I40IW_HOST_CTX_ALIGNMENT_MASK);
	if (status)
		goto exit;
	dev->cqp->host_ctx_pa = mem.pa;
	dev->cqp->host_ctx = mem.va;
	/* populate the cqp init info */
	cqp_init_info.dev = dev;
	cqp_init_info.sq_size = sqsize;
	cqp_init_info.sq = cqp->sq.va;
	cqp_init_info.sq_pa = cqp->sq.pa;
	cqp_init_info.host_ctx_pa = mem.pa;
	cqp_init_info.host_ctx = mem.va;
	cqp_init_info.hmc_profile = iwdev->resource_profile;
	cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
	cqp_init_info.scratch_array = cqp->scratch_array;
	status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
	if (status) {
		i40iw_pr_err("cqp init status %d\n", status);
		goto exit;
	}
	status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
	if (status) {
		i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
			     status, maj_err, min_err);
		goto exit;
	}
	spin_lock_init(&cqp->req_lock);
	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
	/* init the waitq of the cqp_requests and add them to the list */
	for (i = 0; i < I40IW_CQP_SW_SQSIZE_2048; i++) {
		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
	}
	return 0;
exit:
	/* clean up the created resources */
	i40iw_destroy_cqp(iwdev, false);
	return status;
}
/**
 * i40iw_create_ccq - create control cq
 * @iwdev: iwarp device
 *
 * Return 0, if the ccq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_dma_mem mem;
	enum i40iw_status_code status;
	struct i40iw_ccq_init_info info;
	struct i40iw_ccq *ccq = &iwdev->ccq;

	memset(&info, 0, sizeof(info));
	dev->ccq = &ccq->sc_cq;
	dev->ccq->dev = dev;
	info.dev = dev;
	ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
	ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
	status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
					ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
	if (status)
		goto exit;
	status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
				       I40IW_SHADOWAREA_MASK);
	if (status)
		goto exit;
	ccq->sc_cq.back_cq = (void *)ccq;
	/* populate the ccq init info */
	info.cq_base = ccq->mem_cq.va;
	info.cq_pa = ccq->mem_cq.pa;
	info.num_elem = IW_CCQ_SIZE;
	info.shadow_area = mem.va;
	info.shadow_area_pa = mem.pa;
	info.ceqe_mask = false;
	info.ceq_id_valid = true;
	info.shadow_read_threshold = 16;
	status = dev->ccq_ops->ccq_init(dev->ccq, &info);
	if (!status)
		status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
	return status;
}
/**
 * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
 * @iwdev: iwarp device
 * @iwceq: ceq associated with the vector
 * @ceq_id: the id number of the iwceq
 * @msix_vec: interrupt vector information
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
							 struct i40iw_ceq *iwceq,
							 u32 ceq_id,
							 struct i40iw_msix_vector *msix_vec)
{
	enum i40iw_status_code status;

	if (iwdev->msix_shared && !ceq_id) {
		tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
		status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
	} else {
		tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);
		status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
	}

	cpumask_clear(&msix_vec->mask);
	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
	irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);

	if (status) {
		i40iw_pr_err("ceq irq config fail\n");
		return I40IW_ERR_CONFIG;
	}
	msix_vec->ceq_id = ceq_id;

	return 0;
}
/**
 * i40iw_create_ceq - create completion event queue
 * @iwdev: iwarp device
 * @iwceq: pointer to the ceq resources to be created
 * @ceq_id: the id number of the iwceq
 *
 * Return 0, if the ceq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
					       struct i40iw_ceq *iwceq,
					       u32 ceq_id)
{
	enum i40iw_status_code status;
	struct i40iw_ceq_init_info info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	u64 scratch;

	memset(&info, 0, sizeof(info));
	info.ceq_id = ceq_id;
	iwceq->iwdev = iwdev;
	iwceq->mem.size = sizeof(struct i40iw_ceqe) *
		iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
					I40IW_CEQ_ALIGNMENT);
	if (status)
		goto exit;
	info.ceqe_base = iwceq->mem.va;
	info.ceqe_pa = iwceq->mem.pa;
	info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	iwceq->sc_ceq.ceq_id = ceq_id;
	info.dev = dev;
	scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
	status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
	if (!status)
		status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);
exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &iwceq->mem);
	return status;
}
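
/**
 * i40iw_request_reset - request a reset from the lan driver
 * @iwdev: iwarp device
 */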
void i40iw_request_reset(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;

	ldev->ops->request_reset(ldev, iwdev->client, 1);
}
/**
 * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
 * @iwdev: iwarp device
 * @ldev: i40e lan device
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if at least one ceq is successfully set up, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
					       struct i40e_info *ldev)
{
	u32 i;
	u32 ceq_id;
	struct i40iw_ceq *iwceq;
	struct i40iw_msix_vector *msix_vec;
	enum i40iw_status_code status = 0;
	u32 num_ceqs;

	if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
		status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
						 iwdev->iw_qvlist);
		if (status)
			goto exit;
	} else {
		status = I40IW_ERR_BAD_PTR;
		goto exit;
	}

	num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
	iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
	if (!iwdev->ceqlist) {
		status = I40IW_ERR_NO_MEMORY;
		goto exit;
	}
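	/*
	 * When msix_shared is set, MSI-X vector 0 services both the AEQ
	 * and CEQ 0, so CEQ vectors start at table index 0; otherwise
	 * vector 0 is dedicated to the AEQ and CEQ vectors start at 1.
	 */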
	i = (iwdev->msix_shared) ? 0 : 1;
	for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
		iwceq = &iwdev->ceqlist[ceq_id];
		status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
		if (status) {
			i40iw_pr_err("create ceq status = %d\n", status);
			break;
		}

		msix_vec = &iwdev->iw_msixtbl[i];
		iwceq->irq = msix_vec->irq;
		iwceq->msix_idx = msix_vec->idx;
		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
		if (status) {
			i40iw_destroy_ceq(iwdev, iwceq);
			break;
		}
		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
		iwdev->ceqs_count++;
	}
exit:
	if (status) {
		if (!iwdev->ceqs_count) {
			kfree(iwdev->ceqlist);
			iwdev->ceqlist = NULL;
		} else {
			status = 0;
		}
	}
	return status;
}
/**
 * i40iw_configure_aeq_vector - set up the msix vector for aeq
 * @iwdev: iwarp device
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
{
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
	u32 ret = 0;

	if (!iwdev->msix_shared) {
		tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
		ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
	}
	if (ret) {
		i40iw_pr_err("aeq irq config fail\n");
		return I40IW_ERR_CONFIG;
	}

	return 0;
}
/**
 * i40iw_create_aeq - create async event queue
 * @iwdev: iwarp device
 *
 * Return 0, if the aeq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	struct i40iw_aeq_init_info info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;
	u64 scratch = 0;
	u32 aeq_size;
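	/*
	 * Size the AEQ for the worst case: room for two asynchronous
	 * events per QP plus one per CQ that the HMC is configured for.
	 */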
	aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
		iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	memset(&info, 0, sizeof(info));
	aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
	status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
					I40IW_AEQ_ALIGNMENT);
	if (status)
		goto exit;

	info.aeqe_base = aeq->mem.va;
	info.aeq_elem_pa = aeq->mem.pa;
	info.elem_cnt = aeq_size;
	info.dev = dev;
	status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
	if (status)
		goto exit;
	status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
	if (!status)
		status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &aeq->mem);
	return status;
}
/**
 * i40iw_setup_aeq - set up the device aeq
 * @iwdev: iwarp device
 *
 * Create the aeq and configure its msix interrupt vector
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	status = i40iw_create_aeq(iwdev);
	if (status)
		return status;

	status = i40iw_configure_aeq_vector(iwdev);
	if (status) {
		i40iw_destroy_aeq(iwdev);
		return status;
	}

	if (!iwdev->msix_shared)
		i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
	return 0;
}
/**
 * i40iw_initialize_ilq - create iwarp local queue for cm
 * @iwdev: iwarp device
 *
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
{
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

	memset(&info, 0, sizeof(info));
	info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
	info.cq_id = 1;
	info.qp_id = 0;
	info.count = 1;
	info.pd_id = 1;
	info.sq_size = 8192;
	info.rq_size = 8192;
	info.buf_size = 1024;
	info.tx_buf_cnt = 16384;
	info.receive = i40iw_receive_ilq;
	info.xmit_complete = i40iw_free_sqbuf;
	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		i40iw_pr_err("ilq create fail\n");
	return status;
}
/**
 * i40iw_initialize_ieq - create iwarp exception queue
 * @iwdev: iwarp device
 *
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
{
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

	memset(&info, 0, sizeof(info));
	info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
	info.cq_id = 2;
	info.qp_id = iwdev->sc_dev.exception_lan_queue;
	info.count = 1;
	info.pd_id = 2;
	info.sq_size = 8192;
	info.rq_size = 8192;
	info.buf_size = 2048;
	info.tx_buf_cnt = 16384;
	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		i40iw_pr_err("ieq create fail\n");
	return status;
}
/**
 * i40iw_hmc_setup - create hmc objects for the device
 * @iwdev: iwarp device
 *
 * Set up the device private memory space for the number and size of
 * the hmc objects and create the objects
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;

	iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
	status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
	if (status)
		goto exit;
	status = i40iw_create_hmc_objs(iwdev, true);
	if (status)
		goto exit;
	iwdev->init_state = HMC_OBJS_CREATED;
exit:
	return status;
}
/**
 * i40iw_del_init_mem - deallocate memory resources
 * @iwdev: iwarp device
 */
static void i40iw_del_init_mem(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
	kfree(dev->hmc_info->sd_table.sd_entry);
	dev->hmc_info->sd_table.sd_entry = NULL;
	kfree(iwdev->mem_resources);
	iwdev->mem_resources = NULL;
	kfree(iwdev->ceqlist);
	iwdev->ceqlist = NULL;
	kfree(iwdev->iw_msixtbl);
	iwdev->iw_msixtbl = NULL;
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
}
/**
 * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
 * @iwdev: iwarp device
 * @idx: the index of the mac ip address to delete
 */
static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return;
	}
	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
	cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
}
/**
 * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
 * @iwdev: iwarp device
 * @mac_addr: pointer to mac address
 * @idx: the index of the mac ip address to add
 */
static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
							 u8 *mac_addr,
							 u8 idx)
{
	struct i40iw_local_mac_ipaddr_entry_info *info;
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}

	cqp_info = &cqp_request->info;
	cqp_info->post_sq = 1;
	info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
	ether_addr_copy(info->mac_addr, mac_addr);
	info->entry_idx = idx;
	cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
	return status;
}
/**
 * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
 * @iwdev: iwarp device
 * @mac_ip_tbl_idx: the index of the new mac ip address
 *
 * Allocate a mac ip address entry and update the mac_ip_tbl_idx
 * to hold the index of the newly created mac ip address
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
								 u16 *mac_ip_tbl_idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}

	/* increment refcount, because we need the cqp request ret value */
	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		*mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail");
	/* decrement refcount and free the cqp request, if no longer used */
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return status;
}
/**
 * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
 * @iwdev: iwarp device
 * @macaddr: pointer to mac address
 *
 * Allocate a mac ip address entry and add it to the hw table
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
							 u8 *macaddr)
{
	enum i40iw_status_code status;

	status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
	if (!status) {
		status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
						    (u8)iwdev->mac_ip_table_idx);
		if (status)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
	}
	return status;
}
/**
 * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: iwarp device
 */
static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	u32 local_ipaddr6[4];

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				i40iw_pr_err("ipv6 inet device not found\n");
				break;
			}
			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
				i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
					      rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
				i40iw_copy_ip_ntohl(local_ipaddr6,
						    ifp->addr.in6_u.u6_addr32);
				i40iw_manage_arp_cache(iwdev,
						       ip_dev->dev_addr,
						       local_ipaddr6,
						       false,
						       I40IW_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}
/**
 * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: iwarp device
 */
static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
{
	struct net_device *dev;
	struct in_device *idev;
	bool got_lock = true;
	u32 ip_addr;
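	/*
	 * This can run while another context already holds the RTNL lock
	 * (e.g. via a netdev notifier), so fall back to walking the list
	 * without the lock rather than deadlocking on rtnl_lock().
	 */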
	if (!rtnl_trylock())
		got_lock = false;

	for_each_netdev(&init_net, dev) {
		if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
		     (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
			idev = in_dev_get(dev);
			for_ifa(idev) {
				i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
					    "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
					    rdma_vlan_dev_vlan_id(dev), dev->dev_addr);
				ip_addr = ntohl(ifa->ifa_address);
				i40iw_manage_arp_cache(iwdev,
						       dev->dev_addr,
						       &ip_addr,
						       true,
						       I40IW_ARP_ADD);
			}
			endfor_ifa(idev);
			in_dev_put(idev);
		}
	}
	if (got_lock)
		rtnl_unlock();
}
/**
 * i40iw_add_mac_ip - add mac and ip addresses
 * @iwdev: iwarp device
 *
 * Create and add a mac ip address entry to the hw table and
 * ipv4/ipv6 addresses to the arp cache
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
{
	struct net_device *netdev = iwdev->netdev;
	enum i40iw_status_code status;

	status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
	if (status)
		return status;
	i40iw_add_ipv4_addr(iwdev);
	i40iw_add_ipv6_addr(iwdev);
	return 0;
}
/**
 * i40iw_wait_pe_ready - Check if firmware is ready
 * @hw: provides access to registers
 */
static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
{
	u32 statusfw;
	u32 statuscpu0;
	u32 statuscpu1;
	u32 statuscpu2;
	u32 retrycount = 0;

	do {
		statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
		i40iw_pr_info("[%04d] fw load status[x%04X]\n", __LINE__, statusfw);
		statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
		i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
		statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
			      __LINE__, statuscpu1);
		statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
			      __LINE__, statuscpu2);
		if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
			break;	/* SUCCESS */
		mdelay(1000);
		retrycount++;
	} while (retrycount < 14);
	i40iw_wr32(hw, 0xb4040, 0x4C104C5);
}
/**
 * i40iw_initialize_dev - initialize device
 * @iwdev: iwarp device
 * @ldev: lan device information
 *
 * Allocate memory for the hmc objects and initialize iwdev
 * Return 0 if successful, otherwise clean up the resources
 * and return error
 */
static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
						   struct i40e_info *ldev)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_device_init_info info;
	struct i40iw_vsi_init_info vsi_info;
	struct i40iw_dma_mem mem;
	struct i40iw_l2params l2params;
	u32 size;
	struct i40iw_vsi_stats_info stats_info;
	u16 last_qset = I40IW_NO_QSET;
	u16 qset;
	u32 i;

	memset(&l2params, 0, sizeof(l2params));
	memset(&info, 0, sizeof(info));
	size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
				(sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
	iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
	if (!iwdev->hmc_info_mem)
		return I40IW_ERR_NO_MEMORY;

	iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
	dev->hmc_info = &iwdev->hw.hmc;
	dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
				       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
	if (status)
		goto error;
	info.fpm_query_buf_pa = mem.pa;
	info.fpm_query_buf = mem.va;
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
				       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
	if (status)
		goto error;
	info.fpm_commit_buf_pa = mem.pa;
	info.fpm_commit_buf = mem.va;
	info.hmc_fn_id = ldev->fid;
	info.is_pf = (ldev->ftype) ? false : true;
	info.bar0 = ldev->hw_addr;
	info.hw = &iwdev->hw;
	info.debug_mask = debug;
	l2params.mss =
		(ldev->params.mtu) ? ldev->params.mtu - I40IW_MTU_TO_MSS : I40IW_DEFAULT_MSS;
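	/*
	 * If the user priorities map to more than one distinct qset
	 * handle, the port is running with DCB enabled.
	 */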
	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
		qset = ldev->params.qos.prio_qos[i].qs_handle;
		l2params.qs_handle_list[i] = qset;
		if (last_qset == I40IW_NO_QSET)
			last_qset = qset;
		else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
			iwdev->dcb = true;
	}
	i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
	info.exception_lan_queue = 1;
	info.vchnl_send = i40iw_virtchnl_send;
	status = i40iw_device_init(&iwdev->sc_dev, &info);
	if (status)
		goto error;
	memset(&vsi_info, 0, sizeof(vsi_info));
	vsi_info.dev = &iwdev->sc_dev;
	vsi_info.back_vsi = (void *)iwdev;
	vsi_info.params = &l2params;
	i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);

	if (dev->is_pf) {
		memset(&stats_info, 0, sizeof(stats_info));
		stats_info.fcn_id = ldev->fid;
		stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
		if (!stats_info.pestat) {
			status = I40IW_ERR_NO_MEMORY;
			goto error;
		}
		stats_info.stats_initialize = true;
		if (stats_info.pestat)
			i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
	}
	return status;
error:
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
	return status;
}
/**
 * i40iw_register_notifiers - register tcp ip notifiers
 */
static void i40iw_register_notifiers(void)
{
	register_inetaddr_notifier(&i40iw_inetaddr_notifier);
	register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
	register_netevent_notifier(&i40iw_net_notifier);
}

/**
 * i40iw_unregister_notifiers - unregister tcp ip notifiers
 */
static void i40iw_unregister_notifiers(void)
{
	unregister_netevent_notifier(&i40iw_net_notifier);
	unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
	unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
}
/**
 * i40iw_save_msix_info - copy msix vector information to iwarp device
 * @iwdev: iwarp device
 * @ldev: lan device information
 *
 * Allocate iwdev msix table and copy the ldev msix info to the table
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
						   struct i40e_info *ldev)
{
	struct i40e_qvlist_info *iw_qvlist;
	struct i40e_qv_info *iw_qvinfo;
	u32 ceq_idx;
	u32 i;
	u32 size;

	if (!ldev->msix_count) {
		i40iw_pr_err("No MSI-X vectors\n");
		return I40IW_ERR_CONFIG;
	}

	iwdev->msix_count = ldev->msix_count;
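	/*
	 * One allocation backs both the msix table and the qvlist info;
	 * struct i40e_qvlist_info already embeds the first i40e_qv_info
	 * element, so only msix_count - 1 additional elements are added.
	 */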
	size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
	size += sizeof(struct i40e_qvlist_info);
	size += sizeof(struct i40e_qv_info) * (iwdev->msix_count - 1);
	iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);
	if (!iwdev->iw_msixtbl)
		return I40IW_ERR_NO_MEMORY;

	iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
	iw_qvlist = iwdev->iw_qvlist;
	iw_qvinfo = iw_qvlist->qv_info;
	iw_qvlist->num_vectors = iwdev->msix_count;
	if (iwdev->msix_count <= num_online_cpus())
		iwdev->msix_shared = true;
	for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
		iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
		iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
		iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
		if (i == 0) {
			iw_qvinfo->aeq_idx = 0;
			if (iwdev->msix_shared)
				iw_qvinfo->ceq_idx = ceq_idx++;
			else
				iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
		} else {
			iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
			iw_qvinfo->ceq_idx = ceq_idx++;
		}
		iw_qvinfo->itr_idx = 3;
		iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
	}
	return 0;
}
/**
 * i40iw_deinit_device - clean up the device resources
 * @iwdev: iwarp device
 *
 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
 * destroy the device queues and free the pble and the hmc objects
 */
static void i40iw_deinit_device(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_pr_info("state = %d\n", iwdev->init_state);
	if (iwdev->param_wq)
		destroy_workqueue(iwdev->param_wq);
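	/*
	 * Tear down in the reverse order of initialization: init_state
	 * records how far setup progressed, and each case falls through
	 * to undo every earlier stage as well.
	 */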
	switch (iwdev->init_state) {
	case RDMA_DEV_REGISTERED:
		iwdev->iw_status = 0;
		i40iw_port_ibevent(iwdev);
		i40iw_destroy_rdma_device(iwdev->iwibdev);
		/* fallthrough */
	case IP_ADDR_REGISTERED:
		if (!iwdev->reset)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
		/* fallthrough */
	case PBLE_CHUNK_MEM:
		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
		/* fallthrough */
	case CEQ_CREATED:
		i40iw_dele_ceqs(iwdev);
		/* fallthrough */
	case AEQ_CREATED:
		i40iw_destroy_aeq(iwdev);
		/* fallthrough */
	case IEQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
		/* fallthrough */
	case ILQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
		/* fallthrough */
	case CCQ_CREATED:
		i40iw_destroy_ccq(iwdev);
		/* fallthrough */
	case HMC_OBJS_CREATED:
		i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
		/* fallthrough */
	case CQP_CREATED:
		i40iw_destroy_cqp(iwdev, true);
		/* fallthrough */
	case INITIAL_STATE:
		i40iw_cleanup_cm_core(&iwdev->cm_core);
		if (iwdev->vsi.pestat) {
			i40iw_vsi_stats_free(&iwdev->vsi);
			kfree(iwdev->vsi.pestat);
		}
		i40iw_del_init_mem(iwdev);
		break;
	case INVALID_STATE:
		/* fallthrough */
	default:
		i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
		break;
	}

	i40iw_del_handler(i40iw_find_i40e_handler(ldev));
	kfree(iwdev->hdl);
}

/**
 * i40iw_setup_init_state - set up the initial device struct
 * @hdl: handler for iwarp device - one per instance
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Initialize the iwarp device and its hdl information
 * using the ldev and client information
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
						     struct i40e_info *ldev,
						     struct i40e_client *client)
{
	struct i40iw_device *iwdev = &hdl->device;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	memcpy(&hdl->ldev, ldev, sizeof(*ldev));
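
	/* Resource profile 1 appears to be unsupported; promote it to 2. */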
	if (resource_profile == 1)
		resource_profile = 2;

	iwdev->mpa_version = mpa_version;
	iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
	    (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
	    I40IW_HMC_PROFILE_DEFAULT;
	iwdev->max_rdma_vfs =
	    (iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
	iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
	iwdev->netdev = ldev->netdev;
	hdl->client = client;
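
	/* ftype is nonzero for a virtual function; PF and VF expose the
	 * doorbell at different offsets within BAR 0.
	 */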
	if (!ldev->ftype)
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
	else
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;

	status = i40iw_save_msix_info(iwdev, ldev);
	if (status)
		return status;
	iwdev->hw.dev_context = (void *)ldev->pcidev;
	iwdev->hw.hw_addr = ldev->hw_addr;
	status = i40iw_allocate_dma_mem(&iwdev->hw,
					&iwdev->obj_mem, 8192, 4096);
	if (status)
		goto exit;
	iwdev->obj_next = iwdev->obj_mem;

	init_waitqueue_head(&iwdev->vchnl_waitq);
	init_waitqueue_head(&dev->vf_reqs);
	init_waitqueue_head(&iwdev->close_wq);

	status = i40iw_initialize_dev(iwdev, ldev);
exit:
	if (status) {
		kfree(iwdev->iw_msixtbl);
		i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
		iwdev->iw_msixtbl = NULL;
	}
	return status;
}

/**
 * i40iw_get_used_rsrc - determine resources used internally
 * @iwdev: iwarp device
 *
 * Called after internal allocations
 */
static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
{
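	/*
	 * Resource ids are handed out contiguously from 0 during init, so
	 * the first clear bit in each bitmap equals the count used internally.
	 */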
	iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
	iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
	iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
	iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
}

/**
 * i40iw_open - client interface operation open for iwarp/uda device
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Called by the lan driver during the processing of client register
 * Create device resources, set up queues, pble and hmc objects and
 * register the device with the ib verbs interface
 * Return 0 if successful, otherwise return error
 */
static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
{
	struct i40iw_device *iwdev;
	struct i40iw_sc_dev *dev;
	enum i40iw_status_code status;
	struct i40iw_handler *hdl;

	hdl = i40iw_find_netdev(ldev->netdev);
	if (hdl)
		return 0;

	hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
	if (!hdl)
		return -ENOMEM;
	iwdev = &hdl->device;
	iwdev->hdl = hdl;
	dev = &iwdev->sc_dev;
	i40iw_setup_cm_core(iwdev);

	dev->back_dev = (void *)iwdev;
	iwdev->ldev = &hdl->ldev;
	iwdev->client = client;
	mutex_init(&iwdev->pbl_mutex);
	i40iw_add_handler(hdl);
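
	/*
	 * The do { } while (0) block serves as a single-exit error path: any
	 * failure breaks out and i40iw_deinit_device() unwinds based on the
	 * last recorded init_state.
	 */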
	do {
		status = i40iw_setup_init_state(hdl, ldev, client);
		if (status)
			break;
		iwdev->init_state = INITIAL_STATE;
		if (dev->is_pf)
			i40iw_wait_pe_ready(dev->hw);
		status = i40iw_create_cqp(iwdev);
		if (status)
			break;
		iwdev->init_state = CQP_CREATED;
		status = i40iw_hmc_setup(iwdev);
		if (status)
			break;
		status = i40iw_create_ccq(iwdev);
		if (status)
			break;
		iwdev->init_state = CCQ_CREATED;
		status = i40iw_initialize_ilq(iwdev);
		if (status)
			break;
		iwdev->init_state = ILQ_CREATED;
		status = i40iw_initialize_ieq(iwdev);
		if (status)
			break;
		iwdev->init_state = IEQ_CREATED;
		status = i40iw_setup_aeq(iwdev);
		if (status)
			break;
		iwdev->init_state = AEQ_CREATED;
		status = i40iw_setup_ceqs(iwdev, ldev);
		if (status)
			break;
		iwdev->init_state = CEQ_CREATED;
		status = i40iw_initialize_hw_resources(iwdev);
		if (status)
			break;
		i40iw_get_used_rsrc(iwdev);
		dev->ccq_ops->ccq_arm(dev->ccq);
		status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
		if (status)
			break;
		iwdev->init_state = PBLE_CHUNK_MEM;
		iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
		status = i40iw_add_mac_ip(iwdev);
		if (status)
			break;
		iwdev->init_state = IP_ADDR_REGISTERED;
		if (i40iw_register_rdma_device(iwdev)) {
			i40iw_pr_err("register rdma device fail\n");
			break;
		}
		iwdev->init_state = RDMA_DEV_REGISTERED;
		iwdev->iw_status = 1;
		i40iw_port_ibevent(iwdev);
		iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
		if (!iwdev->param_wq)
			break;
		i40iw_pr_info("i40iw_open completed\n");
		return 0;
	} while (0);

	i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
	i40iw_deinit_device(iwdev);
	return -ERESTART;
}

/**
 * i40iw_l2params_worker - worker for l2 params change
 * @work: work pointer for l2 params
 */
static void i40iw_l2params_worker(struct work_struct *work)
{
	struct l2params_work *dwork =
	    container_of(work, struct l2params_work, work);
	struct i40iw_device *iwdev = dwork->iwdev;

	i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
	atomic_dec(&iwdev->params_busy);
	kfree(dwork);	/* free the containing l2params_work, not just the member */
}

/**
 * i40iw_l2param_change - handle qs handles for qos and mss change
 * @ldev: lan device information
 * @client: client for parameter change
 * @params: new parameters from L2
 */
static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
				 struct i40e_params *params)
{
	struct i40iw_handler *hdl;
	struct i40iw_l2params *l2params;
	struct l2params_work *work;
	struct i40iw_device *iwdev;
	int i;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;
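
	/* Only one l2 params change is processed at a time; drop the event
	 * if the previous work item is still pending. GFP_ATOMIC below
	 * suggests this callback may run in atomic context.
	 */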
	if (atomic_read(&iwdev->params_busy))
		return;
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;

	atomic_inc(&iwdev->params_busy);
	work->iwdev = iwdev;
	l2params = &work->l2params;
	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
		l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;

	l2params->mss = (params->mtu) ? params->mtu - I40IW_MTU_TO_MSS : iwdev->vsi.mss;

	INIT_WORK(&work->work, i40iw_l2params_worker);
	queue_work(iwdev->param_wq, &work->work);
}

/**
 * i40iw_close - client interface operation close for iwarp/uda device
 * @ldev: lan device information
 * @client: client to close
 * @reset: true if called before reset
 *
 * Called by the lan driver during the processing of client unregister
 * Destroy and clean up the driver resources
 */
static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
{
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;
	iwdev->closing = true;

	if (reset)
		iwdev->reset = true;

	i40iw_cm_disconnect_all(iwdev);
	destroy_workqueue(iwdev->virtchnl_wq);
	i40iw_deinit_device(iwdev);
}

/**
 * i40iw_vf_reset - process VF reset
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Called when a VF is reset by the PF
 * Destroy and clean up the VF resources
 */
static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_vfdev *tmp_vfdev;
	unsigned int i;
	unsigned long flags;
	struct i40iw_device *iwdev;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	dev = &hdl->device.sc_dev;
	iwdev = (struct i40iw_device *)dev->back_dev;

	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
			continue;
		/* free all resources allocated on behalf of vf */
		tmp_vfdev = dev->vf_dev[i];
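		/* Clear the slot under the stats lock, presumably so a
		 * concurrent stats gatherer never sees a half-torn-down VF.
		 */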
		spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
		dev->vf_dev[i] = NULL;
		spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
		i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
		/* remove vf hmc function */
		memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
		hmc_fcn_info.vf_id = vf_id;
		hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
		hmc_fcn_info.free_fcn = true;
		i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
		/* free vf_dev */
		vf_dev_mem.va = tmp_vfdev;
		vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
				  sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
		break;
	}
}

/**
 * i40iw_vf_enable - enable a number of VFs
 * @ldev: lan device information
 * @client: client interface instance
 * @num_vfs: number of VFs for the PF
 *
 * Called when the number of VFs changes
 */
static void i40iw_vf_enable(struct i40e_info *ldev,
			    struct i40e_client *client,
			    u32 num_vfs)
{
	struct i40iw_handler *hdl;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
		hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
	else
		hdl->device.max_enabled_vfs = num_vfs;
}

/**
 * i40iw_vf_capable - check if VF capable
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Return 1 if a VF slot is available or if VF is already RDMA enabled
 * Return 0 otherwise
 */
static int i40iw_vf_capable(struct i40e_info *ldev,
			    struct i40e_client *client,
			    u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	unsigned int i;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return 0;

	dev = &hdl->device.sc_dev;

	for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
			return 1;
	}

	return 0;
}

/**
 * i40iw_virtchnl_receive - receive a message through the virtual channel
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id associated with the message
 * @msg: message buffer pointer
 * @len: length of the message
 *
 * Invoke virtual channel receive operation for the given msg
 * Return 0 if successful, otherwise return error
 */
static int i40iw_virtchnl_receive(struct i40e_info *ldev,
				  struct i40e_client *client,
				  u32 vf_id,
				  u8 *msg,
				  u16 len)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_device *iwdev;
	int ret_code = I40IW_NOT_SUPPORTED;

	if (!len || !msg)
		return I40IW_ERR_PARAM;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return I40IW_ERR_PARAM;

	dev = &hdl->device.sc_dev;
	iwdev = dev->back_dev;
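
	/* On a VF, an incoming message is the PF's reply to an outstanding
	 * request: drop the pending-message count and wake the waiter.
	 */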
	if (dev->vchnl_if.vchnl_recv) {
		ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
		if (!dev->is_pf) {
			atomic_dec(&iwdev->vchnl_msgs);
			wake_up(&iwdev->vchnl_waitq);
		}
	}
	return ret_code;
}

/**
 * i40iw_vf_clear_to_send - wait to send virtual channel message
 * @dev: iwarp device
 *
 * Wait until the virtual channel is clear
 * before sending the next message
 *
 * Returns false if error
 * Returns true if clear to send
 */
bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev;
	wait_queue_entry_t wait;

	iwdev = dev->back_dev;

	if (!wq_has_sleeper(&dev->vf_reqs) &&
	    (atomic_read(&iwdev->vchnl_msgs) == 0))
		return true; /* virtual channel is clear */
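
	/* Queue an exclusive wait entry so concurrent senders line up
	 * behind this one while outstanding messages drain.
	 */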
	init_wait(&wait);
	add_wait_queue_exclusive(&dev->vf_reqs, &wait);

	if (!wait_event_timeout(dev->vf_reqs,
				(atomic_read(&iwdev->vchnl_msgs) == 0),
				I40IW_VCHNL_EVENT_TIMEOUT))
		dev->vchnl_up = false;

	remove_wait_queue(&dev->vf_reqs, &wait);

	return dev->vchnl_up;
}

/**
 * i40iw_virtchnl_send - send a message through the virtual channel
 * @dev: iwarp device
 * @vf_id: virtual function id associated with the message
 * @msg: virtual channel message buffer pointer
 * @len: length of the message
 *
 * Invoke virtual channel send operation for the given msg
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
						  u32 vf_id,
						  u8 *msg,
						  u16 len)
{
	struct i40iw_device *iwdev;
	struct i40e_info *ldev;

	if (!dev || !dev->back_dev)
		return I40IW_ERR_BAD_PTR;

	iwdev = dev->back_dev;
	ldev = iwdev->ldev;

	if (ldev && ldev->ops && ldev->ops->virtchnl_send)
		return ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
	return I40IW_ERR_BAD_PTR;
}

/* client interface functions */
static const struct i40e_client_ops i40e_ops = {
	.open = i40iw_open,
	.close = i40iw_close,
	.l2_param_change = i40iw_l2param_change,
	.virtchnl_receive = i40iw_virtchnl_receive,
	.vf_reset = i40iw_vf_reset,
	.vf_enable = i40iw_vf_enable,
	.vf_capable = i40iw_vf_capable
};

/**
 * i40iw_init_module - driver initialization function
 *
 * First function to call when the driver is loaded
 * Register the driver as i40e client and port mapper client
 */
static int __init i40iw_init_module(void)
{
	int ret;

	memset(&i40iw_client, 0, sizeof(i40iw_client));
	i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
	i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
	i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
	i40iw_client.ops = &i40e_ops;
	memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
	i40iw_client.type = I40E_CLIENT_IWARP;
	spin_lock_init(&i40iw_handler_lock);
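
	/* Notifiers are registered unconditionally; i40iw_exit_module()
	 * unregisters them regardless of whether client registration
	 * succeeded.
	 */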
	ret = i40e_register_client(&i40iw_client);
	i40iw_register_notifiers();

	return ret;
}

/**
 * i40iw_exit_module - driver exit clean up function
 *
 * The function is called just before the driver is unloaded
 * Unregister the driver as i40e client and port mapper client
 */
static void __exit i40iw_exit_module(void)
{
	i40iw_unregister_notifiers();
	i40e_unregister_client(&i40iw_client);
}

module_init(i40iw_init_module);
module_exit(i40iw_exit_module);