eq.c
/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"
enum {
        MLX4_IRQNAME_SIZE = 32
};

enum {
        MLX4_NUM_ASYNC_EQE = 0x100,
        MLX4_NUM_SPARE_EQE = 0x80,
        MLX4_EQ_ENTRY_SIZE = 0x20
};

#define MLX4_EQ_STATUS_OK          ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW           ( 0 << 24)
#define MLX4_EQ_OWNER_HW           ( 1 << 24)
#define MLX4_EQ_FLAG_EC            ( 1 << 18)
#define MLX4_EQ_FLAG_OI            ( 1 << 17)
#define MLX4_EQ_STATE_ARMED        ( 9 << 8)
#define MLX4_EQ_STATE_FIRED        (10 << 8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX4_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX4_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX4_EVENT_TYPE_ECC_DETECT)         | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)          | \
                               (1ull << MLX4_EVENT_TYPE_CMD)                | \
                               (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)       | \
                               (1ull << MLX4_EVENT_TYPE_FLR_EVENT)          | \
                               (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))

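/* Ring the EQ doorbell: publish the current consumer index to the HCA and,
 * when req_not is set, request another event interrupt for this EQ. */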
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
        __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
                                               req_not << 31),
                     eq->doorbell);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

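/* EQE ownership is tracked with a single bit that hardware toggles on each
 * pass through the (power-of-two sized) queue; an entry belongs to software
 * when its owner bit matches the wrap state of the consumer index. */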
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
        unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;

        return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
        struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);

        return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
        struct mlx4_eqe *eqe =
                &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];

        return (!!(eqe->owner & 0x80) ^
                !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
                eqe : NULL;
}

void mlx4_gen_slave_eqe(struct work_struct *work)
{
        struct mlx4_mfunc_master_ctx *master =
                container_of(work, struct mlx4_mfunc_master_ctx,
                             slave_event_work);
        struct mlx4_mfunc *mfunc =
                container_of(master, struct mlx4_mfunc, master);
        struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
        struct mlx4_dev *dev = &priv->dev;
        struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
        struct mlx4_eqe *eqe;
        u8 slave;
        int i;

        for (eqe = next_slave_event_eqe(slave_eq); eqe;
             eqe = next_slave_event_eqe(slave_eq)) {
                slave = eqe->slave_id;

                /* All active slaves need to receive the event */
                if (slave == ALL_SLAVES) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (i != dev->caps.function &&
                                    master->slave_state[i].active)
                                        if (mlx4_GEN_EQE(dev, i, eqe))
                                                mlx4_warn(dev, "Failed to generate event "
                                                          "for slave %d\n", i);
                        }
                } else {
                        if (mlx4_GEN_EQE(dev, slave, eqe))
                                mlx4_warn(dev, "Failed to generate event "
                                          "for slave %d\n", slave);
                }
                ++slave_eq->cons;
        }
}

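/* Post an event on the master's software event queue for @slave and kick the
 * work queue entry that forwards it via GEN_EQE (see mlx4_gen_slave_eqe() above). */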
static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
        struct mlx4_eqe *s_eqe =
                &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];

        if ((!!(s_eqe->owner & 0x80)) ^
            (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
                mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
                          "No free EQE on slave events queue\n", slave);
                return;
        }

        memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
        s_eqe->slave_id = slave;
        /* ensure all information is written before setting the ownership bit */
        wmb();
        s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
        ++slave_eq->prod;

        queue_work(priv->mfunc.master.comm_wq,
                   &priv->mfunc.master.slave_event_work);
}

static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
                             struct mlx4_eqe *eqe)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *s_slave =
                &priv->mfunc.master.slave_state[slave];

        if (!s_slave->active) {
                /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
                return;
        }

        slave_event(dev, slave, eqe);
}

void mlx4_master_handle_slave_flr(struct work_struct *work)
{
        struct mlx4_mfunc_master_ctx *master =
                container_of(work, struct mlx4_mfunc_master_ctx,
                             slave_flr_event_work);
        struct mlx4_mfunc *mfunc =
                container_of(master, struct mlx4_mfunc, master);
        struct mlx4_priv *priv =
                container_of(mfunc, struct mlx4_priv, mfunc);
        struct mlx4_dev *dev = &priv->dev;
        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
        int i;
        int err;

        mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

        for (i = 0; i < dev->num_slaves; i++) {
                if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
                        mlx4_dbg(dev, "mlx4_handle_slave_flr: "
                                 "clean slave: %d\n", i);

                        mlx4_delete_all_resources_for_slave(dev, i);
                        /* return the slave to running mode */
                        spin_lock(&priv->mfunc.master.slave_state_lock);
                        slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
                        slave_state[i].is_slave_going_down = 0;
                        spin_unlock(&priv->mfunc.master.slave_state_lock);
                        /* notify the FW: */
                        err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
                                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                        if (err)
                                mlx4_warn(dev, "Failed to notify FW on "
                                          "FLR done (slave:%d)\n", i);
                }
        }
}

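/* Drain all software-owned EQEs from @eq, dispatching each event to the CQ,
 * QP, SRQ, command or SR-IOV handling code, and update the consumer index. */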
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_eqe *eqe;
        int cqn;
        int eqes_found = 0;
        int set_ci = 0;
        int port;
        int slave = 0;
        int ret;
        u32 flr_slave;
        u8 update_slave_state;
        int i;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                rmb();

                switch (eqe->type) {
                case MLX4_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                        mlx4_cq_completion(dev, cqn);
                        break;

                case MLX4_EVENT_TYPE_PATH_MIG:
                case MLX4_EVENT_TYPE_COMM_EST:
                case MLX4_EVENT_TYPE_SQ_DRAINED:
                case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
                case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
                        mlx4_dbg(dev, "event %d arrived\n", eqe->type);
                        if (mlx4_is_master(dev)) {
                                /* forward only to slave owning the QP */
                                ret = mlx4_get_slave_from_resource_id(dev,
                                                RES_QP,
                                                be32_to_cpu(eqe->event.qp.qpn)
                                                & 0xffffff, &slave);
                                if (ret && ret != -ENOENT) {
                                        mlx4_dbg(dev, "QP event %02x(%02x) on "
                                                 "EQ %d at index %u: could "
                                                 "not get slave id (%d)\n",
                                                 eqe->type, eqe->subtype,
                                                 eq->eqn, eq->cons_index, ret);
                                        break;
                                }

                                if (!ret && slave != dev->caps.function) {
                                        mlx4_slave_event(dev, slave, eqe);
                                        break;
                                }
                        }
                        mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
                                      0xffffff, eqe->type);
                        break;

                case MLX4_EVENT_TYPE_SRQ_LIMIT:
                        mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
                                  __func__);
                        /* fall through */
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
                        if (mlx4_is_master(dev)) {
                                /* forward only to slave owning the SRQ */
                                ret = mlx4_get_slave_from_resource_id(dev,
                                                RES_SRQ,
                                                be32_to_cpu(eqe->event.srq.srqn)
                                                & 0xffffff,
                                                &slave);
                                if (ret && ret != -ENOENT) {
                                        mlx4_warn(dev, "SRQ event %02x(%02x) "
                                                  "on EQ %d at index %u: could"
                                                  " not get slave id (%d)\n",
                                                  eqe->type, eqe->subtype,
                                                  eq->eqn, eq->cons_index, ret);
                                        break;
                                }
                                mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
                                          " event: %02x(%02x)\n", __func__,
                                          slave,
                                          be32_to_cpu(eqe->event.srq.srqn),
                                          eqe->type, eqe->subtype);

                                if (!ret && slave != dev->caps.function) {
                                        mlx4_warn(dev, "%s: sending event "
                                                  "%02x(%02x) to slave:%d\n",
                                                  __func__, eqe->type,
                                                  eqe->subtype, slave);
                                        mlx4_slave_event(dev, slave, eqe);
                                        break;
                                }
                        }
                        mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
                                       0xffffff, eqe->type);
                        break;

                case MLX4_EVENT_TYPE_CMD:
                        mlx4_cmd_event(dev,
                                       be16_to_cpu(eqe->event.cmd.token),
                                       eqe->event.cmd.status,
                                       be64_to_cpu(eqe->event.cmd.out_param));
                        break;

                case MLX4_EVENT_TYPE_PORT_CHANGE:
                        port = be32_to_cpu(eqe->event.port_change.port) >> 28;
                        if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
                                mlx4_dispatch_event(dev,
                                                    MLX4_DEV_EVENT_PORT_DOWN,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 1;
                                if (mlx4_is_master(dev))
                                        /* change the state of all slaves' ports to down */
                                        for (i = 0; i < dev->num_slaves; i++) {
                                                mlx4_dbg(dev, "%s: Sending "
                                                         "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
                                                         " to slave: %d, port:%d\n",
                                                         __func__, i, port);
                                                if (i == dev->caps.function)
                                                        continue;
                                                mlx4_slave_event(dev, i, eqe);
                                        }
                        } else {
                                mlx4_dispatch_event(dev,
                                                    MLX4_DEV_EVENT_PORT_UP,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 0;

                                if (mlx4_is_master(dev)) {
                                        for (i = 0; i < dev->num_slaves; i++) {
                                                if (i == dev->caps.function)
                                                        continue;
                                                mlx4_slave_event(dev, i, eqe);
                                        }
                                }
                        }
                        break;

                case MLX4_EVENT_TYPE_CQ_ERROR:
                        mlx4_warn(dev, "CQ %s on CQN %06x\n",
                                  eqe->event.cq_err.syndrome == 1 ?
                                  "overrun" : "access violation",
                                  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
                        if (mlx4_is_master(dev)) {
                                ret = mlx4_get_slave_from_resource_id(dev,
                                                RES_CQ,
                                                be32_to_cpu(eqe->event.cq_err.cqn)
                                                & 0xffffff, &slave);
                                if (ret && ret != -ENOENT) {
                                        mlx4_dbg(dev, "CQ event %02x(%02x) on "
                                                 "EQ %d at index %u: could "
                                                 "not get slave id (%d)\n",
                                                 eqe->type, eqe->subtype,
                                                 eq->eqn, eq->cons_index, ret);
                                        break;
                                }

                                if (!ret && slave != dev->caps.function) {
                                        mlx4_slave_event(dev, slave, eqe);
                                        break;
                                }
                        }
                        mlx4_cq_event(dev,
                                      be32_to_cpu(eqe->event.cq_err.cqn)
                                      & 0xffffff,
                                      eqe->type);
                        break;

                case MLX4_EVENT_TYPE_EQ_OVERFLOW:
                        mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;

                case MLX4_EVENT_TYPE_COMM_CHANNEL:
                        if (!mlx4_is_master(dev)) {
                                mlx4_warn(dev, "Received comm channel event "
                                          "for non master device\n");
                                break;
                        }
                        memcpy(&priv->mfunc.master.comm_arm_bit_vector,
                               eqe->event.comm_channel_arm.bit_vec,
                               sizeof eqe->event.comm_channel_arm.bit_vec);
                        queue_work(priv->mfunc.master.comm_wq,
                                   &priv->mfunc.master.comm_work);
                        break;

                case MLX4_EVENT_TYPE_FLR_EVENT:
                        flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
                        if (!mlx4_is_master(dev)) {
                                mlx4_warn(dev, "Non-master function received "
                                          "FLR event\n");
                                break;
                        }

                        mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

                        if (flr_slave >= dev->num_slaves) {
                                mlx4_warn(dev,
                                          "Got FLR for unknown function: %d\n",
                                          flr_slave);
                                update_slave_state = 0;
                        } else
                                update_slave_state = 1;

                        spin_lock(&priv->mfunc.master.slave_state_lock);
                        if (update_slave_state) {
                                priv->mfunc.master.slave_state[flr_slave].active = false;
                                priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
                                priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
                        }
                        spin_unlock(&priv->mfunc.master.slave_state_lock);
                        queue_work(priv->mfunc.master.comm_wq,
                                   &priv->mfunc.master.slave_flr_event_work);
                        break;

                case MLX4_EVENT_TYPE_FATAL_WARNING:
                        if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
                                if (mlx4_is_master(dev))
                                        for (i = 0; i < dev->num_slaves; i++) {
                                                mlx4_dbg(dev, "%s: Sending "
                                                         "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
                                                         " to slave: %d\n", __func__, i);
                                                if (i == dev->caps.function)
                                                        continue;
                                                mlx4_slave_event(dev, i, eqe);
                                        }
                                mlx4_err(dev, "Temperature Threshold was reached! "
                                         "Threshold: %d celsius degrees; "
                                         "Current Temperature: %d\n",
                                         be16_to_cpu(eqe->event.warming.warning_threshold),
                                         be16_to_cpu(eqe->event.warming.current_temperature));
                        } else
                                mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), "
                                          "subtype %02x on EQ %d at index %u. owner=%x, "
                                          "nent=0x%x, slave=%x, ownership=%s\n",
                                          eqe->type, eqe->subtype, eq->eqn,
                                          eq->cons_index, eqe->owner, eq->nent,
                                          eqe->slave_id,
                                          !!(eqe->owner & 0x80) ^
                                          !!(eq->cons_index & eq->nent) ? "HW" : "SW");

                        break;

                case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
                case MLX4_EVENT_TYPE_ECC_DETECT:
                default:
                        mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
                                  "index %u. owner=%x, nent=0x%x, slave=%x, "
                                  "ownership=%s\n",
                                  eqe->type, eqe->subtype, eq->eqn,
                                  eq->cons_index, eqe->owner, eq->nent,
                                  eqe->slave_id,
                                  !!(eqe->owner & 0x80) ^
                                  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /*
                 * The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX4_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
                        eq_set_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_set_ci(eq, 1);

        return eqes_found;
}

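/* Legacy (INTx) interrupt handler: clear the interrupt and poll every EQ,
 * since a single vector serves all of them. */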
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
        struct mlx4_dev *dev = dev_ptr;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int work = 0;
        int i;

        writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

        return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
        struct mlx4_eq *eq = eq_ptr;
        struct mlx4_dev *dev = eq->dev;

        mlx4_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
                        struct mlx4_vhcr *vhcr,
                        struct mlx4_cmd_mailbox *inbox,
                        struct mlx4_cmd_mailbox *outbox,
                        struct mlx4_cmd_info *cmd)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_event_eq_info *event_eq =
                priv->mfunc.master.slave_state[slave].event_eq;
        u32 in_modifier = vhcr->in_modifier;
        u32 eqn = in_modifier & 0x1FF;
        u64 in_param = vhcr->in_param;
        int err = 0;
        int i;

        if (slave == dev->caps.function)
                err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
                               0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
                               MLX4_CMD_NATIVE);
        if (!err)
                for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
                        if (in_param & (1LL << i))
                                event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;

        return err;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
                       int eq_num)
{
        return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
                        0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
                        MLX4_CMD_WRAPPED);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
                        MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
                        MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
                            0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
                            MLX4_CMD_WRAPPED);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
        /*
         * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
         * we need to map, take the difference of highest index and
         * the lowest index we'll use and add 1.
         */
        return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
                dev->caps.comp_pool) / 4 - dev->caps.reserved_eqs / 4 + 1;
}

static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int index;

        index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

        if (!priv->eq_table.uar_map[index]) {
                priv->eq_table.uar_map[index] =
                        ioremap(pci_resource_start(dev->pdev, 2) +
                                ((eq->eqn / 4) << PAGE_SHIFT),
                                PAGE_SIZE);
                if (!priv->eq_table.uar_map[index]) {
                        mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
                                 eq->eqn);
                        return NULL;
                }
        }

        return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

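/* Allocate an EQ with at least @nent entries: allocate the EQE pages, an EQ
 * number and a doorbell, write the MTT entries, and hand the queue over to
 * firmware with SW2HW_EQ. */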
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
                          u8 intr, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_eq_context *eq_context;
        int npages;
        u64 *dma_list = NULL;
        dma_addr_t t;
        u64 mtt_addr;
        int err = -ENOMEM;
        int i;

        eq->dev = dev;
        eq->nent = roundup_pow_of_two(max(nent, 2));
        npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

        eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                GFP_KERNEL);
        if (!eq->page_list)
                goto err_out;

        for (i = 0; i < npages; ++i)
                eq->page_list[i].buf = NULL;

        dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
        if (!dma_list)
                goto err_out_free;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                goto err_out_free;
        eq_context = mailbox->buf;

        for (i = 0; i < npages; ++i) {
                eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
                                                          PAGE_SIZE, &t, GFP_KERNEL);
                if (!eq->page_list[i].buf)
                        goto err_out_free_pages;

                dma_list[i] = t;
                eq->page_list[i].map = t;

                memset(eq->page_list[i].buf, 0, PAGE_SIZE);
        }

        eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
        if (eq->eqn == -1)
                goto err_out_free_pages;

        eq->doorbell = mlx4_get_eq_uar(dev, eq);
        if (!eq->doorbell) {
                err = -ENOMEM;
                goto err_out_free_eq;
        }

        err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
        if (err)
                goto err_out_free_eq;

        err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
        if (err)
                goto err_out_free_mtt;

        memset(eq_context, 0, sizeof *eq_context);
        eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
                                        MLX4_EQ_STATE_ARMED);
        eq_context->log_eq_size = ilog2(eq->nent);
        eq_context->intr = intr;
        eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

        mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
        eq_context->mtt_base_addr_h = mtt_addr >> 32;
        eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

        err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
        if (err) {
                mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
                goto err_out_free_mtt;
        }

        kfree(dma_list);
        mlx4_free_cmd_mailbox(dev, mailbox);

        eq->cons_index = 0;

        return err;

err_out_free_mtt:
        mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
        for (i = 0; i < npages; ++i)
                if (eq->page_list[i].buf)
                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                          eq->page_list[i].buf,
                                          eq->page_list[i].map);

        mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
        kfree(eq->page_list);
        kfree(dma_list);

err_out:
        return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
                         struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        int err;
        int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
        int i;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return;

        err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
        if (err)
                mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

        if (0) {
                mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
                for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
                        if (i % 4 == 0)
                                pr_cont("[%02x] ", i * 4);
                        pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
                        if ((i + 1) % 4 == 0)
                                pr_cont("\n");
                }
        }

        mlx4_mtt_cleanup(dev, &eq->mtt);
        for (i = 0; i < npages; ++i)
                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                  eq->page_list[i].buf,
                                  eq->page_list[i].map);

        kfree(eq->page_list);
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
        mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
        struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, vec;

        if (eq_table->have_irq)
                free_irq(dev->pdev->irq, dev);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                if (eq_table->eq[i].have_irq) {
                        free_irq(eq_table->eq[i].irq, eq_table->eq + i);
                        eq_table->eq[i].have_irq = 0;
                }

        for (i = 0; i < dev->caps.comp_pool; i++) {
                /*
                 * Free the assigned IRQs; all bits should be 0,
                 * but we need to validate.
                 */
                if (priv->msix_ctl.pool_bm & 1ULL << i) {
                        /* No need for locking */
                        vec = dev->caps.num_comp_vectors + 1 + i;
                        free_irq(priv->eq_table.eq[vec].irq,
                                 &priv->eq_table.eq[vec]);
                }
        }

        kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
                                 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
        if (!priv->clr_base) {
                mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
                return -ENOMEM;
        }

        return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
                                    sizeof *priv->eq_table.eq, GFP_KERNEL);
        if (!priv->eq_table.eq)
                return -ENOMEM;

        return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
        kfree(mlx4_priv(dev)->eq_table.eq);
}

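/* Set up the whole EQ table: one completion EQ per vector (plus the pool of
 * extra vectors), one asynchronous EQ, the interrupt handlers, and the MAP_EQ
 * of the asynchronous event mask. */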
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
        int i;

        priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
                                         sizeof *priv->eq_table.uar_map,
                                         GFP_KERNEL);
        if (!priv->eq_table.uar_map) {
                err = -ENOMEM;
                goto err_out_free;
        }

        err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
                               dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
        if (err)
                goto err_out_free;

        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                priv->eq_table.uar_map[i] = NULL;

        if (!mlx4_is_slave(dev)) {
                err = mlx4_map_clr_int(dev);
                if (err)
                        goto err_out_bitmap;

                priv->eq_table.clr_mask =
                        swab32(1 << (priv->eq_table.inta_pin & 31));
                priv->eq_table.clr_int = priv->clr_base +
                        (priv->eq_table.inta_pin < 32 ? 4 : 0);
        }

        priv->eq_table.irq_names =
                kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
                                             dev->caps.comp_pool),
                        GFP_KERNEL);
        if (!priv->eq_table.irq_names) {
                err = -ENOMEM;
                goto err_out_bitmap;
        }

        for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
                err = mlx4_create_eq(dev, dev->caps.num_cqs -
                                          dev->caps.reserved_cqs +
                                          MLX4_NUM_SPARE_EQE,
                                     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
                                     &priv->eq_table.eq[i]);
                if (err) {
                        --i;
                        goto err_out_unmap;
                }
        }

        err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
                             (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
                             &priv->eq_table.eq[dev->caps.num_comp_vectors]);
        if (err)
                goto err_out_comp;

        /* If the pool of additional completion vectors is empty,
         * this loop will not run. */
        for (i = dev->caps.num_comp_vectors + 1;
             i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
                err = mlx4_create_eq(dev, dev->caps.num_cqs -
                                          dev->caps.reserved_cqs +
                                          MLX4_NUM_SPARE_EQE,
                                     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
                                     &priv->eq_table.eq[i]);
                if (err) {
                        --i;
                        goto err_out_unmap;
                }
        }

        if (dev->flags & MLX4_FLAG_MSI_X) {
                const char *eq_name;

                for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
                        if (i < dev->caps.num_comp_vectors) {
                                snprintf(priv->eq_table.irq_names +
                                         i * MLX4_IRQNAME_SIZE,
                                         MLX4_IRQNAME_SIZE,
                                         "mlx4-comp-%d@pci:%s", i,
                                         pci_name(dev->pdev));
                        } else {
                                snprintf(priv->eq_table.irq_names +
                                         i * MLX4_IRQNAME_SIZE,
                                         MLX4_IRQNAME_SIZE,
                                         "mlx4-async@pci:%s",
                                         pci_name(dev->pdev));
                        }

                        eq_name = priv->eq_table.irq_names +
                                  i * MLX4_IRQNAME_SIZE;
                        err = request_irq(priv->eq_table.eq[i].irq,
                                          mlx4_msi_x_interrupt, 0, eq_name,
                                          priv->eq_table.eq + i);
                        if (err)
                                goto err_out_async;

                        priv->eq_table.eq[i].have_irq = 1;
                }
        } else {
                snprintf(priv->eq_table.irq_names,
                         MLX4_IRQNAME_SIZE,
                         DRV_NAME "@pci:%s",
                         pci_name(dev->pdev));
                err = request_irq(dev->pdev->irq, mlx4_interrupt,
                                  IRQF_SHARED, priv->eq_table.irq_names, dev);
                if (err)
                        goto err_out_async;

                priv->eq_table.have_irq = 1;
        }

        err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
        if (err)
                mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                eq_set_ci(&priv->eq_table.eq[i], 1);

        return 0;

err_out_async:
        mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
        i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
        while (i >= 0) {
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);
                --i;
        }
        if (!mlx4_is_slave(dev))
                mlx4_unmap_clr_int(dev);
        mlx4_free_irqs(dev);

err_out_bitmap:
        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
        kfree(priv->eq_table.uar_map);

        return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
                    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

        mlx4_free_irqs(dev);

        for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);

        if (!mlx4_is_slave(dev))
                mlx4_unmap_clr_int(dev);

        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                if (priv->eq_table.uar_map[i])
                        iounmap(priv->eq_table.uar_map[i]);

        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

        kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts on all
 * the irq vectors of the device.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupts(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;
        int err;

        err = mlx4_NOP(dev);
        /* When not in MSI_X, there is only one irq to check */
        if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
                return err;

        /* A loop over all completion vectors, for each vector we will check
         * whether it works by mapping command completions to that vector
         * and performing a NOP command
         */
        for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
                /* Temporarily use polling for command completions */
                mlx4_cmd_use_polling(dev);

                /* Map the new eq to handle all asynchronous events */
                err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
                                  priv->eq_table.eq[i].eqn);
                if (err) {
                        mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
                        mlx4_cmd_use_events(dev);
                        break;
                }

                /* Go back to using events */
                mlx4_cmd_use_events(dev);
                err = mlx4_NOP(dev);
        }

        /* Return to default */
        mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
                    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
        return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);

int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int vec = 0, err = 0, i;

        mutex_lock(&priv->msix_ctl.pool_lock);
        for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
                if (~priv->msix_ctl.pool_bm & 1ULL << i) {
                        priv->msix_ctl.pool_bm |= 1ULL << i;
                        vec = dev->caps.num_comp_vectors + 1 + i;
                        snprintf(priv->eq_table.irq_names +
                                 vec * MLX4_IRQNAME_SIZE,
                                 MLX4_IRQNAME_SIZE, "%s", name);
                        err = request_irq(priv->eq_table.eq[vec].irq,
                                          mlx4_msi_x_interrupt, 0,
                                          &priv->eq_table.irq_names[vec<<5],
                                          priv->eq_table.eq + vec);
                        if (err) {
                                /* clear the allocated bit by flipping it */
                                priv->msix_ctl.pool_bm ^= 1ULL << i;
                                vec = 0;
                                continue;
                                /* we don't want to break here */
                        }
                        eq_set_ci(&priv->eq_table.eq[vec], 1);
                }
        }
        mutex_unlock(&priv->msix_ctl.pool_lock);

        if (vec) {
                *vector = vec;
        } else {
                *vector = 0;
                err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
        }
        return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        /* bm index */
        int i = vec - dev->caps.num_comp_vectors - 1;

        if (likely(i >= 0)) {
                /* Sanity check: make sure we're not trying to free IRQs
                 * belonging to a legacy EQ. */
                mutex_lock(&priv->msix_ctl.pool_lock);
                if (priv->msix_ctl.pool_bm & 1ULL << i) {
                        free_irq(priv->eq_table.eq[vec].irq,
                                 &priv->eq_table.eq[vec]);
                        priv->msix_ctl.pool_bm &= ~(1ULL << i);
                }
                mutex_unlock(&priv->msix_ctl.pool_lock);
        }
}
EXPORT_SYMBOL(mlx4_release_eq);