eq.c

/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>
#include <linux/cpu_rmap.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)	    | \
			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT)	    | \
			       (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
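
/*
 * eq_set_ci() below writes the EQ "set consumer index" doorbell.  From the
 * expression it builds, the big-endian doorbell word is laid out as:
 *
 *	bits  0..23 - consumer index (eq->cons_index & 0xffffff)
 *	bit  31     - request-notification ("arm") flag (req_not)
 *
 * __raw_writel() does no byte swapping and no barrier, hence the explicit
 * cpu_to_be32() and the mb() that keeps the doorbell write ordered.
 */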
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;

	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}
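
/*
 * Ownership scheme: the owner bit of an EQE is toggled on every pass through
 * the queue.  next_eqe_sw() compares that bit with the pass parity of the
 * consumer index (the eq->nent bit): when they match the entry is
 * software-owned and is returned, otherwise it is still hardware-owned and
 * NULL ends the polling loop.  The slave event ring below uses the same
 * toggling scheme with its prod/cons parities.
 */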
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);

	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
	struct mlx4_eqe *eqe =
		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];

	return (!!(eqe->owner & 0x80) ^
		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
		eqe : NULL;
}

void mlx4_gen_slave_eqe(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
	struct mlx4_eqe *eqe;
	u8 slave;
	int i;

	for (eqe = next_slave_event_eqe(slave_eq); eqe;
	     eqe = next_slave_event_eqe(slave_eq)) {
		slave = eqe->slave_id;

		/* All active slaves need to receive the event */
		if (slave == ALL_SLAVES) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (i != dev->caps.function &&
				    master->slave_state[i].active)
					if (mlx4_GEN_EQE(dev, i, eqe))
						mlx4_warn(dev, "Failed to generate "
							  "event for slave %d\n", i);
			}
		} else {
			if (mlx4_GEN_EQE(dev, slave, eqe))
				mlx4_warn(dev, "Failed to generate event "
					  "for slave %d\n", slave);
		}

		++slave_eq->cons;
	}
}
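
/*
 * slave_event() queues an event on the software ring
 * (slave_eq->event_eqe[], SLAVE_EVENT_EQ_SIZE entries) that the master uses
 * to replay asynchronous events to its slaves: the producer copies the EQE,
 * sets the owner bit according to the producer pass parity after a wmb(),
 * and kicks mlx4_gen_slave_eqe() through the comm_wq work queue, which then
 * delivers each entry with mlx4_GEN_EQE().
 */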
static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
	struct mlx4_eqe *s_eqe =
		&slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];

	if ((!!(s_eqe->owner & 0x80)) ^
	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
			  "No free EQE on slave events queue\n", slave);
		return;
	}

	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
	s_eqe->slave_id = slave;
	/* ensure all information is written before setting the ownership bit */
	wmb();
	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
	++slave_eq->prod;

	queue_work(priv->mfunc.master.comm_wq,
		   &priv->mfunc.master.slave_event_work);
}

static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
			     struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave =
		&priv->mfunc.master.slave_state[slave];

	if (!s_slave->active) {
		/*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
		return;
	}

	slave_event(dev, slave, eqe);
}

void mlx4_master_handle_slave_flr(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_flr_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int i;
	int err;

	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

	for (i = 0; i < dev->num_slaves; i++) {
		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
			mlx4_dbg(dev, "mlx4_handle_slave_flr: "
				 "clean slave: %d\n", i);

			mlx4_delete_all_resources_for_slave(dev, i);
			/* return the slave to running mode */
			spin_lock(&priv->mfunc.master.slave_state_lock);
			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
			slave_state[i].is_slave_going_down = 0;
			spin_unlock(&priv->mfunc.master.slave_state_lock);
			/* notify the FW: */
			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (err)
				mlx4_warn(dev, "Failed to notify FW on "
					  "FLR done (slave:%d)\n", i);
		}
	}
}
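
/*
 * mlx4_eq_int() is the common EQ polling loop: it consumes software-owned
 * EQEs, dispatches them by eqe->type (completions, QP/SRQ/CQ errors, port
 * change, command completions, comm channel, FLR, fatal warnings), and on
 * the master forwards slave-owned resource events via mlx4_slave_event().
 * The consumer index is pushed to hardware every MLX4_NUM_SPARE_EQE entries
 * so the HCA never sees the queue as overflowed, and the EQ is re-armed
 * with a final eq_set_ci(eq, 1) before returning.
 */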
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;
	int slave = 0;
	int ret;
	u32 flr_slave;
	u8 update_slave_state;
	int i;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the QP */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_QP,
						be32_to_cpu(eqe->event.qp.qpn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "QP event %02x(%02x) on "
						 "EQ %d at index %u: could "
						 "not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
				      0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
				  __func__);
			/* fall through */
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the SRQ */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_SRQ,
						be32_to_cpu(eqe->event.srq.srqn)
						& 0xffffff,
						&slave);
				if (ret && ret != -ENOENT) {
					mlx4_warn(dev, "SRQ event %02x(%02x) "
						  "on EQ %d at index %u: could"
						  " not get slave id (%d)\n",
						  eqe->type, eqe->subtype,
						  eq->eqn, eq->cons_index, ret);
					break;
				}
				mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
					  " event: %02x(%02x)\n", __func__,
					  slave,
					  be32_to_cpu(eqe->event.srq.srqn),
					  eqe->type, eqe->subtype);

				if (!ret && slave != dev->caps.function) {
					mlx4_warn(dev, "%s: sending event "
						  "%02x(%02x) to slave:%d\n",
						  __func__, eqe->type,
						  eqe->subtype, slave);
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
				       0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev,
						    MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
				if (mlx4_is_master(dev))
					/* change the state of all slaves'
					 * ports to down: */
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending "
							 "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
							 " to slave: %d, port:%d\n",
							 __func__, i, port);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
			} else {
				mlx4_dispatch_event(dev,
						    MLX4_DEV_EVENT_PORT_UP,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 0;

				if (mlx4_is_master(dev)) {
					for (i = 0; i < dev->num_slaves; i++) {
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				}
			}
			break;

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			if (mlx4_is_master(dev)) {
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_CQ,
						be32_to_cpu(eqe->event.cq_err.cqn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "CQ event %02x(%02x) on "
						 "EQ %d at index %u: could "
						 "not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_cq_event(dev,
				      be32_to_cpu(eqe->event.cq_err.cqn)
				      & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_COMM_CHANNEL:
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Received comm channel event "
					  "for non master device\n");
				break;
			}
			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
			       eqe->event.comm_channel_arm.bit_vec,
			       sizeof eqe->event.comm_channel_arm.bit_vec);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.comm_work);
			break;

		case MLX4_EVENT_TYPE_FLR_EVENT:
			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Non-master function received "
					  "FLR event\n");
				break;
			}

			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

			if (flr_slave >= dev->num_slaves) {
				mlx4_warn(dev,
					  "Got FLR for unknown function: %d\n",
					  flr_slave);
				update_slave_state = 0;
			} else
				update_slave_state = 1;

			spin_lock(&priv->mfunc.master.slave_state_lock);
			if (update_slave_state) {
				priv->mfunc.master.slave_state[flr_slave].active = false;
				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
			}
			spin_unlock(&priv->mfunc.master.slave_state_lock);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.slave_flr_event_work);
			break;

		case MLX4_EVENT_TYPE_FATAL_WARNING:
			if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
				if (mlx4_is_master(dev))
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending "
							 "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
							 " to slave: %d\n", __func__, i);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				mlx4_err(dev, "Temperature Threshold was reached! "
					 "Threshold: %d celsius degrees; "
					 "Current Temperature: %d\n",
					 be16_to_cpu(eqe->event.warming.warning_threshold),
					 be16_to_cpu(eqe->event.warming.current_temperature));
			} else
				mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), "
					  "subtype %02x on EQ %d at index %u. owner=%x, "
					  "nent=0x%x, slave=%x, ownership=%s\n",
					  eqe->type, eqe->subtype, eq->eqn,
					  eq->cons_index, eqe->owner, eq->nent,
					  eqe->slave_id,
					  !!(eqe->owner & 0x80) ^
					  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
				  "index %u. owner=%x, nent=0x%x, slave=%x, "
				  "ownership=%s\n",
				  eqe->type, eqe->subtype, eq->eqn,
				  eq->cons_index, eqe->owner, eq->nent,
				  eqe->slave_id,
				  !!(eqe->owner & 0x80) ^
				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}
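
/*
 * Two interrupt entry points follow: mlx4_interrupt() is the legacy INTx
 * handler, which acknowledges the interrupt through the clear register and
 * then polls every EQ (all completion vectors plus the async EQ), while
 * mlx4_msi_x_interrupt() services exactly the one EQ bound to its vector.
 */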
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}
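
/*
 * MAP_EQ command path: the wrapper below runs on the master on behalf of a
 * slave.  Only a request from the master's own function is passed through to
 * firmware; in either case the per-slave event_eq[] table is updated so that
 * each event type set in in_param is recorded as mapped to (or, when bit 31
 * of the in_modifier is set, unmapped from) the given EQ number.
 */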
int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq =
		priv->mfunc.master.slave_state[slave].event_eq;
	u32 in_modifier = vhcr->in_modifier;
	u32 eqn = in_modifier & 0x1FF;
	u64 in_param = vhcr->in_param;
	int err = 0;
	int i;

	if (slave == dev->caps.function)
		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_NATIVE);
	if (!err)
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
			if (in_param & (1LL << i))
				event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;

	return err;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
			    0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_WRAPPED);
}
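
/*
 * UAR/doorbell bookkeeping helpers.  Each UAR page holds the doorbells of
 * four consecutive EQs, so mlx4_num_eq_uar() returns how many pages span the
 * EQ numbers this driver will actually touch.  As an illustration with
 * made-up numbers: reserved_eqs = 8, num_comp_vectors = 4, comp_pool = 0
 * gives (4 + 1 + 8 + 0)/4 - 8/4 + 1 = 2 UAR pages.  mlx4_get_eq_uar() then
 * lazily ioremaps the page for a given EQ and returns the doorbell at
 * offset 0x800 + 8 * (eqn % 4) within it.
 */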
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
		dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
}

static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}
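
/*
 * mlx4_create_eq() brings one EQ to life: it rounds the requested number of
 * entries up to a power of two, allocates DMA-coherent pages for the EQE
 * ring, grabs an EQ number from the bitmap, maps the doorbell, programs the
 * MTT with the page addresses and finally hands the EQ context to firmware
 * with SW2HW_EQ.  Every failure point unwinds through the err_out_* labels
 * in reverse order of setup.
 */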
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t,
							  GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK   |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	/* Debug-only dump of the EQ context returned by HW2SW_EQ; compiled out */
	if (0) {
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				pr_cont("[%02x] ", i * 4);
			pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				pr_cont("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, vec;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	for (i = 0; i < dev->caps.comp_pool; i++) {
		/*
		 * Free the assigned IRQs.  All bits should already be 0,
		 * but we need to validate.
		 */
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			/* no need for protection here */
			vec = dev->caps.num_comp_vectors + 1 + i;
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
		}
	}

	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}
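
/*
 * mlx4_init_eq_table() lays the EQs out as:
 *	eq[0] .. eq[num_comp_vectors - 1]   - per-vector completion EQs
 *	eq[num_comp_vectors]                - the asynchronous event EQ
 *	eq[num_comp_vectors + 1] onwards    - the extra completion-vector pool
 * It then requests either one MSI-X interrupt per EQ (named mlx4-comp-N /
 * mlx4-async) or a single shared INTx interrupt, maps the async event mask
 * to the async EQ and arms the completion and async EQs with eq_set_ci().
 * Pool EQs are armed later, when mlx4_assign_eq() hands them out.
 */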
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof *priv->eq_table.uar_map,
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_map_clr_int(dev);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask =
			swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int  = priv->clr_base +
			(priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names =
		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
					     dev->caps.comp_pool),
			GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_bitmap;
	}

	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
			     &priv->eq_table.eq[dev->caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	/* If the additional completion vector pool size is 0, this loop will not run */
	for (i = dev->caps.num_comp_vectors + 1;
	     i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {

		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
			if (i < dev->caps.num_comp_vectors) {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-comp-%d@pci:%s", i,
					 pci_name(dev->pdev));
			} else {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-async@pci:%s",
					 pci_name(dev->pdev));
			}

			eq_name = priv->eq_table.irq_names +
				  i * MLX4_IRQNAME_SIZE;
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt, 0, eq_name,
					  priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
	i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
	while (i >= 0) {
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
		--i;
	}
	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_bitmap:
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i])
			iounmap(priv->eq_table.uar_map[i]);

	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts on all
 * the irq vectors of the device.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupts(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err;

	err = mlx4_NOP(dev);
	/* When not in MSI_X, there is only one irq to check */
	if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
		return err;

	/* A loop over all completion vectors. For each vector, check
	 * whether it works by mapping command completions to that vector
	 * and performing a NOP command.
	 */
	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
		/* Temporarily use polling for command completions */
		mlx4_cmd_use_polling(dev);

		/* Map the new eq to handle all asynchronous events */
		err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
				  priv->eq_table.eq[i].eqn);
		if (err) {
			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
			mlx4_cmd_use_events(dev);
			break;
		}

		/* Go back to using events */
		mlx4_cmd_use_events(dev);
		err = mlx4_NOP(dev);
	}

	/* Return to default */
	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);
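
/*
 * mlx4_assign_eq() hands out one of the pooled completion vectors tracked in
 * msix_ctl.pool_bm, names it, optionally adds it to an RFS cpu_rmap,
 * requests its IRQ and arms the EQ; mlx4_release_eq() gives it back.  A
 * minimal usage sketch from a hypothetical consumer (the names "mdev" and
 * "my-ring-0" are illustrative only):
 *
 *	int vec = 0;
 *	if (mlx4_assign_eq(mdev, "my-ring-0", NULL, &vec))
 *		vec = 0;	// fall back to the legacy shared vectors
 *	...
 *	if (vec)
 *		mlx4_release_eq(mdev, vec);
 */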
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
		   int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int vec = 0, err = 0, i;

	mutex_lock(&priv->msix_ctl.pool_lock);
	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
			priv->msix_ctl.pool_bm |= 1ULL << i;
			vec = dev->caps.num_comp_vectors + 1 + i;
			snprintf(priv->eq_table.irq_names +
					vec * MLX4_IRQNAME_SIZE,
					MLX4_IRQNAME_SIZE, "%s", name);
#ifdef CONFIG_RFS_ACCEL
			if (rmap) {
				err = irq_cpu_rmap_add(rmap,
						       priv->eq_table.eq[vec].irq);
				if (err)
					mlx4_warn(dev, "Failed adding irq rmap\n");
			}
#endif
			err = request_irq(priv->eq_table.eq[vec].irq,
					  mlx4_msi_x_interrupt, 0,
					  &priv->eq_table.irq_names[vec<<5],
					  priv->eq_table.eq + vec);
			if (err) {
				/* zero out bit by flipping it */
				priv->msix_ctl.pool_bm ^= 1ULL << i;
				vec = 0;
				continue;
				/* we don't want to break here */
			}
			eq_set_ci(&priv->eq_table.eq[vec], 1);
		}
	}
	mutex_unlock(&priv->msix_ctl.pool_lock);

	if (vec) {
		*vector = vec;
	} else {
		*vector = 0;
		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
	}
	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/* bm index */
	int i = vec - dev->caps.num_comp_vectors - 1;

	if (likely(i >= 0)) {
		/*
		 * Sanity check, making sure we're not trying to free IRQs
		 * belonging to a legacy EQ.
		 */
		mutex_lock(&priv->msix_ctl.pool_lock);
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
			priv->msix_ctl.pool_bm &= ~(1ULL << i);
		}
		mutex_unlock(&priv->msix_ctl.pool_lock);
	}

}
EXPORT_SYMBOL(mlx4_release_eq);