eq.c

/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
        MLX4_IRQNAME_SIZE       = 32
};

enum {
        MLX4_NUM_ASYNC_EQE      = 0x100,
        MLX4_NUM_SPARE_EQE      = 0x80,
        MLX4_EQ_ENTRY_SIZE      = 0x20
};

#define MLX4_EQ_STATUS_OK          ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW           ( 0 << 24)
#define MLX4_EQ_OWNER_HW           ( 1 << 24)
#define MLX4_EQ_FLAG_EC            ( 1 << 18)
#define MLX4_EQ_FLAG_OI            ( 1 << 17)
#define MLX4_EQ_STATE_ARMED        ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED        (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX4_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX4_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX4_EVENT_TYPE_ECC_DETECT)         | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)          | \
                               (1ull << MLX4_EVENT_TYPE_CMD)                | \
                               (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)       | \
                               (1ull << MLX4_EVENT_TYPE_FLR_EVENT))

static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
        __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
                                               req_not << 31),
                     eq->doorbell);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

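/*
 * EQE ownership: the queue is a power of two in size, and the hardware
 * toggles each entry's owner bit on every pass through it.  An entry
 * belongs to software when its owner bit matches the cycle bit implied
 * by the consumer index, which is what next_eqe_sw() checks.
 */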
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
        unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;

        return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
        struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
        return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
        struct mlx4_eqe *eqe =
                &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
        return (!!(eqe->owner & 0x80) ^
                !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
                eqe : NULL;
}

void mlx4_gen_slave_eqe(struct work_struct *work)
{
        struct mlx4_mfunc_master_ctx *master =
                container_of(work, struct mlx4_mfunc_master_ctx,
                             slave_event_work);
        struct mlx4_mfunc *mfunc =
                container_of(master, struct mlx4_mfunc, master);
        struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
        struct mlx4_dev *dev = &priv->dev;
        struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
        struct mlx4_eqe *eqe;
        u8 slave;
        int i;

        for (eqe = next_slave_event_eqe(slave_eq); eqe;
              eqe = next_slave_event_eqe(slave_eq)) {
                slave = eqe->slave_id;

                /* All active slaves need to receive the event */
                if (slave == ALL_SLAVES) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (i != dev->caps.function &&
                                    master->slave_state[i].active)
                                        if (mlx4_GEN_EQE(dev, i, eqe))
                                                mlx4_warn(dev, "Failed to generate event for slave %d\n",
                                                          i);
                        }
                } else {
                        if (mlx4_GEN_EQE(dev, slave, eqe))
                                mlx4_warn(dev, "Failed to generate event for slave %d\n",
                                          slave);
                }
                ++slave_eq->cons;
        }
}

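/*
 * The master proxies asynchronous events to its slaves through a small
 * software event queue (slave_eq): slave_event() below produces entries
 * using the same owner-bit convention as a hardware EQ, and the
 * slave_event_work worker (mlx4_gen_slave_eqe() above) consumes them,
 * injecting the actual EQEs via mlx4_GEN_EQE().
 */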
static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
        struct mlx4_eqe *s_eqe =
                &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];

        if ((!!(s_eqe->owner & 0x80)) ^
            (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
                mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
                          slave);
                return;
        }

        /* copy everything but the owner byte, which is written last */
        memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
        s_eqe->slave_id = slave;
        /* ensure all information is written before setting the ownership bit */
        wmb();
        s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
        ++slave_eq->prod;

        queue_work(priv->mfunc.master.comm_wq,
                   &priv->mfunc.master.slave_event_work);
}

static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
                             struct mlx4_eqe *eqe)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *s_slave =
                &priv->mfunc.master.slave_state[slave];

        if (!s_slave->active) {
                /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
                return;
        }

        slave_event(dev, slave, eqe);
}

void mlx4_master_handle_slave_flr(struct work_struct *work)
{
        struct mlx4_mfunc_master_ctx *master =
                container_of(work, struct mlx4_mfunc_master_ctx,
                             slave_flr_event_work);
        struct mlx4_mfunc *mfunc =
                container_of(master, struct mlx4_mfunc, master);
        struct mlx4_priv *priv =
                container_of(mfunc, struct mlx4_priv, mfunc);
        struct mlx4_dev *dev = &priv->dev;
        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
        int i;
        int err;

        mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

        for (i = 0; i < dev->num_slaves; i++) {
                if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
                        mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
                                 i);

                        mlx4_delete_all_resources_for_slave(dev, i);
                        /* return the slave to running mode */
                        spin_lock(&priv->mfunc.master.slave_state_lock);
                        slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
                        slave_state[i].is_slave_going_down = 0;
                        spin_unlock(&priv->mfunc.master.slave_state_lock);
                        /* notify the FW */
                        err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
                                       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
                        if (err)
                                mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
                                          i);
                }
        }
}

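/*
 * Main EQ polling loop: consume software-owned EQEs, dispatch each by
 * type (on the master, events tied to a slave-owned resource are
 * forwarded to that slave instead of being handled locally), and ring
 * the doorbell to update the consumer index - at latest every
 * MLX4_NUM_SPARE_EQE entries, so the HCA never sees the queue as
 * overflowed.
 */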
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_eqe *eqe;
        int cqn;
        int eqes_found = 0;
        int set_ci = 0;
        int port;
        int slave = 0;
        int ret;
        u32 flr_slave;
        u8 update_slave_state;
        int i;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                rmb();

                switch (eqe->type) {
                case MLX4_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                        mlx4_cq_completion(dev, cqn);
                        break;

                case MLX4_EVENT_TYPE_PATH_MIG:
                case MLX4_EVENT_TYPE_COMM_EST:
                case MLX4_EVENT_TYPE_SQ_DRAINED:
                case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
                case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
                        mlx4_dbg(dev, "event %d arrived\n", eqe->type);
                        if (mlx4_is_master(dev)) {
                                /* forward only to slave owning the QP */
                                ret = mlx4_get_slave_from_resource_id(dev,
                                                RES_QP,
                                                be32_to_cpu(eqe->event.qp.qpn)
                                                & 0xffffff, &slave);
                                if (ret && ret != -ENOENT) {
                                        mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
                                                 eqe->type, eqe->subtype,
                                                 eq->eqn, eq->cons_index, ret);
                                        break;
                                }

                                if (!ret && slave != dev->caps.function) {
                                        mlx4_slave_event(dev, slave, eqe);
                                        break;
                                }
                        }
                        mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
                                      0xffffff, eqe->type);
                        break;

                case MLX4_EVENT_TYPE_SRQ_LIMIT:
                        mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
                                  __func__);
                        /* fall through to the catastrophic-error handling */
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
                        if (mlx4_is_master(dev)) {
                                /* forward only to slave owning the SRQ */
                                ret = mlx4_get_slave_from_resource_id(dev,
                                                RES_SRQ,
                                                be32_to_cpu(eqe->event.srq.srqn)
                                                & 0xffffff,
                                                &slave);
                                if (ret && ret != -ENOENT) {
                                        mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
                                                  eqe->type, eqe->subtype,
                                                  eq->eqn, eq->cons_index, ret);
                                        break;
                                }
                                mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
                                          __func__, slave,
                                          be32_to_cpu(eqe->event.srq.srqn),
                                          eqe->type, eqe->subtype);

                                if (!ret && slave != dev->caps.function) {
                                        mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
                                                  __func__, eqe->type,
                                                  eqe->subtype, slave);
                                        mlx4_slave_event(dev, slave, eqe);
                                        break;
                                }
                        }
                        mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
                                       0xffffff, eqe->type);
                        break;

                case MLX4_EVENT_TYPE_CMD:
                        mlx4_cmd_event(dev,
                                       be16_to_cpu(eqe->event.cmd.token),
                                       eqe->event.cmd.status,
                                       be64_to_cpu(eqe->event.cmd.out_param));
                        break;

                case MLX4_EVENT_TYPE_PORT_CHANGE:
                        port = be32_to_cpu(eqe->event.port_change.port) >> 28;
                        if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
                                mlx4_dispatch_event(dev,
                                                    MLX4_DEV_EVENT_PORT_DOWN,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 1;
                                if (mlx4_is_master(dev))
                                        /* change the state of all slaves'
                                         * ports to down: */
                                        for (i = 0; i < dev->num_slaves; i++) {
                                                mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
                                                         __func__, i, port);
                                                if (i == dev->caps.function)
                                                        continue;
                                                mlx4_slave_event(dev, i, eqe);
                                        }
                        } else {
                                mlx4_dispatch_event(dev,
                                                    MLX4_DEV_EVENT_PORT_UP,
                                                    port);
                                mlx4_priv(dev)->sense.do_sense_port[port] = 0;

                                if (mlx4_is_master(dev)) {
                                        for (i = 0; i < dev->num_slaves; i++) {
                                                if (i == dev->caps.function)
                                                        continue;
                                                mlx4_slave_event(dev, i, eqe);
                                        }
                                }
                        }
                        break;

                case MLX4_EVENT_TYPE_CQ_ERROR:
                        mlx4_warn(dev, "CQ %s on CQN %06x\n",
                                  eqe->event.cq_err.syndrome == 1 ?
                                  "overrun" : "access violation",
                                  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
                        if (mlx4_is_master(dev)) {
                                ret = mlx4_get_slave_from_resource_id(dev,
                                                RES_CQ,
                                                be32_to_cpu(eqe->event.cq_err.cqn)
                                                & 0xffffff, &slave);
                                if (ret && ret != -ENOENT) {
                                        mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
                                                 eqe->type, eqe->subtype,
                                                 eq->eqn, eq->cons_index, ret);
                                        break;
                                }

                                if (!ret && slave != dev->caps.function) {
                                        mlx4_slave_event(dev, slave, eqe);
                                        break;
                                }
                        }
                        mlx4_cq_event(dev,
                                      be32_to_cpu(eqe->event.cq_err.cqn)
                                      & 0xffffff,
                                      eqe->type);
                        break;

                case MLX4_EVENT_TYPE_EQ_OVERFLOW:
                        mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;

                case MLX4_EVENT_TYPE_COMM_CHANNEL:
                        if (!mlx4_is_master(dev)) {
                                mlx4_warn(dev, "Received comm channel event for non master device\n");
                                break;
                        }
                        memcpy(&priv->mfunc.master.comm_arm_bit_vector,
                               eqe->event.comm_channel_arm.bit_vec,
                               sizeof eqe->event.comm_channel_arm.bit_vec);
                        queue_work(priv->mfunc.master.comm_wq,
                                   &priv->mfunc.master.comm_work);
                        break;

                case MLX4_EVENT_TYPE_FLR_EVENT:
                        flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
                        if (!mlx4_is_master(dev)) {
                                mlx4_warn(dev, "Non-master function received FLR event\n");
                                break;
                        }

                        mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

                        if (flr_slave >= dev->num_slaves) {
                                mlx4_warn(dev,
                                          "Got FLR for unknown function: %d\n",
                                          flr_slave);
                                update_slave_state = 0;
                        } else
                                update_slave_state = 1;

                        spin_lock(&priv->mfunc.master.slave_state_lock);
                        if (update_slave_state) {
                                priv->mfunc.master.slave_state[flr_slave].active = false;
                                priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
                                priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
                        }
                        spin_unlock(&priv->mfunc.master.slave_state_lock);
                        queue_work(priv->mfunc.master.comm_wq,
                                   &priv->mfunc.master.slave_flr_event_work);
                        break;

                case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
                case MLX4_EVENT_TYPE_ECC_DETECT:
                default:
                        mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
                                  eqe->type, eqe->subtype, eq->eqn,
                                  eq->cons_index, eqe->owner, eq->nent,
                                  eqe->slave_id,
                                  !!(eqe->owner & 0x80) ^
                                  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /*
                 * The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX4_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
                        eq_set_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_set_ci(eq, 1);

        return eqes_found;
}

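/*
 * In INTx mode a single handler clears the interrupt and polls every
 * EQ; with MSI-X each EQ has its own vector and a dedicated handler.
 */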
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
        struct mlx4_dev *dev = dev_ptr;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int work = 0;
        int i;

        writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

        return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
        struct mlx4_eq  *eq  = eq_ptr;
        struct mlx4_dev *dev = eq->dev;

        mlx4_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

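/*
 * MAP_EQ requests from slaves land here: bit 31 of the in_modifier
 * selects unmap, the low bits carry the EQ number, and the in_param
 * carries the event mask.  Only the PF's own request is passed on to
 * the firmware; for slaves, the requested mapping is recorded in the
 * per-slave event_eq state.
 */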
int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
                        struct mlx4_vhcr *vhcr,
                        struct mlx4_cmd_mailbox *inbox,
                        struct mlx4_cmd_mailbox *outbox,
                        struct mlx4_cmd_info *cmd)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_event_eq_info *event_eq =
                &priv->mfunc.master.slave_state[slave].event_eq;
        u32 in_modifier = vhcr->in_modifier;
        u32 eqn = in_modifier & 0x1FF;
        u64 in_param = vhcr->in_param;
        int err = 0;

        if (slave == dev->caps.function)
                err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
                               0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
                               MLX4_CMD_NATIVE);
        if (!err) {
                if (in_modifier >> 31) {
                        /* unmap */
                        event_eq->event_type &= ~in_param;
                } else {
                        event_eq->eqn = eqn;
                        event_eq->event_type = in_param;
                }
        }
        return err;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
                       int eq_num)
{
        return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
                        0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
                        MLX4_CMD_WRAPPED);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0,
                        MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
                        MLX4_CMD_WRAPPED);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num,
                            0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
                            MLX4_CMD_WRAPPED);
}

static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
        /*
         * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
         * we need to map, take the difference of highest index and
         * the lowest index we'll use and add 1.
         */
        return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
                dev->caps.comp_pool) / 4 - dev->caps.reserved_eqs / 4 + 1;
}

static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int index;

        index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

        if (!priv->eq_table.uar_map[index]) {
                priv->eq_table.uar_map[index] =
                        ioremap(pci_resource_start(dev->pdev, 2) +
                                ((eq->eqn / 4) << PAGE_SHIFT),
                                PAGE_SIZE);
                if (!priv->eq_table.uar_map[index]) {
                        mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
                                 eq->eqn);
                        return NULL;
                }
        }

        return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

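/*
 * Bringing up an EQ: round the requested size up to a power of two,
 * allocate the queue pages with dma_alloc_coherent(), describe them to
 * the device through an MTT, fill in the EQ context, and hand the
 * queue over to firmware with SW2HW_EQ.  mlx4_free_eq() below reverses
 * these steps.
 */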
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
                          u8 intr, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_eq_context *eq_context;
        int npages;
        u64 *dma_list = NULL;
        dma_addr_t t;
        u64 mtt_addr;
        int err = -ENOMEM;
        int i;

        eq->dev  = dev;
        eq->nent = roundup_pow_of_two(max(nent, 2));
        npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

        eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                GFP_KERNEL);
        if (!eq->page_list)
                goto err_out;

        for (i = 0; i < npages; ++i)
                eq->page_list[i].buf = NULL;

        dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
        if (!dma_list)
                goto err_out_free;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                goto err_out_free;
        eq_context = mailbox->buf;

        for (i = 0; i < npages; ++i) {
                eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
                                                          PAGE_SIZE, &t,
                                                          GFP_KERNEL);
                if (!eq->page_list[i].buf)
                        goto err_out_free_pages;

                dma_list[i] = t;
                eq->page_list[i].map = t;

                memset(eq->page_list[i].buf, 0, PAGE_SIZE);
        }

        eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
        if (eq->eqn == -1)
                goto err_out_free_pages;

        eq->doorbell = mlx4_get_eq_uar(dev, eq);
        if (!eq->doorbell) {
                err = -ENOMEM;
                goto err_out_free_eq;
        }

        err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
        if (err)
                goto err_out_free_eq;

        err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
        if (err)
                goto err_out_free_mtt;

        memset(eq_context, 0, sizeof *eq_context);
        eq_context->flags         = cpu_to_be32(MLX4_EQ_STATUS_OK |
                                                MLX4_EQ_STATE_ARMED);
        eq_context->log_eq_size   = ilog2(eq->nent);
        eq_context->intr          = intr;
        eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

        mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
        eq_context->mtt_base_addr_h = mtt_addr >> 32;
        eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

        err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
        if (err) {
                mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
                goto err_out_free_mtt;
        }

        kfree(dma_list);
        mlx4_free_cmd_mailbox(dev, mailbox);

        eq->cons_index = 0;

        return err;

err_out_free_mtt:
        mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
        for (i = 0; i < npages; ++i)
                if (eq->page_list[i].buf)
                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                          eq->page_list[i].buf,
                                          eq->page_list[i].map);

        mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
        kfree(eq->page_list);
        kfree(dma_list);

err_out:
        return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
                         struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        int err;
        int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
        int i;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return;

        err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
        if (err)
                mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

        if (0) {
                /* compiled-out debug aid: dump the EQ context that
                 * HW2SW_EQ returned in the mailbox */
                mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
                for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
                        if (i % 4 == 0)
                                pr_cont("[%02x] ", i * 4);
                        pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
                        if ((i + 1) % 4 == 0)
                                pr_cont("\n");
                }
        }

        mlx4_mtt_cleanup(dev, &eq->mtt);
        for (i = 0; i < npages; ++i)
                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                  eq->page_list[i].buf,
                                  eq->page_list[i].map);

        kfree(eq->page_list);
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
        mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
        struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, vec;

        if (eq_table->have_irq)
                free_irq(dev->pdev->irq, dev);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                if (eq_table->eq[i].have_irq) {
                        free_irq(eq_table->eq[i].irq, eq_table->eq + i);
                        eq_table->eq[i].have_irq = 0;
                }

        for (i = 0; i < dev->caps.comp_pool; i++) {
                /*
                 * Free any IRQs still assigned from the pool.  All bits
                 * should already be 0, but we validate anyway.
                 */
                if (priv->msix_ctl.pool_bm & 1ULL << i) {
                        /* no need to take pool_lock during teardown */
                        vec = dev->caps.num_comp_vectors + 1 + i;
                        free_irq(priv->eq_table.eq[vec].irq,
                                 &priv->eq_table.eq[vec]);
                }
        }
        kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
                                 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
        if (!priv->clr_base) {
                mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
                return -ENOMEM;
        }

        return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
                                    sizeof *priv->eq_table.eq, GFP_KERNEL);
        if (!priv->eq_table.eq)
                return -ENOMEM;

        return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
        kfree(mlx4_priv(dev)->eq_table.eq);
}

int mlx4_init_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
        int i;

        priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
                                         sizeof *priv->eq_table.uar_map,
                                         GFP_KERNEL);
        if (!priv->eq_table.uar_map) {
                err = -ENOMEM;
                goto err_out_free;
        }
        err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
                               dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
        if (err)
                goto err_out_free;

        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                priv->eq_table.uar_map[i] = NULL;

        if (!mlx4_is_slave(dev)) {
                err = mlx4_map_clr_int(dev);
                if (err)
                        goto err_out_bitmap;

                priv->eq_table.clr_mask =
                        swab32(1 << (priv->eq_table.inta_pin & 31));
                priv->eq_table.clr_int  = priv->clr_base +
                        (priv->eq_table.inta_pin < 32 ? 4 : 0);
        }

        priv->eq_table.irq_names =
                kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
                                             dev->caps.comp_pool),
                        GFP_KERNEL);
        if (!priv->eq_table.irq_names) {
                err = -ENOMEM;
                goto err_out_bitmap;
        }

        for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
                err = mlx4_create_eq(dev, dev->caps.num_cqs -
                                          dev->caps.reserved_cqs +
                                          MLX4_NUM_SPARE_EQE,
                                     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
                                     &priv->eq_table.eq[i]);
                if (err) {
                        --i;
                        goto err_out_unmap;
                }
        }

        err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
                             (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
                             &priv->eq_table.eq[dev->caps.num_comp_vectors]);
        if (err)
                goto err_out_comp;

        /* if the pool of additional completion vectors is empty,
         * this loop will not run */
        for (i = dev->caps.num_comp_vectors + 1;
              i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
                err = mlx4_create_eq(dev, dev->caps.num_cqs -
                                          dev->caps.reserved_cqs +
                                          MLX4_NUM_SPARE_EQE,
                                     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
                                     &priv->eq_table.eq[i]);
                if (err) {
                        --i;
                        goto err_out_unmap;
                }
        }

        if (dev->flags & MLX4_FLAG_MSI_X) {
                const char *eq_name;

                for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
                        if (i < dev->caps.num_comp_vectors) {
                                snprintf(priv->eq_table.irq_names +
                                         i * MLX4_IRQNAME_SIZE,
                                         MLX4_IRQNAME_SIZE,
                                         "mlx4-comp-%d@pci:%s", i,
                                         pci_name(dev->pdev));
                        } else {
                                snprintf(priv->eq_table.irq_names +
                                         i * MLX4_IRQNAME_SIZE,
                                         MLX4_IRQNAME_SIZE,
                                         "mlx4-async@pci:%s",
                                         pci_name(dev->pdev));
                        }

                        eq_name = priv->eq_table.irq_names +
                                  i * MLX4_IRQNAME_SIZE;
                        err = request_irq(priv->eq_table.eq[i].irq,
                                          mlx4_msi_x_interrupt, 0, eq_name,
                                          priv->eq_table.eq + i);
                        if (err)
                                goto err_out_async;

                        priv->eq_table.eq[i].have_irq = 1;
                }
        } else {
                snprintf(priv->eq_table.irq_names,
                         MLX4_IRQNAME_SIZE,
                         DRV_NAME "@pci:%s",
                         pci_name(dev->pdev));
                err = request_irq(dev->pdev->irq, mlx4_interrupt,
                                  IRQF_SHARED, priv->eq_table.irq_names, dev);
                if (err)
                        goto err_out_async;

                priv->eq_table.have_irq = 1;
        }

        err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
        if (err)
                mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                eq_set_ci(&priv->eq_table.eq[i], 1);

        return 0;

err_out_async:
        mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
        i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
        while (i >= 0) {
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);
                --i;
        }
        if (!mlx4_is_slave(dev))
                mlx4_unmap_clr_int(dev);
        mlx4_free_irqs(dev);

err_out_bitmap:
        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
        kfree(priv->eq_table.uar_map);

        return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
                    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

        mlx4_free_irqs(dev);

        for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);

        if (!mlx4_is_slave(dev))
                mlx4_unmap_clr_int(dev);

        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                if (priv->eq_table.uar_map[i])
                        iounmap(priv->eq_table.uar_map[i]);

        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

        kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts on all
 * the IRQ vectors of the device.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupts(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;
        int err;

        err = mlx4_NOP(dev);
        /* When not in MSI_X, there is only one irq to check */
        if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
                return err;

        /* A loop over all completion vectors: for each vector we check
         * whether it works by mapping command completions to that vector
         * and performing a NOP command
         */
        for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
                /* Temporarily use polling for command completions */
                mlx4_cmd_use_polling(dev);

                /* Map all asynchronous events to the EQ under test */
                err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
                                  priv->eq_table.eq[i].eqn);
                if (err) {
                        mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
                        mlx4_cmd_use_events(dev);
                        break;
                }

                /* Go back to using events */
                mlx4_cmd_use_events(dev);
                err = mlx4_NOP(dev);
        }

        /* Return to default */
        mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
                    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
        return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);

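/*
 * EQs beyond the legacy completion vectors form a pool tracked by the
 * pool_bm bitmap (one bit per vector, manipulated under pool_lock).
 * mlx4_assign_eq() claims a free vector and requests its IRQ;
 * mlx4_release_eq() undoes this.
 */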
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int vec = 0, err = 0, i;

        spin_lock(&priv->msix_ctl.pool_lock);
        for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
                if (~priv->msix_ctl.pool_bm & 1ULL << i) {
                        priv->msix_ctl.pool_bm |= 1ULL << i;
                        vec = dev->caps.num_comp_vectors + 1 + i;
                        snprintf(priv->eq_table.irq_names +
                                        vec * MLX4_IRQNAME_SIZE,
                                        MLX4_IRQNAME_SIZE, "%s", name);
                        err = request_irq(priv->eq_table.eq[vec].irq,
                                          mlx4_msi_x_interrupt, 0,
                                          &priv->eq_table.irq_names[vec << 5],
                                          priv->eq_table.eq + vec);
                        if (err) {
                                /* clear the bit by flipping it */
                                priv->msix_ctl.pool_bm ^= 1ULL << i;
                                vec = 0;
                                /* don't break here - keep looking for
                                 * another free vector */
                                continue;
                        }
                        eq_set_ci(&priv->eq_table.eq[vec], 1);
                }
        }
        spin_unlock(&priv->msix_ctl.pool_lock);

        if (vec) {
                *vector = vec;
        } else {
                *vector = 0;
                err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
        }
        return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        /* bitmap index of the vector within the pool */
        int i = vec - dev->caps.num_comp_vectors - 1;

        if (likely(i >= 0)) {
                /* sanity check: make sure we are not trying to free an
                 * IRQ belonging to a legacy EQ */
                spin_lock(&priv->msix_ctl.pool_lock);
                if (priv->msix_ctl.pool_bm & 1ULL << i) {
                        free_irq(priv->eq_table.eq[vec].irq,
                                 &priv->eq_table.eq[vec]);
                        priv->msix_ctl.pool_bm &= ~(1ULL << i);
                }
                spin_unlock(&priv->msix_ctl.pool_lock);
        }
}
EXPORT_SYMBOL(mlx4_release_eq);