/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mlx4_eq_context {
	__be32	flags;
	u16	reserved1[3];
	__be16	page_offset;
	u8	log_eq_size;
	u8	reserved2[4];
	u8	eq_period;
	u8	reserved3;
	u8	eq_max_count;
	u8	reserved4[3];
	u8	intr;
	u8	log_page_size;
	u8	reserved5[2];
	u8	mtt_base_addr_h;
	__be32	mtt_base_addr_l;
	u32	reserved6[2];
	__be32	consumer_index;
	__be32	producer_index;
	u32	reserved7[4];
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD))

struct mlx4_eqe {
	u8	reserved1;
	u8	type;
	u8	reserved2;
	u8	subtype;
	union {
		u32	raw[6];
		struct {
			__be32	cqn;
		} __attribute__((packed)) comp;
		struct {
			u16	reserved1;
			__be16	token;
			u32	reserved2;
			u8	reserved3[3];
			u8	status;
			__be64	out_param;
		} __attribute__((packed)) cmd;
		struct {
			__be32	qpn;
		} __attribute__((packed)) qp;
		struct {
			__be32	srqn;
		} __attribute__((packed)) srq;
		struct {
			__be32	cqn;
			u32	reserved1;
			u8	reserved2[3];
			u8	syndrome;
		} __attribute__((packed)) cq_err;
		struct {
			u32	reserved1[2];
			__be32	port;
		} __attribute__((packed)) port_change;
	} event;
	u8	reserved3[3];
	u8	owner;
} __attribute__((packed));
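
/*
 * Ring the EQ doorbell: post the 24-bit consumer index, and when req_not
 * (bit 31) is set, request another event/interrupt from the HCA.
 */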
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
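
/*
 * Return the EQE at the given index; the ring wraps at eq->nent and may
 * span several pages.
 */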
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;

	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}
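
/*
 * Return the next EQE if its ownership bit says it belongs to software
 * on the current pass over the ring, or NULL if the EQ is empty.
 */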
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);

	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
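
/*
 * Drain one EQ: dispatch every software-owned EQE to the appropriate
 * CQ, QP, SRQ, command or port handler, updating the consumer index as
 * we go.  Returns nonzero if any EQEs were consumed.
 */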
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
				       eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			mlx4_dispatch_event(dev,
					    eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
					    MLX4_DEV_EVENT_PORT_UP :
					    MLX4_DEV_EVENT_PORT_DOWN,
					    be32_to_cpu(eqe->event.port_change.port) >> 28);
			break;

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
				  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			/*
			 * Conditional on hca_type is OK here because
			 * this is a rare case, not the fast path.
			 */
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}
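
/* INTA (pin-based) interrupt handler: clear the interrupt and poll every EQ. */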
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < MLX4_NUM_EQ; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}
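
/* Thin wrappers around the MAP_EQ, SW2HW_EQ and HW2SW_EQ firmware commands. */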
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
			MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
			    MLX4_CMD_TIME_CLASS_A);
}
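
/*
 * Return the doorbell address for an EQ, mapping its UAR page on first
 * use.  Each UAR page holds the doorbells for four consecutive EQs.
 */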
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}
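
/*
 * Create one EQ: allocate the EQE buffer and its MTTs, reserve an EQ
 * number, map the doorbell, and hand the queue to firmware via SW2HW_EQ.
 */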
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}
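
/*
 * Reclaim an EQ from firmware via HW2SW_EQ and free its buffers, MTTs
 * and EQ number.
 */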
static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	/* Debug-only dump of the EQ context returned by HW2SW_EQ; disabled by default. */
	if (0) {
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    eq->page_list[i].buf,
				    eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	int i;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);
	for (i = 0; i < MLX4_NUM_EQ; ++i)
		if (eq_table->eq[i].have_irq)
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
}
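
/* Map the interrupt clear register from the BAR and offset reported by firmware. */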
static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int ret;

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table.  This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 64 bytes of context
	 * memory, or 1 KB total.
	 */
	priv->eq_table.icm_virt = icm_virt;
	priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!priv->eq_table.icm_page)
		return -ENOMEM;
	priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
		__free_page(priv->eq_table.icm_page);
		return -ENOMEM;
	}

	ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
	if (ret) {
		pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(priv->eq_table.icm_page);
	}

	return ret;
}

void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
	pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(priv->eq_table.icm_page);
}
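
/*
 * Set up the EQ table: create the completion and async EQs, request
 * MSI-X or shared INTA interrupts, map async events to the async EQ,
 * and arm all EQs.
 */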
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
		priv->eq_table.uar_map[i] = NULL;

	err = mlx4_map_clr_int(dev);
	if (err)
		goto err_out_free;

	priv->eq_table.clr_mask =
		swab32(1 << (priv->eq_table.inta_pin & 31));
	priv->eq_table.clr_int  = priv->clr_base +
		(priv->eq_table.inta_pin < 32 ? 4 : 0);

	err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0,
			     &priv->eq_table.eq[MLX4_EQ_COMP]);
	if (err)
		goto err_out_unmap;

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
			     &priv->eq_table.eq[MLX4_EQ_ASYNC]);
	if (err)
		goto err_out_comp;

	if (dev->flags & MLX4_FLAG_MSI_X) {
		static const char *eq_name[] = {
			[MLX4_EQ_COMP]  = DRV_NAME " (comp)",
			[MLX4_EQ_ASYNC] = DRV_NAME " (async)"
		};

		for (i = 0; i < MLX4_NUM_EQ; ++i) {
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt,
					  0, eq_name[i], priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
	} else {
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, DRV_NAME, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

	for (i = 0; i < MLX4_NUM_EQ; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);

err_out_comp:
	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]);

err_out_unmap:
	mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_free:
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
	return err;
}
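
/* Undo everything mlx4_init_eq_table() did. */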
void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < MLX4_NUM_EQ; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	mlx4_unmap_clr_int(dev);

	for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
		if (priv->eq_table.uar_map[i])
			iounmap(priv->eq_table.uar_map[i]);

	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
}