mthca_eq.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_eq.c 1382 2004-12-24 02:21:02Z roland $
 */
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_config_reg.h"

enum {
        MTHCA_NUM_ASYNC_EQE = 0x80,
        MTHCA_NUM_CMD_EQE   = 0x80,
        MTHCA_NUM_SPARE_EQE = 0x80,
        MTHCA_EQ_ENTRY_SIZE = 0x20
};
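/*
 * A note on sizing: each EQE is MTHCA_EQ_ENTRY_SIZE (32) bytes, so with
 * 4 KB pages one page holds 128 entries; mthca_create_eq() below rounds
 * the requested entry count up to a power of two and allocates whole
 * pages.
 */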
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_eq_context {
        __be32 flags;
        __be64 start;
        __be32 logsize_usrpage;
        __be32 tavor_pd;        /* reserved for Arbel */
        u8     reserved1[3];
        u8     intr;
        __be32 arbel_pd;        /* lost_count for Tavor */
        __be32 lkey;
        u32    reserved2[2];
        __be32 consumer_index;
        __be32 producer_index;
        u32    reserved3[4];
} __attribute__((packed));

#define MTHCA_EQ_STATUS_OK          ( 0 << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_EQ_STATE_FIRED        ( 2 <<  8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 <<  8)
#define MTHCA_EQ_STATE_ARBEL        ( 8 <<  8)

enum {
        MTHCA_EVENT_TYPE_COMP               = 0x00,
        MTHCA_EVENT_TYPE_PATH_MIG           = 0x01,
        MTHCA_EVENT_TYPE_COMM_EST           = 0x02,
        MTHCA_EVENT_TYPE_SQ_DRAINED         = 0x03,
        MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
        MTHCA_EVENT_TYPE_SRQ_LIMIT          = 0x14,
        MTHCA_EVENT_TYPE_CQ_ERROR           = 0x04,
        MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
        MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
        MTHCA_EVENT_TYPE_PATH_MIG_FAILED    = 0x07,
        MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
        MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR    = 0x11,
        MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR    = 0x12,
        MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR  = 0x08,
        MTHCA_EVENT_TYPE_PORT_CHANGE        = 0x09,
        MTHCA_EVENT_TYPE_EQ_OVERFLOW        = 0x0f,
        MTHCA_EVENT_TYPE_ECC_DETECT         = 0x0e,
        MTHCA_EVENT_TYPE_CMD                = 0x0a
};

#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
                                (1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
                                (1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
                                (1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                                (1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
                                (1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                                (1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                                (1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
                                (1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
                                (1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                                (1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                                (1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)

#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)
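/*
 * EQ doorbell layout: the command above is placed in bits 24-31 of the
 * first doorbell word and ORed with the EQ number (e.g.
 * MTHCA_EQ_DB_SET_CI | eq->eqn); the second word carries the argument,
 * such as the new consumer index or the CQ number to disarm.
 */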
struct mthca_eqe {
        u8 reserved1;
        u8 type;
        u8 reserved2;
        u8 subtype;
        union {
                u32 raw[6];
                struct {
                        __be32 cqn;
                } __attribute__((packed)) comp;
                struct {
                        u16    reserved1;
                        __be16 token;
                        u32    reserved2;
                        u8     reserved3[3];
                        u8     status;
                        __be64 out_param;
                } __attribute__((packed)) cmd;
                struct {
                        __be32 qpn;
                } __attribute__((packed)) qp;
                struct {
                        __be32 srqn;
                } __attribute__((packed)) srq;
                struct {
                        __be32 cqn;
                        u32    reserved1;
                        u8     reserved2[3];
                        u8     syndrome;
                } __attribute__((packed)) cq_err;
                struct {
                        u32    reserved1[2];
                        __be32 port;
                } __attribute__((packed)) port_change;
        } event;
        u8 reserved3[3];
        u8 owner;
} __attribute__((packed));

#define MTHCA_EQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW      (1 << 7)
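/*
 * Bit 7 of the last byte of an EQE tracks ownership: entries are handed
 * to the hardware with set_eqe_hw(), the HCA clears the bit when it
 * posts an event, and next_eqe_sw() returns an entry only once that has
 * happened.
 */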
static inline u64 async_mask(struct mthca_dev *dev)
{
        return dev->mthca_flags & MTHCA_FLAG_SRQ ?
                MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK :
                MTHCA_ASYNC_EVENT_MASK;
}

static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        /*
         * This barrier makes sure that all updates to ownership bits
         * done by set_eqe_hw() hit memory before the consumer index
         * is updated.  set_eq_ci() allows the HCA to possibly write
         * more EQ entries, and we want to avoid the exceedingly
         * unlikely possibility of the HCA writing an entry and then
         * having set_eqe_hw() overwrite the owner field.
         */
        wmb();
        mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),
                      dev->kar + MTHCA_EQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        /* See comment in tavor_set_eq_ci() above. */
        wmb();
        __raw_writel((__force u32) cpu_to_be32(ci),
                     dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
        if (mthca_is_memfree(dev))
                arbel_set_eq_ci(dev, eq, ci);
        else
                tavor_set_eq_ci(dev, eq, ci);
}

static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn)
{
        mthca_write64(MTHCA_EQ_DB_REQ_NOT | eqn, 0,
                      dev->kar + MTHCA_EQ_DOORBELL,
                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
}

static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask)
{
        writel(eqn_mask, dev->eq_regs.arbel.eq_arm);
}
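/*
 * The Arbel arm register takes a bitmask of EQ numbers, so one 32-bit
 * write (of dev->eq_table.arm_mask in the interrupt handler) can rearm
 * every EQ at once; Tavor rearms one EQ per doorbell.
 */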
static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
        if (!mthca_is_memfree(dev)) {
                mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn,
                              dev->kar + MTHCA_EQ_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }
}

static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
{
        unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
        return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}
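/*
 * Because the number of entries is a power of two, the & above wraps
 * the free-running consumer index onto the ring; the byte offset is
 * then split into a page index and an offset within that page.
 */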
static inline struct mthca_eqe *next_eqe_sw(struct mthca_eq *eq)
{
        struct mthca_eqe *eqe;
        eqe = get_eqe(eq, eq->cons_index);
        return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe;
}

static inline void set_eqe_hw(struct mthca_eqe *eqe)
{
        eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW;
}

static void port_change(struct mthca_dev *dev, int port, int active)
{
        struct ib_event record;

        mthca_dbg(dev, "Port change to %s for port %d\n",
                  active ? "active" : "down", port);

        record.device = &dev->ib_dev;
        record.event  = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
        record.element.port_num = port;

        ib_dispatch_event(&record);
}
static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
{
        struct mthca_eqe *eqe;
        int disarm_cqn;
        int eqes_found = 0;
        int set_ci = 0;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                rmb();

                switch (eqe->type) {
                case MTHCA_EVENT_TYPE_COMP:
                        disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                        disarm_cq(dev, eq->eqn, disarm_cqn);
                        mthca_cq_completion(dev, disarm_cqn);
                        break;

                case MTHCA_EVENT_TYPE_PATH_MIG:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_PATH_MIG);
                        break;

                case MTHCA_EVENT_TYPE_COMM_EST:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_COMM_EST);
                        break;

                case MTHCA_EVENT_TYPE_SQ_DRAINED:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_SQ_DRAINED);
                        break;

                case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_LAST_WQE_REACHED);
                        break;

                case MTHCA_EVENT_TYPE_SRQ_LIMIT:
                        mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
                                        IB_EVENT_SRQ_LIMIT_REACHED);
                        break;

                case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_FATAL);
                        break;

                case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_PATH_MIG_ERR);
                        break;

                case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_REQ_ERR);
                        break;

                case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
                        mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                       IB_EVENT_QP_ACCESS_ERR);
                        break;

                case MTHCA_EVENT_TYPE_CMD:
                        mthca_cmd_event(dev,
                                        be16_to_cpu(eqe->event.cmd.token),
                                        eqe->event.cmd.status,
                                        be64_to_cpu(eqe->event.cmd.out_param));
                        break;

                case MTHCA_EVENT_TYPE_PORT_CHANGE:
                        port_change(dev,
                                    (be32_to_cpu(eqe->event.port_change.port) >> 28) & 3,
                                    eqe->subtype == 0x4);
                        break;

                case MTHCA_EVENT_TYPE_CQ_ERROR:
                        mthca_warn(dev, "CQ %s on CQN %06x\n",
                                   eqe->event.cq_err.syndrome == 1 ?
                                   "overrun" : "access violation",
                                   be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
                        /* Mask the CQN here too, as in every other case above. */
                        mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff,
                                       IB_EVENT_CQ_ERR);
                        break;

                case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
                        mthca_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;

                case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
                case MTHCA_EVENT_TYPE_ECC_DETECT:
                default:
                        mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
                                   eqe->type, eqe->subtype, eq->eqn);
                        break;
                }

                set_eqe_hw(eqe);
                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /*
                 * The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MTHCA_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
                        /*
                         * Conditional on hca_type is OK here because
                         * this is a rare case, not the fast path.
                         */
                        set_eq_ci(dev, eq, eq->cons_index);
                        set_ci = 0;
                }
        }

        /*
         * Rely on caller to set consumer index so that we don't have
         * to test hca_type in our interrupt handling fast path.
         */
        return eqes_found;
}
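/*
 * Tavor reports pending EQs through the event cause register (ECR): the
 * handler below reads the ECR, acknowledges the set bits by writing
 * them to the ECR clear register, and then polls and rearms each EQ
 * whose bit was set.
 */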
static irqreturn_t mthca_tavor_interrupt(int irq, void *dev_ptr)
{
        struct mthca_dev *dev = dev_ptr;
        u32 ecr;
        int i;

        if (dev->eq_table.clr_mask)
                writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

        ecr = readl(dev->eq_regs.tavor.ecr_base + 4);
        if (!ecr)
                return IRQ_NONE;

        writel(ecr, dev->eq_regs.tavor.ecr_base +
               MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (ecr & dev->eq_table.eq[i].eqn_mask) {
                        if (mthca_eq_int(dev, &dev->eq_table.eq[i]))
                                tavor_set_eq_ci(dev, &dev->eq_table.eq[i],
                                                dev->eq_table.eq[i].cons_index);
                        tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
                }

        return IRQ_HANDLED;
}

static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr)
{
        struct mthca_eq  *eq  = eq_ptr;
        struct mthca_dev *dev = eq->dev;

        mthca_eq_int(dev, eq);
        tavor_set_eq_ci(dev, eq, eq->cons_index);
        tavor_eq_req_not(dev, eq->eqn);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

static irqreturn_t mthca_arbel_interrupt(int irq, void *dev_ptr)
{
        struct mthca_dev *dev = dev_ptr;
        int work = 0;
        int i;

        if (dev->eq_table.clr_mask)
                writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (mthca_eq_int(dev, &dev->eq_table.eq[i])) {
                        work = 1;
                        arbel_set_eq_ci(dev, &dev->eq_table.eq[i],
                                        dev->eq_table.eq[i].cons_index);
                }

        arbel_eq_req_not(dev, dev->eq_table.arm_mask);

        return IRQ_RETVAL(work);
}

static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr)
{
        struct mthca_eq  *eq  = eq_ptr;
        struct mthca_dev *dev = eq->dev;

        mthca_eq_int(dev, eq);
        arbel_set_eq_ci(dev, eq, eq->cons_index);
        arbel_eq_req_not(dev, eq->eqn_mask);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}
static int mthca_create_eq(struct mthca_dev *dev,
                           int nent,
                           u8 intr,
                           struct mthca_eq *eq)
{
        int npages;
        u64 *dma_list = NULL;
        dma_addr_t t;
        struct mthca_mailbox *mailbox;
        struct mthca_eq_context *eq_context;
        int err = -ENOMEM;
        int i;
        u8 status;

        eq->dev  = dev;
        eq->nent = roundup_pow_of_two(max(nent, 2));
        npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;

        eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                GFP_KERNEL);
        if (!eq->page_list)
                goto err_out;

        for (i = 0; i < npages; ++i)
                eq->page_list[i].buf = NULL;

        dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
        if (!dma_list)
                goto err_out_free;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                goto err_out_free;
        eq_context = mailbox->buf;

        for (i = 0; i < npages; ++i) {
                eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
                                                          PAGE_SIZE, &t, GFP_KERNEL);
                if (!eq->page_list[i].buf)
                        goto err_out_free_pages;

                dma_list[i] = t;
                pci_unmap_addr_set(&eq->page_list[i], mapping, t);

                clear_page(eq->page_list[i].buf);
        }

        for (i = 0; i < eq->nent; ++i)
                set_eqe_hw(get_eqe(eq, i));

        eq->eqn = mthca_alloc(&dev->eq_table.alloc);
        if (eq->eqn == -1)
                goto err_out_free_pages;

        err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
                                  dma_list, PAGE_SHIFT, npages,
                                  0, npages * PAGE_SIZE,
                                  MTHCA_MPT_FLAG_LOCAL_WRITE |
                                  MTHCA_MPT_FLAG_LOCAL_READ,
                                  &eq->mr);
        if (err)
                goto err_out_free_eq;

        memset(eq_context, 0, sizeof *eq_context);
        eq_context->flags           = cpu_to_be32(MTHCA_EQ_STATUS_OK   |
                                                  MTHCA_EQ_OWNER_HW    |
                                                  MTHCA_EQ_STATE_ARMED |
                                                  MTHCA_EQ_FLAG_TR);
        if (mthca_is_memfree(dev))
                eq_context->flags  |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);

        eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
        if (mthca_is_memfree(dev)) {
                eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
        } else {
                eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
                eq_context->tavor_pd         = cpu_to_be32(dev->driver_pd.pd_num);
        }
        eq_context->intr            = intr;
        eq_context->lkey            = cpu_to_be32(eq->mr.ibmr.lkey);

        err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
        if (err) {
                mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
                goto err_out_free_mr;
        }
        if (status) {
                mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_free_mr;
        }

        kfree(dma_list);
        mthca_free_mailbox(dev, mailbox);

        eq->eqn_mask   = swab32(1 << eq->eqn);
        eq->cons_index = 0;

        dev->eq_table.arm_mask |= eq->eqn_mask;

        mthca_dbg(dev, "Allocated EQ %d with %d entries\n",
                  eq->eqn, eq->nent);

        return err;

err_out_free_mr:
        mthca_free_mr(dev, &eq->mr);

err_out_free_eq:
        mthca_free(&dev->eq_table.alloc, eq->eqn);

err_out_free_pages:
        for (i = 0; i < npages; ++i)
                if (eq->page_list[i].buf)
                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                          eq->page_list[i].buf,
                                          pci_unmap_addr(&eq->page_list[i],
                                                         mapping));

        mthca_free_mailbox(dev, mailbox);

err_out_free:
        kfree(eq->page_list);
        kfree(dma_list);

err_out:
        return err;
}
static void mthca_free_eq(struct mthca_dev *dev,
                          struct mthca_eq *eq)
{
        struct mthca_mailbox *mailbox;
        int err;
        u8 status;
        int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
                PAGE_SIZE;
        int i;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return;

        err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
        if (err)
                mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
        if (status)
                mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);

        dev->eq_table.arm_mask &= ~eq->eqn_mask;

        if (0) {
                mthca_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
                for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        mthca_free_mr(dev, &eq->mr);
        for (i = 0; i < npages; ++i)
                pci_free_consistent(dev->pdev, PAGE_SIZE,
                                    eq->page_list[i].buf,
                                    pci_unmap_addr(&eq->page_list[i], mapping));

        kfree(eq->page_list);
        mthca_free_mailbox(dev, mailbox);
}
static void mthca_free_irqs(struct mthca_dev *dev)
{
        int i;

        if (dev->eq_table.have_irq)
                free_irq(dev->pdev->irq, dev);
        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (dev->eq_table.eq[i].have_irq)
                        free_irq(dev->eq_table.eq[i].msi_x_vector,
                                 dev->eq_table.eq + i);
}

static int mthca_map_reg(struct mthca_dev *dev,
                         unsigned long offset, unsigned long size,
                         void __iomem **map)
{
        unsigned long base = pci_resource_start(dev->pdev, 0);

        if (!request_mem_region(base + offset, size, DRV_NAME))
                return -EBUSY;

        *map = ioremap(base + offset, size);
        if (!*map) {
                release_mem_region(base + offset, size);
                return -ENOMEM;
        }

        return 0;
}

static void mthca_unmap_reg(struct mthca_dev *dev, unsigned long offset,
                            unsigned long size, void __iomem *map)
{
        unsigned long base = pci_resource_start(dev->pdev, 0);

        release_mem_region(base + offset, size);
        iounmap(map);
}
static int mthca_map_eq_regs(struct mthca_dev *dev)
{
        if (mthca_is_memfree(dev)) {
                /*
                 * We assume that the EQ arm and EQ set CI registers
                 * fall within the first BAR.  We can't trust the
                 * values firmware gives us, since those addresses are
                 * valid on the HCA's side of the PCI bus but not
                 * necessarily the host side.
                 */
                if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                  dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                  &dev->clr_base)) {
                        mthca_err(dev, "Couldn't map interrupt clear register, "
                                  "aborting.\n");
                        return -ENOMEM;
                }

                /*
                 * Add 4 because we limit ourselves to EQs 0 ... 31,
                 * so we only need the low word of the register.
                 */
                if (mthca_map_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
                                        dev->fw.arbel.eq_arm_base) + 4, 4,
                                  &dev->eq_regs.arbel.eq_arm)) {
                        mthca_err(dev, "Couldn't map EQ arm register, aborting.\n");
                        mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                        dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                        dev->clr_base);
                        return -ENOMEM;
                }

                if (mthca_map_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                  dev->fw.arbel.eq_set_ci_base,
                                  MTHCA_EQ_SET_CI_SIZE,
                                  &dev->eq_regs.arbel.eq_set_ci_base)) {
                        mthca_err(dev, "Couldn't map EQ CI register, aborting.\n");
                        mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
                                              dev->fw.arbel.eq_arm_base) + 4, 4,
                                        dev->eq_regs.arbel.eq_arm);
                        mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                        dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                        dev->clr_base);
                        return -ENOMEM;
                }
        } else {
                if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
                                  &dev->clr_base)) {
                        mthca_err(dev, "Couldn't map interrupt clear register, "
                                  "aborting.\n");
                        return -ENOMEM;
                }

                if (mthca_map_reg(dev, MTHCA_ECR_BASE,
                                  MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
                                  &dev->eq_regs.tavor.ecr_base)) {
                        mthca_err(dev, "Couldn't map ecr register, "
                                  "aborting.\n");
                        mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
                                        dev->clr_base);
                        return -ENOMEM;
                }
        }

        return 0;
}

static void mthca_unmap_eq_regs(struct mthca_dev *dev)
{
        if (mthca_is_memfree(dev)) {
                mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                dev->fw.arbel.eq_set_ci_base,
                                MTHCA_EQ_SET_CI_SIZE,
                                dev->eq_regs.arbel.eq_set_ci_base);
                mthca_unmap_reg(dev, ((pci_resource_len(dev->pdev, 0) - 1) &
                                      dev->fw.arbel.eq_arm_base) + 4, 4,
                                dev->eq_regs.arbel.eq_arm);
                mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
                                dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
                                dev->clr_base);
        } else {
                mthca_unmap_reg(dev, MTHCA_ECR_BASE,
                                MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE,
                                dev->eq_regs.tavor.ecr_base);
                mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE,
                                dev->clr_base);
        }
}
int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
        int ret;
        u8 status;

        /*
         * We assume that mapping one page is enough for the whole EQ
         * context table.  This is fine with all current HCAs, because
         * we only use 32 EQs and each EQ uses 32 bytes of context
         * memory, or 1 KB total.
         */
        dev->eq_table.icm_virt = icm_virt;
        dev->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
        if (!dev->eq_table.icm_page)
                return -ENOMEM;
        dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
                __free_page(dev->eq_table.icm_page);
                return -ENOMEM;
        }

        ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
        if (!ret && status)
                ret = -EINVAL;
        if (ret) {
                pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
                               PCI_DMA_BIDIRECTIONAL);
                __free_page(dev->eq_table.icm_page);
        }

        return ret;
}

void mthca_unmap_eq_icm(struct mthca_dev *dev)
{
        u8 status;

        mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
        pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
                       PCI_DMA_BIDIRECTIONAL);
        __free_page(dev->eq_table.icm_page);
}
int mthca_init_eq_table(struct mthca_dev *dev)
{
        int err;
        u8 status;
        u8 intr;
        int i;

        err = mthca_alloc_init(&dev->eq_table.alloc,
                               dev->limits.num_eqs,
                               dev->limits.num_eqs - 1,
                               dev->limits.reserved_eqs);
        if (err)
                return err;

        err = mthca_map_eq_regs(dev);
        if (err)
                goto err_out_free;

        if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
                dev->eq_table.clr_mask = 0;
        } else {
                dev->eq_table.clr_mask =
                        swab32(1 << (dev->eq_table.inta_pin & 31));
                dev->eq_table.clr_int  = dev->clr_base +
                        (dev->eq_table.inta_pin < 32 ? 4 : 0);
        }

        dev->eq_table.arm_mask = 0;

        intr = dev->eq_table.inta_pin;

        err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_COMP]);
        if (err)
                goto err_out_unmap;

        err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
        if (err)
                goto err_out_comp;

        err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_CMD]);
        if (err)
                goto err_out_async;

        if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
                static const char *eq_name[] = {
                        [MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
                        [MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
                        [MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
                };

                for (i = 0; i < MTHCA_NUM_EQ; ++i) {
                        err = request_irq(dev->eq_table.eq[i].msi_x_vector,
                                          mthca_is_memfree(dev) ?
                                          mthca_arbel_msi_x_interrupt :
                                          mthca_tavor_msi_x_interrupt,
                                          0, eq_name[i], dev->eq_table.eq + i);
                        if (err)
                                goto err_out_cmd;
                        dev->eq_table.eq[i].have_irq = 1;
                }
        } else {
                err = request_irq(dev->pdev->irq,
                                  mthca_is_memfree(dev) ?
                                  mthca_arbel_interrupt :
                                  mthca_tavor_interrupt,
                                  IRQF_SHARED, DRV_NAME, dev);
                if (err)
                        goto err_out_cmd;
                dev->eq_table.have_irq = 1;
        }

        err = mthca_MAP_EQ(dev, async_mask(dev),
                           0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
        if (err)
                mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                           dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
        if (status)
                mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
                           dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);

        err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
                           0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
        if (err)
                mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
                           dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
        if (status)
                mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
                           dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                if (mthca_is_memfree(dev))
                        arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
                else
                        tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);

        return 0;

err_out_cmd:
        mthca_free_irqs(dev);
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]);

err_out_async:
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]);

err_out_comp:
        mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]);

err_out_unmap:
        mthca_unmap_eq_regs(dev);

err_out_free:
        mthca_alloc_cleanup(&dev->eq_table.alloc);
        return err;
}
void mthca_cleanup_eq_table(struct mthca_dev *dev)
{
        u8 status;
        int i;

        mthca_free_irqs(dev);

        mthca_MAP_EQ(dev, async_mask(dev),
                     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
        mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
                     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);

        for (i = 0; i < MTHCA_NUM_EQ; ++i)
                mthca_free_eq(dev, &dev->eq_table.eq[i]);

        mthca_unmap_eq_regs(dev);

        mthca_alloc_cleanup(&dev->eq_table.alloc);
}