t4.h

/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __T4_H__
#define __T4_H__

#include "t4_hw.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_ri_api.h"

#define T4_QID_BASE 1024
#define T4_MAX_QIDS 256
#define T4_MAX_NUM_QP (1<<16)
#define T4_MAX_NUM_CQ (1<<15)
#define T4_MAX_NUM_PD (1<<15)
#define T4_MAX_PBL_SIZE 256
#define T4_MAX_RQ_SIZE 1024
#define T4_MAX_SQ_SIZE 1024
#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE-1)
#define T4_MAX_CQ_DEPTH 8192
#define T4_MAX_NUM_STAG (1<<15)
#define T4_MAX_MR_SIZE (~0ULL - 1)
#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)

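/*
 * Status page kept at the end of each queue: flit 0 (qid/cidx/pidx)
 * is hardware-owned, flit 1 (the qp_err and db_off flags consulted
 * by the helpers further below) is software-owned.
 */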
struct t4_status_page {
        __be32 rsvd1;   /* flit 0 - hw owns */
        __be16 rsvd2;
        __be16 qid;
        __be16 cidx;
        __be16 pidx;
        u8 qp_err;      /* flit 1 - sw owns */
        u8 db_off;
};

#define T4_EQ_SIZE 64

#define T4_SQ_NUM_SLOTS 4
#define T4_SQ_NUM_BYTES (T4_EQ_SIZE * T4_SQ_NUM_SLOTS)
#define T4_MAX_SEND_SGE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
                        sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_SEND_INLINE ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_send_wr) - \
                        sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_INLINE ((T4_SQ_NUM_BYTES - \
                        sizeof(struct fw_ri_rdma_write_wr) - \
                        sizeof(struct fw_ri_immd)))
#define T4_MAX_WRITE_SGE ((T4_SQ_NUM_BYTES - \
                        sizeof(struct fw_ri_rdma_write_wr) - \
                        sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
                        sizeof(struct fw_ri_immd)))
#define T4_MAX_FR_DEPTH 255

#define T4_RQ_NUM_SLOTS 2
#define T4_RQ_NUM_BYTES (T4_EQ_SIZE * T4_RQ_NUM_SLOTS)
#define T4_MAX_RECV_SGE ((T4_RQ_NUM_BYTES - sizeof(struct fw_ri_recv_wr) - \
                        sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))

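/*
 * Each SQ WR occupies T4_SQ_NUM_SLOTS consecutive 64-byte EQ entries
 * and each RQ WR occupies T4_RQ_NUM_SLOTS, so the byte budgets above
 * bound the SGEs and inline data that fit in a single WR.
 */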
union t4_wr {
        struct fw_ri_res_wr res;
        struct fw_ri_wr ri;
        struct fw_ri_rdma_write_wr write;
        struct fw_ri_send_wr send;
        struct fw_ri_rdma_read_wr read;
        struct fw_ri_bind_mw_wr bind;
        struct fw_ri_fr_nsmr_wr fr;
        struct fw_ri_inv_lstag_wr inv;
        struct t4_status_page status;
        __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_SQ_NUM_SLOTS];
};

union t4_recv_wr {
        struct fw_ri_recv_wr recv;
        struct t4_status_page status;
        __be64 flits[T4_EQ_SIZE / sizeof(__be64) * T4_RQ_NUM_SLOTS];
};

static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid,
                               enum fw_wr_opcodes opcode, u8 flags, u8 len16)
{
        int slots_used;

        wqe->send.opcode = (u8)opcode;
        wqe->send.flags = flags;
        wqe->send.wrid = wrid;
        wqe->send.r1[0] = 0;
        wqe->send.r1[1] = 0;
        wqe->send.r1[2] = 0;
        wqe->send.len16 = len16;

        /* Zero the first flit of each SQ slot this WR does not use. */
        slots_used = DIV_ROUND_UP(len16*16, T4_EQ_SIZE);
        while (slots_used < T4_SQ_NUM_SLOTS) {
                wqe->flits[slots_used * T4_EQ_SIZE / sizeof(__be64)] = 0;
                slots_used++;
        }
}

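/*
 * Illustrative sketch (not part of the original header): sizing and
 * initializing a send WR header.  len16 counts 16-byte units of the
 * whole WR.  The layout chosen here (send header plus an immediate-data
 * header) and the name example_init_send_hdr are assumptions for the
 * example only.
 */
static inline void example_init_send_hdr(union t4_wr *wqe, u16 wrid)
{
        size_t wr_len = sizeof(struct fw_ri_send_wr) +
                        sizeof(struct fw_ri_immd);
        u8 len16 = DIV_ROUND_UP(wr_len, 16);

        init_wr_hdr(wqe, wrid, FW_RI_SEND_WR, 0 /* flags */, len16);
}
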
/* CQE/AE status codes */
#define T4_ERR_SUCCESS                     0x0
#define T4_ERR_STAG                        0x1  /* STAG invalid: either the */
                                                /* STAG is off limit, is 0, */
                                                /* or the STAG_key mismatches */
#define T4_ERR_PDID                        0x2  /* PDID mismatch */
#define T4_ERR_QPID                        0x3  /* QPID mismatch */
#define T4_ERR_ACCESS                      0x4  /* Invalid access right */
#define T4_ERR_WRAP                        0x5  /* Wrap error */
#define T4_ERR_BOUND                       0x6  /* base and bounds violation */
#define T4_ERR_INVALIDATE_SHARED_MR        0x7  /* attempt to invalidate a */
                                                /* shared memory region */
#define T4_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8  /* attempt to invalidate a */
                                                /* MR with an MW bound to it */
#define T4_ERR_ECC                         0x9  /* ECC error detected */
#define T4_ERR_ECC_PSTAG                   0xA  /* ECC error detected when */
                                                /* reading PSTAG for a MW */
                                                /* Invalidate */
#define T4_ERR_PBL_ADDR_BOUND              0xB  /* pbl addr out of bounds: */
                                                /* software error */
#define T4_ERR_SWFLUSH                     0xC  /* SW FLUSHED */
#define T4_ERR_CRC                         0x10 /* CRC error */
#define T4_ERR_MARKER                      0x11 /* Marker error */
#define T4_ERR_PDU_LEN_ERR                 0x12 /* invalid PDU length */
#define T4_ERR_OUT_OF_RQE                  0x13 /* out of RQE */
#define T4_ERR_DDP_VERSION                 0x14 /* wrong DDP version */
#define T4_ERR_RDMA_VERSION                0x15 /* wrong RDMA version */
#define T4_ERR_OPCODE                      0x16 /* invalid RDMA opcode */
#define T4_ERR_DDP_QUEUE_NUM               0x17 /* invalid DDP queue number */
#define T4_ERR_MSN                         0x18 /* MSN error */
#define T4_ERR_TBIT                        0x19 /* tag bit not set correctly */
#define T4_ERR_MO                          0x1A /* MO not 0 for TERMINATE */
                                                /* or READ_REQ */
#define T4_ERR_MSN_GAP                     0x1B
#define T4_ERR_MSN_RANGE                   0x1C
#define T4_ERR_IRD_OVERFLOW                0x1D
#define T4_ERR_RQE_ADDR_BOUND              0x1E /* RQE addr out of bounds: */
                                                /* software error */
#define T4_ERR_INTERNAL_ERR                0x1F /* internal error (opcode */
                                                /* mismatch) */

/*
 * CQE defs
 */
struct t4_cqe {
        __be32 header;
        __be32 len;
        union {
                struct {
                        __be32 stag;
                        __be32 msn;
                } rcqe;
                struct {
                        u32 nada1;
                        u16 nada2;
                        u16 cidx;
                } scqe;
                struct {
                        __be32 wrid_hi;
                        __be32 wrid_low;
                } gen;
        } u;
        __be64 reserved;
        __be64 bits_type_ts;
};

/* macros for flit 0 of the cqe */
#define S_CQE_QPID    12
#define M_CQE_QPID    0xFFFFF
#define G_CQE_QPID(x) ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
#define V_CQE_QPID(x) ((x)<<S_CQE_QPID)

#define S_CQE_SWCQE    11
#define M_CQE_SWCQE    0x1
#define G_CQE_SWCQE(x) ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x) ((x)<<S_CQE_SWCQE)

#define S_CQE_STATUS    5
#define M_CQE_STATUS    0x1F
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)

#define S_CQE_TYPE    4
#define M_CQE_TYPE    0x1
#define G_CQE_TYPE(x) ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
#define V_CQE_TYPE(x) ((x)<<S_CQE_TYPE)

#define S_CQE_OPCODE    0
#define M_CQE_OPCODE    0xF
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)

#define SW_CQE(x)      (G_CQE_SWCQE(be32_to_cpu((x)->header)))
#define CQE_QPID(x)    (G_CQE_QPID(be32_to_cpu((x)->header)))
#define CQE_TYPE(x)    (G_CQE_TYPE(be32_to_cpu((x)->header)))
#define SQ_TYPE(x)     (CQE_TYPE((x)))
#define RQ_TYPE(x)     (!CQE_TYPE((x)))
#define CQE_STATUS(x)  (G_CQE_STATUS(be32_to_cpu((x)->header)))
#define CQE_OPCODE(x)  (G_CQE_OPCODE(be32_to_cpu((x)->header)))

#define CQE_SEND_OPCODE(x) ( \
        (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND) || \
        (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE) || \
        (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_INV) || \
        (G_CQE_OPCODE(be32_to_cpu((x)->header)) == FW_RI_SEND_WITH_SE_INV))

#define CQE_LEN(x)     (be32_to_cpu((x)->len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
#define CQE_WRID_MSN(x)  (be32_to_cpu((x)->u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)

/* generic accessor macros */
#define CQE_WRID_HI(x)  ((x)->u.gen.wrid_hi)
#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)

/* macros for flit 3 of the cqe */
#define S_CQE_GENBIT    63
#define M_CQE_GENBIT    0x1
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)

#define S_CQE_OVFBIT    62
#define M_CQE_OVFBIT    0x1
#define G_CQE_OVFBIT(x) ((((x) >> S_CQE_OVFBIT)) & M_CQE_OVFBIT)

#define S_CQE_IQTYPE    60
#define M_CQE_IQTYPE    0x3
#define G_CQE_IQTYPE(x) ((((x) >> S_CQE_IQTYPE)) & M_CQE_IQTYPE)

#define M_CQE_TS        0x0fffffffffffffffULL
#define G_CQE_TS(x)     ((x) & M_CQE_TS)

#define CQE_OVFBIT(x) ((unsigned)G_CQE_OVFBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_GENBIT(x) ((unsigned)G_CQE_GENBIT(be64_to_cpu((x)->bits_type_ts)))
#define CQE_TS(x)     (G_CQE_TS(be64_to_cpu((x)->bits_type_ts)))

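/*
 * Illustrative sketch (not part of the original header): decoding the
 * commonly used CQE fields with the accessors above.  The function
 * name is an assumption for the example only.
 */
static inline void example_decode_cqe(const struct t4_cqe *cqe)
{
        u32 qpid = CQE_QPID(cqe);       /* QP this completion belongs to */
        u32 status = CQE_STATUS(cqe);   /* T4_ERR_* code, 0 on success */

        if (RQ_TYPE(cqe))
                printk(KERN_DEBUG "RQ cqe qpid %u msn %u len %u status %u\n",
                       qpid, CQE_WRID_MSN(cqe), CQE_LEN(cqe), status);
        else
                printk(KERN_DEBUG "SQ cqe qpid %u idx %u opcode %u status %u\n",
                       qpid, CQE_WRID_SQ_IDX(cqe), CQE_OPCODE(cqe), status);
}
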
struct t4_swsqe {
        u64 wr_id;
        struct t4_cqe cqe;
        int read_len;
        int opcode;
        int complete;
        int signaled;
        u16 idx;
};

struct t4_sq {
        union t4_wr *queue;
        dma_addr_t dma_addr;
        DECLARE_PCI_UNMAP_ADDR(mapping);
        struct t4_swsqe *sw_sq;
        struct t4_swsqe *oldest_read;
        u64 udb;
        size_t memsize;
        u32 qid;
        u16 in_use;
        u16 size;
        u16 cidx;
        u16 pidx;
};

struct t4_swrqe {
        u64 wr_id;
};

struct t4_rq {
        union t4_recv_wr *queue;
        dma_addr_t dma_addr;
        DECLARE_PCI_UNMAP_ADDR(mapping);
        struct t4_swrqe *sw_rq;
        u64 udb;
        size_t memsize;
        u32 qid;
        u32 msn;
        u32 rqt_hwaddr;
        u16 rqt_size;
        u16 in_use;
        u16 size;
        u16 cidx;
        u16 pidx;
};

struct t4_wq {
        struct t4_sq sq;
        struct t4_rq rq;
        void __iomem *db;
        void __iomem *gts;
        struct c4iw_rdev *rdev;
};

static inline int t4_rqes_posted(struct t4_wq *wq)
{
        return wq->rq.in_use;
}

static inline int t4_rq_empty(struct t4_wq *wq)
{
        return wq->rq.in_use == 0;
}

static inline int t4_rq_full(struct t4_wq *wq)
{
        return wq->rq.in_use == (wq->rq.size - 1);
}

static inline u32 t4_rq_avail(struct t4_wq *wq)
{
        return wq->rq.size - 1 - wq->rq.in_use;
}

static inline void t4_rq_produce(struct t4_wq *wq)
{
        wq->rq.in_use++;
        if (++wq->rq.pidx == wq->rq.size)
                wq->rq.pidx = 0;
}

static inline void t4_rq_consume(struct t4_wq *wq)
{
        wq->rq.in_use--;
        wq->rq.msn++;
        if (++wq->rq.cidx == wq->rq.size)
                wq->rq.cidx = 0;
}

static inline int t4_sq_empty(struct t4_wq *wq)
{
        return wq->sq.in_use == 0;
}

static inline int t4_sq_full(struct t4_wq *wq)
{
        return wq->sq.in_use == (wq->sq.size - 1);
}

static inline u32 t4_sq_avail(struct t4_wq *wq)
{
        return wq->sq.size - 1 - wq->sq.in_use;
}

static inline void t4_sq_produce(struct t4_wq *wq)
{
        wq->sq.in_use++;
        if (++wq->sq.pidx == wq->sq.size)
                wq->sq.pidx = 0;
}

static inline void t4_sq_consume(struct t4_wq *wq)
{
        wq->sq.in_use--;
        if (++wq->sq.cidx == wq->sq.size)
                wq->sq.cidx = 0;
}

static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc)
{
        inc *= T4_SQ_NUM_SLOTS; /* PIDX counts 64-byte EQ entries, not WRs */
        wmb();                  /* order WR writes before the doorbell */
        writel(QID(wq->sq.qid) | PIDX(inc), wq->db);
}

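/*
 * Illustrative sketch (not part of the original header): the producer
 * side of posting one WR to the SQ with the helpers above.  A real
 * caller also records the WR in sq.sw_sq[]; building the WR itself is
 * elided, and the function name is an assumption for the example only.
 */
static inline int example_post_one_sq_wr(struct t4_wq *wq,
                                         const union t4_wr *wr)
{
        if (!t4_sq_avail(wq))
                return -ENOMEM;

        /* Each SQ entry is a full union t4_wr; copy the built WR in. */
        memcpy(&wq->sq.queue[wq->sq.pidx], wr, sizeof(*wr));
        t4_sq_produce(wq);

        /* Publish one new WR to the hardware via the kernel doorbell. */
        t4_ring_sq_db(wq, 1);
        return 0;
}
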
static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc)
{
        inc *= T4_RQ_NUM_SLOTS; /* PIDX counts 64-byte EQ entries, not WRs */
        wmb();                  /* order WR writes before the doorbell */
        writel(QID(wq->rq.qid) | PIDX(inc), wq->db);
}

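/*
 * The entry at index 'size', one past the last WR slot of each queue,
 * holds the struct t4_status_page that the helpers below read and
 * write for the qp_err and db_off flags.
 */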
static inline int t4_wq_in_error(struct t4_wq *wq)
{
        return wq->sq.queue[wq->sq.size].status.qp_err;
}

static inline void t4_set_wq_in_error(struct t4_wq *wq)
{
        wq->sq.queue[wq->sq.size].status.qp_err = 1;
        wq->rq.queue[wq->rq.size].status.qp_err = 1;
}

static inline void t4_disable_wq_db(struct t4_wq *wq)
{
        wq->sq.queue[wq->sq.size].status.db_off = 1;
        wq->rq.queue[wq->rq.size].status.db_off = 1;
}

static inline void t4_enable_wq_db(struct t4_wq *wq)
{
        wq->sq.queue[wq->sq.size].status.db_off = 0;
        wq->rq.queue[wq->rq.size].status.db_off = 0;
}

static inline int t4_wq_db_enabled(struct t4_wq *wq)
{
        return !wq->sq.queue[wq->sq.size].status.db_off;
}

struct t4_cq {
        struct t4_cqe *queue;
        dma_addr_t dma_addr;
        DECLARE_PCI_UNMAP_ADDR(mapping);
        struct t4_cqe *sw_queue;
        void __iomem *gts;
        struct c4iw_rdev *rdev;
        u64 ugts;
        size_t memsize;
        u64 timestamp;
        u32 cqid;
        u16 size; /* including status page */
        u16 cidx;
        u16 sw_pidx;
        u16 sw_cidx;
        u16 sw_in_use;
        u16 cidx_inc;
        u8 gen;
        u8 error;
};

static inline int t4_arm_cq(struct t4_cq *cq, int se)
{
        u32 val;
        u16 inc;

        do {
                /*
                 * inc must fit in the CIDXINC field and must be less
                 * than the size of the CQ.
                 */
                inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc :
                                CIDXINC_MASK;
                inc = inc <= (cq->size - 1) ? inc : (cq->size - 1);
                if (inc == cq->cidx_inc)
                        val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) |
                              INGRESSQID(cq->cqid);
                else
                        val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) |
                              INGRESSQID(cq->cqid);
                cq->cidx_inc -= inc;
                writel(val, cq->gts);
        } while (cq->cidx_inc);
        return 0;
}

static inline void t4_swcq_produce(struct t4_cq *cq)
{
        cq->sw_in_use++;
        if (++cq->sw_pidx == cq->size)
                cq->sw_pidx = 0;
}

static inline void t4_swcq_consume(struct t4_cq *cq)
{
        cq->sw_in_use--;
        if (++cq->sw_cidx == cq->size)
                cq->sw_cidx = 0;
}

static inline void t4_hwcq_consume(struct t4_cq *cq)
{
        cq->cidx_inc++;
        if (++cq->cidx == cq->size) {
                cq->cidx = 0;
                cq->gen ^= 1;
        }
}

static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{
        return (CQE_GENBIT(cqe) == cq->gen);
}

static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
        int ret = 0;
        u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts);

        if (G_CQE_GENBIT(bits_type_ts) == cq->gen) {
                *cqe = &cq->queue[cq->cidx];
                cq->timestamp = G_CQE_TS(bits_type_ts);
        } else if (G_CQE_TS(bits_type_ts) > cq->timestamp)
                ret = -EOVERFLOW;
        else
                ret = -ENODATA;
        if (ret == -EOVERFLOW) {
                printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
                cq->error = 1;
        }
        return ret;
}

static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
        if (cq->sw_in_use)
                return &cq->sw_queue[cq->sw_cidx];
        return NULL;
}

static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
        int ret = 0;

        if (cq->error)
                ret = -ENODATA;
        else if (cq->sw_in_use)
                *cqe = &cq->sw_queue[cq->sw_cidx];
        else
                ret = t4_next_hw_cqe(cq, cqe);
        return ret;
}

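/*
 * Illustrative sketch (not part of the original header): draining a CQ
 * with t4_next_cqe() and re-arming it afterwards.  Per-CQE handling is
 * elided and the function name is an assumption for the example only.
 */
static inline void example_drain_cq(struct t4_cq *cq)
{
        struct t4_cqe *cqe;

        while (t4_next_cqe(cq, &cqe) == 0) {
                /* ... translate *cqe for the consumer here ... */
                if (cq->sw_in_use)
                        t4_swcq_consume(cq);    /* CQE was a sw CQE */
                else
                        t4_hwcq_consume(cq);    /* CQE came from hardware */
        }
        /* Re-arm so the next completion raises an interrupt. */
        t4_arm_cq(cq, 0);
}
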
static inline int t4_cq_in_error(struct t4_cq *cq)
{
        return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
}

static inline void t4_set_cq_in_error(struct t4_cq *cq)
{
        ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
}

#endif /* __T4_H__ */