cxio_wr.h

/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __CXIO_WR_H__
#define __CXIO_WR_H__

#include <asm/io.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include "firmware_exports.h"

#define T3_MAX_SGE      4
#define T3_MAX_INLINE   64

/*
 * rptr/wptr are free-running counters; the macros below derive ring
 * index, occupancy and generation bit from them for power-of-2 rings.
 */
#define Q_EMPTY(rptr,wptr)              ((rptr)==(wptr))
#define Q_FULL(rptr,wptr,size_log2)     ((((wptr)-(rptr))>>(size_log2)) && \
                                         ((rptr)!=(wptr)))
#define Q_GENBIT(ptr,size_log2)         (!(((ptr)>>(size_log2))&0x1))
#define Q_FREECNT(rptr,wptr,size_log2)  ((1UL<<(size_log2))-((wptr)-(rptr)))
#define Q_COUNT(rptr,wptr)              ((wptr)-(rptr))
#define Q_PTR2IDX(ptr,size_log2)        ((ptr) & ((1UL<<(size_log2))-1))
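
/*
 * Worked example (an illustrative sketch added to this copy, not part
 * of the original header): with size_log2 = 3 (an 8-entry ring),
 * rptr = 4 and wptr = 10, the macros above give Q_COUNT() == 6,
 * Q_FREECNT() == 2, Q_PTR2IDX(wptr) == 2, and Q_GENBIT(wptr) == 0,
 * since wptr has wrapped once and the generation bit flips on every
 * pass over the ring.
 */
static inline void cxio_q_macros_sketch(void)   /* hypothetical helper */
{
        u32 rptr = 4, wptr = 10;        /* assumed sample counters */

        WARN_ON(Q_COUNT(rptr, wptr) != 6);
        WARN_ON(Q_FREECNT(rptr, wptr, 3) != 2);
        WARN_ON(Q_PTR2IDX(wptr, 3) != 2);       /* 10 mod 8 */
        WARN_ON(Q_FULL(rptr, wptr, 3));         /* 6 of 8 in use */
}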
static inline void ring_doorbell(void __iomem *doorbell, u32 qpid)
{
        writel(((1U<<31) | qpid), doorbell);
}

#define SEQ32_GE(x,y) (!( (((u32) (x)) - ((u32) (y))) & 0x80000000 ))
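
/*
 * Note added for clarity: SEQ32_GE() is a serial-number comparison,
 * i.e. x >= y modulo 2^32 as long as the two values are within 2^31 of
 * each other.  For example, SEQ32_GE(0x00000002, 0xfffffffe) is true
 * because the unsigned difference 4 has its top bit clear.
 */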
enum t3_wr_flags {
        T3_COMPLETION_FLAG = 0x01,
        T3_NOTIFY_FLAG = 0x02,
        T3_SOLICITED_EVENT_FLAG = 0x04,
        T3_READ_FENCE_FLAG = 0x08,
        T3_LOCAL_FENCE_FLAG = 0x10
} __attribute__ ((packed));

enum t3_wr_opcode {
        T3_WR_BP = FW_WROPCODE_RI_BYPASS,
        T3_WR_SEND = FW_WROPCODE_RI_SEND,
        T3_WR_WRITE = FW_WROPCODE_RI_RDMA_WRITE,
        T3_WR_READ = FW_WROPCODE_RI_RDMA_READ,
        T3_WR_INV_STAG = FW_WROPCODE_RI_LOCAL_INV,
        T3_WR_BIND = FW_WROPCODE_RI_BIND_MW,
        T3_WR_RCV = FW_WROPCODE_RI_RECEIVE,
        T3_WR_INIT = FW_WROPCODE_RI_RDMA_INIT,
        T3_WR_QP_MOD = FW_WROPCODE_RI_MODIFY_QP
} __attribute__ ((packed));

enum t3_rdma_opcode {
        T3_RDMA_WRITE,          /* IETF RDMAP v1.0 ... */
        T3_READ_REQ,
        T3_READ_RESP,
        T3_SEND,
        T3_SEND_WITH_INV,
        T3_SEND_WITH_SE,
        T3_SEND_WITH_SE_INV,
        T3_TERMINATE,
        T3_RDMA_INIT,           /* CHELSIO RI specific ... */
        T3_BIND_MW,
        T3_FAST_REGISTER,
        T3_LOCAL_INV,
        T3_QP_MOD,
        T3_BYPASS
} __attribute__ ((packed));

static inline enum t3_rdma_opcode wr2opcode(enum t3_wr_opcode wrop)
{
        switch (wrop) {
        case T3_WR_BP: return T3_BYPASS;
        case T3_WR_SEND: return T3_SEND;
        case T3_WR_WRITE: return T3_RDMA_WRITE;
        case T3_WR_READ: return T3_READ_REQ;
        case T3_WR_INV_STAG: return T3_LOCAL_INV;
        case T3_WR_BIND: return T3_BIND_MW;
        case T3_WR_INIT: return T3_RDMA_INIT;
        case T3_WR_QP_MOD: return T3_QP_MOD;
        default: break;
        }
        return -1;
}
/* Work request id */
union t3_wrid {
        struct {
                u32 hi;
                u32 low;
        } id0;
        u64 id1;
};

#define WRID(wrid)      (wrid.id1)
/* Note: the three accessors below name fields (wr_gen/wr_idx/wr_lo)
 * that the union above does not define; they appear to be stale. */
#define WRID_GEN(wrid)  (wrid.id0.wr_gen)
#define WRID_IDX(wrid)  (wrid.id0.wr_idx)
#define WRID_LO(wrid)   (wrid.id0.wr_lo)
struct fw_riwrh {
        __be32 op_seop_flags;
        __be32 gen_tid_len;
};

#define S_FW_RIWR_OP            24
#define M_FW_RIWR_OP            0xff
#define V_FW_RIWR_OP(x)         ((x) << S_FW_RIWR_OP)
#define G_FW_RIWR_OP(x)         ((((x) >> S_FW_RIWR_OP)) & M_FW_RIWR_OP)

#define S_FW_RIWR_SOPEOP        22
#define M_FW_RIWR_SOPEOP        0x3
#define V_FW_RIWR_SOPEOP(x)     ((x) << S_FW_RIWR_SOPEOP)

#define S_FW_RIWR_FLAGS         8
#define M_FW_RIWR_FLAGS         0x3fffff
#define V_FW_RIWR_FLAGS(x)      ((x) << S_FW_RIWR_FLAGS)
#define G_FW_RIWR_FLAGS(x)      ((((x) >> S_FW_RIWR_FLAGS)) & M_FW_RIWR_FLAGS)

#define S_FW_RIWR_TID           8
#define V_FW_RIWR_TID(x)        ((x) << S_FW_RIWR_TID)

#define S_FW_RIWR_LEN           0
#define V_FW_RIWR_LEN(x)        ((x) << S_FW_RIWR_LEN)

#define S_FW_RIWR_GEN           31
#define V_FW_RIWR_GEN(x)        ((x) << S_FW_RIWR_GEN)
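
/*
 * Reading aid (added to this copy, not in the original header):
 * throughout this file S_* is a field's bit offset, M_* its mask after
 * shifting, V_*(x) positions a value into the field, G_*(x) extracts
 * it, and F_* is the single-bit shorthand V_*(1U).  So, for instance,
 * G_FW_RIWR_OP(V_FW_RIWR_OP(T3_WR_SEND)) == T3_WR_SEND.
 */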
struct t3_sge {
        __be32 stag;
        __be32 len;
        __be64 to;
};

/* If num_sgle is zero, flit 5+ contains immediate data.*/
struct t3_send_wr {
        struct fw_riwrh wrh;    /* 0 */
        union t3_wrid wrid;     /* 1 */
        u8 rdmaop;              /* 2 */
        u8 reserved[3];
        __be32 rem_stag;
        __be32 plen;            /* 3 */
        __be32 num_sgle;
        struct t3_sge sgl[T3_MAX_SGE];  /* 4+ */
};
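
/*
 * Usage sketch (hypothetical helper, not part of the driver): the
 * numbered comments above give each field's 8-byte flit position in
 * the work request image.  Filling a one-SGE send might look like:
 */
static inline void t3_send_wr_sketch(struct t3_send_wr *wqe,
                                     u32 lstag, u32 len, u64 to)
{
        wqe->rdmaop = T3_SEND;
        wqe->plen = cpu_to_be32(len);
        wqe->num_sgle = cpu_to_be32(1);
        wqe->sgl[0].stag = cpu_to_be32(lstag);
        wqe->sgl[0].len = cpu_to_be32(len);
        wqe->sgl[0].to = cpu_to_be64(to);
        /* wrh and wrid are stamped separately; see build_fw_riwrh() below */
}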
struct t3_local_inv_wr {
        struct fw_riwrh wrh;    /* 0 */
        union t3_wrid wrid;     /* 1 */
        __be32 stag;            /* 2 */
        __be32 reserved3;
};

struct t3_rdma_write_wr {
        struct fw_riwrh wrh;    /* 0 */
        union t3_wrid wrid;     /* 1 */
        u8 rdmaop;              /* 2 */
        u8 reserved[3];
        __be32 stag_sink;
        __be64 to_sink;         /* 3 */
        __be32 plen;            /* 4 */
        __be32 num_sgle;
        struct t3_sge sgl[T3_MAX_SGE];  /* 5+ */
};

struct t3_rdma_read_wr {
        struct fw_riwrh wrh;    /* 0 */
        union t3_wrid wrid;     /* 1 */
        u8 rdmaop;              /* 2 */
        u8 reserved[3];
        __be32 rem_stag;
        __be64 rem_to;          /* 3 */
        __be32 local_stag;      /* 4 */
        __be32 local_len;
        __be64 local_to;        /* 5 */
};

enum t3_addr_type {
        T3_VA_BASED_TO = 0x0,
        T3_ZERO_BASED_TO = 0x1
} __attribute__ ((packed));

enum t3_mem_perms {
        T3_MEM_ACCESS_LOCAL_READ = 0x1,
        T3_MEM_ACCESS_LOCAL_WRITE = 0x2,
        T3_MEM_ACCESS_REM_READ = 0x4,
        T3_MEM_ACCESS_REM_WRITE = 0x8
} __attribute__ ((packed));

struct t3_bind_mw_wr {
        struct fw_riwrh wrh;    /* 0 */
        union t3_wrid wrid;     /* 1 */
        u16 reserved;           /* 2 */
        u8 type;
        u8 perms;
        __be32 mr_stag;
        __be32 mw_stag;         /* 3 */
        __be32 mw_len;
        __be64 mw_va;           /* 4 */
        __be32 mr_pbl_addr;     /* 5 */
        u8 reserved2[3];
        u8 mr_pagesz;
};

struct t3_receive_wr {
        struct fw_riwrh wrh;    /* 0 */
        union t3_wrid wrid;     /* 1 */
        u8 pagesz[T3_MAX_SGE];
        __be32 num_sgle;        /* 2 */
        struct t3_sge sgl[T3_MAX_SGE];  /* 3+ */
        __be32 pbl_addr[T3_MAX_SGE];
};

struct t3_bypass_wr {
        struct fw_riwrh wrh;
        union t3_wrid wrid;     /* 1 */
};

struct t3_modify_qp_wr {
        struct fw_riwrh wrh;    /* 0 */
        union t3_wrid wrid;     /* 1 */
        __be32 flags;           /* 2 */
        __be32 quiesce;         /* 2 */
        __be32 max_ird;         /* 3 */
        __be32 max_ord;         /* 3 */
        __be64 sge_cmd;         /* 4 */
        __be64 ctx1;            /* 5 */
        __be64 ctx0;            /* 6 */
};

enum t3_modify_qp_flags {
        MODQP_QUIESCE = 0x01,
        MODQP_MAX_IRD = 0x02,
        MODQP_MAX_ORD = 0x04,
        MODQP_WRITE_EC = 0x08,
        MODQP_READ_EC = 0x10,
};

enum t3_mpa_attrs {
        uP_RI_MPA_RX_MARKER_ENABLE = 0x1,
        uP_RI_MPA_TX_MARKER_ENABLE = 0x2,
        uP_RI_MPA_CRC_ENABLE = 0x4,
        uP_RI_MPA_IETF_ENABLE = 0x8
} __attribute__ ((packed));

enum t3_qp_caps {
        uP_RI_QP_RDMA_READ_ENABLE = 0x01,
        uP_RI_QP_RDMA_WRITE_ENABLE = 0x02,
        uP_RI_QP_BIND_ENABLE = 0x04,
        uP_RI_QP_FAST_REGISTER_ENABLE = 0x08,
        uP_RI_QP_STAG0_ENABLE = 0x10
} __attribute__ ((packed));

struct t3_rdma_init_attr {
        u32 tid;
        u32 qpid;
        u32 pdid;
        u32 scqid;
        u32 rcqid;
        u32 rq_addr;
        u32 rq_size;
        enum t3_mpa_attrs mpaattrs;
        enum t3_qp_caps qpcaps;
        u16 tcp_emss;
        u32 ord;
        u32 ird;
        u64 qp_dma_addr;
        u32 qp_dma_size;
        u32 flags;
};

struct t3_rdma_init_wr {
        struct fw_riwrh wrh;    /* 0 */
        union t3_wrid wrid;     /* 1 */
        __be32 qpid;            /* 2 */
        __be32 pdid;
        __be32 scqid;           /* 3 */
        __be32 rcqid;
        __be32 rq_addr;         /* 4 */
        __be32 rq_size;
        u8 mpaattrs;            /* 5 */
        u8 qpcaps;
        __be16 ulpdu_size;
        __be32 flags;           /* bits 31-1 - reserved */
                                /* bit 0 - set if RECV posted */
        __be32 ord;             /* 6 */
        __be32 ird;
        __be64 qp_dma_addr;     /* 7 */
        __be32 qp_dma_size;     /* 8 */
        u32 rsvd;
};

struct t3_genbit {
        u64 flit[15];
        __be64 genbit;
};

enum rdma_init_wr_flags {
        RECVS_POSTED = 1,
};
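
/*
 * Marshalling sketch (hypothetical helper): host-order fields from
 * t3_rdma_init_attr map onto the big-endian wire image above roughly
 * like this; the real driver also fills wrh/wrid and posts the WR.
 */
static inline void rdma_init_wr_sketch(struct t3_rdma_init_wr *wqe,
                                       const struct t3_rdma_init_attr *attr)
{
        wqe->qpid = cpu_to_be32(attr->qpid);
        wqe->pdid = cpu_to_be32(attr->pdid);
        wqe->scqid = cpu_to_be32(attr->scqid);
        wqe->rcqid = cpu_to_be32(attr->rcqid);
        wqe->rq_addr = cpu_to_be32(attr->rq_addr);
        wqe->rq_size = cpu_to_be32(attr->rq_size);
        wqe->mpaattrs = attr->mpaattrs;
        wqe->qpcaps = attr->qpcaps;
        wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
        wqe->flags = cpu_to_be32(attr->flags);  /* e.g. RECVS_POSTED */
        wqe->ord = cpu_to_be32(attr->ord);
        wqe->ird = cpu_to_be32(attr->ird);
        wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
        wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
}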
union t3_wr {
        struct t3_send_wr send;
        struct t3_rdma_write_wr write;
        struct t3_rdma_read_wr read;
        struct t3_receive_wr recv;
        struct t3_local_inv_wr local_inv;
        struct t3_bind_mw_wr bind;
        struct t3_bypass_wr bypass;
        struct t3_rdma_init_wr init;
        struct t3_modify_qp_wr qp_mod;
        struct t3_genbit genbit;
        u64 flit[16];
};

#define T3_SQ_CQE_FLIT          13
#define T3_SQ_COOKIE_FLIT       14
#define T3_RQ_COOKIE_FLIT       13
#define T3_RQ_CQE_FLIT          14
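
/*
 * Note (added for clarity): every WR occupies 16 flits, and the
 * t3_genbit view aliases the last one.  build_fw_riwrh() below writes
 * the generation bit both into the header word and into that trailing
 * flit, so hardware can tell a fully written WR from a stale slot.
 * The *_CQE_FLIT and *_COOKIE_FLIT indices appear to name where
 * completion state is stashed inside an SQ or RQ WR slot.
 */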
static inline enum t3_wr_opcode fw_riwrh_opcode(struct fw_riwrh *wqe)
{
        return G_FW_RIWR_OP(be32_to_cpu(wqe->op_seop_flags));
}

static inline void build_fw_riwrh(struct fw_riwrh *wqe, enum t3_wr_opcode op,
                                  enum t3_wr_flags flags, u8 genbit, u32 tid,
                                  u8 len)
{
        wqe->op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(op) |
                                         V_FW_RIWR_SOPEOP(M_FW_RIWR_SOPEOP) |
                                         V_FW_RIWR_FLAGS(flags));
        wmb();          /* order the header write before the gen bits */
        wqe->gen_tid_len = cpu_to_be32(V_FW_RIWR_GEN(genbit) |
                                       V_FW_RIWR_TID(tid) |
                                       V_FW_RIWR_LEN(len));
        /* 2nd gen bit... */
        ((union t3_wr *)wqe)->genbit.genbit = cpu_to_be64(genbit);
}
/*
 * T3 ULP2_TX commands
 */
enum t3_utx_mem_op {
        T3_UTX_MEM_READ = 2,
        T3_UTX_MEM_WRITE = 3
};

/* T3 MC7 RDMA TPT entry format */
enum tpt_mem_type {
        TPT_NON_SHARED_MR = 0x0,
        TPT_SHARED_MR = 0x1,
        TPT_MW = 0x2,
        TPT_MW_RELAXED_PROTECTION = 0x3
};

enum tpt_addr_type {
        TPT_ZBTO = 0,
        TPT_VATO = 1
};

enum tpt_mem_perm {
        TPT_LOCAL_READ = 0x8,
        TPT_LOCAL_WRITE = 0x4,
        TPT_REMOTE_READ = 0x2,
        TPT_REMOTE_WRITE = 0x1
};

struct tpt_entry {
        __be32 valid_stag_pdid;
        __be32 flags_pagesize_qpid;
        __be32 rsvd_pbl_addr;
        __be32 len;
        __be32 va_hi;
        __be32 va_low_or_fbo;
        __be32 rsvd_bind_cnt_or_pstag;
        __be32 rsvd_pbl_size;
};
#define S_TPT_VALID             31
#define V_TPT_VALID(x)          ((x) << S_TPT_VALID)
#define F_TPT_VALID             V_TPT_VALID(1U)

#define S_TPT_STAG_KEY          23
#define M_TPT_STAG_KEY          0xFF
#define V_TPT_STAG_KEY(x)       ((x) << S_TPT_STAG_KEY)
#define G_TPT_STAG_KEY(x)       (((x) >> S_TPT_STAG_KEY) & M_TPT_STAG_KEY)

#define S_TPT_STAG_STATE        22
#define V_TPT_STAG_STATE(x)     ((x) << S_TPT_STAG_STATE)
#define F_TPT_STAG_STATE        V_TPT_STAG_STATE(1U)

#define S_TPT_STAG_TYPE         20
#define M_TPT_STAG_TYPE         0x3
#define V_TPT_STAG_TYPE(x)      ((x) << S_TPT_STAG_TYPE)
#define G_TPT_STAG_TYPE(x)      (((x) >> S_TPT_STAG_TYPE) & M_TPT_STAG_TYPE)

#define S_TPT_PDID              0
#define M_TPT_PDID              0xFFFFF
#define V_TPT_PDID(x)           ((x) << S_TPT_PDID)
#define G_TPT_PDID(x)           (((x) >> S_TPT_PDID) & M_TPT_PDID)

#define S_TPT_PERM              28
#define M_TPT_PERM              0xF
#define V_TPT_PERM(x)           ((x) << S_TPT_PERM)
#define G_TPT_PERM(x)           (((x) >> S_TPT_PERM) & M_TPT_PERM)

#define S_TPT_REM_INV_DIS       27
#define V_TPT_REM_INV_DIS(x)    ((x) << S_TPT_REM_INV_DIS)
#define F_TPT_REM_INV_DIS       V_TPT_REM_INV_DIS(1U)

#define S_TPT_ADDR_TYPE         26
#define V_TPT_ADDR_TYPE(x)      ((x) << S_TPT_ADDR_TYPE)
#define F_TPT_ADDR_TYPE         V_TPT_ADDR_TYPE(1U)

#define S_TPT_MW_BIND_ENABLE    25
#define V_TPT_MW_BIND_ENABLE(x) ((x) << S_TPT_MW_BIND_ENABLE)
#define F_TPT_MW_BIND_ENABLE    V_TPT_MW_BIND_ENABLE(1U)

#define S_TPT_PAGE_SIZE         20
#define M_TPT_PAGE_SIZE         0x1F
#define V_TPT_PAGE_SIZE(x)      ((x) << S_TPT_PAGE_SIZE)
#define G_TPT_PAGE_SIZE(x)      (((x) >> S_TPT_PAGE_SIZE) & M_TPT_PAGE_SIZE)

#define S_TPT_PBL_ADDR          0
#define M_TPT_PBL_ADDR          0x1FFFFFFF
#define V_TPT_PBL_ADDR(x)       ((x) << S_TPT_PBL_ADDR)
#define G_TPT_PBL_ADDR(x)       (((x) >> S_TPT_PBL_ADDR) & M_TPT_PBL_ADDR)

#define S_TPT_QPID              0
#define M_TPT_QPID              0xFFFFF
#define V_TPT_QPID(x)           ((x) << S_TPT_QPID)
#define G_TPT_QPID(x)           (((x) >> S_TPT_QPID) & M_TPT_QPID)

#define S_TPT_PSTAG             0
#define M_TPT_PSTAG             0xFFFFFF
#define V_TPT_PSTAG(x)          ((x) << S_TPT_PSTAG)
#define G_TPT_PSTAG(x)          (((x) >> S_TPT_PSTAG) & M_TPT_PSTAG)

#define S_TPT_PBL_SIZE          0
#define M_TPT_PBL_SIZE          0xFFFFF
#define V_TPT_PBL_SIZE(x)       ((x) << S_TPT_PBL_SIZE)
#define G_TPT_PBL_SIZE(x)       (((x) >> S_TPT_PBL_SIZE) & M_TPT_PBL_SIZE)
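
/*
 * Composition sketch (hypothetical helper): the first TPT word packs
 * the valid bit, stag key/state/type and PD id with the V_/F_ macros
 * above, then gets byte-swapped for the adapter.
 */
static inline __be32 tpt_word0_sketch(u32 stag_key, u32 pdid)
{
        return cpu_to_be32(F_TPT_VALID |
                           V_TPT_STAG_KEY(stag_key) |
                           V_TPT_STAG_STATE(1) |
                           V_TPT_STAG_TYPE(TPT_NON_SHARED_MR) |
                           V_TPT_PDID(pdid));
}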
/*
 * CQE defs
 */
struct t3_cqe {
        __be32 header;
        __be32 len;
        union {
                struct {
                        __be32 stag;
                        __be32 msn;
                } rcqe;
                struct {
                        u32 wrid_hi;
                        u32 wrid_low;
                } scqe;
        } u;
};

#define S_CQE_OOO       31
#define M_CQE_OOO       0x1
#define G_CQE_OOO(x)    ((((x) >> S_CQE_OOO)) & M_CQE_OOO)
#define V_CQE_OOO(x)    ((x)<<S_CQE_OOO)

#define S_CQE_QPID      12
#define M_CQE_QPID      0x7FFFF
#define G_CQE_QPID(x)   ((((x) >> S_CQE_QPID)) & M_CQE_QPID)
#define V_CQE_QPID(x)   ((x)<<S_CQE_QPID)

#define S_CQE_SWCQE     11
#define M_CQE_SWCQE     0x1
#define G_CQE_SWCQE(x)  ((((x) >> S_CQE_SWCQE)) & M_CQE_SWCQE)
#define V_CQE_SWCQE(x)  ((x)<<S_CQE_SWCQE)

#define S_CQE_GENBIT    10
#define M_CQE_GENBIT    0x1
#define G_CQE_GENBIT(x) (((x) >> S_CQE_GENBIT) & M_CQE_GENBIT)
#define V_CQE_GENBIT(x) ((x)<<S_CQE_GENBIT)

#define S_CQE_STATUS    5
#define M_CQE_STATUS    0x1F
#define G_CQE_STATUS(x) ((((x) >> S_CQE_STATUS)) & M_CQE_STATUS)
#define V_CQE_STATUS(x) ((x)<<S_CQE_STATUS)

#define S_CQE_TYPE      4
#define M_CQE_TYPE      0x1
#define G_CQE_TYPE(x)   ((((x) >> S_CQE_TYPE)) & M_CQE_TYPE)
#define V_CQE_TYPE(x)   ((x)<<S_CQE_TYPE)

#define S_CQE_OPCODE    0
#define M_CQE_OPCODE    0xF
#define G_CQE_OPCODE(x) ((((x) >> S_CQE_OPCODE)) & M_CQE_OPCODE)
#define V_CQE_OPCODE(x) ((x)<<S_CQE_OPCODE)

#define SW_CQE(x)       (G_CQE_SWCQE(be32_to_cpu((x).header)))
#define CQE_OOO(x)      (G_CQE_OOO(be32_to_cpu((x).header)))
#define CQE_QPID(x)     (G_CQE_QPID(be32_to_cpu((x).header)))
#define CQE_GENBIT(x)   (G_CQE_GENBIT(be32_to_cpu((x).header)))
#define CQE_TYPE(x)     (G_CQE_TYPE(be32_to_cpu((x).header)))
#define SQ_TYPE(x)      (CQE_TYPE((x)))
#define RQ_TYPE(x)      (!CQE_TYPE((x)))
#define CQE_STATUS(x)   (G_CQE_STATUS(be32_to_cpu((x).header)))
#define CQE_OPCODE(x)   (G_CQE_OPCODE(be32_to_cpu((x).header)))
#define CQE_LEN(x)      (be32_to_cpu((x).len))

/* used for RQ completion processing */
#define CQE_WRID_STAG(x)        (be32_to_cpu((x).u.rcqe.stag))
#define CQE_WRID_MSN(x)         (be32_to_cpu((x).u.rcqe.msn))

/* used for SQ completion processing */
#define CQE_WRID_SQ_WPTR(x)     ((x).u.scqe.wrid_hi)
#define CQE_WRID_WPTR(x)        ((x).u.scqe.wrid_low)

/* generic accessor macros */
#define CQE_WRID_HI(x)          ((x).u.scqe.wrid_hi)
#define CQE_WRID_LOW(x)         ((x).u.scqe.wrid_low)
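
/*
 * Decode sketch (hypothetical helper): pulling the interesting fields
 * out of a completion entry with the accessors above.
 */
static inline void t3_cqe_decode_sketch(const struct t3_cqe *cqe)
{
        u32 qpid = CQE_QPID(*cqe);
        u8 status = CQE_STATUS(*cqe);
        u8 opcode = CQE_OPCODE(*cqe);

        if (SQ_TYPE(*cqe)) {
                /* SQ completion: wrid_hi/wrid_low locate the posted WR */
                u32 sq_wptr = CQE_WRID_SQ_WPTR(*cqe);
                (void)sq_wptr;
        } else {
                /* RQ completion: stag and msn are the meaningful cookies */
                u32 msn = CQE_WRID_MSN(*cqe);
                (void)msn;
        }
        (void)qpid;
        (void)status;
        (void)opcode;
}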
#define TPT_ERR_SUCCESS                 0x0
#define TPT_ERR_STAG                    0x1     /* STAG invalid: either */
                                                /* out of range, zero, or a */
                                                /* STAG_key mismatch */
#define TPT_ERR_PDID                    0x2     /* PDID mismatch */
#define TPT_ERR_QPID                    0x3     /* QPID mismatch */
#define TPT_ERR_ACCESS                  0x4     /* Invalid access right */
#define TPT_ERR_WRAP                    0x5     /* Wrap error */
#define TPT_ERR_BOUND                   0x6     /* base and bounds violation */
#define TPT_ERR_INVALIDATE_SHARED_MR    0x7     /* attempt to invalidate a */
                                                /* shared memory region */
#define TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND 0x8 /* attempt to invalidate a */
                                                /* MR with bound MWs */
#define TPT_ERR_ECC                     0x9     /* ECC error detected */
#define TPT_ERR_ECC_PSTAG               0xA     /* ECC error detected when */
                                                /* reading PSTAG for a MW */
                                                /* Invalidate */
#define TPT_ERR_PBL_ADDR_BOUND          0xB     /* pbl addr out of bounds: */
                                                /* software error */
#define TPT_ERR_SWFLUSH                 0xC     /* SW FLUSHED */
#define TPT_ERR_CRC                     0x10    /* CRC error */
#define TPT_ERR_MARKER                  0x11    /* Marker error */
#define TPT_ERR_PDU_LEN_ERR             0x12    /* invalid PDU length */
#define TPT_ERR_OUT_OF_RQE              0x13    /* out of RQE */
#define TPT_ERR_DDP_VERSION             0x14    /* wrong DDP version */
#define TPT_ERR_RDMA_VERSION            0x15    /* wrong RDMA version */
#define TPT_ERR_OPCODE                  0x16    /* invalid rdma opcode */
#define TPT_ERR_DDP_QUEUE_NUM           0x17    /* invalid ddp queue number */
#define TPT_ERR_MSN                     0x18    /* MSN error */
#define TPT_ERR_TBIT                    0x19    /* tag bit not set correctly */
#define TPT_ERR_MO                      0x1A    /* MO not 0 for TERMINATE */
                                                /* or READ_REQ */
#define TPT_ERR_MSN_GAP                 0x1B
#define TPT_ERR_MSN_RANGE               0x1C
#define TPT_ERR_IRD_OVERFLOW            0x1D
#define TPT_ERR_RQE_ADDR_BOUND          0x1E    /* RQE addr out of bounds: */
                                                /* software error */
#define TPT_ERR_INTERNAL_ERR            0x1F    /* internal error (opcode */
                                                /* mismatch) */
struct t3_swsq {
        __u64 wr_id;
        struct t3_cqe cqe;
        __u32 sq_wptr;
        __be32 read_len;
        int opcode;
        int complete;
        int signaled;
};

/*
 * A T3 WQ implements both the SQ and RQ.
 */
struct t3_wq {
        union t3_wr *queue;             /* DMA accessible memory */
        dma_addr_t dma_addr;            /* DMA address for HW */
        DECLARE_PCI_UNMAP_ADDR(mapping) /* unmap kruft */
        u32 error;                      /* 1 once we go to ERROR */
        u32 qpid;
        u32 wptr;                       /* idx to next available WR slot */
        u32 size_log2;                  /* total wq size */
        struct t3_swsq *sq;             /* SW SQ */
        struct t3_swsq *oldest_read;    /* tracks oldest pending read */
        u32 sq_wptr;                    /* sq_wptr - sq_rptr == count of */
        u32 sq_rptr;                    /* pending wrs */
        u32 sq_size_log2;               /* sq size */
        u64 *rq;                        /* SW RQ (holds consumer wr_ids) */
        u32 rq_wptr;                    /* rq_wptr - rq_rptr == count of */
        u32 rq_rptr;                    /* pending wrs */
        u64 *rq_oldest_wr;              /* oldest wr on the SW RQ */
        u32 rq_size_log2;               /* rq size */
        u32 rq_addr;                    /* rq adapter address */
        void __iomem *doorbell;         /* kernel db */
        u64 udb;                        /* user db if any */
};
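
/*
 * Posting sketch (hypothetical helper, simplified from what the driver
 * does): pick the next WR slot from the free-running wptr, stamp the
 * header with the current generation bit, then ring the doorbell.
 * Capacity checks and locking are omitted.
 */
static inline void cxio_post_send_sketch(struct t3_wq *wq, u8 len_flits)
{
        union t3_wr *wqe = wq->queue + Q_PTR2IDX(wq->wptr, wq->size_log2);

        /* ... fill in the body of wqe->send here ... */
        build_fw_riwrh((struct fw_riwrh *)wqe, T3_WR_SEND,
                       T3_COMPLETION_FLAG,
                       Q_GENBIT(wq->wptr, wq->size_log2),
                       wq->qpid, len_flits);
        wq->wptr++;
        ring_doorbell(wq->doorbell, wq->qpid);
}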
struct t3_cq {
        u32 cqid;
        u32 rptr;
        u32 wptr;
        u32 size_log2;
        dma_addr_t dma_addr;
        DECLARE_PCI_UNMAP_ADDR(mapping)
        struct t3_cqe *queue;
        struct t3_cqe *sw_queue;
        u32 sw_rptr;
        u32 sw_wptr;
};

#define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
                                         CQE_GENBIT(*cqe))

static inline void cxio_set_wq_in_error(struct t3_wq *wq)
{
        wq->queue->flit[13] = 1;        /* flit 13 of WR slot 0 doubles */
                                        /* as the WQ error flag */
}
static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
{
        struct t3_cqe *cqe;

        cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
        if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
                return cqe;
        return NULL;
}

static inline struct t3_cqe *cxio_next_sw_cqe(struct t3_cq *cq)
{
        struct t3_cqe *cqe;

        if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
                cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
                return cqe;
        }
        return NULL;
}

static inline struct t3_cqe *cxio_next_cqe(struct t3_cq *cq)
{
        struct t3_cqe *cqe;

        if (!Q_EMPTY(cq->sw_rptr, cq->sw_wptr)) {
                cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
                return cqe;
        }
        cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
        if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
                return cqe;
        return NULL;
}
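
/*
 * Poll sketch (hypothetical helper): cxio_next_cqe() prefers software
 * CQEs over hardware ones, so a consumer advances whichever cursor the
 * entry came from.  The real driver does considerably more per entry.
 */
static inline int cxio_poll_cq_sketch(struct t3_cq *cq)
{
        struct t3_cqe *cqe;
        int polled = 0;

        while ((cqe = cxio_next_cqe(cq)) != NULL) {
                /* ... translate *cqe into a work completion here ... */
                if (SW_CQE(*cqe))
                        cq->sw_rptr++;  /* consumed from the SW queue */
                else
                        cq->rptr++;     /* consumed from the HW queue */
                polled++;
        }
        return polled;
}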
#endif