c2_wr.h

  1. /*
  2. * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
  3. * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenIB.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. */
  33. #ifndef _C2_WR_H_
  34. #define _C2_WR_H_
  35. #ifdef CCDEBUG
  36. #define CCWR_MAGIC 0xb07700b0
  37. #endif
  38. #define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
  39. /* Maximum allowed size in bytes of private_data exchange
  40. * on connect.
  41. */
  42. #define C2_MAX_PRIVATE_DATA_SIZE 200
  43. /*
  44. * These types are shared among the adapter, host, and CCIL consumer.
  45. */
  46. enum c2_cq_notification_type {
  47. C2_CQ_NOTIFICATION_TYPE_NONE = 1,
  48. C2_CQ_NOTIFICATION_TYPE_NEXT,
  49. C2_CQ_NOTIFICATION_TYPE_NEXT_SE
  50. };
  51. enum c2_setconfig_cmd {
  52. C2_CFG_ADD_ADDR = 1,
  53. C2_CFG_DEL_ADDR = 2,
  54. C2_CFG_ADD_ROUTE = 3,
  55. C2_CFG_DEL_ROUTE = 4
  56. };
  57. enum c2_getconfig_cmd {
  58. C2_GETCONFIG_ROUTES = 1,
  59. C2_GETCONFIG_ADDRS
  60. };
  61. /*
  62. * CCIL Work Request Identifiers
  63. */
  64. enum c2wr_ids {
  65. CCWR_RNIC_OPEN = 1,
  66. CCWR_RNIC_QUERY,
  67. CCWR_RNIC_SETCONFIG,
  68. CCWR_RNIC_GETCONFIG,
  69. CCWR_RNIC_CLOSE,
  70. CCWR_CQ_CREATE,
  71. CCWR_CQ_QUERY,
  72. CCWR_CQ_MODIFY,
  73. CCWR_CQ_DESTROY,
  74. CCWR_QP_CONNECT,
  75. CCWR_PD_ALLOC,
  76. CCWR_PD_DEALLOC,
  77. CCWR_SRQ_CREATE,
  78. CCWR_SRQ_QUERY,
  79. CCWR_SRQ_MODIFY,
  80. CCWR_SRQ_DESTROY,
  81. CCWR_QP_CREATE,
  82. CCWR_QP_QUERY,
  83. CCWR_QP_MODIFY,
  84. CCWR_QP_DESTROY,
  85. CCWR_NSMR_STAG_ALLOC,
  86. CCWR_NSMR_REGISTER,
  87. CCWR_NSMR_PBL,
  88. CCWR_STAG_DEALLOC,
  89. CCWR_NSMR_REREGISTER,
  90. CCWR_SMR_REGISTER,
  91. CCWR_MR_QUERY,
  92. CCWR_MW_ALLOC,
  93. CCWR_MW_QUERY,
  94. CCWR_EP_CREATE,
  95. CCWR_EP_GETOPT,
  96. CCWR_EP_SETOPT,
  97. CCWR_EP_DESTROY,
  98. CCWR_EP_BIND,
  99. CCWR_EP_CONNECT,
  100. CCWR_EP_LISTEN,
  101. CCWR_EP_SHUTDOWN,
  102. CCWR_EP_LISTEN_CREATE,
  103. CCWR_EP_LISTEN_DESTROY,
  104. CCWR_EP_QUERY,
  105. CCWR_CR_ACCEPT,
  106. CCWR_CR_REJECT,
  107. CCWR_CONSOLE,
  108. CCWR_TERM,
  109. CCWR_FLASH_INIT,
  110. CCWR_FLASH,
  111. CCWR_BUF_ALLOC,
  112. CCWR_BUF_FREE,
  113. CCWR_FLASH_WRITE,
  114. CCWR_INIT, /* WARNING: Don't move this ever again! */
  115. /* Add new IDs here */
  116. /*
  117. * WARNING: CCWR_LAST must always be the last verbs id defined!
  118. * All the preceding IDs are fixed, and must not change.
  119. * You can add new IDs, but must not remove or reorder
  120. * any IDs. If you do, YOU will ruin any hope of
  121. * compatability between versions.
  122. */
  123. CCWR_LAST,
  124. /*
  125. * Start over at 1 so that arrays indexed by user wr ids
  126. * begin at 1. This is OK since the verbs and user wr ids
  127. * are always used on disjoint sets of queues.
  128. */
  129. /*
  130. * The order of the CCWR_SEND_XX verbs must
  131. * match the order of the RDMA_OPs
  132. */
  133. CCWR_SEND = 1,
  134. CCWR_SEND_INV,
  135. CCWR_SEND_SE,
  136. CCWR_SEND_SE_INV,
  137. CCWR_RDMA_WRITE,
  138. CCWR_RDMA_READ,
  139. CCWR_RDMA_READ_INV,
  140. CCWR_MW_BIND,
  141. CCWR_NSMR_FASTREG,
  142. CCWR_STAG_INVALIDATE,
  143. CCWR_RECV,
  144. CCWR_NOP,
  145. CCWR_UNIMPL,
  146. /* WARNING: This must always be the last user wr id defined! */
  147. };
  148. #define RDMA_SEND_OPCODE_FROM_WR_ID(x) (x+2)
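/*
 * For illustration (an inference, not stated in this file): with
 * CCWR_SEND == 1, RDMA_SEND_OPCODE_FROM_WR_ID(CCWR_SEND) evaluates to 3,
 * and the remaining CCWR_SEND_XX ids map onto consecutive opcodes because
 * the two enumerations are kept in the same order (see the comment above).
 * The RDMA_OPs enumeration itself is defined outside this header.
 */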
  149. /*
  150. * SQ/RQ Work Request Types
  151. */
  152. enum c2_wr_type {
  153. C2_WR_TYPE_SEND = CCWR_SEND,
  154. C2_WR_TYPE_SEND_SE = CCWR_SEND_SE,
  155. C2_WR_TYPE_SEND_INV = CCWR_SEND_INV,
  156. C2_WR_TYPE_SEND_SE_INV = CCWR_SEND_SE_INV,
  157. C2_WR_TYPE_RDMA_WRITE = CCWR_RDMA_WRITE,
  158. C2_WR_TYPE_RDMA_READ = CCWR_RDMA_READ,
  159. C2_WR_TYPE_RDMA_READ_INV_STAG = CCWR_RDMA_READ_INV,
  160. C2_WR_TYPE_BIND_MW = CCWR_MW_BIND,
  161. C2_WR_TYPE_FASTREG_NSMR = CCWR_NSMR_FASTREG,
  162. C2_WR_TYPE_INV_STAG = CCWR_STAG_INVALIDATE,
  163. C2_WR_TYPE_RECV = CCWR_RECV,
  164. C2_WR_TYPE_NOP = CCWR_NOP,
  165. };
  166. struct c2_netaddr {
  167. __be32 ip_addr;
  168. __be32 netmask;
  169. u32 mtu;
  170. };
  171. struct c2_route {
  172. u32 ip_addr; /* 0 indicates the default route */
  173. u32 netmask; /* netmask associated with dst */
  174. u32 flags;
  175. union {
  176. u32 ipaddr; /* address of the nexthop interface */
  177. u8 enaddr[6];
  178. } nexthop;
  179. };
  180. /*
  181. * A Scatter Gather Entry.
  182. */
  183. struct c2_data_addr {
  184. __be32 stag;
  185. __be32 length;
  186. __be64 to;
  187. };
  188. /*
  189. * MR and MW flags used by the consumer, RI, and RNIC.
  190. */
  191. enum c2_mm_flags {
  192. MEM_REMOTE = 0x0001, /* allow mw binds with remote access. */
  193. MEM_VA_BASED = 0x0002, /* Not Zero-based */
  194. MEM_PBL_COMPLETE = 0x0004, /* PBL array is complete in this msg */
  195. MEM_LOCAL_READ = 0x0008, /* allow local reads */
  196. MEM_LOCAL_WRITE = 0x0010, /* allow local writes */
  197. MEM_REMOTE_READ = 0x0020, /* allow remote reads */
  198. MEM_REMOTE_WRITE = 0x0040, /* allow remote writes */
  199. MEM_WINDOW_BIND = 0x0080, /* binds allowed */
  200. MEM_SHARED = 0x0100, /* set if MR is shared */
  201. MEM_STAG_VALID = 0x0200 /* set if STAG is in valid state */
  202. };
  203. /*
  204. * CCIL API ACF flags defined in terms of the low level mem flags.
  205. * This minimizes translation needed in the user API
  206. */
  207. enum c2_acf {
  208. C2_ACF_LOCAL_READ = MEM_LOCAL_READ,
  209. C2_ACF_LOCAL_WRITE = MEM_LOCAL_WRITE,
  210. C2_ACF_REMOTE_READ = MEM_REMOTE_READ,
  211. C2_ACF_REMOTE_WRITE = MEM_REMOTE_WRITE,
  212. C2_ACF_WINDOW_BIND = MEM_WINDOW_BIND
  213. };
  214. /*
  215. * Image types of objects written to flash
  216. */
  217. #define C2_FLASH_IMG_BITFILE 1
  218. #define C2_FLASH_IMG_OPTION_ROM 2
  219. #define C2_FLASH_IMG_VPD 3
  220. /*
  221. * To fix bug 1815 we define the maximum allowable size of the
  222. * terminate message (per the IETF spec; refer to the IETF
  223. * protocol specification, section 12.1.6, page 64).
  224. * The message is prefixed by 20 bytes of DDP info.
  225. *
  226. * Then the message has 6 bytes for the terminate control
  227. * and DDP segment length info plus a DDP header (either
  228. * 14 or 18 bytes) plus 28 bytes for the RDMA header.
  229. * Thus the max size is:
  230. * 20 + (6 + 18 + 28) = 72
  231. */
  232. #define C2_MAX_TERMINATE_MESSAGE_SIZE (72)
  233. /*
  234. * Build String Length. It must be the same as C2_BUILD_STR_LEN in ccil_api.h
  235. */
  236. #define WR_BUILD_STR_LEN 64
  237. /*
  238. * WARNING: All of these structs need to align any 64-bit types on
  239. * 64-bit boundaries! 64-bit types include u64 and __be64.
  240. */
  241. /*
  242. * Clustercore Work Request Header. Be sensitive to field layout
  243. * and alignment.
  244. */
  245. struct c2wr_hdr {
  246. /* wqe_count is part of the cqe. It is put here so the
  247. * adapter can write to it while the wr is pending without
  248. * clobbering part of the wr. This word need not be dma'd
  249. * from the host to adapter by libccil, but we copy it anyway
  250. * to make the memcpy to the adapter better aligned.
  251. */
  252. __be32 wqe_count;
  253. /* Put these fields next so that later 32- and 64-bit
  254. * quantities are naturally aligned.
  255. */
  256. u8 id;
  257. u8 result; /* adapter -> host */
  258. u8 sge_count; /* host -> adapter */
  259. u8 flags; /* host -> adapter */
  260. u64 context;
  261. #ifdef CCMSGMAGIC
  262. u32 magic;
  263. u32 pad;
  264. #endif
  265. } __attribute__((packed));
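/*
 * Layout sketch for reference (illustrative; assumes CCMSGMAGIC is not
 * defined and that __attribute__((packed)) leaves no holes):
 *
 *   offset 0   __be32 wqe_count
 *   offset 4   u8 id, result, sge_count, flags
 *   offset 8   u64 context
 *   sizeof(struct c2wr_hdr) == 16
 *
 * which is why the 64-bit context field lands on a natural 8-byte boundary.
 */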
  266. /*
  267. *------------------------ RNIC ------------------------
  268. */
  269. /*
  270. * WR_RNIC_OPEN
  271. */
  272. /*
  273. * Flags for the RNIC WRs
  274. */
  275. enum c2_rnic_flags {
  276. RNIC_IRD_STATIC = 0x0001,
  277. RNIC_ORD_STATIC = 0x0002,
  278. RNIC_QP_STATIC = 0x0004,
  279. RNIC_SRQ_SUPPORTED = 0x0008,
  280. RNIC_PBL_BLOCK_MODE = 0x0010,
  281. RNIC_SRQ_MODEL_ARRIVAL = 0x0020,
  282. RNIC_CQ_OVF_DETECTED = 0x0040,
  283. RNIC_PRIV_MODE = 0x0080
  284. };
  285. struct c2wr_rnic_open_req {
  286. struct c2wr_hdr hdr;
  287. u64 user_context;
  288. __be16 flags; /* See enum c2_rnic_flags */
  289. __be16 port_num;
  290. } __attribute__((packed));
  291. struct c2wr_rnic_open_rep {
  292. struct c2wr_hdr hdr;
  293. u32 rnic_handle;
  294. } __attribute__((packed));
  295. union c2wr_rnic_open {
  296. struct c2wr_rnic_open_req req;
  297. struct c2wr_rnic_open_rep rep;
  298. } __attribute__((packed));
  299. struct c2wr_rnic_query_req {
  300. struct c2wr_hdr hdr;
  301. u32 rnic_handle;
  302. } __attribute__((packed));
  303. /*
  304. * WR_RNIC_QUERY
  305. */
  306. struct c2wr_rnic_query_rep {
  307. struct c2wr_hdr hdr;
  308. u64 user_context;
  309. __be32 vendor_id;
  310. __be32 part_number;
  311. __be32 hw_version;
  312. __be32 fw_ver_major;
  313. __be32 fw_ver_minor;
  314. __be32 fw_ver_patch;
  315. char fw_ver_build_str[WR_BUILD_STR_LEN];
  316. __be32 max_qps;
  317. __be32 max_qp_depth;
  318. u32 max_srq_depth;
  319. u32 max_send_sgl_depth;
  320. u32 max_rdma_sgl_depth;
  321. __be32 max_cqs;
  322. __be32 max_cq_depth;
  323. u32 max_cq_event_handlers;
  324. __be32 max_mrs;
  325. u32 max_pbl_depth;
  326. __be32 max_pds;
  327. __be32 max_global_ird;
  328. u32 max_global_ord;
  329. __be32 max_qp_ird;
  330. __be32 max_qp_ord;
  331. u32 flags;
  332. __be32 max_mws;
  333. u32 pbe_range_low;
  334. u32 pbe_range_high;
  335. u32 max_srqs;
  336. u32 page_size;
  337. } __attribute__((packed));
  338. union c2wr_rnic_query {
  339. struct c2wr_rnic_query_req req;
  340. struct c2wr_rnic_query_rep rep;
  341. } __attribute__((packed));
  342. /*
  343. * WR_RNIC_GETCONFIG
  344. */
  345. struct c2wr_rnic_getconfig_req {
  346. struct c2wr_hdr hdr;
  347. u32 rnic_handle;
  348. u32 option; /* see enum c2_getconfig_cmd */
  349. u64 reply_buf;
  350. u32 reply_buf_len;
  351. } __attribute__((packed)) ;
  352. struct c2wr_rnic_getconfig_rep {
  353. struct c2wr_hdr hdr;
  354. u32 option; /* see enum c2_getconfig_cmd */
  355. u32 count_len; /* length of the number of addresses configured */
  356. } __attribute__((packed)) ;
  357. union c2wr_rnic_getconfig {
  358. struct c2wr_rnic_getconfig_req req;
  359. struct c2wr_rnic_getconfig_rep rep;
  360. } __attribute__((packed)) ;
  361. /*
  362. * WR_RNIC_SETCONFIG
  363. */
  364. struct c2wr_rnic_setconfig_req {
  365. struct c2wr_hdr hdr;
  366. u32 rnic_handle;
  367. __be32 option; /* see enum c2_setconfig_cmd */
  368. /* variable data and pad. See c2_netaddr and c2_route */
  369. u8 data[0];
  370. } __attribute__((packed)) ;
  371. struct c2wr_rnic_setconfig_rep {
  372. struct c2wr_hdr hdr;
  373. } __attribute__((packed)) ;
  374. union c2wr_rnic_setconfig {
  375. struct c2wr_rnic_setconfig_req req;
  376. struct c2wr_rnic_setconfig_rep rep;
  377. } __attribute__((packed)) ;
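/*
 * Illustrative sketch (an assumption, not part of the original interface):
 * the length of a CCWR_RNIC_SETCONFIG message is the fixed request plus the
 * option-specific payload carried in data[], e.g. one struct c2_netaddr for
 * C2_CFG_ADD_ADDR or one struct c2_route for C2_CFG_ADD_ROUTE.
 */
static __inline__ u32 example_setconfig_add_addr_size(void)
{
	return sizeof(struct c2wr_rnic_setconfig_req) +
		sizeof(struct c2_netaddr);
}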
  378. /*
  379. * WR_RNIC_CLOSE
  380. */
  381. struct c2wr_rnic_close_req {
  382. struct c2wr_hdr hdr;
  383. u32 rnic_handle;
  384. } __attribute__((packed)) ;
  385. struct c2wr_rnic_close_rep {
  386. struct c2wr_hdr hdr;
  387. } __attribute__((packed)) ;
  388. union c2wr_rnic_close {
  389. struct c2wr_rnic_close_req req;
  390. struct c2wr_rnic_close_rep rep;
  391. } __attribute__((packed)) ;
  392. /*
  393. *------------------------ CQ ------------------------
  394. */
  395. struct c2wr_cq_create_req {
  396. struct c2wr_hdr hdr;
  397. __be64 shared_ht;
  398. u64 user_context;
  399. __be64 msg_pool;
  400. u32 rnic_handle;
  401. __be32 msg_size;
  402. __be32 depth;
  403. } __attribute__((packed)) ;
  404. struct c2wr_cq_create_rep {
  405. struct c2wr_hdr hdr;
  406. __be32 mq_index;
  407. __be32 adapter_shared;
  408. u32 cq_handle;
  409. } __attribute__((packed)) ;
  410. union c2wr_cq_create {
  411. struct c2wr_cq_create_req req;
  412. struct c2wr_cq_create_rep rep;
  413. } __attribute__((packed)) ;
  414. struct c2wr_cq_modify_req {
  415. struct c2wr_hdr hdr;
  416. u32 rnic_handle;
  417. u32 cq_handle;
  418. u32 new_depth;
  419. u64 new_msg_pool;
  420. } __attribute__((packed)) ;
  421. struct c2wr_cq_modify_rep {
  422. struct c2wr_hdr hdr;
  423. } __attribute__((packed)) ;
  424. union c2wr_cq_modify {
  425. struct c2wr_cq_modify_req req;
  426. struct c2wr_cq_modify_rep rep;
  427. } __attribute__((packed)) ;
  428. struct c2wr_cq_destroy_req {
  429. struct c2wr_hdr hdr;
  430. u32 rnic_handle;
  431. u32 cq_handle;
  432. } __attribute__((packed)) ;
  433. struct c2wr_cq_destroy_rep {
  434. struct c2wr_hdr hdr;
  435. } __attribute__((packed)) ;
  436. union c2wr_cq_destroy {
  437. struct c2wr_cq_destroy_req req;
  438. struct c2wr_cq_destroy_rep rep;
  439. } __attribute__((packed)) ;
  440. /*
  441. *------------------------ PD ------------------------
  442. */
  443. struct c2wr_pd_alloc_req {
  444. struct c2wr_hdr hdr;
  445. u32 rnic_handle;
  446. u32 pd_id;
  447. } __attribute__((packed)) ;
  448. struct c2wr_pd_alloc_rep {
  449. struct c2wr_hdr hdr;
  450. } __attribute__((packed)) ;
  451. union c2wr_pd_alloc {
  452. struct c2wr_pd_alloc_req req;
  453. struct c2wr_pd_alloc_rep rep;
  454. } __attribute__((packed)) ;
  455. struct c2wr_pd_dealloc_req {
  456. struct c2wr_hdr hdr;
  457. u32 rnic_handle;
  458. u32 pd_id;
  459. } __attribute__((packed)) ;
  460. struct c2wr_pd_dealloc_rep {
  461. struct c2wr_hdr hdr;
  462. } __attribute__((packed)) ;
  463. union c2wr_pd_dealloc {
  464. struct c2wr_pd_dealloc_req req;
  465. struct c2wr_pd_dealloc_rep rep;
  466. } __attribute__((packed)) ;
  467. /*
  468. *------------------------ SRQ ------------------------
  469. */
  470. struct c2wr_srq_create_req {
  471. struct c2wr_hdr hdr;
  472. u64 shared_ht;
  473. u64 user_context;
  474. u32 rnic_handle;
  475. u32 srq_depth;
  476. u32 srq_limit;
  477. u32 sgl_depth;
  478. u32 pd_id;
  479. } __attribute__((packed)) ;
  480. struct c2wr_srq_create_rep {
  481. struct c2wr_hdr hdr;
  482. u32 srq_depth;
  483. u32 sgl_depth;
  484. u32 msg_size;
  485. u32 mq_index;
  486. u32 mq_start;
  487. u32 srq_handle;
  488. } __attribute__((packed)) ;
  489. union c2wr_srq_create {
  490. struct c2wr_srq_create_req req;
  491. struct c2wr_srq_create_rep rep;
  492. } __attribute__((packed)) ;
  493. struct c2wr_srq_destroy_req {
  494. struct c2wr_hdr hdr;
  495. u32 rnic_handle;
  496. u32 srq_handle;
  497. } __attribute__((packed)) ;
  498. struct c2wr_srq_destroy_rep {
  499. struct c2wr_hdr hdr;
  500. } __attribute__((packed)) ;
  501. union c2wr_srq_destroy {
  502. struct c2wr_srq_destroy_req req;
  503. struct c2wr_srq_destroy_rep rep;
  504. } __attribute__((packed)) ;
  505. /*
  506. *------------------------ QP ------------------------
  507. */
  508. enum c2wr_qp_flags {
  509. QP_RDMA_READ = 0x00000001, /* RDMA read enabled? */
  510. QP_RDMA_WRITE = 0x00000002, /* RDMA write enabled? */
  511. QP_MW_BIND = 0x00000004, /* MWs enabled */
  512. QP_ZERO_STAG = 0x00000008, /* enabled? */
  513. QP_REMOTE_TERMINATION = 0x00000010, /* remote end terminated */
  514. QP_RDMA_READ_RESPONSE = 0x00000020 /* Remote RDMA read */
  515. /* enabled? */
  516. };
  517. struct c2wr_qp_create_req {
  518. struct c2wr_hdr hdr;
  519. __be64 shared_sq_ht;
  520. __be64 shared_rq_ht;
  521. u64 user_context;
  522. u32 rnic_handle;
  523. u32 sq_cq_handle;
  524. u32 rq_cq_handle;
  525. __be32 sq_depth;
  526. __be32 rq_depth;
  527. u32 srq_handle;
  528. u32 srq_limit;
  529. __be32 flags; /* see enum c2wr_qp_flags */
  530. __be32 send_sgl_depth;
  531. __be32 recv_sgl_depth;
  532. __be32 rdma_write_sgl_depth;
  533. __be32 ord;
  534. __be32 ird;
  535. u32 pd_id;
  536. } __attribute__((packed)) ;
  537. struct c2wr_qp_create_rep {
  538. struct c2wr_hdr hdr;
  539. __be32 sq_depth;
  540. __be32 rq_depth;
  541. u32 send_sgl_depth;
  542. u32 recv_sgl_depth;
  543. u32 rdma_write_sgl_depth;
  544. u32 ord;
  545. u32 ird;
  546. __be32 sq_msg_size;
  547. __be32 sq_mq_index;
  548. __be32 sq_mq_start;
  549. __be32 rq_msg_size;
  550. __be32 rq_mq_index;
  551. __be32 rq_mq_start;
  552. u32 qp_handle;
  553. } __attribute__((packed)) ;
  554. union c2wr_qp_create {
  555. struct c2wr_qp_create_req req;
  556. struct c2wr_qp_create_rep rep;
  557. } __attribute__((packed)) ;
  558. struct c2wr_qp_query_req {
  559. struct c2wr_hdr hdr;
  560. u32 rnic_handle;
  561. u32 qp_handle;
  562. } __attribute__((packed)) ;
  563. struct c2wr_qp_query_rep {
  564. struct c2wr_hdr hdr;
  565. u64 user_context;
  566. u32 rnic_handle;
  567. u32 sq_depth;
  568. u32 rq_depth;
  569. u32 send_sgl_depth;
  570. u32 rdma_write_sgl_depth;
  571. u32 recv_sgl_depth;
  572. u32 ord;
  573. u32 ird;
  574. u16 qp_state;
  575. u16 flags; /* see enum c2wr_qp_flags */
  576. u32 qp_id;
  577. u32 local_addr;
  578. u32 remote_addr;
  579. u16 local_port;
  580. u16 remote_port;
  581. u32 terminate_msg_length; /* 0 if not present */
  582. u8 data[0];
  583. /* Terminate Message in-line here. */
  584. } __attribute__((packed)) ;
  585. union c2wr_qp_query {
  586. struct c2wr_qp_query_req req;
  587. struct c2wr_qp_query_rep rep;
  588. } __attribute__((packed)) ;
  589. struct c2wr_qp_modify_req {
  590. struct c2wr_hdr hdr;
  591. u64 stream_msg;
  592. u32 stream_msg_length;
  593. u32 rnic_handle;
  594. u32 qp_handle;
  595. __be32 next_qp_state;
  596. __be32 ord;
  597. __be32 ird;
  598. __be32 sq_depth;
  599. __be32 rq_depth;
  600. u32 llp_ep_handle;
  601. } __attribute__((packed)) ;
  602. struct c2wr_qp_modify_rep {
  603. struct c2wr_hdr hdr;
  604. u32 ord;
  605. u32 ird;
  606. u32 sq_depth;
  607. u32 rq_depth;
  608. u32 sq_msg_size;
  609. u32 sq_mq_index;
  610. u32 sq_mq_start;
  611. u32 rq_msg_size;
  612. u32 rq_mq_index;
  613. u32 rq_mq_start;
  614. } __attribute__((packed)) ;
  615. union c2wr_qp_modify {
  616. struct c2wr_qp_modify_req req;
  617. struct c2wr_qp_modify_rep rep;
  618. } __attribute__((packed)) ;
  619. struct c2wr_qp_destroy_req {
  620. struct c2wr_hdr hdr;
  621. u32 rnic_handle;
  622. u32 qp_handle;
  623. } __attribute__((packed)) ;
  624. struct c2wr_qp_destroy_rep {
  625. struct c2wr_hdr hdr;
  626. } __attribute__((packed)) ;
  627. union c2wr_qp_destroy {
  628. struct c2wr_qp_destroy_req req;
  629. struct c2wr_qp_destroy_rep rep;
  630. } __attribute__((packed)) ;
  631. /*
  632. * The CCWR_QP_CONNECT msg is posted on the verbs request queue. It can
  633. * only be posted when a QP is in IDLE state. After the connect request is
  634. * submitted to the LLP, the adapter moves the QP to CONNECT_PENDING state.
  635. * There is no synchronous reply from the adapter to this WR. The results
  636. * of the connection attempt are passed back in the async event
  637. * CCAE_ACTIVE_CONNECT_RESULTS. See struct c2wr_ae_active_connect_results.
  638. */
  639. struct c2wr_qp_connect_req {
  640. struct c2wr_hdr hdr;
  641. u32 rnic_handle;
  642. u32 qp_handle;
  643. __be32 remote_addr;
  644. __be16 remote_port;
  645. u16 pad;
  646. __be32 private_data_length;
  647. u8 private_data[0]; /* Private data in-line. */
  648. } __attribute__((packed)) ;
  649. struct c2wr_qp_connect {
  650. struct c2wr_qp_connect_req req;
  651. /* no synchronous reply. */
  652. } __attribute__((packed)) ;
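/*
 * Illustrative sketch (assumption): total length of a CCWR_QP_CONNECT
 * message whose private data is carried in-line in private_data[].
 * private_data_length may not exceed C2_MAX_PRIVATE_DATA_SIZE.
 */
static __inline__ u32 example_qp_connect_msg_size(u32 private_data_length)
{
	return sizeof(struct c2wr_qp_connect_req) + private_data_length;
}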
  653. /*
  654. *------------------------ MM ------------------------
  655. */
  656. struct c2wr_nsmr_stag_alloc_req {
  657. struct c2wr_hdr hdr;
  658. u32 rnic_handle;
  659. u32 pbl_depth;
  660. u32 pd_id;
  661. u32 flags;
  662. } __attribute__((packed)) ;
  663. struct c2wr_nsmr_stag_alloc_rep {
  664. struct c2wr_hdr hdr;
  665. u32 pbl_depth;
  666. u32 stag_index;
  667. } __attribute__((packed)) ;
  668. union c2wr_nsmr_stag_alloc {
  669. struct c2wr_nsmr_stag_alloc_req req;
  670. struct c2wr_nsmr_stag_alloc_rep rep;
  671. } __attribute__((packed)) ;
  672. struct c2wr_nsmr_register_req {
  673. struct c2wr_hdr hdr;
  674. __be64 va;
  675. u32 rnic_handle;
  676. __be16 flags;
  677. u8 stag_key;
  678. u8 pad;
  679. u32 pd_id;
  680. __be32 pbl_depth;
  681. __be32 pbe_size;
  682. __be32 fbo;
  683. __be32 length;
  684. __be32 addrs_length;
  685. /* array of paddrs (must be aligned on a 64bit boundary) */
  686. __be64 paddrs[0];
  687. } __attribute__((packed)) ;
  688. struct c2wr_nsmr_register_rep {
  689. struct c2wr_hdr hdr;
  690. u32 pbl_depth;
  691. __be32 stag_index;
  692. } __attribute__((packed)) ;
  693. union c2wr_nsmr_register {
  694. struct c2wr_nsmr_register_req req;
  695. struct c2wr_nsmr_register_rep rep;
  696. } __attribute__((packed)) ;
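/*
 * Illustrative sketch (assumption): a CCWR_NSMR_REGISTER request carries
 * its initial page list in-line in paddrs[]; entries that do not fit are
 * sent in follow-on CCWR_NSMR_PBL messages, with MEM_PBL_COMPLETE marking
 * the final message.
 */
static __inline__ u32 example_nsmr_register_msg_size(u32 inline_pbl_entries)
{
	return sizeof(struct c2wr_nsmr_register_req) +
		inline_pbl_entries * sizeof(__be64);
}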
  697. struct c2wr_nsmr_pbl_req {
  698. struct c2wr_hdr hdr;
  699. u32 rnic_handle;
  700. __be32 flags;
  701. __be32 stag_index;
  702. __be32 addrs_length;
  703. /* array of paddrs (must be aligned on a 64bit boundary) */
  704. __be64 paddrs[0];
  705. } __attribute__((packed)) ;
  706. struct c2wr_nsmr_pbl_rep {
  707. struct c2wr_hdr hdr;
  708. } __attribute__((packed)) ;
  709. union c2wr_nsmr_pbl {
  710. struct c2wr_nsmr_pbl_req req;
  711. struct c2wr_nsmr_pbl_rep rep;
  712. } __attribute__((packed)) ;
  713. struct c2wr_mr_query_req {
  714. struct c2wr_hdr hdr;
  715. u32 rnic_handle;
  716. u32 stag_index;
  717. } __attribute__((packed)) ;
  718. struct c2wr_mr_query_rep {
  719. struct c2wr_hdr hdr;
  720. u8 stag_key;
  721. u8 pad[3];
  722. u32 pd_id;
  723. u32 flags;
  724. u32 pbl_depth;
  725. } __attribute__((packed)) ;
  726. union c2wr_mr_query {
  727. struct c2wr_mr_query_req req;
  728. struct c2wr_mr_query_rep rep;
  729. } __attribute__((packed)) ;
  730. struct c2wr_mw_query_req {
  731. struct c2wr_hdr hdr;
  732. u32 rnic_handle;
  733. u32 stag_index;
  734. } __attribute__((packed)) ;
  735. struct c2wr_mw_query_rep {
  736. struct c2wr_hdr hdr;
  737. u8 stag_key;
  738. u8 pad[3];
  739. u32 pd_id;
  740. u32 flags;
  741. } __attribute__((packed)) ;
  742. union c2wr_mw_query {
  743. struct c2wr_mw_query_req req;
  744. struct c2wr_mw_query_rep rep;
  745. } __attribute__((packed)) ;
  746. struct c2wr_stag_dealloc_req {
  747. struct c2wr_hdr hdr;
  748. u32 rnic_handle;
  749. __be32 stag_index;
  750. } __attribute__((packed)) ;
  751. struct c2wr_stag_dealloc_rep {
  752. struct c2wr_hdr hdr;
  753. } __attribute__((packed)) ;
  754. union c2wr_stag_dealloc {
  755. struct c2wr_stag_dealloc_req req;
  756. struct c2wr_stag_dealloc_rep rep;
  757. } __attribute__((packed)) ;
  758. struct c2wr_nsmr_reregister_req {
  759. struct c2wr_hdr hdr;
  760. u64 va;
  761. u32 rnic_handle;
  762. u16 flags;
  763. u8 stag_key;
  764. u8 pad;
  765. u32 stag_index;
  766. u32 pd_id;
  767. u32 pbl_depth;
  768. u32 pbe_size;
  769. u32 fbo;
  770. u32 length;
  771. u32 addrs_length;
  772. u32 pad1;
  773. /* array of paddrs (must be aligned on a 64bit boundary) */
  774. u64 paddrs[0];
  775. } __attribute__((packed)) ;
  776. struct c2wr_nsmr_reregister_rep {
  777. struct c2wr_hdr hdr;
  778. u32 pbl_depth;
  779. u32 stag_index;
  780. } __attribute__((packed)) ;
  781. union c2wr_nsmr_reregister {
  782. struct c2wr_nsmr_reregister_req req;
  783. struct c2wr_nsmr_reregister_rep rep;
  784. } __attribute__((packed)) ;
  785. struct c2wr_smr_register_req {
  786. struct c2wr_hdr hdr;
  787. u64 va;
  788. u32 rnic_handle;
  789. u16 flags;
  790. u8 stag_key;
  791. u8 pad;
  792. u32 stag_index;
  793. u32 pd_id;
  794. } __attribute__((packed)) ;
  795. struct c2wr_smr_register_rep {
  796. struct c2wr_hdr hdr;
  797. u32 stag_index;
  798. } __attribute__((packed)) ;
  799. union c2wr_smr_register {
  800. struct c2wr_smr_register_req req;
  801. struct c2wr_smr_register_rep rep;
  802. } __attribute__((packed)) ;
  803. struct c2wr_mw_alloc_req {
  804. struct c2wr_hdr hdr;
  805. u32 rnic_handle;
  806. u32 pd_id;
  807. } __attribute__((packed)) ;
  808. struct c2wr_mw_alloc_rep {
  809. struct c2wr_hdr hdr;
  810. u32 stag_index;
  811. } __attribute__((packed)) ;
  812. union c2wr_mw_alloc {
  813. struct c2wr_mw_alloc_req req;
  814. struct c2wr_mw_alloc_rep rep;
  815. } __attribute__((packed)) ;
  816. /*
  817. *------------------------ WRs -----------------------
  818. */
  819. struct c2wr_user_hdr {
  820. struct c2wr_hdr hdr; /* Has status and WR Type */
  821. } __attribute__((packed)) ;
  822. enum c2_qp_state {
  823. C2_QP_STATE_IDLE = 0x01,
  824. C2_QP_STATE_CONNECTING = 0x02,
  825. C2_QP_STATE_RTS = 0x04,
  826. C2_QP_STATE_CLOSING = 0x08,
  827. C2_QP_STATE_TERMINATE = 0x10,
  828. C2_QP_STATE_ERROR = 0x20,
  829. };
  830. /* Completion queue entry. */
  831. struct c2wr_ce {
  832. struct c2wr_hdr hdr; /* Has status and WR Type */
  833. u64 qp_user_context; /* c2_user_qp_t * */
  834. u32 qp_state; /* Current QP State */
  835. u32 handle; /* QPID or EP Handle */
  836. __be32 bytes_rcvd; /* valid for RECV WCs */
  837. u32 stag;
  838. } __attribute__((packed)) ;
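/*
 * Illustrative sketch (assumption): how a host completion handler might
 * read a completion entry. bytes_rcvd is big-endian on the wire and is
 * only meaningful for RECV completions; be32_to_cpu() is the usual kernel
 * byte-order helper and is assumed to be available to the includer.
 */
static __inline__ u32 example_ce_recv_length(const struct c2wr_ce *ce)
{
	return be32_to_cpu(ce->bytes_rcvd);
}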
  839. /*
  840. * Flags used for all post-sq WRs. These must fit in the flags
  841. * field of the struct c2wr_hdr (eight bits).
  842. */
  843. enum {
  844. SQ_SIGNALED = 0x01,
  845. SQ_READ_FENCE = 0x02,
  846. SQ_FENCE = 0x04,
  847. };
  848. /*
  849. * Common fields for all post-sq WRs. Namely the standard header and a
  850. * secondary header with fields common to all post-sq WRs.
  851. */
  852. struct c2_sq_hdr {
  853. struct c2wr_user_hdr user_hdr;
  854. } __attribute__((packed));
  855. /*
  856. * Same as above but for post-rq WRs.
  857. */
  858. struct c2_rq_hdr {
  859. struct c2wr_user_hdr user_hdr;
  860. } __attribute__((packed));
  861. /*
  862. * use the same struct for all sends.
  863. */
  864. struct c2wr_send_req {
  865. struct c2_sq_hdr sq_hdr;
  866. __be32 sge_len;
  867. __be32 remote_stag;
  868. u8 data[0]; /* SGE array */
  869. } __attribute__((packed));
  870. union c2wr_send {
  871. struct c2wr_send_req req;
  872. struct c2wr_ce rep;
  873. } __attribute__((packed));
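/*
 * Illustrative sketch (assumption): the SGE array for a send WR is laid
 * out in-line in data[] as consecutive struct c2_data_addr entries, so the
 * total WR size follows directly from the SGE count.
 */
static __inline__ u32 example_send_wr_size(u8 sge_count)
{
	return sizeof(struct c2wr_send_req) +
		sge_count * sizeof(struct c2_data_addr);
}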
  874. struct c2wr_rdma_write_req {
  875. struct c2_sq_hdr sq_hdr;
  876. __be64 remote_to;
  877. __be32 remote_stag;
  878. __be32 sge_len;
  879. u8 data[0]; /* SGE array */
  880. } __attribute__((packed));
  881. union c2wr_rdma_write {
  882. struct c2wr_rdma_write_req req;
  883. struct c2wr_ce rep;
  884. } __attribute__((packed));
  885. struct c2wr_rdma_read_req {
  886. struct c2_sq_hdr sq_hdr;
  887. __be64 local_to;
  888. __be64 remote_to;
  889. __be32 local_stag;
  890. __be32 remote_stag;
  891. __be32 length;
  892. } __attribute__((packed));
  893. union c2wr_rdma_read {
  894. struct c2wr_rdma_read_req req;
  895. struct c2wr_ce rep;
  896. } __attribute__((packed));
  897. struct c2wr_mw_bind_req {
  898. struct c2_sq_hdr sq_hdr;
  899. u64 va;
  900. u8 stag_key;
  901. u8 pad[3];
  902. u32 mw_stag_index;
  903. u32 mr_stag_index;
  904. u32 length;
  905. u32 flags;
  906. } __attribute__((packed));
  907. union c2wr_mw_bind {
  908. struct c2wr_mw_bind_req req;
  909. struct c2wr_ce rep;
  910. } __attribute__((packed));
  911. struct c2wr_nsmr_fastreg_req {
  912. struct c2_sq_hdr sq_hdr;
  913. u64 va;
  914. u8 stag_key;
  915. u8 pad[3];
  916. u32 stag_index;
  917. u32 pbe_size;
  918. u32 fbo;
  919. u32 length;
  920. u32 addrs_length;
  921. /* array of paddrs (must be aligned on a 64bit boundary) */
  922. u64 paddrs[0];
  923. } __attribute__((packed));
  924. union c2wr_nsmr_fastreg {
  925. struct c2wr_nsmr_fastreg_req req;
  926. struct c2wr_ce rep;
  927. } __attribute__((packed));
  928. struct c2wr_stag_invalidate_req {
  929. struct c2_sq_hdr sq_hdr;
  930. u8 stag_key;
  931. u8 pad[3];
  932. u32 stag_index;
  933. } __attribute__((packed));
  934. union c2wr_stag_invalidate {
  935. struct c2wr_stag_invalidate_req req;
  936. struct c2wr_ce rep;
  937. } __attribute__((packed));
  938. union c2wr_sqwr {
  939. struct c2_sq_hdr sq_hdr;
  940. struct c2wr_send_req send;
  941. struct c2wr_send_req send_se;
  942. struct c2wr_send_req send_inv;
  943. struct c2wr_send_req send_se_inv;
  944. struct c2wr_rdma_write_req rdma_write;
  945. struct c2wr_rdma_read_req rdma_read;
  946. struct c2wr_mw_bind_req mw_bind;
  947. struct c2wr_nsmr_fastreg_req nsmr_fastreg;
  948. struct c2wr_stag_invalidate_req stag_inv;
  949. } __attribute__((packed));
  950. /*
  951. * RQ WRs
  952. */
  953. struct c2wr_rqwr {
  954. struct c2_rq_hdr rq_hdr;
  955. u8 data[0]; /* array of SGEs */
  956. } __attribute__((packed));
  957. union c2wr_recv {
  958. struct c2wr_rqwr req;
  959. struct c2wr_ce rep;
  960. } __attribute__((packed));
  961. /*
  962. * All AEs start with this header. Most AEs only need to convey the
  963. * information in the header. Some, like LLP connection events, need
  964. * more info. The union c2wr_ae has all the possible AEs.
  965. *
  966. * hdr.context is the user_context from the rnic_open WR. It is NULL if
  967. * the AE is not affiliated with an rnic.
  968. *
  969. * hdr.id is the AE identifier (e.g., CCAE_REMOTE_SHUTDOWN,
  970. * CCAE_LLP_CLOSE_COMPLETE).
  971. *
  972. * resource_type is one of: C2_RES_IND_QP, C2_RES_IND_CQ, C2_RES_IND_SRQ
  973. *
  974. * user_context is the context passed down when the host created the resource.
  975. */
  976. struct c2wr_ae_hdr {
  977. struct c2wr_hdr hdr;
  978. u64 user_context; /* user context for this res. */
  979. __be32 resource_type; /* see enum c2_resource_indicator */
  980. __be32 resource; /* handle for resource */
  981. __be32 qp_state; /* current QP State */
  982. } __attribute__((packed));
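/*
 * Illustrative sketch (assumption): an AE handler on the host can read the
 * affiliated QP state straight from the AE header. qp_state is big-endian
 * on the wire; be32_to_cpu() is assumed to be available to the includer.
 */
static __inline__ int example_ae_qp_in_error(const struct c2wr_ae_hdr *ae)
{
	return be32_to_cpu(ae->qp_state) == C2_QP_STATE_ERROR;
}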
  983. /*
  984. * After submitting the CCAE_ACTIVE_CONNECT_RESULTS message on the AEQ,
  985. * the adapter moves the QP into RTS state
  986. */
  987. struct c2wr_ae_active_connect_results {
  988. struct c2wr_ae_hdr ae_hdr;
  989. __be32 laddr;
  990. __be32 raddr;
  991. __be16 lport;
  992. __be16 rport;
  993. __be32 private_data_length;
  994. u8 private_data[0]; /* data is in-line in the msg. */
  995. } __attribute__((packed));
  996. /*
  997. * When connections are established by the stack (and the private data
  998. * MPA frame is received), the adapter will generate an event to the host.
  999. * The details of the connection, any private data, and the new connection
  1000. * request handle are passed up via the CCAE_CONNECTION_REQUEST msg on the
  1001. * AE queue:
  1002. */
  1003. struct c2wr_ae_connection_request {
  1004. struct c2wr_ae_hdr ae_hdr;
  1005. u32 cr_handle; /* connreq handle (sock ptr) */
  1006. __be32 laddr;
  1007. __be32 raddr;
  1008. __be16 lport;
  1009. __be16 rport;
  1010. __be32 private_data_length;
  1011. u8 private_data[0]; /* data is in-line in the msg. */
  1012. } __attribute__((packed));
  1013. union c2wr_ae {
  1014. struct c2wr_ae_hdr ae_generic;
  1015. struct c2wr_ae_active_connect_results ae_active_connect_results;
  1016. struct c2wr_ae_connection_request ae_connection_request;
  1017. } __attribute__((packed));
  1018. struct c2wr_init_req {
  1019. struct c2wr_hdr hdr;
  1020. __be64 hint_count;
  1021. __be64 q0_host_shared;
  1022. __be64 q1_host_shared;
  1023. __be64 q1_host_msg_pool;
  1024. __be64 q2_host_shared;
  1025. __be64 q2_host_msg_pool;
  1026. } __attribute__((packed));
  1027. struct c2wr_init_rep {
  1028. struct c2wr_hdr hdr;
  1029. } __attribute__((packed));
  1030. union c2wr_init {
  1031. struct c2wr_init_req req;
  1032. struct c2wr_init_rep rep;
  1033. } __attribute__((packed));
  1034. /*
  1035. * For upgrading flash.
  1036. */
  1037. struct c2wr_flash_init_req {
  1038. struct c2wr_hdr hdr;
  1039. u32 rnic_handle;
  1040. } __attribute__((packed));
  1041. struct c2wr_flash_init_rep {
  1042. struct c2wr_hdr hdr;
  1043. u32 adapter_flash_buf_offset;
  1044. u32 adapter_flash_len;
  1045. } __attribute__((packed));
  1046. union c2wr_flash_init {
  1047. struct c2wr_flash_init_req req;
  1048. struct c2wr_flash_init_rep rep;
  1049. } __attribute__((packed));
  1050. struct c2wr_flash_req {
  1051. struct c2wr_hdr hdr;
  1052. u32 rnic_handle;
  1053. u32 len;
  1054. } __attribute__((packed));
  1055. struct c2wr_flash_rep {
  1056. struct c2wr_hdr hdr;
  1057. u32 status;
  1058. } __attribute__((packed));
  1059. union c2wr_flash {
  1060. struct c2wr_flash_req req;
  1061. struct c2wr_flash_rep rep;
  1062. } __attribute__((packed));
  1063. struct c2wr_buf_alloc_req {
  1064. struct c2wr_hdr hdr;
  1065. u32 rnic_handle;
  1066. u32 size;
  1067. } __attribute__((packed));
  1068. struct c2wr_buf_alloc_rep {
  1069. struct c2wr_hdr hdr;
  1070. u32 offset; /* 0 if mem not available */
  1071. u32 size; /* 0 if mem not available */
  1072. } __attribute__((packed));
  1073. union c2wr_buf_alloc {
  1074. struct c2wr_buf_alloc_req req;
  1075. struct c2wr_buf_alloc_rep rep;
  1076. } __attribute__((packed));
  1077. struct c2wr_buf_free_req {
  1078. struct c2wr_hdr hdr;
  1079. u32 rnic_handle;
  1080. u32 offset; /* Must match value from alloc */
  1081. u32 size; /* Must match value from alloc */
  1082. } __attribute__((packed));
  1083. struct c2wr_buf_free_rep {
  1084. struct c2wr_hdr hdr;
  1085. } __attribute__((packed));
  1086. union c2wr_buf_free {
  1087. struct c2wr_buf_free_req req;
  1088. struct c2wr_ce rep;
  1089. } __attribute__((packed));
  1090. struct c2wr_flash_write_req {
  1091. struct c2wr_hdr hdr;
  1092. u32 rnic_handle;
  1093. u32 offset;
  1094. u32 size;
  1095. u32 type;
  1096. u32 flags;
  1097. } __attribute__((packed));
  1098. struct c2wr_flash_write_rep {
  1099. struct c2wr_hdr hdr;
  1100. u32 status;
  1101. } __attribute__((packed));
  1102. union c2wr_flash_write {
  1103. struct c2wr_flash_write_req req;
  1104. struct c2wr_flash_write_rep rep;
  1105. } __attribute__((packed));
  1106. /*
  1107. * Messages for LLP connection setup.
  1108. */
  1109. /*
  1110. * Listen Request. This allocates a listening endpoint to allow passive
  1111. * connection setup. Newly established LLP connections are passed up
  1112. * via an AE. See struct c2wr_ae_connection_request.
  1113. */
  1114. struct c2wr_ep_listen_create_req {
  1115. struct c2wr_hdr hdr;
  1116. u64 user_context; /* returned in AEs. */
  1117. u32 rnic_handle;
  1118. __be32 local_addr; /* local addr, or 0 */
  1119. __be16 local_port; /* 0 means "pick one" */
  1120. u16 pad;
  1121. __be32 backlog; /* traditional TCP listen backlog */
  1122. } __attribute__((packed));
  1123. struct c2wr_ep_listen_create_rep {
  1124. struct c2wr_hdr hdr;
  1125. u32 ep_handle; /* handle to new listening ep */
  1126. u16 local_port; /* resulting port... */
  1127. u16 pad;
  1128. } __attribute__((packed));
  1129. union c2wr_ep_listen_create {
  1130. struct c2wr_ep_listen_create_req req;
  1131. struct c2wr_ep_listen_create_rep rep;
  1132. } __attribute__((packed));
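/*
 * Illustrative sketch (assumption): filling a listen request. Address and
 * port are big-endian on the wire; zero for local_addr/local_port lets the
 * adapter pick. cpu_to_be32() is the usual kernel helper and is assumed to
 * be available to the includer; the backlog value here is arbitrary.
 */
static __inline__ void example_fill_listen(struct c2wr_ep_listen_create_req *req,
					   u32 rnic_handle, u64 user_context)
{
	req->user_context = user_context;
	req->rnic_handle = rnic_handle;
	req->local_addr = 0;		/* 0: any local address */
	req->local_port = 0;		/* 0 means "pick one" */
	req->pad = 0;
	req->backlog = cpu_to_be32(16);	/* arbitrary example backlog */
}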
  1133. struct c2wr_ep_listen_destroy_req {
  1134. struct c2wr_hdr hdr;
  1135. u32 rnic_handle;
  1136. u32 ep_handle;
  1137. } __attribute__((packed));
  1138. struct c2wr_ep_listen_destroy_rep {
  1139. struct c2wr_hdr hdr;
  1140. } __attribute__((packed));
  1141. union c2wr_ep_listen_destroy {
  1142. struct c2wr_ep_listen_destroy_req req;
  1143. struct c2wr_ep_listen_destroy_rep rep;
  1144. } __attribute__((packed));
  1145. struct c2wr_ep_query_req {
  1146. struct c2wr_hdr hdr;
  1147. u32 rnic_handle;
  1148. u32 ep_handle;
  1149. } __attribute__((packed));
  1150. struct c2wr_ep_query_rep {
  1151. struct c2wr_hdr hdr;
  1152. u32 rnic_handle;
  1153. u32 local_addr;
  1154. u32 remote_addr;
  1155. u16 local_port;
  1156. u16 remote_port;
  1157. } __attribute__((packed));
  1158. union c2wr_ep_query {
  1159. struct c2wr_ep_query_req req;
  1160. struct c2wr_ep_query_rep rep;
  1161. } __attribute__((packed));
  1162. /*
  1163. * The host passes this down to indicate acceptance of a pending iWARP
  1164. * connection. The cr_handle was obtained from the CONNECTION_REQUEST
  1165. * AE passed up by the adapter. See struct c2wr_ae_connection_request.
  1166. */
  1167. struct c2wr_cr_accept_req {
  1168. struct c2wr_hdr hdr;
  1169. u32 rnic_handle;
  1170. u32 qp_handle; /* QP to bind to this LLP conn */
  1171. u32 ep_handle; /* LLP handle to accept */
  1172. __be32 private_data_length;
  1173. u8 private_data[0]; /* data in-line in msg. */
  1174. } __attribute__((packed));
  1175. /*
  1176. * The adapter sends a reply once the private data has been successfully
  1177. * submitted to the LLP.
  1178. */
  1179. struct c2wr_cr_accept_rep {
  1180. struct c2wr_hdr hdr;
  1181. } __attribute__((packed));
  1182. union c2wr_cr_accept {
  1183. struct c2wr_cr_accept_req req;
  1184. struct c2wr_cr_accept_rep rep;
  1185. } __attribute__((packed));
  1186. /*
  1187. * The host sends this down if a given iWARP connection request was
  1188. * rejected by the consumer. The cr_handle was obtained from a
  1189. * previous struct c2wr_ae_connection_request AE sent by the adapter.
  1190. */
  1191. struct c2wr_cr_reject_req {
  1192. struct c2wr_hdr hdr;
  1193. u32 rnic_handle;
  1194. u32 ep_handle; /* LLP handle to reject */
  1195. } __attribute__((packed));
  1196. /*
  1197. * Dunno if this is needed, but we'll add it for now. The adapter will
  1198. * send the reject_reply after the LLP endpoint has been destroyed.
  1199. */
  1200. struct c2wr_cr_reject_rep {
  1201. struct c2wr_hdr hdr;
  1202. } __attribute__((packed));
  1203. union c2wr_cr_reject {
  1204. struct c2wr_cr_reject_req req;
  1205. struct c2wr_cr_reject_rep rep;
  1206. } __attribute__((packed));
  1207. /*
  1208. * console command. Used to implement a debug console over the verbs
  1209. * request and reply queues.
  1210. */
  1211. /*
  1212. * Console request message. It contains:
  1213. * - message hdr with id = CCWR_CONSOLE
  1214. * - the physaddr/len of host memory to be used for the reply.
  1215. * - the command string, e.g. "netstat -s" or "zoneinfo"
  1216. */
  1217. struct c2wr_console_req {
  1218. struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */
  1219. u64 reply_buf; /* pinned host buf for reply */
  1220. u32 reply_buf_len; /* length of reply buffer */
  1221. u8 command[0]; /* NUL terminated ascii string */
  1222. /* containing the command req */
  1223. } __attribute__((packed));
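/*
 * Illustrative sketch (assumption): total length of a console request,
 * where the NUL-terminated command string is carried in-line in command[].
 * strlen() is assumed to be available to the includer.
 */
static __inline__ u32 example_console_req_size(const char *command)
{
	return sizeof(struct c2wr_console_req) + strlen(command) + 1;
}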
  1224. /*
  1225. * flags used in the console reply.
  1226. */
  1227. enum c2_console_flags {
  1228. CONS_REPLY_TRUNCATED = 0x00000001 /* reply was truncated */
  1229. } __attribute__((packed));
  1230. /*
  1231. * Console reply message.
  1232. * hdr.result contains the c2_status_t error if the reply was _not_ generated,
  1233. * or C2_OK if the reply was generated.
  1234. */
  1235. struct c2wr_console_rep {
  1236. struct c2wr_hdr hdr; /* id = CCWR_CONSOLE */
  1237. u32 flags;
  1238. } __attribute__((packed));
  1239. union c2wr_console {
  1240. struct c2wr_console_req req;
  1241. struct c2wr_console_rep rep;
  1242. } __attribute__((packed));
  1243. /*
  1244. * Giant union with all WRs. Makes life easier...
  1245. */
  1246. union c2wr {
  1247. struct c2wr_hdr hdr;
  1248. struct c2wr_user_hdr user_hdr;
  1249. union c2wr_rnic_open rnic_open;
  1250. union c2wr_rnic_query rnic_query;
  1251. union c2wr_rnic_getconfig rnic_getconfig;
  1252. union c2wr_rnic_setconfig rnic_setconfig;
  1253. union c2wr_rnic_close rnic_close;
  1254. union c2wr_cq_create cq_create;
  1255. union c2wr_cq_modify cq_modify;
  1256. union c2wr_cq_destroy cq_destroy;
  1257. union c2wr_pd_alloc pd_alloc;
  1258. union c2wr_pd_dealloc pd_dealloc;
  1259. union c2wr_srq_create srq_create;
  1260. union c2wr_srq_destroy srq_destroy;
  1261. union c2wr_qp_create qp_create;
  1262. union c2wr_qp_query qp_query;
  1263. union c2wr_qp_modify qp_modify;
  1264. union c2wr_qp_destroy qp_destroy;
  1265. struct c2wr_qp_connect qp_connect;
  1266. union c2wr_nsmr_stag_alloc nsmr_stag_alloc;
  1267. union c2wr_nsmr_register nsmr_register;
  1268. union c2wr_nsmr_pbl nsmr_pbl;
  1269. union c2wr_mr_query mr_query;
  1270. union c2wr_mw_query mw_query;
  1271. union c2wr_stag_dealloc stag_dealloc;
  1272. union c2wr_sqwr sqwr;
  1273. struct c2wr_rqwr rqwr;
  1274. struct c2wr_ce ce;
  1275. union c2wr_ae ae;
  1276. union c2wr_init init;
  1277. union c2wr_ep_listen_create ep_listen_create;
  1278. union c2wr_ep_listen_destroy ep_listen_destroy;
  1279. union c2wr_cr_accept cr_accept;
  1280. union c2wr_cr_reject cr_reject;
  1281. union c2wr_console console;
  1282. union c2wr_flash_init flash_init;
  1283. union c2wr_flash flash;
  1284. union c2wr_buf_alloc buf_alloc;
  1285. union c2wr_buf_free buf_free;
  1286. union c2wr_flash_write flash_write;
  1287. } __attribute__((packed));
  1288. /*
  1289. * Accessors for the wr fields that are packed together tightly to
  1290. * reduce the wr message size. The wr arguments are void* so that
  1291. * either a struct c2wr*, a struct c2wr_hdr*, or a pointer to any of the types
  1292. * in the struct c2wr union can be passed in.
  1293. */
  1294. static __inline__ u8 c2_wr_get_id(void *wr)
  1295. {
  1296. return ((struct c2wr_hdr *) wr)->id;
  1297. }
  1298. static __inline__ void c2_wr_set_id(void *wr, u8 id)
  1299. {
  1300. ((struct c2wr_hdr *) wr)->id = id;
  1301. }
  1302. static __inline__ u8 c2_wr_get_result(void *wr)
  1303. {
  1304. return ((struct c2wr_hdr *) wr)->result;
  1305. }
  1306. static __inline__ void c2_wr_set_result(void *wr, u8 result)
  1307. {
  1308. ((struct c2wr_hdr *) wr)->result = result;
  1309. }
  1310. static __inline__ u8 c2_wr_get_flags(void *wr)
  1311. {
  1312. return ((struct c2wr_hdr *) wr)->flags;
  1313. }
  1314. static __inline__ void c2_wr_set_flags(void *wr, u8 flags)
  1315. {
  1316. ((struct c2wr_hdr *) wr)->flags = flags;
  1317. }
  1318. static __inline__ u8 c2_wr_get_sge_count(void *wr)
  1319. {
  1320. return ((struct c2wr_hdr *) wr)->sge_count;
  1321. }
  1322. static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
  1323. {
  1324. ((struct c2wr_hdr *) wr)->sge_count = sge_count;
  1325. }
  1326. static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
  1327. {
  1328. return ((struct c2wr_hdr *) wr)->wqe_count;
  1329. }
  1330. static __inline__ void c2_wr_set_wqe_count(void *wr, u32 wqe_count)
  1331. {
  1332. ((struct c2wr_hdr *) wr)->wqe_count = wqe_count;
  1333. }
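/*
 * Illustrative sketch (assumption): the accessors above let a caller
 * initialize the packed header of any WR variant uniformly through the
 * leading struct c2wr_hdr before handing the message to the adapter.
 */
static __inline__ void example_init_wr_hdr(union c2wr *wr, u8 id, u64 context)
{
	c2_wr_set_id(wr, id);
	c2_wr_set_result(wr, 0);
	c2_wr_set_flags(wr, 0);
	c2_wr_set_sge_count(wr, 0);
	wr->hdr.context = context;
}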
  1334. #endif /* _C2_WR_H_ */