ib_verbs.h

/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>

union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA	= 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
	IB_DEVICE_ZERO_STAG		= (1<<15),
	IB_DEVICE_SEND_W_INV		= (1<<16),
	IB_DEVICE_MEM_WINDOW		= (1<<17),
	/*
	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
	 * messages and can verify the validity of checksum for
	 * incoming messages. Setting this flag implies that the
	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
	 */
	IB_DEVICE_UD_IP_CSUM		= (1<<18),
};
enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64			fw_ver;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
};

enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default:	  return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X:  return  1;
	case IB_WIDTH_4X:  return  4;
	case IB_WIDTH_8X:  return  8;
	case IB_WIDTH_12X: return 12;
	default:	   return -1;
	}
}

struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID	= 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC	= 1 << 1
};

struct ib_device_modify {
	u64	sys_image_guid;
	char	node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN	= 1,
	IB_PORT_INIT_TYPE	= (1<<2),
	IB_PORT_RESET_QKEY_CNTR	= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER
};

struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void		(*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
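
/*
 * Illustrative sketch (not part of the original header): a consumer
 * typically embeds an ib_event_handler, initializes it with
 * INIT_IB_EVENT_HANDLER(), and then registers it with
 * ib_register_event_handler(), declared later in this file.  The helper
 * name below is hypothetical.
 */
static inline void example_setup_event_handler(struct ib_event_handler *eh,
					       struct ib_device *device,
					       void (*fn)(struct ib_event_handler *,
							  struct ib_event *))
{
	INIT_IB_EVENT_HANDLER(eh, device, fn);
	/* caller then passes eh to ib_register_event_handler() */
}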
struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	__constant_htons(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS   = 5,
	IB_RATE_10_GBPS  = 3,
	IB_RATE_20_GBPS  = 6,
	IB_RATE_30_GBPS  = 4,
	IB_RATE_40_GBPS  = 7,
	IB_RATE_60_GBPS  = 8,
	IB_RATE_80_GBPS  = 9,
	IB_RATE_120_GBPS = 10
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec.  For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;
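
/*
 * Illustrative sketch (not part of the original header): using
 * ib_rate_to_mult() to turn an IB_RATE_* value into a bandwidth figure,
 * based on the 2.5 Gbit/sec base rate documented above.  The helper
 * name is hypothetical.
 */
static inline int example_rate_to_tenths_of_gbps(enum ib_rate rate)
{
	int mult = ib_rate_to_mult(rate);

	/* e.g. IB_RATE_10_GBPS -> 4 * 25 == 100 tenths == 10 Gbit/sec */
	return mult < 0 ? -1 : mult * 25;
}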
struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;
	u8			port_num;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	/*
	 * Set value of IB_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IB_WC_RECV).
	 */
	IB_WC_RECV		= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH	= 1,
	IB_WC_WITH_IMM	= (1<<1)
};

struct ib_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	struct ib_qp	       *qp;
	__be32			imm_data;
	u32			src_qp;
	int			wc_flags;
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
	int			csum_ok;
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED			= 1 << 0,
	IB_CQ_NEXT_COMP			= 1 << 1,
	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
};

struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,
	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETY
};

struct ib_qp_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	u8			port_num;	/* special QP types only */
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD
};

enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3),
	IB_SEND_IP_CSUM		= (1<<4)
};

struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;
	__be32			imm_data;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			u32	remote_qpn;
			u32	remote_qkey;
			u16	pkey_index;	/* valid for GSI only */
			u8	port_num;	/* valid for DR SMPs on switch only */
		} ud;
	} wr;
};
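
/*
 * Illustrative sketch (not part of the original header): filling in an
 * ib_send_wr for a signaled RDMA write of a single scatter/gather entry.
 * The helper name is hypothetical; the caller posts the WR with
 * ib_post_send(), declared later in this file.
 */
static inline void example_build_rdma_write(struct ib_send_wr *wr,
					    struct ib_sge *sge,
					    u64 wr_id, u64 remote_addr,
					    u32 rkey)
{
	wr->next       = NULL;
	wr->wr_id      = wr_id;		/* returned in ib_wc.wr_id on completion */
	wr->sg_list    = sge;
	wr->num_sge    = 1;
	wr->opcode     = IB_WR_RDMA_WRITE;
	wr->send_flags = IB_SEND_SIGNALED;
	wr->wr.rdma.remote_addr = remote_addr;
	wr->wr.rdma.rkey	= rkey;
}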
struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4)
};

struct ib_phys_buf {
	u64	addr;
	u64	size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;
	u32		lkey;
	u32		rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

struct ib_mw_bind {
	struct ib_mr   *mr;
	u64		wr_id;
	u64		addr;
	u32		length;
	int		send_flags;
	int		mw_access_flags;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_shift;
};

struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	int			closing;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	void		       *object;		/* containing object */
	struct list_head	list;		/* link to context's list */
	u32			id;		/* index into kernel idr */
	struct kref		ref;
	struct rw_semaphore	mutex;		/* protects .live */
	int			live;
};

struct ib_udata {
	void __user *inbuf;
	void __user *outbuf;
	size_t	     inlen;
	size_t	     outlen;
};

struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	atomic_t		usecnt;	/* count all resources */
};

struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler		comp_handler;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *cq_context;
	int			cqe;
	atomic_t		usecnt;	/* count number of work queues */
};

struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	atomic_t		usecnt;
};

struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt;	/* count number of MWs */
};

struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
};

struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed   */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent    */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t		lock;
	struct ib_event_handler	event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
	u8		       *lmc_cache;
};

struct ib_dma_mapping_ops {
	int		(*mapping_error)(struct ib_device *dev,
					 u64 dma_addr);
	u64		(*map_single)(struct ib_device *dev,
				      void *ptr, size_t size,
				      enum dma_data_direction direction);
	void		(*unmap_single)(struct ib_device *dev,
					u64 addr, size_t size,
					enum dma_data_direction direction);
	u64		(*map_page)(struct ib_device *dev,
				    struct page *page, unsigned long offset,
				    size_t size,
				    enum dma_data_direction direction);
	void		(*unmap_page)(struct ib_device *dev,
				      u64 addr, size_t size,
				      enum dma_data_direction direction);
	int		(*map_sg)(struct ib_device *dev,
				  struct scatterlist *sg, int nents,
				  enum dma_data_direction direction);
	void		(*unmap_sg)(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction);
	u64		(*dma_address)(struct ib_device *dev,
				       struct scatterlist *sg);
	unsigned int	(*dma_len)(struct ib_device *dev,
				   struct scatterlist *sg);
	void		(*sync_single_for_cpu)(struct ib_device *dev,
					       u64 dma_handle,
					       size_t size,
					       enum dma_data_direction dir);
	void		(*sync_single_for_device)(struct ib_device *dev,
						  u64 dma_handle,
						  size_t size,
						  enum dma_data_direction dir);
	void		*(*alloc_coherent)(struct ib_device *dev,
					   size_t size,
					   u64 *dma_handle,
					   gfp_t flag);
	void		(*free_coherent)(struct ib_device *dev,
					 size_t size, void *cpu_addr,
					 u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
	struct device	       *dma_device;

	char			name[IB_DEVICE_NAME_MAX];

	struct list_head	event_handler_list;
	spinlock_t		event_handler_lock;

	struct list_head	core_list;
	struct list_head	client_data_list;
	spinlock_t		client_data_lock;

	struct ib_cache		cache;
	int		       *pkey_tbl_len;
	int		       *gid_tbl_len;

	int			num_comp_vectors;

	struct iw_cm_verbs     *iwcm;

	int		      (*query_device)(struct ib_device *device,
					      struct ib_device_attr *device_attr);
	int		      (*query_port)(struct ib_device *device,
					    u8 port_num,
					    struct ib_port_attr *port_attr);
	int		      (*query_gid)(struct ib_device *device,
					   u8 port_num, int index,
					   union ib_gid *gid);
	int		      (*query_pkey)(struct ib_device *device,
					    u8 port_num, u16 index, u16 *pkey);
	int		      (*modify_device)(struct ib_device *device,
					       int device_modify_mask,
					       struct ib_device_modify *device_modify);
	int		      (*modify_port)(struct ib_device *device,
					     u8 port_num, int port_modify_mask,
					     struct ib_port_modify *port_modify);
	struct ib_ucontext *  (*alloc_ucontext)(struct ib_device *device,
						struct ib_udata *udata);
	int		      (*dealloc_ucontext)(struct ib_ucontext *context);
	int		      (*mmap)(struct ib_ucontext *context,
				      struct vm_area_struct *vma);
	struct ib_pd *	      (*alloc_pd)(struct ib_device *device,
					  struct ib_ucontext *context,
					  struct ib_udata *udata);
	int		      (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *	      (*create_ah)(struct ib_pd *pd,
					   struct ib_ah_attr *ah_attr);
	int		      (*modify_ah)(struct ib_ah *ah,
					   struct ib_ah_attr *ah_attr);
	int		      (*query_ah)(struct ib_ah *ah,
					  struct ib_ah_attr *ah_attr);
	int		      (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *	      (*create_srq)(struct ib_pd *pd,
					    struct ib_srq_init_attr *srq_init_attr,
					    struct ib_udata *udata);
	int		      (*modify_srq)(struct ib_srq *srq,
					    struct ib_srq_attr *srq_attr,
					    enum ib_srq_attr_mask srq_attr_mask,
					    struct ib_udata *udata);
	int		      (*query_srq)(struct ib_srq *srq,
					   struct ib_srq_attr *srq_attr);
	int		      (*destroy_srq)(struct ib_srq *srq);
	int		      (*post_srq_recv)(struct ib_srq *srq,
					       struct ib_recv_wr *recv_wr,
					       struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *	      (*create_qp)(struct ib_pd *pd,
					   struct ib_qp_init_attr *qp_init_attr,
					   struct ib_udata *udata);
	int		      (*modify_qp)(struct ib_qp *qp,
					   struct ib_qp_attr *qp_attr,
					   int qp_attr_mask,
					   struct ib_udata *udata);
	int		      (*query_qp)(struct ib_qp *qp,
					  struct ib_qp_attr *qp_attr,
					  int qp_attr_mask,
					  struct ib_qp_init_attr *qp_init_attr);
	int		      (*destroy_qp)(struct ib_qp *qp);
	int		      (*post_send)(struct ib_qp *qp,
					   struct ib_send_wr *send_wr,
					   struct ib_send_wr **bad_send_wr);
	int		      (*post_recv)(struct ib_qp *qp,
					   struct ib_recv_wr *recv_wr,
					   struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *	      (*create_cq)(struct ib_device *device, int cqe,
					   int comp_vector,
					   struct ib_ucontext *context,
					   struct ib_udata *udata);
	int		      (*destroy_cq)(struct ib_cq *cq);
	int		      (*resize_cq)(struct ib_cq *cq, int cqe,
					   struct ib_udata *udata);
	int		      (*poll_cq)(struct ib_cq *cq, int num_entries,
					 struct ib_wc *wc);
	int		      (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int		      (*req_notify_cq)(struct ib_cq *cq,
					       enum ib_cq_notify_flags flags);
	int		      (*req_ncomp_notif)(struct ib_cq *cq,
						 int wc_cnt);
	struct ib_mr *	      (*get_dma_mr)(struct ib_pd *pd,
					    int mr_access_flags);
	struct ib_mr *	      (*reg_phys_mr)(struct ib_pd *pd,
					     struct ib_phys_buf *phys_buf_array,
					     int num_phys_buf,
					     int mr_access_flags,
					     u64 *iova_start);
	struct ib_mr *	      (*reg_user_mr)(struct ib_pd *pd,
					     u64 start, u64 length,
					     u64 virt_addr,
					     int mr_access_flags,
					     struct ib_udata *udata);
	int		      (*query_mr)(struct ib_mr *mr,
					  struct ib_mr_attr *mr_attr);
	int		      (*dereg_mr)(struct ib_mr *mr);
	int		      (*rereg_phys_mr)(struct ib_mr *mr,
					       int mr_rereg_mask,
					       struct ib_pd *pd,
					       struct ib_phys_buf *phys_buf_array,
					       int num_phys_buf,
					       int mr_access_flags,
					       u64 *iova_start);
	struct ib_mw *	      (*alloc_mw)(struct ib_pd *pd);
	int		      (*bind_mw)(struct ib_qp *qp,
					 struct ib_mw *mw,
					 struct ib_mw_bind *mw_bind);
	int		      (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	      (*alloc_fmr)(struct ib_pd *pd,
					   int mr_access_flags,
					   struct ib_fmr_attr *fmr_attr);
	int		      (*map_phys_fmr)(struct ib_fmr *fmr,
					      u64 *page_list, int list_len,
					      u64 iova);
	int		      (*unmap_fmr)(struct list_head *fmr_list);
	int		      (*dealloc_fmr)(struct ib_fmr *fmr);
	int		      (*attach_mcast)(struct ib_qp *qp,
					      union ib_gid *gid,
					      u16 lid);
	int		      (*detach_mcast)(struct ib_qp *qp,
					      union ib_gid *gid,
					      u16 lid);
	int		      (*process_mad)(struct ib_device *device,
					     int process_mad_flags,
					     u8 port_num,
					     struct ib_wc *in_wc,
					     struct ib_grh *in_grh,
					     struct ib_mad *in_mad,
					     struct ib_mad *out_mad);

	struct ib_dma_mapping_ops   *dma_ops;

	struct module	       *owner;
	struct class_device	class_dev;
	struct kobject	       *ports_parent;
	struct list_head	port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	u64			uverbs_cmd_mask;
	int			uverbs_abi_ver;

	char			node_desc[64];
	__be64			node_guid;
	u8			node_type;
	u8			phys_port_cnt;
};

struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device);
void ib_unregister_device(struct ib_device *device);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask);

int ib_register_event_handler(struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
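
/*
 * Illustrative sketch (not part of the original header): building a
 * minimal LID-routed (no GRH) address vector and creating an address
 * handle from it.  The helper name is hypothetical.
 */
static inline struct ib_ah *example_create_lid_ah(struct ib_pd *pd,
						  u16 dlid, u8 port_num)
{
	struct ib_ah_attr attr = {
		.dlid	  = dlid,
		.sl	  = 0,
		.port_num = port_num,
	};

	return ib_create_ah(pd, &attr);
}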
/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header.  This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ.  If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return.  If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify.  On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP.  If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);
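
/*
 * Illustrative sketch (not part of the original header): typical
 * ib_qp_init_attr setup for a small RC QP sharing one CQ for sends and
 * receives.  The helper name and the capability values are hypothetical;
 * real consumers size the QP against the ib_device_attr limits.
 */
static inline struct ib_qp *example_create_rc_qp(struct ib_pd *pd,
						 struct ib_cq *cq)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq     = cq,
		.recv_cq     = cq,
		.cap	     = {
			.max_send_wr  = 16,
			.max_recv_wr  = 16,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type     = IB_QPT_RC,
	};

	return ib_create_qp(pd, &init_attr);
}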
/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify.  On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
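
/*
 * Illustrative sketch (not part of the original header): posting a
 * single work request and noting how bad_send_wr behaves on failure.
 * The helper name is hypothetical.
 */
static inline int example_post_one_send(struct ib_qp *qp,
					struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr;
	int ret;

	wr->next = NULL;			/* post exactly one WR */
	ret = ib_post_send(qp, wr, &bad_wr);
	/* on failure, bad_wr points at the first WR that was not posted */
	return ret;
}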
/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *   Must be >= 0 and < device->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);
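
/*
 * Illustrative sketch (not part of the original header): creating a CQ
 * with a completion callback, no async event handler, and completion
 * vector 0.  The helper name and the 256-entry size are hypothetical;
 * the size actually allocated may be larger (see cq->cqe).
 */
static inline struct ib_cq *example_create_cq(struct ib_device *device,
					      ib_comp_handler comp_handler,
					      void *cq_context)
{
	return ib_create_cq(device, comp_handler, NULL, cq_context, 256, 0);
}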
/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions.  If the return value
 * is < 0, an error occurred.  If the return value is >= 0, it is the
 * number of completions returned.  If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}
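
/*
 * Illustrative sketch (not part of the original header): draining all
 * currently available completions one at a time.  The helper name is
 * hypothetical.
 */
static inline void example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS) {
			/* wc.wr_id identifies the failed work request */
			continue;
		}
		/* process the successful completion here */
	}
}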
/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively.  %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event.  In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in.  It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ.  This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
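
/*
 * Illustrative sketch (not part of the original header): the re-arm
 * idiom described in the comment above.  A consumer polls until the CQ
 * is empty, re-arms with IB_CQ_REPORT_MISSED_EVENTS, and polls once
 * more if the return value reports possibly missed completions.  The
 * helper name is hypothetical.
 */
static inline int example_rearm_cq(struct ib_cq *cq)
{
	int ret = ib_req_notify_cq(cq,
				   IB_CQ_NEXT_COMP |
				   IB_CQ_REPORT_MISSED_EVENTS);

	/* ret > 0: poll the CQ again before sleeping; ret == 0: safe to wait */
	return ret;
}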
/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}
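
/*
 * Illustrative sketch (not part of the original header): mapping a
 * kernel buffer for device reads and checking the result.  The helper
 * name is hypothetical, and returning 0 on failure is a simplification
 * for this sketch.
 */
static inline u64 example_map_for_send(struct ib_device *dev,
				       void *buf, size_t len)
{
	u64 dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (ib_dma_mapping_error(dev, dma_addr))
		return 0;
	return dma_addr;
}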
  1358. /**
  1359. * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
  1360. * @dev: The device for which the DMA address was created
  1361. * @addr: The DMA address
  1362. * @size: The size of the region in bytes
  1363. * @direction: The direction of the DMA
  1364. */
  1365. static inline void ib_dma_unmap_single(struct ib_device *dev,
  1366. u64 addr, size_t size,
  1367. enum dma_data_direction direction)
  1368. {
  1369. if (dev->dma_ops)
  1370. dev->dma_ops->unmap_single(dev, addr, size, direction);
  1371. else
  1372. dma_unmap_single(dev->dma_device, addr, size, direction);
  1373. }
  1374. /**
  1375. * ib_dma_map_page - Map a physical page to DMA address
  1376. * @dev: The device for which the dma_addr is to be created
  1377. * @page: The page to be mapped
  1378. * @offset: The offset within the page
  1379. * @size: The size of the region in bytes
  1380. * @direction: The direction of the DMA
  1381. */
  1382. static inline u64 ib_dma_map_page(struct ib_device *dev,
  1383. struct page *page,
  1384. unsigned long offset,
  1385. size_t size,
  1386. enum dma_data_direction direction)
  1387. {
  1388. if (dev->dma_ops)
  1389. return dev->dma_ops->map_page(dev, page, offset, size, direction);
  1390. return dma_map_page(dev->dma_device, page, offset, size, direction);
  1391. }
  1392. /**
  1393. * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
  1394. * @dev: The device for which the DMA address was created
  1395. * @addr: The DMA address
  1396. * @size: The size of the region in bytes
  1397. * @direction: The direction of the DMA
  1398. */
  1399. static inline void ib_dma_unmap_page(struct ib_device *dev,
  1400. u64 addr, size_t size,
  1401. enum dma_data_direction direction)
  1402. {
  1403. if (dev->dma_ops)
  1404. dev->dma_ops->unmap_page(dev, addr, size, direction);
  1405. else
  1406. dma_unmap_page(dev->dma_device, addr, size, direction);
  1407. }

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_address(dev, sg);
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_len(dev, sg);
	return sg_dma_len(sg);
}
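
/*
 * Example usage (an illustrative sketch; "dev", "sg" and "nents" are
 * hypothetical): map a scatterlist and walk the mapped entries with the
 * accessors above rather than touching the scatterlist fields directly,
 * so device-specific dma_ops are honored:
 *
 *	int i, mapped;
 *
 *	mapped = ib_dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for (i = 0; i < mapped; ++i) {
 *		u64          addr = ib_sg_dma_address(dev, &sg[i]);
 *		unsigned int len  = ib_sg_dma_len(dev, &sg[i]);
 *		...		(build one SGE per mapped entry)
 *	}
 *	ib_dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 */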

/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}
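
/*
 * Example usage (an illustrative sketch; "dev", "dma_addr" and "len"
 * are hypothetical): give a streaming mapping back to the CPU before
 * reading data the device has written, then hand it back to the device:
 *
 *	ib_dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);
 *	...			(CPU inspects the received data)
 *	ib_dma_sync_single_for_device(dev, dma_addr, len, DMA_FROM_DEVICE);
 */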

/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
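
/*
 * Example usage (an illustrative sketch; "dev", "ring" and "RING_SIZE"
 * are hypothetical): allocate a buffer the CPU and device can share
 * without explicit sync calls, e.g. for a descriptor ring:
 *
 *	u64   dma_handle;
 *	void *ring;
 *
 *	ring = ib_dma_alloc_coherent(dev, RING_SIZE, &dma_handle, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...			(CPU and device share the buffer coherently)
 *	ib_dma_free_coherent(dev, RING_SIZE, ring, dma_handle);
 */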

/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain associated with the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);
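
/*
 * Example usage (an illustrative sketch, not a definitive recipe; "pd"
 * and "phys_addr" are hypothetical, and the addr/size members are
 * assumed from the struct ib_phys_buf declaration earlier in this
 * header): register a single page-aligned physical buffer:
 *
 *	struct ib_phys_buf buf = {
 *		.addr = phys_addr,
 *		.size = PAGE_SIZE,
 *	};
 *	u64 iova = phys_addr;
 *	struct ib_mr *mr;
 *
 *	mr = ib_reg_phys_mr(pd, &buf, 1,
 *			    IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *			    &iova);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */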

/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 *   Conceptually, this call performs a deregister of the memory region
 *   followed by a register of the physical memory region. Where possible,
 *   resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region;
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation; otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array; otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights; otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);
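
/*
 * Example usage (an illustrative sketch; "mr" is hypothetical): tighten
 * an existing region to local-only access while reusing its protection
 * domain and translation, which are ignored when their mask bits are
 * not set:
 *
 *	int ret;
 *
 *	ret = ib_rereg_phys_mr(mr, IB_MR_REREG_ACCESS, NULL, NULL, 0,
 *			       IB_ACCESS_LOCAL_WRITE, NULL);
 *	if (ret)
 *		return ret;
 */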

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}
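
/*
 * Example usage (an illustrative sketch, not a definitive recipe; "pd",
 * "qp", "mr", "addr" and "len" are hypothetical, and the struct
 * ib_mw_bind member names are assumed from the declaration earlier in
 * this header): allocate a window and bind it over part of "mr":
 *
 *	struct ib_mw_bind mw_bind = {
 *		.mr		 = mr,
 *		.addr		 = addr,
 *		.length		 = len,
 *		.mw_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	struct ib_mw *mw;
 *	int ret;
 *
 *	mw = ib_alloc_mw(pd);
 *	if (IS_ERR(mw))
 *		return PTR_ERR(mw);
 *	ret = ib_bind_mw(qp, mw, &mw_bind);
 */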

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);
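
/*
 * Example usage (an illustrative sketch, not a definitive recipe; "pd",
 * "pages", "npages" and "iova" are hypothetical, and the struct
 * ib_fmr_attr members plus the fmr list/lkey fields are assumed from
 * the declarations earlier in this header): the alloc/map/unmap/dealloc
 * lifecycle of a fast memory region:
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages  = 64,
 *		.max_maps   = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr;
 *	LIST_HEAD(fmr_list);
 *	int ret;
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 *	ret = ib_map_phys_fmr(fmr, pages, npages, iova);
 *	if (ret)
 *		return ret;
 *	...			(post work requests using fmr->lkey)
 *	list_add(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 */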

/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group. The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately. The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
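
/*
 * Example usage (an illustrative sketch; "qp" is assumed to be a UD QP,
 * and "mgid"/"mlid" to come from a subnet-administration multicast
 * join):
 *
 *	int ret;
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	...			(send/receive multicast traffic)
 *	ib_detach_mcast(qp, &mgid, mlid);
 */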

#endif /* IB_VERBS_H */