ehca_classes.h

/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Struct definition for eHCA internal structures
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __EHCA_CLASSES_H__
#define __EHCA_CLASSES_H__

struct ehca_module;
struct ehca_qp;
struct ehca_cq;
struct ehca_eq;
struct ehca_mr;
struct ehca_mw;
struct ehca_pd;
struct ehca_av;

#include <linux/wait.h>
#include <linux/mutex.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#ifdef CONFIG_PPC64
#include "ehca_classes_pSeries.h"
#endif
#include "ipz_pt_fn.h"
#include "ehca_qes.h"
#include "ehca_irq.h"

#define EHCA_EQE_CACHE_SIZE 20
#define EHCA_MAX_NUM_QUEUES 0xffff
struct ehca_eqe_cache_entry {
	struct ehca_eqe *eqe;
	struct ehca_cq *cq;
};

struct ehca_eq {
	u32 length;
	struct ipz_queue ipz_queue;
	struct ipz_eq_handle ipz_eq_handle;
	struct work_struct work;
	struct h_galpas galpas;
	int is_initialized;
	struct ehca_pfeq pf;
	spinlock_t spinlock;
	struct tasklet_struct interrupt_task;
	u32 ist;
	spinlock_t irq_spinlock;
	struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
};
struct ehca_sma_attr {
	u16 lid, lmc, sm_sl, sm_lid;
	u16 pkey_tbl_len, pkeys[16];
};

struct ehca_sport {
	struct ib_cq *ibcq_aqp1;
	struct ib_qp *ibqp_sqp[2];
	/* lock to serialize modify_qp() calls for sqp in normal
	 * and irq path (when event PORT_ACTIVE is received for the first time)
	 */
	spinlock_t mod_sqp_lock;
	enum ib_port_state port_state;
	struct ehca_sma_attr saved_attr;
	u32 pma_qp_nr;
};
#define HCA_CAP_MR_PGSIZE_4K  0x80000000
#define HCA_CAP_MR_PGSIZE_64K 0x40000000
#define HCA_CAP_MR_PGSIZE_1M  0x20000000
#define HCA_CAP_MR_PGSIZE_16M 0x10000000
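
/*
 * hca_cap_mr_pgsize in struct ehca_shca below presumably collects these
 * capability bits; a caller would then test for a given MR page size with a
 * bitwise AND, e.g. (shca->hca_cap_mr_pgsize & HCA_CAP_MR_PGSIZE_64K), and
 * fall back to the next smaller size if the bit is clear.
 */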

struct ehca_shca {
	struct ib_device ib_device;
	struct of_device *ofdev;
	u8 num_ports;
	int hw_level;
	struct list_head shca_list;
	struct ipz_adapter_handle ipz_hca_handle;
	struct ehca_sport sport[2];
	struct ehca_eq eq;
	struct ehca_eq neq;
	struct ehca_mr *maxmr;
	struct ehca_pd *pd;
	struct h_galpas galpas;
	struct mutex modify_mutex;
	u64 hca_cap;
	/* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
	u32 hca_cap_mr_pgsize;
	int max_mtu;
	atomic_t num_cqs;
	atomic_t num_qps;
};
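
/*
 * num_cqs/num_qps presumably count the CQs and QPs currently allocated on
 * this adapter, so that the create paths can enforce the ehca_max_cq and
 * ehca_max_qp module limits declared further down (both bounded by
 * EHCA_MAX_NUM_QUEUES).
 */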

struct ehca_pd {
	struct ib_pd ib_pd;
	struct ipz_pd fw_pd;
	/* small queue mgmt */
	struct mutex lock;
	struct list_head free[2];
	struct list_head full[2];
};

enum ehca_ext_qp_type {
	EQPT_NORMAL  = 0,
	EQPT_LLQP    = 1,
	EQPT_SRQBASE = 2,
	EQPT_SRQ     = 3,
};

/* struct to cache modify_qp()'s parms for GSI/SMI qp */
struct ehca_mod_qp_parm {
	int mask;
	struct ib_qp_attr attr;
};

#define EHCA_MOD_QP_PARM_MAX 4

#define QMAP_IDX_MASK 0xFFFFULL

/* struct for tracking if cqes have been reported to the application */
struct ehca_qmap_entry {
	u16 app_wr_id;
	u16 reported;
};

struct ehca_queue_map {
	struct ehca_qmap_entry *map;
	unsigned int entries;
	unsigned int tail;
	unsigned int left_to_poll;
};
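
/*
 * The queue map appears to work as follows: when a work request is posted,
 * the low 16 bits of the caller's wr_id are saved in app_wr_id and replaced
 * (via QMAP_IDX_MASK) with the entry's index into map[]; when the matching
 * CQE arrives, the entry is looked up by that index, "reported" is set, and
 * the original wr_id bits are restored before the CQE reaches the application.
 */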

struct ehca_qp {
	union {
		struct ib_qp ib_qp;
		struct ib_srq ib_srq;
	};
	u32 qp_type;
	enum ehca_ext_qp_type ext_type;
	enum ib_qp_state state;
	struct ipz_queue ipz_squeue;
	struct ehca_queue_map sq_map;
	struct ipz_queue ipz_rqueue;
	struct ehca_queue_map rq_map;
	struct h_galpas galpas;
	u32 qkey;
	u32 real_qp_num;
	u32 token;
	spinlock_t spinlock_s;
	spinlock_t spinlock_r;
	u32 sq_max_inline_data_size;
	struct ipz_qp_handle ipz_qp_handle;
	struct ehca_pfqp pf;
	struct ib_qp_init_attr init_attr;
	struct ehca_cq *send_cq;
	struct ehca_cq *recv_cq;
	unsigned int sqerr_purgeflag;
	struct hlist_node list_entries;
	/* array to cache modify_qp()'s parms for GSI/SMI qp */
	struct ehca_mod_qp_parm *mod_qp_parm;
	int mod_qp_parm_idx;
	/* mmap counter for resources mapped into user space */
	u32 mm_count_squeue;
	u32 mm_count_rqueue;
	u32 mm_count_galpa;
	/* unsolicited ack circumvention */
	int unsol_ack_circ;
	int mtu_shift;
	u32 message_count;
	u32 packet_count;
	atomic_t nr_events;	/* events seen */
	wait_queue_head_t wait_completion;
	int mig_armed;
	struct list_head sq_err_node;
	struct list_head rq_err_node;
};

#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
#define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
#define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)

/* must be power of 2 */
#define QP_HASHTAB_LEN 8
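
/*
 * QP_HASHTAB_LEN being a power of 2 suggests the per-CQ hash table below is
 * indexed by masking the QP number, e.g. real_qp_num & (QP_HASHTAB_LEN - 1),
 * which avoids a modulo; ehca_cq_assign_qp()/ehca_cq_get_qp() at the end of
 * this header presumably use that scheme to map completions back to their
 * ehca_qp.
 */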

struct ehca_cq {
	struct ib_cq ib_cq;
	struct ipz_queue ipz_queue;
	struct h_galpas galpas;
	spinlock_t spinlock;
	u32 cq_number;
	u32 token;
	u32 nr_of_entries;
	struct ipz_cq_handle ipz_cq_handle;
	struct ehca_pfcq pf;
	spinlock_t cb_lock;
	struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
	struct list_head entry;
	u32 nr_callbacks;	/* #events assigned to cpu by scaling code */
	atomic_t nr_events;	/* #events seen */
	wait_queue_head_t wait_completion;
	spinlock_t task_lock;
	/* mmap counter for resources mapped into user space */
	u32 mm_count_queue;
	u32 mm_count_galpa;
	struct list_head sqp_err_list;
	struct list_head rqp_err_list;
};

enum ehca_mr_flag {
	EHCA_MR_FLAG_FMR = 0x80000000,	 /* FMR, created with ehca_alloc_fmr */
	EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */
};

struct ehca_mr {
	union {
		struct ib_mr ib_mr;	/* must always be first in ehca_mr */
		struct ib_fmr ib_fmr;	/* must always be first in ehca_mr */
	} ib;
	struct ib_umem *umem;
	spinlock_t mrlock;

	enum ehca_mr_flag flags;
	u32 num_kpages;		/* number of kernel pages */
	u32 num_hwpages;	/* number of hw pages to form MR */
	u64 hwpage_size;	/* hw page size used for this MR */
	int acl;		/* ACL (stored here for usage in reregister) */
	u64 *start;		/* virtual start address (stored here for */
				/* usage in reregister) */
	u64 size;		/* size (stored here for usage in reregister) */
	u32 fmr_page_size;	/* page size for FMR */
	u32 fmr_max_pages;	/* max pages for FMR */
	u32 fmr_max_maps;	/* max outstanding maps for FMR */
	u32 fmr_map_cnt;	/* map counter for FMR */
	/* fw specific data */
	struct ipz_mrmw_handle ipz_mr_handle;	/* MR handle for h-calls */
	struct h_galpas galpas;
};

struct ehca_mw {
	struct ib_mw ib_mw;	/* gen2 mw, must always be first in ehca_mw */
	spinlock_t mwlock;

	u8 never_bound;		/* indication MW was never bound */
	struct ipz_mrmw_handle ipz_mw_handle;	/* MW handle for h-calls */
	struct h_galpas galpas;
};

enum ehca_mr_pgi_type {
	EHCA_MR_PGI_PHYS = 1,	/* type of ehca_reg_phys_mr,
				 * ehca_rereg_phys_mr,
				 * ehca_reg_internal_maxmr */
	EHCA_MR_PGI_USER = 2,	/* type of ehca_reg_user_mr */
	EHCA_MR_PGI_FMR  = 3	/* type of ehca_map_phys_fmr */
};

struct ehca_mr_pginfo {
	enum ehca_mr_pgi_type type;
	u64 num_kpages;
	u64 kpage_cnt;
	u64 hwpage_size;	/* hw page size used for this MR */
	u64 num_hwpages;	/* number of hw pages */
	u64 hwpage_cnt;		/* counter for hw pages */
	u64 next_hwpage;	/* next hw page in buffer/chunk/listelem */

	union {
		struct { /* type EHCA_MR_PGI_PHYS section */
			int num_phys_buf;
			struct ib_phys_buf *phys_buf_array;
			u64 next_buf;
		} phy;
		struct { /* type EHCA_MR_PGI_USER section */
			struct ib_umem *region;
			struct ib_umem_chunk *next_chunk;
			u64 next_nmap;
		} usr;
		struct { /* type EHCA_MR_PGI_FMR section */
			u64 fmr_pgsize;
			u64 *page_list;
			u64 next_listelem;
		} fmr;
	} u;
};

/* output parameters for MR/FMR hipz calls */
struct ehca_mr_hipzout_parms {
	struct ipz_mrmw_handle handle;
	u32 lkey;
	u32 rkey;
	u64 len;
	u64 vaddr;
	u32 acl;
};

/* output parameters for MW hipz calls */
struct ehca_mw_hipzout_parms {
	struct ipz_mrmw_handle handle;
	u32 rkey;
};

struct ehca_av {
	struct ib_ah ib_ah;
	struct ehca_ud_av av;
};

struct ehca_ucontext {
	struct ib_ucontext ib_ucontext;
};

int ehca_init_pd_cache(void);
void ehca_cleanup_pd_cache(void);
int ehca_init_cq_cache(void);
void ehca_cleanup_cq_cache(void);
int ehca_init_qp_cache(void);
void ehca_cleanup_qp_cache(void);
int ehca_init_av_cache(void);
void ehca_cleanup_av_cache(void);
int ehca_init_mrmw_cache(void);
void ehca_cleanup_mrmw_cache(void);
int ehca_init_small_qp_cache(void);
void ehca_cleanup_small_qp_cache(void);

extern rwlock_t ehca_qp_idr_lock;
extern rwlock_t ehca_cq_idr_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;

extern int ehca_static_rate;
extern int ehca_port_act_time;
extern int ehca_use_hp_mr;
extern int ehca_scaling_code;
extern int ehca_lock_hcalls;
extern int ehca_nr_ports;
extern int ehca_max_cq;
extern int ehca_max_qp;

struct ipzu_queue_resp {
	u32 qe_size;      /* queue entry size */
	u32 act_nr_of_sg;
	u32 queue_length; /* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;
	u32 offset;       /* save offset within a page for small_qp */
};

struct ehca_create_cq_resp {
	u32 cq_number;
	u32 token;
	struct ipzu_queue_resp ipz_queue;
	u32 fw_handle_ofs;
	u32 dummy;
};

struct ehca_create_qp_resp {
	u32 qp_num;
	u32 token;
	u32 qp_type;
	u32 ext_type;
	u32 qkey;
	/* qp_num assigned by ehca: sqp0/1 may have got different numbers */
	u32 real_qp_num;
	u32 fw_handle_ofs;
	u32 dummy;
	struct ipzu_queue_resp ipz_squeue;
	struct ipzu_queue_resp ipz_rqueue;
};

struct ehca_alloc_cq_parms {
	u32 nr_cqe;
	u32 act_nr_of_entries;
	u32 act_pages;
	struct ipz_eq_handle eq_handle;
};

enum ehca_service_type {
	ST_RC = 0,
	ST_UC = 1,
	ST_RD = 2,
	ST_UD = 3,
};

enum ehca_ll_comp_flags {
	LLQP_SEND_COMP = 0x20,
	LLQP_RECV_COMP = 0x40,
	LLQP_COMP_MASK = 0x60,
};

struct ehca_alloc_queue_parms {
	/* input parameters */
	int max_wr;
	int max_sge;
	int page_size;
	int is_small;

	/* output parameters */
	u16 act_nr_wqes;
	u8  act_nr_sges;
	u32 queue_size; /* bytes for small queues, pages otherwise */
};

struct ehca_alloc_qp_parms {
	struct ehca_alloc_queue_parms squeue;
	struct ehca_alloc_queue_parms rqueue;

	/* input parameters */
	enum ehca_service_type servicetype;
	int qp_storage;
	int sigtype;
	enum ehca_ext_qp_type ext_type;
	enum ehca_ll_comp_flags ll_comp_flags;
	int ud_av_l_key_ctl;

	u32 token;
	struct ipz_eq_handle eq_handle;
	struct ipz_pd pd;
	struct ipz_cq_handle send_cq_handle, recv_cq_handle;

	u32 srq_qpn, srq_token, srq_limit;

	/* output parameters */
	u32 real_qp_num;
	struct ipz_qp_handle qp_handle;
	struct h_galpas galpas;
};

int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);

#endif