/* ehca_classes.h */
  1. /*
  2. * IBM eServer eHCA Infiniband device driver for Linux on POWER
  3. *
  4. * Struct definition for eHCA internal structures
  5. *
  6. * Authors: Heiko J Schick <schickhj@de.ibm.com>
  7. * Christoph Raisch <raisch@de.ibm.com>
  8. * Joachim Fenkes <fenkes@de.ibm.com>
  9. *
  10. * Copyright (c) 2005 IBM Corporation
  11. *
  12. * All rights reserved.
  13. *
  14. * This source code is distributed under a dual license of GPL v2.0 and OpenIB
  15. * BSD.
  16. *
  17. * OpenIB BSD License
  18. *
  19. * Redistribution and use in source and binary forms, with or without
  20. * modification, are permitted provided that the following conditions are met:
  21. *
  22. * Redistributions of source code must retain the above copyright notice, this
  23. * list of conditions and the following disclaimer.
  24. *
  25. * Redistributions in binary form must reproduce the above copyright notice,
  26. * this list of conditions and the following disclaimer in the documentation
  27. * and/or other materials
  28. * provided with the distribution.
  29. *
  30. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  31. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  32. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  33. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  34. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  35. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  36. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  37. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
  38. * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  39. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  40. * POSSIBILITY OF SUCH DAMAGE.
  41. */
#ifndef __EHCA_CLASSES_H__
#define __EHCA_CLASSES_H__

/* forward declarations of the driver's core object types */
struct ehca_module;
struct ehca_qp;
struct ehca_cq;
struct ehca_eq;
struct ehca_mr;
struct ehca_mw;
struct ehca_pd;
struct ehca_av;

#include <linux/wait.h>
#include <linux/mutex.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#ifdef CONFIG_PPC64
#include "ehca_classes_pSeries.h"
#endif
#include "ipz_pt_fn.h"
#include "ehca_qes.h"
#include "ehca_irq.h"

/* number of EQE/CQ pairs cached per event queue (see struct ehca_eq) */
#define EHCA_EQE_CACHE_SIZE 20

#define EHCA_MAX_NUM_QUEUES 0xffff
/* one cached event queue entry plus the CQ it was matched to */
struct ehca_eqe_cache_entry {
	struct ehca_eqe *eqe;	/* the cached EQ entry */
	struct ehca_cq *cq;	/* associated CQ; NOTE(review): presumably NULL
				 * for non-completion events -- confirm in irq code */
};
/*
 * Event queue state: firmware queue/handle, deferred-work hooks and a
 * small EQE cache (EHCA_EQE_CACHE_SIZE entries) used by the irq path.
 */
struct ehca_eq {
	u32 length;
	struct ipz_queue ipz_queue;		/* the queue pages themselves */
	struct ipz_eq_handle ipz_eq_handle;	/* firmware handle for h-calls */
	struct work_struct work;
	struct h_galpas galpas;
	int is_initialized;
	struct ehca_pfeq pf;
	spinlock_t spinlock;
	struct tasklet_struct interrupt_task;
	u32 ist;		/* NOTE(review): presumably interrupt source token -- confirm */
	spinlock_t irq_spinlock;	/* protects the eqe_cache below */
	struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
};
/* cached subnet-management port attributes (see ehca_sport.saved_attr) */
struct ehca_sma_attr {
	u16 lid, lmc, sm_sl, sm_lid;
	u16 pkey_tbl_len, pkeys[16];
};
/* per-port state of an adapter (see ehca_shca.sport[]) */
struct ehca_sport {
	struct ib_cq *ibcq_aqp1;	/* CQ used by the AQP1 (GSI) QP */
	struct ib_qp *ibqp_sqp[2];	/* special QPs: [0] = SMI, [1] = GSI */
	/* lock to serialze modify_qp() calls for sqp in normal
	 * and irq path (when event PORT_ACTIVE is received first time)
	 */
	spinlock_t mod_sqp_lock;
	enum ib_port_state port_state;
	struct ehca_sma_attr saved_attr;	/* SMA attrs saved for restore */
	u32 pma_qp_nr;
};
/* capability bits for ehca_shca.hca_cap_mr_pgsize: supported MR page sizes */
#define HCA_CAP_MR_PGSIZE_4K 0x80000000
#define HCA_CAP_MR_PGSIZE_64K 0x40000000
#define HCA_CAP_MR_PGSIZE_1M 0x20000000
#define HCA_CAP_MR_PGSIZE_16M 0x10000000
/*
 * Per-adapter instance: embeds the ib_device registered with the RDMA
 * core plus firmware handles, event queues and resource accounting.
 */
struct ehca_shca {
	struct ib_device ib_device;	/* must stay first: container_of() use assumed */
	struct of_device *ofdev;
	u8 num_ports;
	int hw_level;
	struct list_head shca_list;	/* linkage in the driver's adapter list */
	struct ipz_adapter_handle ipz_hca_handle;
	struct ehca_sport sport[2];	/* per-port state, indexed by port - 1 */
	struct ehca_eq eq;		/* completion event queue */
	struct ehca_eq neq;		/* notification event queue */
	struct ehca_mr *maxmr;		/* internal max-MR (EHCA_MR_FLAG_MAXMR) */
	struct ehca_pd *pd;
	struct h_galpas galpas;
	struct mutex modify_mutex;
	u64 hca_cap;			/* raw capability mask from firmware */
	/* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
	u32 hca_cap_mr_pgsize;
	int max_mtu;
	int max_num_qps;		/* limit for num_qps below */
	int max_num_cqs;		/* limit for num_cqs below */
	atomic_t num_cqs;		/* CQs currently allocated on this HCA */
	atomic_t num_qps;		/* QPs currently allocated on this HCA */
};
/* protection domain, with per-PD management of small-queue pages */
struct ehca_pd {
	struct ib_pd ib_pd;	/* must stay first: container_of() use assumed */
	struct ipz_pd fw_pd;	/* firmware PD handle */
	/* small queue mgmt */
	struct mutex lock;	/* protects the four lists below */
	struct list_head free[2];	/* pages with free small-queue slots */
	struct list_head full[2];	/* fully used pages */
};
/* extended QP flavor, stored in ehca_qp.ext_type (see IS_SRQ/HAS_SQ/HAS_RQ) */
enum ehca_ext_qp_type {
	EQPT_NORMAL = 0,	/* ordinary QP */
	EQPT_LLQP = 1,		/* low-latency QP */
	EQPT_SRQBASE = 2,	/* QP attached to an SRQ (no own RQ) */
	EQPT_SRQ = 3,		/* the SRQ itself (no SQ) */
};
/* struct to cache modify_qp()'s parms for GSI/SMI qp */
struct ehca_mod_qp_parm {
	int mask;		/* attr_mask of the deferred modify_qp() call */
	struct ib_qp_attr attr;
};

/* max number of cached modify_qp() calls per special QP */
#define EHCA_MOD_QP_PARM_MAX 4

/* low 16 bits of a wr_id index into a queue map */
#define QMAP_IDX_MASK 0xFFFFULL
/* struct for tracking if cqes have been reported to the application */
struct ehca_qmap_entry {
	u16 app_wr_id;	/* application part of the work request id */
	u8 reported;	/* nonzero once the CQE was handed to the app */
	u8 cqe_req;	/* nonzero if a CQE is expected for this WQE */
};
/* per-work-queue shadow map of ehca_qmap_entry, used for CQE flushing */
struct ehca_queue_map {
	struct ehca_qmap_entry *map;
	unsigned int entries;		/* number of entries in map */
	unsigned int tail;
	unsigned int left_to_poll;
	unsigned int next_wqe_idx;	/* Idx to first wqe to be flushed */
};
/*
 * QP context: covers ordinary QPs and SRQs, hence the leading union.
 * ext_type distinguishes the flavors (enum ehca_ext_qp_type); the
 * IS_SRQ()/HAS_SQ()/HAS_RQ() helpers test it.
 */
struct ehca_qp {
	union {
		struct ib_qp ib_qp;
		struct ib_srq ib_srq;
	};
	u32 qp_type;
	enum ehca_ext_qp_type ext_type;
	enum ib_qp_state state;
	struct ipz_queue ipz_squeue;	/* send queue */
	struct ehca_queue_map sq_map;
	struct ipz_queue ipz_rqueue;	/* receive queue */
	struct ehca_queue_map rq_map;
	struct h_galpas galpas;
	u32 qkey;
	u32 real_qp_num;
	u32 token;			/* idr token (see ehca_qp_idr) */
	spinlock_t spinlock_s;		/* protects the send queue */
	spinlock_t spinlock_r;		/* protects the receive queue */
	u32 sq_max_inline_data_size;
	struct ipz_qp_handle ipz_qp_handle;	/* firmware handle for h-calls */
	struct ehca_pfqp pf;
	struct ib_qp_init_attr init_attr;
	struct ehca_cq *send_cq;
	struct ehca_cq *recv_cq;
	unsigned int sqerr_purgeflag;
	struct hlist_node list_entries;	/* linkage in the CQ's qp_hashtab */
	/* array to cache modify_qp()'s parms for GSI/SMI qp */
	struct ehca_mod_qp_parm *mod_qp_parm;
	int mod_qp_parm_idx;
	/* mmap counter for resources mapped into user space */
	u32 mm_count_squeue;
	u32 mm_count_rqueue;
	u32 mm_count_galpa;
	/* unsolicited ack circumvention */
	int unsol_ack_circ;
	int mtu_shift;
	u32 message_count;
	u32 packet_count;
	atomic_t nr_events;		/* events seen */
	wait_queue_head_t wait_completion;
	int mig_armed;
	struct list_head sq_err_node;	/* linkage in CQ's sqp_err_list */
	struct list_head rq_err_node;	/* linkage in CQ's rqp_err_list */
};
  202. #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
  203. #define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
  204. #define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
  205. /* must be power of 2 */
  206. #define QP_HASHTAB_LEN 8
/* completion queue context */
struct ehca_cq {
	struct ib_cq ib_cq;	/* must stay first: container_of() use assumed */
	struct ipz_queue ipz_queue;
	struct h_galpas galpas;
	spinlock_t spinlock;
	u32 cq_number;
	u32 token;		/* idr token (see ehca_cq_idr) */
	u32 nr_of_entries;
	struct ipz_cq_handle ipz_cq_handle;	/* firmware handle for h-calls */
	struct ehca_pfcq pf;
	spinlock_t cb_lock;
	/* QPs attached to this CQ, hashed by QP number */
	struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
	struct list_head entry;
	u32 nr_callbacks;	/* #events assigned to cpu by scaling code */
	atomic_t nr_events;	/* #events seen */
	wait_queue_head_t wait_completion;
	spinlock_t task_lock;
	/* mmap counter for resources mapped into user space */
	u32 mm_count_queue;
	u32 mm_count_galpa;
	struct list_head sqp_err_list;	/* QPs with send-queue errors to flush */
	struct list_head rqp_err_list;	/* QPs with recv-queue errors to flush */
};
/* flag bits for ehca_mr.flags */
enum ehca_mr_flag {
	EHCA_MR_FLAG_FMR = 0x80000000,	 /* FMR, created with ehca_alloc_fmr */
	EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */
};
/* memory region context; doubles as the FMR context (see flags) */
struct ehca_mr {
	union {
		struct ib_mr ib_mr;	/* must always be first in ehca_mr */
		struct ib_fmr ib_fmr;	/* must always be first in ehca_mr */
	} ib;
	struct ib_umem *umem;	/* user memory backing; NULL for kernel MRs */
	spinlock_t mrlock;
	enum ehca_mr_flag flags;
	u32 num_kpages;		/* number of kernel pages */
	u32 num_hwpages;	/* number of hw pages to form MR */
	u64 hwpage_size;	/* hw page size used for this MR */
	int acl;		/* ACL (stored here for usage in reregister) */
	u64 *start;		/* virtual start address (stored here for */
				/* usage in reregister) */
	u64 size;		/* size (stored here for usage in reregister) */
	u32 fmr_page_size;	/* page size for FMR */
	u32 fmr_max_pages;	/* max pages for FMR */
	u32 fmr_max_maps;	/* max outstanding maps for FMR */
	u32 fmr_map_cnt;	/* map counter for FMR */
	/* fw specific data */
	struct ipz_mrmw_handle ipz_mr_handle;	/* MR handle for h-calls */
	struct h_galpas galpas;
};
/* memory window context */
struct ehca_mw {
	struct ib_mw ib_mw;	/* gen2 mw, must always be first in ehca_mw */
	spinlock_t mwlock;
	u8 never_bound;		/* indication MW was never bound */
	struct ipz_mrmw_handle ipz_mw_handle;	/* MW handle for h-calls */
	struct h_galpas galpas;
};
/* selects the active member of ehca_mr_pginfo.u */
enum ehca_mr_pgi_type {
	EHCA_MR_PGI_PHYS = 1,	/* type of ehca_reg_phys_mr,
				 * ehca_rereg_phys_mr,
				 * ehca_reg_internal_maxmr */
	EHCA_MR_PGI_USER = 2,	/* type of ehca_reg_user_mr */
	EHCA_MR_PGI_FMR = 3	/* type of ehca_map_phys_fmr */
};
/*
 * Page-walk state for MR registration: counts kernel vs. hardware pages
 * and keeps a per-type cursor (phy/usr/fmr) into the source page list.
 */
struct ehca_mr_pginfo {
	enum ehca_mr_pgi_type type;	/* selects which union member is valid */
	u64 num_kpages;
	u64 kpage_cnt;
	u64 hwpage_size;	/* hw page size used for this MR */
	u64 num_hwpages;	/* number of hw pages */
	u64 hwpage_cnt;		/* counter for hw pages */
	u64 next_hwpage;	/* next hw page in buffer/chunk/listelem */
	union {
		struct { /* type EHCA_MR_PGI_PHYS section */
			int num_phys_buf;
			struct ib_phys_buf *phys_buf_array;
			u64 next_buf;
		} phy;
		struct { /* type EHCA_MR_PGI_USER section */
			struct ib_umem *region;
			struct ib_umem_chunk *next_chunk;
			u64 next_nmap;
		} usr;
		struct { /* type EHCA_MR_PGI_FMR section */
			u64 fmr_pgsize;
			u64 *page_list;
			u64 next_listelem;
		} fmr;
	} u;
};
/* output parameters for MR/FMR hipz calls */
struct ehca_mr_hipzout_parms {
	struct ipz_mrmw_handle handle;	/* firmware handle of the new MR */
	u32 lkey;
	u32 rkey;
	u64 len;
	u64 vaddr;
	u32 acl;
};
/* output parameters for MW hipz calls */
struct ehca_mw_hipzout_parms {
	struct ipz_mrmw_handle handle;	/* firmware handle of the new MW */
	u32 rkey;
};
/* address handle: generic ib_ah plus the eHCA UD address vector */
struct ehca_av {
	struct ib_ah ib_ah;	/* must stay first: container_of() use assumed */
	struct ehca_ud_av av;
};
/* per-process user context; no eHCA-specific state beyond the generic one */
struct ehca_ucontext {
	struct ib_ucontext ib_ucontext;
};
/*
 * Constructors/destructors for the driver's object caches.
 * NOTE(review): presumably kmem caches created at module init and
 * destroyed at module exit -- confirm against the cache implementations.
 */
int ehca_init_pd_cache(void);
void ehca_cleanup_pd_cache(void);
int ehca_init_cq_cache(void);
void ehca_cleanup_cq_cache(void);
int ehca_init_qp_cache(void);
void ehca_cleanup_qp_cache(void);
int ehca_init_av_cache(void);
void ehca_cleanup_av_cache(void);
int ehca_init_mrmw_cache(void);
void ehca_cleanup_mrmw_cache(void);
int ehca_init_small_qp_cache(void);
void ehca_cleanup_small_qp_cache(void);

/* token -> QP/CQ lookup tables with their protecting rwlocks */
extern rwlock_t ehca_qp_idr_lock;
extern rwlock_t ehca_cq_idr_lock;
extern struct idr ehca_qp_idr;
extern struct idr ehca_cq_idr;

/* module-wide tunables, defined elsewhere in the driver */
extern int ehca_static_rate;
extern int ehca_port_act_time;
extern int ehca_use_hp_mr;
extern int ehca_scaling_code;
extern int ehca_lock_hcalls;
extern int ehca_nr_ports;
extern int ehca_max_cq;
extern int ehca_max_qp;
/*
 * Queue geometry reported on CQ/QP creation (embedded in the *_resp
 * structs below). NOTE(review): appears to be part of the user-space
 * ABI, so the layout must stay stable -- confirm against libehca.
 */
struct ipzu_queue_resp {
	u32 qe_size;		/* queue entry size */
	u32 act_nr_of_sg;
	u32 queue_length;	/* queue length allocated in bytes */
	u32 pagesize;
	u32 toggle_state;
	u32 offset;		/* save offset within a page for small_qp */
};
/* response returned to user space by create_cq */
struct ehca_create_cq_resp {
	u32 cq_number;
	u32 token;
	struct ipzu_queue_resp ipz_queue;
	u32 fw_handle_ofs;
	u32 dummy;		/* padding, keeps the struct 64-bit aligned */
};
/* response returned to user space by create_qp */
struct ehca_create_qp_resp {
	u32 qp_num;
	u32 token;
	u32 qp_type;
	u32 ext_type;
	u32 qkey;
	/* qp_num assigned by ehca: sqp0/1 may have got different numbers */
	u32 real_qp_num;
	u32 fw_handle_ofs;
	u32 dummy;		/* padding, keeps the struct 64-bit aligned */
	struct ipzu_queue_resp ipz_squeue;
	struct ipzu_queue_resp ipz_rqueue;
};
/* parameters for allocating a CQ via h-call */
struct ehca_alloc_cq_parms {
	u32 nr_cqe;		/* requested number of CQ entries */
	u32 act_nr_of_entries;	/* actual number granted by firmware */
	u32 act_pages;
	struct ipz_eq_handle eq_handle;
};
/* transport service type encoding used by the firmware interface */
enum ehca_service_type {
	ST_RC = 0,	/* reliable connection */
	ST_UC = 1,	/* unreliable connection */
	ST_RD = 2,	/* reliable datagram */
	ST_UD = 3,	/* unreliable datagram */
};
/* completion-generation flags for low-latency QPs */
enum ehca_ll_comp_flags {
	LLQP_SEND_COMP = 0x20,
	LLQP_RECV_COMP = 0x40,
	LLQP_COMP_MASK = 0x60,	/* union of the two flags above */
};
/* per-work-queue (send or recv) allocation parameters */
struct ehca_alloc_queue_parms {
	/* input parameters */
	int max_wr;	/* requested max work requests */
	int max_sge;	/* requested max scatter/gather entries per WR */
	int page_size;
	int is_small;	/* nonzero selects the small-queue allocator */
	/* output parameters */
	u16 act_nr_wqes;	/* actual number of WQEs granted */
	u8 act_nr_sges;		/* actual number of SGEs granted */
	u32 queue_size;	/* bytes for small queues, pages otherwise */
};
/* parameters for allocating a QP via h-call */
struct ehca_alloc_qp_parms {
	struct ehca_alloc_queue_parms squeue;	/* send queue parameters */
	struct ehca_alloc_queue_parms rqueue;	/* receive queue parameters */

	/* input parameters */
	enum ehca_service_type servicetype;
	int qp_storage;
	int sigtype;
	enum ehca_ext_qp_type ext_type;
	enum ehca_ll_comp_flags ll_comp_flags;
	int ud_av_l_key_ctl;

	u32 token;
	struct ipz_eq_handle eq_handle;
	struct ipz_pd pd;
	struct ipz_cq_handle send_cq_handle, recv_cq_handle;

	u32 srq_qpn, srq_token, srq_limit;	/* only used for SRQ-based QPs */

	/* output parameters */
	u32 real_qp_num;
	struct ipz_qp_handle qp_handle;
	struct h_galpas galpas;
};
/* maintenance of a CQ's QP hash table (ehca_cq.qp_hashtab) */
int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
  421. #endif