/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Struct definition for eHCA internal structures
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 *  OpenIB BSD License
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *  Redistributions of source code must retain the above copyright notice, this
 *  list of conditions and the following disclaimer.
 *
 *  Redistributions in binary form must reproduce the above copyright notice,
 *  this list of conditions and the following disclaimer in the documentation
 *  and/or other materials
 *  provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 *  IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
  42. #ifndef __EHCA_CLASSES_H__
  43. #define __EHCA_CLASSES_H__
  44. struct ehca_module;
  45. struct ehca_qp;
  46. struct ehca_cq;
  47. struct ehca_eq;
  48. struct ehca_mr;
  49. struct ehca_mw;
  50. struct ehca_pd;
  51. struct ehca_av;
  52. #include <linux/wait.h>
  53. #include <linux/mutex.h>
  54. #include <rdma/ib_verbs.h>
  55. #include <rdma/ib_user_verbs.h>
  56. #ifdef CONFIG_PPC64
  57. #include "ehca_classes_pSeries.h"
  58. #endif
  59. #include "ipz_pt_fn.h"
  60. #include "ehca_qes.h"
  61. #include "ehca_irq.h"
  62. #define EHCA_EQE_CACHE_SIZE 20
  63. #define EHCA_MAX_NUM_QUEUES 0xffff
  64. struct ehca_eqe_cache_entry {
  65. struct ehca_eqe *eqe;
  66. struct ehca_cq *cq;
  67. };
  68. struct ehca_eq {
  69. u32 length;
  70. struct ipz_queue ipz_queue;
  71. struct ipz_eq_handle ipz_eq_handle;
  72. struct work_struct work;
  73. struct h_galpas galpas;
  74. int is_initialized;
  75. struct ehca_pfeq pf;
  76. spinlock_t spinlock;
  77. struct tasklet_struct interrupt_task;
  78. u32 ist;
  79. spinlock_t irq_spinlock;
  80. struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
  81. };
  82. struct ehca_sma_attr {
  83. u16 lid, lmc, sm_sl, sm_lid;
  84. u16 pkey_tbl_len, pkeys[16];
  85. };
  86. struct ehca_sport {
  87. struct ib_cq *ibcq_aqp1;
  88. struct ib_qp *ibqp_sqp[2];
  89. /* lock to serialze modify_qp() calls for sqp in normal
  90. * and irq path (when event PORT_ACTIVE is received first time)
  91. */
  92. spinlock_t mod_sqp_lock;
  93. enum ib_port_state port_state;
  94. struct ehca_sma_attr saved_attr;
  95. u32 pma_qp_nr;
  96. };
  97. #define HCA_CAP_MR_PGSIZE_4K 0x80000000
  98. #define HCA_CAP_MR_PGSIZE_64K 0x40000000
  99. #define HCA_CAP_MR_PGSIZE_1M 0x20000000
  100. #define HCA_CAP_MR_PGSIZE_16M 0x10000000
  101. struct ehca_shca {
  102. struct ib_device ib_device;
  103. struct of_device *ofdev;
  104. u8 num_ports;
  105. int hw_level;
  106. struct list_head shca_list;
  107. struct ipz_adapter_handle ipz_hca_handle;
  108. struct ehca_sport sport[2];
  109. struct ehca_eq eq;
  110. struct ehca_eq neq;
  111. struct ehca_mr *maxmr;
  112. struct ehca_pd *pd;
  113. struct h_galpas galpas;
  114. struct mutex modify_mutex;
  115. u64 hca_cap;
  116. /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
  117. u32 hca_cap_mr_pgsize;
  118. int max_mtu;
  119. int max_num_qps;
  120. int max_num_cqs;
  121. atomic_t num_cqs;
  122. atomic_t num_qps;
  123. };
  124. struct ehca_pd {
  125. struct ib_pd ib_pd;
  126. struct ipz_pd fw_pd;
  127. /* small queue mgmt */
  128. struct mutex lock;
  129. struct list_head free[2];
  130. struct list_head full[2];
  131. };
  132. enum ehca_ext_qp_type {
  133. EQPT_NORMAL = 0,
  134. EQPT_LLQP = 1,
  135. EQPT_SRQBASE = 2,
  136. EQPT_SRQ = 3,
  137. };
  138. /* struct to cache modify_qp()'s parms for GSI/SMI qp */
  139. struct ehca_mod_qp_parm {
  140. int mask;
  141. struct ib_qp_attr attr;
  142. };
  143. #define EHCA_MOD_QP_PARM_MAX 4
  144. #define QMAP_IDX_MASK 0xFFFFULL
  145. /* struct for tracking if cqes have been reported to the application */
  146. struct ehca_qmap_entry {
  147. u16 app_wr_id;
  148. u16 reported;
  149. };
  150. struct ehca_queue_map {
  151. struct ehca_qmap_entry *map;
  152. unsigned int entries;
  153. unsigned int tail;
  154. unsigned int left_to_poll;
  155. };
  156. struct ehca_qp {
  157. union {
  158. struct ib_qp ib_qp;
  159. struct ib_srq ib_srq;
  160. };
  161. u32 qp_type;
  162. enum ehca_ext_qp_type ext_type;
  163. enum ib_qp_state state;
  164. struct ipz_queue ipz_squeue;
  165. struct ehca_queue_map sq_map;
  166. struct ipz_queue ipz_rqueue;
  167. struct ehca_queue_map rq_map;
  168. struct h_galpas galpas;
  169. u32 qkey;
  170. u32 real_qp_num;
  171. u32 token;
  172. spinlock_t spinlock_s;
  173. spinlock_t spinlock_r;
  174. u32 sq_max_inline_data_size;
  175. struct ipz_qp_handle ipz_qp_handle;
  176. struct ehca_pfqp pf;
  177. struct ib_qp_init_attr init_attr;
  178. struct ehca_cq *send_cq;
  179. struct ehca_cq *recv_cq;
  180. unsigned int sqerr_purgeflag;
  181. struct hlist_node list_entries;
  182. /* array to cache modify_qp()'s parms for GSI/SMI qp */
  183. struct ehca_mod_qp_parm *mod_qp_parm;
  184. int mod_qp_parm_idx;
  185. /* mmap counter for resources mapped into user space */
  186. u32 mm_count_squeue;
  187. u32 mm_count_rqueue;
  188. u32 mm_count_galpa;
  189. /* unsolicited ack circumvention */
  190. int unsol_ack_circ;
  191. int mtu_shift;
  192. u32 message_count;
  193. u32 packet_count;
  194. atomic_t nr_events; /* events seen */
  195. wait_queue_head_t wait_completion;
  196. int mig_armed;
  197. struct list_head sq_err_node;
  198. struct list_head rq_err_node;
  199. };
  200. #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
  201. #define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
  202. #define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
  203. /* must be power of 2 */
  204. #define QP_HASHTAB_LEN 8
  205. struct ehca_cq {
  206. struct ib_cq ib_cq;
  207. struct ipz_queue ipz_queue;
  208. struct h_galpas galpas;
  209. spinlock_t spinlock;
  210. u32 cq_number;
  211. u32 token;
  212. u32 nr_of_entries;
  213. struct ipz_cq_handle ipz_cq_handle;
  214. struct ehca_pfcq pf;
  215. spinlock_t cb_lock;
  216. struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
  217. struct list_head entry;
  218. u32 nr_callbacks; /* #events assigned to cpu by scaling code */
  219. atomic_t nr_events; /* #events seen */
  220. wait_queue_head_t wait_completion;
  221. spinlock_t task_lock;
  222. /* mmap counter for resources mapped into user space */
  223. u32 mm_count_queue;
  224. u32 mm_count_galpa;
  225. struct list_head sqp_err_list;
  226. struct list_head rqp_err_list;
  227. };
  228. enum ehca_mr_flag {
  229. EHCA_MR_FLAG_FMR = 0x80000000, /* FMR, created with ehca_alloc_fmr */
  230. EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR */
  231. };
  232. struct ehca_mr {
  233. union {
  234. struct ib_mr ib_mr; /* must always be first in ehca_mr */
  235. struct ib_fmr ib_fmr; /* must always be first in ehca_mr */
  236. } ib;
  237. struct ib_umem *umem;
  238. spinlock_t mrlock;
  239. enum ehca_mr_flag flags;
  240. u32 num_kpages; /* number of kernel pages */
  241. u32 num_hwpages; /* number of hw pages to form MR */
  242. u64 hwpage_size; /* hw page size used for this MR */
  243. int acl; /* ACL (stored here for usage in reregister) */
  244. u64 *start; /* virtual start address (stored here for */
  245. /* usage in reregister) */
  246. u64 size; /* size (stored here for usage in reregister) */
  247. u32 fmr_page_size; /* page size for FMR */
  248. u32 fmr_max_pages; /* max pages for FMR */
  249. u32 fmr_max_maps; /* max outstanding maps for FMR */
  250. u32 fmr_map_cnt; /* map counter for FMR */
  251. /* fw specific data */
  252. struct ipz_mrmw_handle ipz_mr_handle; /* MR handle for h-calls */
  253. struct h_galpas galpas;
  254. };
  255. struct ehca_mw {
  256. struct ib_mw ib_mw; /* gen2 mw, must always be first in ehca_mw */
  257. spinlock_t mwlock;
  258. u8 never_bound; /* indication MW was never bound */
  259. struct ipz_mrmw_handle ipz_mw_handle; /* MW handle for h-calls */
  260. struct h_galpas galpas;
  261. };
  262. enum ehca_mr_pgi_type {
  263. EHCA_MR_PGI_PHYS = 1, /* type of ehca_reg_phys_mr,
  264. * ehca_rereg_phys_mr,
  265. * ehca_reg_internal_maxmr */
  266. EHCA_MR_PGI_USER = 2, /* type of ehca_reg_user_mr */
  267. EHCA_MR_PGI_FMR = 3 /* type of ehca_map_phys_fmr */
  268. };
  269. struct ehca_mr_pginfo {
  270. enum ehca_mr_pgi_type type;
  271. u64 num_kpages;
  272. u64 kpage_cnt;
  273. u64 hwpage_size; /* hw page size used for this MR */
  274. u64 num_hwpages; /* number of hw pages */
  275. u64 hwpage_cnt; /* counter for hw pages */
  276. u64 next_hwpage; /* next hw page in buffer/chunk/listelem */
  277. union {
  278. struct { /* type EHCA_MR_PGI_PHYS section */
  279. int num_phys_buf;
  280. struct ib_phys_buf *phys_buf_array;
  281. u64 next_buf;
  282. } phy;
  283. struct { /* type EHCA_MR_PGI_USER section */
  284. struct ib_umem *region;
  285. struct ib_umem_chunk *next_chunk;
  286. u64 next_nmap;
  287. } usr;
  288. struct { /* type EHCA_MR_PGI_FMR section */
  289. u64 fmr_pgsize;
  290. u64 *page_list;
  291. u64 next_listelem;
  292. } fmr;
  293. } u;
  294. };
  295. /* output parameters for MR/FMR hipz calls */
  296. struct ehca_mr_hipzout_parms {
  297. struct ipz_mrmw_handle handle;
  298. u32 lkey;
  299. u32 rkey;
  300. u64 len;
  301. u64 vaddr;
  302. u32 acl;
  303. };
  304. /* output parameters for MW hipz calls */
  305. struct ehca_mw_hipzout_parms {
  306. struct ipz_mrmw_handle handle;
  307. u32 rkey;
  308. };
  309. struct ehca_av {
  310. struct ib_ah ib_ah;
  311. struct ehca_ud_av av;
  312. };
  313. struct ehca_ucontext {
  314. struct ib_ucontext ib_ucontext;
  315. };
  316. int ehca_init_pd_cache(void);
  317. void ehca_cleanup_pd_cache(void);
  318. int ehca_init_cq_cache(void);
  319. void ehca_cleanup_cq_cache(void);
  320. int ehca_init_qp_cache(void);
  321. void ehca_cleanup_qp_cache(void);
  322. int ehca_init_av_cache(void);
  323. void ehca_cleanup_av_cache(void);
  324. int ehca_init_mrmw_cache(void);
  325. void ehca_cleanup_mrmw_cache(void);
  326. int ehca_init_small_qp_cache(void);
  327. void ehca_cleanup_small_qp_cache(void);
  328. extern rwlock_t ehca_qp_idr_lock;
  329. extern rwlock_t ehca_cq_idr_lock;
  330. extern struct idr ehca_qp_idr;
  331. extern struct idr ehca_cq_idr;
  332. extern int ehca_static_rate;
  333. extern int ehca_port_act_time;
  334. extern int ehca_use_hp_mr;
  335. extern int ehca_scaling_code;
  336. extern int ehca_lock_hcalls;
  337. extern int ehca_nr_ports;
  338. extern int ehca_max_cq;
  339. extern int ehca_max_qp;
  340. struct ipzu_queue_resp {
  341. u32 qe_size; /* queue entry size */
  342. u32 act_nr_of_sg;
  343. u32 queue_length; /* queue length allocated in bytes */
  344. u32 pagesize;
  345. u32 toggle_state;
  346. u32 offset; /* save offset within a page for small_qp */
  347. };
  348. struct ehca_create_cq_resp {
  349. u32 cq_number;
  350. u32 token;
  351. struct ipzu_queue_resp ipz_queue;
  352. u32 fw_handle_ofs;
  353. u32 dummy;
  354. };
  355. struct ehca_create_qp_resp {
  356. u32 qp_num;
  357. u32 token;
  358. u32 qp_type;
  359. u32 ext_type;
  360. u32 qkey;
  361. /* qp_num assigned by ehca: sqp0/1 may have got different numbers */
  362. u32 real_qp_num;
  363. u32 fw_handle_ofs;
  364. u32 dummy;
  365. struct ipzu_queue_resp ipz_squeue;
  366. struct ipzu_queue_resp ipz_rqueue;
  367. };
  368. struct ehca_alloc_cq_parms {
  369. u32 nr_cqe;
  370. u32 act_nr_of_entries;
  371. u32 act_pages;
  372. struct ipz_eq_handle eq_handle;
  373. };
  374. enum ehca_service_type {
  375. ST_RC = 0,
  376. ST_UC = 1,
  377. ST_RD = 2,
  378. ST_UD = 3,
  379. };
  380. enum ehca_ll_comp_flags {
  381. LLQP_SEND_COMP = 0x20,
  382. LLQP_RECV_COMP = 0x40,
  383. LLQP_COMP_MASK = 0x60,
  384. };
  385. struct ehca_alloc_queue_parms {
  386. /* input parameters */
  387. int max_wr;
  388. int max_sge;
  389. int page_size;
  390. int is_small;
  391. /* output parameters */
  392. u16 act_nr_wqes;
  393. u8 act_nr_sges;
  394. u32 queue_size; /* bytes for small queues, pages otherwise */
  395. };
  396. struct ehca_alloc_qp_parms {
  397. struct ehca_alloc_queue_parms squeue;
  398. struct ehca_alloc_queue_parms rqueue;
  399. /* input parameters */
  400. enum ehca_service_type servicetype;
  401. int qp_storage;
  402. int sigtype;
  403. enum ehca_ext_qp_type ext_type;
  404. enum ehca_ll_comp_flags ll_comp_flags;
  405. int ud_av_l_key_ctl;
  406. u32 token;
  407. struct ipz_eq_handle eq_handle;
  408. struct ipz_pd pd;
  409. struct ipz_cq_handle send_cq_handle, recv_cq_handle;
  410. u32 srq_qpn, srq_token, srq_limit;
  411. /* output parameters */
  412. u32 real_qp_num;
  413. struct ipz_qp_handle qp_handle;
  414. struct h_galpas galpas;
  415. };
  416. int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
  417. int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
  418. struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
  419. #endif