/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  HCA query functions
 *
 *  Authors: Heiko J Schick <schickhj@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and
 *  OpenIB BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "ehca_tools.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
  44. int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props)
  45. {
  46. int i, ret = 0;
  47. struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
  48. ib_device);
  49. struct hipz_query_hca *rblock;
  50. static const u32 cap_mapping[] = {
  51. IB_DEVICE_RESIZE_MAX_WR, HCA_CAP_WQE_RESIZE,
  52. IB_DEVICE_BAD_PKEY_CNTR, HCA_CAP_BAD_P_KEY_CTR,
  53. IB_DEVICE_BAD_QKEY_CNTR, HCA_CAP_Q_KEY_VIOL_CTR,
  54. IB_DEVICE_RAW_MULTI, HCA_CAP_RAW_PACKET_MCAST,
  55. IB_DEVICE_AUTO_PATH_MIG, HCA_CAP_AUTO_PATH_MIG,
  56. IB_DEVICE_CHANGE_PHY_PORT, HCA_CAP_SQD_RTS_PORT_CHANGE,
  57. IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
  58. IB_DEVICE_CURR_QP_STATE_MOD, HCA_CAP_CUR_QP_STATE_MOD,
  59. IB_DEVICE_SHUTDOWN_PORT, HCA_CAP_SHUTDOWN_PORT,
  60. IB_DEVICE_INIT_TYPE, HCA_CAP_INIT_TYPE,
  61. IB_DEVICE_PORT_ACTIVE_EVENT, HCA_CAP_PORT_ACTIVE_EVENT,
  62. };
  63. rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
  64. if (!rblock) {
  65. ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
  66. return -ENOMEM;
  67. }
  68. if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
  69. ehca_err(&shca->ib_device, "Can't query device properties");
  70. ret = -EINVAL;
  71. goto query_device1;
  72. }
  73. memset(props, 0, sizeof(struct ib_device_attr));
  74. props->fw_ver = rblock->hw_ver;
  75. props->max_mr_size = rblock->max_mr_size;
  76. props->vendor_id = rblock->vendor_id >> 8;
  77. props->vendor_part_id = rblock->vendor_part_id >> 16;
  78. props->hw_ver = rblock->hw_ver;
  79. props->max_qp = min_t(int, rblock->max_qp, INT_MAX);
  80. props->max_qp_wr = min_t(int, rblock->max_wqes_wq, INT_MAX);
  81. props->max_sge = min_t(int, rblock->max_sge, INT_MAX);
  82. props->max_sge_rd = min_t(int, rblock->max_sge_rd, INT_MAX);
  83. props->max_cq = min_t(int, rblock->max_cq, INT_MAX);
  84. props->max_cqe = min_t(int, rblock->max_cqe, INT_MAX);
  85. props->max_mr = min_t(int, rblock->max_mr, INT_MAX);
  86. props->max_mw = min_t(int, rblock->max_mw, INT_MAX);
  87. props->max_pd = min_t(int, rblock->max_pd, INT_MAX);
  88. props->max_ah = min_t(int, rblock->max_ah, INT_MAX);
  89. props->max_fmr = min_t(int, rblock->max_mr, INT_MAX);
  90. props->max_srq = 0;
  91. props->max_srq_wr = 0;
  92. props->max_srq_sge = 0;
  93. props->max_pkeys = 16;
  94. props->local_ca_ack_delay
  95. = rblock->local_ca_ack_delay;
  96. props->max_raw_ipv6_qp
  97. = min_t(int, rblock->max_raw_ipv6_qp, INT_MAX);
  98. props->max_raw_ethy_qp
  99. = min_t(int, rblock->max_raw_ethy_qp, INT_MAX);
  100. props->max_mcast_grp
  101. = min_t(int, rblock->max_mcast_grp, INT_MAX);
  102. props->max_mcast_qp_attach
  103. = min_t(int, rblock->max_mcast_qp_attach, INT_MAX);
  104. props->max_total_mcast_qp_attach
  105. = min_t(int, rblock->max_total_mcast_qp_attach, INT_MAX);
  106. /* translate device capabilities */
  107. props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
  108. IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
  109. for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
  110. if (rblock->hca_cap_indicators & cap_mapping[i + 1])
  111. props->device_cap_flags |= cap_mapping[i];
  112. query_device1:
  113. ehca_free_fw_ctrlblock(rblock);
  114. return ret;
  115. }
  116. int ehca_query_port(struct ib_device *ibdev,
  117. u8 port, struct ib_port_attr *props)
  118. {
  119. int ret = 0;
  120. struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
  121. ib_device);
  122. struct hipz_query_port *rblock;
  123. rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
  124. if (!rblock) {
  125. ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
  126. return -ENOMEM;
  127. }
  128. if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
  129. ehca_err(&shca->ib_device, "Can't query port properties");
  130. ret = -EINVAL;
  131. goto query_port1;
  132. }
  133. memset(props, 0, sizeof(struct ib_port_attr));
  134. props->state = rblock->state;
  135. switch (rblock->max_mtu) {
  136. case 0x1:
  137. props->active_mtu = props->max_mtu = IB_MTU_256;
  138. break;
  139. case 0x2:
  140. props->active_mtu = props->max_mtu = IB_MTU_512;
  141. break;
  142. case 0x3:
  143. props->active_mtu = props->max_mtu = IB_MTU_1024;
  144. break;
  145. case 0x4:
  146. props->active_mtu = props->max_mtu = IB_MTU_2048;
  147. break;
  148. case 0x5:
  149. props->active_mtu = props->max_mtu = IB_MTU_4096;
  150. break;
  151. default:
  152. ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
  153. rblock->max_mtu);
  154. break;
  155. }
  156. props->port_cap_flags = rblock->capability_mask;
  157. props->gid_tbl_len = rblock->gid_tbl_len;
  158. props->max_msg_sz = rblock->max_msg_sz;
  159. props->bad_pkey_cntr = rblock->bad_pkey_cntr;
  160. props->qkey_viol_cntr = rblock->qkey_viol_cntr;
  161. props->pkey_tbl_len = rblock->pkey_tbl_len;
  162. props->lid = rblock->lid;
  163. props->sm_lid = rblock->sm_lid;
  164. props->lmc = rblock->lmc;
  165. props->sm_sl = rblock->sm_sl;
  166. props->subnet_timeout = rblock->subnet_timeout;
  167. props->init_type_reply = rblock->init_type_reply;
  168. props->active_width = IB_WIDTH_12X;
  169. props->active_speed = 0x1;
  170. /* at the moment (logical) link state is always LINK_UP */
  171. props->phys_state = 0x5;
  172. query_port1:
  173. ehca_free_fw_ctrlblock(rblock);
  174. return ret;
  175. }
  176. int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
  177. {
  178. int ret = 0;
  179. struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
  180. struct hipz_query_port *rblock;
  181. if (index > 16) {
  182. ehca_err(&shca->ib_device, "Invalid index: %x.", index);
  183. return -EINVAL;
  184. }
  185. rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
  186. if (!rblock) {
  187. ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
  188. return -ENOMEM;
  189. }
  190. if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
  191. ehca_err(&shca->ib_device, "Can't query port properties");
  192. ret = -EINVAL;
  193. goto query_pkey1;
  194. }
  195. memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));
  196. query_pkey1:
  197. ehca_free_fw_ctrlblock(rblock);
  198. return ret;
  199. }
  200. int ehca_query_gid(struct ib_device *ibdev, u8 port,
  201. int index, union ib_gid *gid)
  202. {
  203. int ret = 0;
  204. struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
  205. ib_device);
  206. struct hipz_query_port *rblock;
  207. if (index > 255) {
  208. ehca_err(&shca->ib_device, "Invalid index: %x.", index);
  209. return -EINVAL;
  210. }
  211. rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
  212. if (!rblock) {
  213. ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
  214. return -ENOMEM;
  215. }
  216. if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
  217. ehca_err(&shca->ib_device, "Can't query port properties");
  218. ret = -EINVAL;
  219. goto query_gid1;
  220. }
  221. memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
  222. memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
  223. query_gid1:
  224. ehca_free_fw_ctrlblock(rblock);
  225. return ret;
  226. }
  227. const u32 allowed_port_caps = (
  228. IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
  229. IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
  230. IB_PORT_VENDOR_CLASS_SUP);
/*
 * ehca_modify_port() - change port capability bits via firmware.
 *
 * Validates that only bits in allowed_port_caps are being set/cleared,
 * then read-modify-writes the port capability mask under shca->modify_mutex
 * (query the current mask, apply set/clr, write it back with
 * hipz_h_modify_port()).
 *
 * Returns 0 on success, -EINVAL for disallowed bits or a failed firmware
 * call, -ENOMEM on allocation failure, -ERESTARTSYS if interrupted while
 * waiting for the mutex.
 */
int ehca_modify_port(struct ib_device *ibdev,
		     u8 port, int port_modify_mask,
		     struct ib_port_modify *props)
{
	int ret = 0;
	struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
	struct hipz_query_port *rblock;
	u32 cap;
	u64 hret;

	/* reject any attempt to touch bits outside allowed_port_caps */
	if ((props->set_port_cap_mask | props->clr_port_cap_mask)
	    & ~allowed_port_caps) {
		ehca_err(&shca->ib_device, "Non-changeable bits set in masks "
			 "set=%x clr=%x allowed=%x", props->set_port_cap_mask,
			 props->clr_port_cap_mask, allowed_port_caps);
		return -EINVAL;
	}

	/* serialize concurrent modify operations on this HCA */
	if (mutex_lock_interruptible(&shca->modify_mutex))
		return -ERESTARTSYS;

	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!rblock) {
		ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
		ret = -ENOMEM;
		goto modify_port1;
	}

	/* fetch the current capability mask so unchanged bits are preserved */
	if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Can't query port properties");
		ret = -EINVAL;
		goto modify_port2;
	}

	cap = (rblock->capability_mask | props->set_port_cap_mask)
	      & ~props->clr_port_cap_mask;

	hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
				  cap, props->init_type, port_modify_mask);
	if (hret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "Modify port failed hret=%lx", hret);
		ret = -EINVAL;
	}

modify_port2:
	ehca_free_fw_ctrlblock(rblock);

modify_port1:
	mutex_unlock(&shca->modify_mutex);

	return ret;
}