mthca_mad.c 9.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325
  1. /*
  2. * Copyright (c) 2004 Topspin Communications. All rights reserved.
  3. * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
  4. * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
  5. *
  6. * This software is available to you under a choice of one of two
  7. * licenses. You may choose to be licensed under the terms of the GNU
  8. * General Public License (GPL) Version 2, available from the file
  9. * COPYING in the main directory of this source tree, or the
  10. * OpenIB.org BSD license below:
  11. *
  12. * Redistribution and use in source and binary forms, with or
  13. * without modification, are permitted provided that the following
  14. * conditions are met:
  15. *
  16. * - Redistributions of source code must retain the above
  17. * copyright notice, this list of conditions and the following
  18. * disclaimer.
  19. *
  20. * - Redistributions in binary form must reproduce the above
  21. * copyright notice, this list of conditions and the following
  22. * disclaimer in the documentation and/or other materials
  23. * provided with the distribution.
  24. *
  25. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32. * SOFTWARE.
  33. *
  34. * $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $
  35. */
  36. #include <rdma/ib_verbs.h>
  37. #include <rdma/ib_mad.h>
  38. #include <rdma/ib_smi.h>
  39. #include "mthca_dev.h"
  40. #include "mthca_cmd.h"
/*
 * Mellanox vendor-specific MAD management classes that
 * mthca_process_mad() accepts alongside the PMA class.
 */
enum {
	MTHCA_VENDOR_CLASS1 = 0x9,
	MTHCA_VENDOR_CLASS2 = 0xa
};
/*
 * Per-send bookkeeping for a trap MAD forwarded to the SM by
 * forward_trap(): the kmalloc'ed copy of the MAD plus its DMA mapping.
 * Freed (and unmapped) in send_handler() when the send completes, or
 * immediately in forward_trap() if posting the send fails.
 */
struct mthca_trap_mad {
	struct ib_mad *mad;
	DECLARE_PCI_UNMAP_ADDR(mapping)
};
/*
 * (Re)build the cached address handle used to reach the SM on
 * @port_num, after a PortInfo Set told us the SM's LID and SL.
 * The AH is created on the QP0 send agent's PD; the old AH (if any)
 * is destroyed and replaced under sm_lock so forward_trap() never
 * sees a stale pointer.
 */
static void update_sm_ah(struct mthca_dev *dev,
			 u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	/* No SMI send agent for this port: nothing to cache an AH on. */
	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = lid;
	ah_attr.sl       = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	/* Swap in the new AH atomically w.r.t. forward_trap(). */
	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}
/*
 * Snoop SM MADs for PortInfo and P_Key table Sets, so we can
 * synthesize LID change and P_Key change events for consumers and
 * refresh the cached SM address handle.
 */
static void smp_snoop(struct ib_device *ibdev,
		      u8 port_num,
		      struct ib_mad *mad)
{
	struct ib_event event;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
			/*
			 * Raw offsets into the PortInfo attribute in the SMP
			 * data area: byte 58 is the port LID, the low nibble
			 * of byte 76 is the master SM SL (per the IBA
			 * PortInfo layout -- NOTE(review): verify offsets
			 * against the spec if this is ever touched).
			 */
			update_sm_ah(to_mdev(ibdev), port_num,
				     be16_to_cpup((__be16 *) (mad->data + 58)),
				     (*(u8 *) (mad->data + 76)) & 0xf);

			event.device           = ibdev;
			event.event            = IB_EVENT_LID_CHANGE;
			event.element.port_num = port_num;
			ib_dispatch_event(&event);
		}

		if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
			event.device           = ibdev;
			event.event            = IB_EVENT_PKEY_CHANGE;
			event.element.port_num = port_num;
			ib_dispatch_event(&event);
		}
	}
}
/*
 * Forward a locally generated trap MAD to the subnet manager, using the
 * cached SM address handle for @port_num.  The MAD is copied into a
 * freshly allocated, DMA-mapped buffer whose ownership passes to
 * send_handler() once the send is successfully posted; on any failure
 * the buffer is unmapped and freed here and the trap is silently
 * dropped.
 */
static void forward_trap(struct mthca_dev *dev,
			 u8 port_num,
			 struct ib_mad *mad)
{
	/*
	 * LID-routed SMPs go out QP0 (qpn == 0); everything else goes
	 * out QP1 (qpn == 1), which also selects the GSI send agent
	 * and the QP1 Q_Key below.
	 */
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct mthca_trap_mad *tmad;
	struct ib_sge      gather_list;
	struct ib_send_wr *bad_wr, wr = {
		.opcode      = IB_WR_SEND,
		.sg_list     = &gather_list,
		.num_sge     = 1,
		.send_flags  = IB_SEND_SIGNALED,
		.wr	     = {
			 .ud = {
				 .remote_qpn  = qpn,
				 .remote_qkey = qpn ? IB_QP1_QKEY : 0,
				 .timeout_ms  = 0
			 }
		 }
	};
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		tmad = kmalloc(sizeof *tmad, GFP_KERNEL);
		if (!tmad)
			return;

		tmad->mad = kmalloc(sizeof *tmad->mad, GFP_KERNEL);
		if (!tmad->mad) {
			kfree(tmad);
			return;
		}

		/* Private copy: the caller's MAD buffer is not ours to keep. */
		memcpy(tmad->mad, mad, sizeof *mad);

		wr.wr.ud.mad_hdr = &tmad->mad->mad_hdr;
		/* Stash the tracking struct so send_handler() can free it. */
		wr.wr_id         = (unsigned long) tmad;

		gather_list.addr   = dma_map_single(agent->device->dma_device,
						    tmad->mad,
						    sizeof *tmad->mad,
						    DMA_TO_DEVICE);
		gather_list.length = sizeof *tmad->mad;
		gather_list.lkey   = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey;
		pci_unmap_addr_set(tmad, mapping, gather_list.addr);

		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		wr.wr.ud.ah      = dev->sm_ah[port_num - 1];
		if (wr.wr.ud.ah)
			ret = ib_post_send_mad(agent, &wr, &bad_wr);
		else
			ret = -EINVAL;	/* no SM known yet for this port */
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		/* Posting failed: reclaim what send_handler() would have. */
		if (ret) {
			dma_unmap_single(agent->device->dma_device,
					 pci_unmap_addr(tmad, mapping),
					 sizeof *tmad->mad,
					 DMA_TO_DEVICE);
			kfree(tmad->mad);
			kfree(tmad);
		}
	}
}
/*
 * Process an incoming MAD for @port_num through the device firmware.
 *
 * Locally generated traps (source LID 0) are forwarded to the SM and
 * consumed.  Otherwise the MAD is filtered by class/method, handed to
 * the MAD_IFC firmware command, snooped for LID/P_Key changes, and the
 * response in @out_mad is returned to the caller.
 *
 * Returns a bitmask of IB_MAD_RESULT_* flags: FAILURE on firmware
 * error, SUCCESS alone when the MAD is accepted but generates no reply,
 * SUCCESS|CONSUMED when we took ownership (traps, trap represses), and
 * SUCCESS|REPLY when @out_mad holds a response to send.
 */
int mthca_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      struct ib_wc *in_wc,
		      struct ib_grh *in_grh,
		      struct ib_mad *in_mad,
		      struct ib_mad *out_mad)
{
	int err;
	u8 status;
	/* No work completion means a locally generated MAD (permissive LID). */
	u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	/* Forward locally generated traps to the SM */
	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
	    slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	/*
	 * Only handle SM gets, sets and trap represses for SM class
	 *
	 * Only handle PMA and Mellanox vendor-specific class gets and
	 * sets for other classes.
	 */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries or vendor-specific
		 * MADs -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
		    ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
		     IB_SMP_ATTR_VENDOR_MASK))
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1     ||
		   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
		if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	/* Hand the MAD to firmware for the actual processing. */
	err = mthca_MAD_IFC(to_mdev(ibdev),
			    mad_flags & IB_MAD_IGNORE_MKEY,
			    mad_flags & IB_MAD_IGNORE_BKEY,
			    port_num, in_wc, in_grh, in_mad, out_mad,
			    &status);
	if (err) {
		mthca_err(to_mdev(ibdev), "MAD_IFC failed\n");
		return IB_MAD_RESULT_FAILURE;
	}
	/* Firmware says the packet was bad: drop without a reply. */
	if (status == MTHCA_CMD_STAT_BAD_PKT)
		return IB_MAD_RESULT_SUCCESS;
	if (status) {
		mthca_err(to_mdev(ibdev), "MAD_IFC returned status %02x\n",
			  status);
		return IB_MAD_RESULT_FAILURE;
	}

	/* Successful Set may have changed LID/P_Keys: snoop it. */
	if (!out_mad->mad_hdr.status)
		smp_snoop(ibdev, port_num, in_mad);

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
  236. static void send_handler(struct ib_mad_agent *agent,
  237. struct ib_mad_send_wc *mad_send_wc)
  238. {
  239. struct mthca_trap_mad *tmad =
  240. (void *) (unsigned long) mad_send_wc->wr_id;
  241. dma_unmap_single(agent->device->dma_device,
  242. pci_unmap_addr(tmad, mapping),
  243. sizeof *tmad->mad,
  244. DMA_TO_DEVICE);
  245. kfree(tmad->mad);
  246. kfree(tmad);
  247. }
/*
 * Register an SMI (QP0) and a GSI (QP1) send-only MAD agent for every
 * port, storing them in dev->send_agent[port][qp] for use by
 * forward_trap().  Also initializes sm_lock.
 *
 * Returns 0 on success; on failure, unregisters any agents already
 * created and returns the PTR_ERR of the failing registration.
 */
int mthca_create_agents(struct mthca_dev *dev)
{
	struct ib_mad_agent *agent;
	int p, q;

	spin_lock_init(&dev->sm_lock);

	for (p = 0; p < dev->limits.num_ports; ++p)
		for (q = 0; q <= 1; ++q) {
			/* q == 0 -> SMI/QP0 agent, q == 1 -> GSI/QP1 agent. */
			agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
						      q ? IB_QPT_GSI : IB_QPT_SMI,
						      NULL, 0, send_handler,
						      NULL, NULL);
			if (IS_ERR(agent))
				goto err;
			dev->send_agent[p][q] = agent;
		}

	return 0;

err:
	/* Unwind: only slots filled before the failure are non-NULL. */
	for (p = 0; p < dev->limits.num_ports; ++p)
		for (q = 0; q <= 1; ++q)
			if (dev->send_agent[p][q])
				ib_unregister_mad_agent(dev->send_agent[p][q]);

	return PTR_ERR(agent);
}
  271. void mthca_free_agents(struct mthca_dev *dev)
  272. {
  273. struct ib_mad_agent *agent;
  274. int p, q;
  275. for (p = 0; p < dev->limits.num_ports; ++p) {
  276. for (q = 0; q <= 1; ++q) {
  277. agent = dev->send_agent[p][q];
  278. dev->send_agent[p][q] = NULL;
  279. ib_unregister_mad_agent(agent);
  280. }
  281. if (dev->sm_ah[p])
  282. ib_destroy_ah(dev->sm_ah[p]);
  283. }
  284. }