mthca_mad.c 9.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323
  1. /*
  2. * Copyright (c) 2004 Topspin Communications. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. *
  32. * $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $
  33. */
  34. #include <ib_verbs.h>
  35. #include <ib_mad.h>
  36. #include <ib_smi.h>
  37. #include "mthca_dev.h"
  38. #include "mthca_cmd.h"
/* Mellanox vendor-specific MAD management classes handled by the firmware */
enum {
	MTHCA_VENDOR_CLASS1 = 0x9,
	MTHCA_VENDOR_CLASS2 = 0xa
};
/*
 * Bookkeeping for a trap MAD forwarded to the SM by forward_trap():
 * holds the copied MAD buffer and its DMA mapping so send_handler()
 * can unmap and free them when the send completes.
 */
struct mthca_trap_mad {
	struct ib_mad *mad;
	DECLARE_PCI_UNMAP_ADDR(mapping)
};
  47. static void update_sm_ah(struct mthca_dev *dev,
  48. u8 port_num, u16 lid, u8 sl)
  49. {
  50. struct ib_ah *new_ah;
  51. struct ib_ah_attr ah_attr;
  52. unsigned long flags;
  53. if (!dev->send_agent[port_num - 1][0])
  54. return;
  55. memset(&ah_attr, 0, sizeof ah_attr);
  56. ah_attr.dlid = lid;
  57. ah_attr.sl = sl;
  58. ah_attr.port_num = port_num;
  59. new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
  60. &ah_attr);
  61. if (IS_ERR(new_ah))
  62. return;
  63. spin_lock_irqsave(&dev->sm_lock, flags);
  64. if (dev->sm_ah[port_num - 1])
  65. ib_destroy_ah(dev->sm_ah[port_num - 1]);
  66. dev->sm_ah[port_num - 1] = new_ah;
  67. spin_unlock_irqrestore(&dev->sm_lock, flags);
  68. }
  69. /*
  70. * Snoop SM MADs for port info and P_Key table sets, so we can
  71. * synthesize LID change and P_Key change events.
  72. */
  73. static void smp_snoop(struct ib_device *ibdev,
  74. u8 port_num,
  75. struct ib_mad *mad)
  76. {
  77. struct ib_event event;
  78. if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
  79. mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
  80. mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
  81. if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
  82. update_sm_ah(to_mdev(ibdev), port_num,
  83. be16_to_cpup((__be16 *) (mad->data + 58)),
  84. (*(u8 *) (mad->data + 76)) & 0xf);
  85. event.device = ibdev;
  86. event.event = IB_EVENT_LID_CHANGE;
  87. event.element.port_num = port_num;
  88. ib_dispatch_event(&event);
  89. }
  90. if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
  91. event.device = ibdev;
  92. event.event = IB_EVENT_PKEY_CHANGE;
  93. event.element.port_num = port_num;
  94. ib_dispatch_event(&event);
  95. }
  96. }
  97. }
/*
 * Re-send a locally generated trap MAD to the subnet manager: copy
 * the MAD into a DMA-able buffer and post it on our send agent for
 * the appropriate special QP.  The copy is freed by send_handler()
 * when the send completes, or here immediately if posting fails.
 */
static void forward_trap(struct mthca_dev *dev,
			 u8 port_num,
			 struct ib_mad *mad)
{
	/* LID-routed SMPs go to QP0 (qpn == 0); any other class to QP1 */
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct mthca_trap_mad *tmad;
	struct ib_sge gather_list;
	struct ib_send_wr *bad_wr, wr = {
		.opcode     = IB_WR_SEND,
		.sg_list    = &gather_list,
		.num_sge    = 1,
		.send_flags = IB_SEND_SIGNALED,
		.wr = {
			.ud = {
				.remote_qpn  = qpn,
				/* QP1 traffic uses the well-known GSI qkey */
				.remote_qkey = qpn ? IB_QP1_QKEY : 0,
				.timeout_ms  = 0
			}
		}
	};
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		tmad = kmalloc(sizeof *tmad, GFP_KERNEL);
		if (!tmad)
			return;	/* best effort: silently drop the trap */
		tmad->mad = kmalloc(sizeof *tmad->mad, GFP_KERNEL);
		if (!tmad->mad) {
			kfree(tmad);
			return;
		}
		memcpy(tmad->mad, mad, sizeof *mad);
		wr.wr.ud.mad_hdr = &tmad->mad->mad_hdr;
		/* stash the tracking struct so send_handler() can free it */
		wr.wr_id = (unsigned long) tmad;
		gather_list.addr = dma_map_single(agent->device->dma_device,
						  tmad->mad,
						  sizeof *tmad->mad,
						  DMA_TO_DEVICE);
		gather_list.length = sizeof *tmad->mad;
		gather_list.lkey = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey;
		pci_unmap_addr_set(tmad, mapping, gather_list.addr);
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		wr.wr.ud.ah = dev->sm_ah[port_num - 1];
		if (wr.wr.ud.ah)
			ret = ib_post_send_mad(agent, &wr, &bad_wr);
		else
			ret = -EINVAL;	/* no SM known yet for this port */
		spin_unlock_irqrestore(&dev->sm_lock, flags);
		if (ret) {
			/* post never happened: unmap and free the copy here */
			dma_unmap_single(agent->device->dma_device,
					 pci_unmap_addr(tmad, mapping),
					 sizeof *tmad->mad,
					 DMA_TO_DEVICE);
			kfree(tmad->mad);
			kfree(tmad);
		}
	}
}
/*
 * Process a MAD for @port_num on behalf of the ib_mad layer.
 *
 * Locally generated traps (source LID 0) are forwarded to the SM and
 * consumed.  SM-class Get/Set/TrapRepress and PMA / Mellanox
 * vendor-class Get/Set MADs are handed to the firmware via
 * mthca_MAD_IFC(); anything else is accepted without a reply.
 *
 * Returns a combination of IB_MAD_RESULT_* flags.
 */
int mthca_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      struct ib_wc *in_wc,
		      struct ib_grh *in_grh,
		      struct ib_mad *in_mad,
		      struct ib_mad *out_mad)
{
	int err;
	u8 status;
	/* No work completion means a locally generated MAD */
	u16 slid = in_wc ? in_wc->slid : IB_LID_PERMISSIVE;

	/* Forward locally generated traps to the SM */
	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP &&
	    slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	/*
	 * Only handle SM gets, sets and trap represses for SM class
	 *
	 * Only handle PMA and Mellanox vendor-specific class gets and
	 * sets for other classes.
	 */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;
		/*
		 * Don't process SMInfo queries or vendor-specific
		 * MADs -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
		    ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
		     IB_SMP_ATTR_VENDOR_MASK))
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	err = mthca_MAD_IFC(to_mdev(ibdev),
			    mad_flags & IB_MAD_IGNORE_MKEY,
			    mad_flags & IB_MAD_IGNORE_BKEY,
			    port_num, in_wc, in_grh, in_mad, out_mad,
			    &status);
	if (err) {
		mthca_err(to_mdev(ibdev), "MAD_IFC failed\n");
		return IB_MAD_RESULT_FAILURE;
	}
	if (status == MTHCA_CMD_STAT_BAD_PKT)
		/* firmware rejected the packet itself: no reply, no error */
		return IB_MAD_RESULT_SUCCESS;
	if (status) {
		mthca_err(to_mdev(ibdev), "MAD_IFC returned status %02x\n",
			  status);
		return IB_MAD_RESULT_FAILURE;
	}

	/* A successful set may have changed LID/P_Key state: snoop it */
	if (!out_mad->mad_hdr.status)
		smp_snoop(ibdev, port_num, in_mad);

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
  234. static void send_handler(struct ib_mad_agent *agent,
  235. struct ib_mad_send_wc *mad_send_wc)
  236. {
  237. struct mthca_trap_mad *tmad =
  238. (void *) (unsigned long) mad_send_wc->wr_id;
  239. dma_unmap_single(agent->device->dma_device,
  240. pci_unmap_addr(tmad, mapping),
  241. sizeof *tmad->mad,
  242. DMA_TO_DEVICE);
  243. kfree(tmad->mad);
  244. kfree(tmad);
  245. }
  246. int mthca_create_agents(struct mthca_dev *dev)
  247. {
  248. struct ib_mad_agent *agent;
  249. int p, q;
  250. spin_lock_init(&dev->sm_lock);
  251. for (p = 0; p < dev->limits.num_ports; ++p)
  252. for (q = 0; q <= 1; ++q) {
  253. agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
  254. q ? IB_QPT_GSI : IB_QPT_SMI,
  255. NULL, 0, send_handler,
  256. NULL, NULL);
  257. if (IS_ERR(agent))
  258. goto err;
  259. dev->send_agent[p][q] = agent;
  260. }
  261. return 0;
  262. err:
  263. for (p = 0; p < dev->limits.num_ports; ++p)
  264. for (q = 0; q <= 1; ++q)
  265. if (dev->send_agent[p][q])
  266. ib_unregister_mad_agent(dev->send_agent[p][q]);
  267. return PTR_ERR(agent);
  268. }
  269. void mthca_free_agents(struct mthca_dev *dev)
  270. {
  271. struct ib_mad_agent *agent;
  272. int p, q;
  273. for (p = 0; p < dev->limits.num_ports; ++p) {
  274. for (q = 0; q <= 1; ++q) {
  275. agent = dev->send_agent[p][q];
  276. dev->send_agent[p][q] = NULL;
  277. ib_unregister_mad_agent(agent);
  278. }
  279. if (dev->sm_ah[p])
  280. ib_destroy_ah(dev->sm_ah[p]);
  281. }
  282. }