/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $
 */
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

#include <asm/bug.h>

#include <ib_smi.h>

#include "smi.h"
#include "agent_priv.h"
#include "mad_priv.h"
#include "agent.h"
  45. spinlock_t ib_agent_port_list_lock;
  46. static LIST_HEAD(ib_agent_port_list);
  47. /*
  48. * Caller must hold ib_agent_port_list_lock
  49. */
  50. static inline struct ib_agent_port_private *
  51. __ib_get_agent_port(struct ib_device *device, int port_num,
  52. struct ib_mad_agent *mad_agent)
  53. {
  54. struct ib_agent_port_private *entry;
  55. BUG_ON(!(!!device ^ !!mad_agent)); /* Exactly one MUST be (!NULL) */
  56. if (device) {
  57. list_for_each_entry(entry, &ib_agent_port_list, port_list) {
  58. if (entry->smp_agent->device == device &&
  59. entry->port_num == port_num)
  60. return entry;
  61. }
  62. } else {
  63. list_for_each_entry(entry, &ib_agent_port_list, port_list) {
  64. if ((entry->smp_agent == mad_agent) ||
  65. (entry->perf_mgmt_agent == mad_agent))
  66. return entry;
  67. }
  68. }
  69. return NULL;
  70. }
  71. static inline struct ib_agent_port_private *
  72. ib_get_agent_port(struct ib_device *device, int port_num,
  73. struct ib_mad_agent *mad_agent)
  74. {
  75. struct ib_agent_port_private *entry;
  76. unsigned long flags;
  77. spin_lock_irqsave(&ib_agent_port_list_lock, flags);
  78. entry = __ib_get_agent_port(device, port_num, mad_agent);
  79. spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
  80. return entry;
  81. }
  82. int smi_check_local_dr_smp(struct ib_smp *smp,
  83. struct ib_device *device,
  84. int port_num)
  85. {
  86. struct ib_agent_port_private *port_priv;
  87. if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
  88. return 1;
  89. port_priv = ib_get_agent_port(device, port_num, NULL);
  90. if (!port_priv) {
  91. printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d "
  92. "not open\n",
  93. device->name, port_num);
  94. return 1;
  95. }
  96. return smi_check_local_smp(port_priv->smp_agent, smp);
  97. }
  98. static int agent_mad_send(struct ib_mad_agent *mad_agent,
  99. struct ib_agent_port_private *port_priv,
  100. struct ib_mad_private *mad_priv,
  101. struct ib_grh *grh,
  102. struct ib_wc *wc)
  103. {
  104. struct ib_agent_send_wr *agent_send_wr;
  105. struct ib_sge gather_list;
  106. struct ib_send_wr send_wr;
  107. struct ib_send_wr *bad_send_wr;
  108. struct ib_ah_attr ah_attr;
  109. unsigned long flags;
  110. int ret = 1;
  111. agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL);
  112. if (!agent_send_wr)
  113. goto out;
  114. agent_send_wr->mad = mad_priv;
  115. gather_list.addr = dma_map_single(mad_agent->device->dma_device,
  116. &mad_priv->mad,
  117. sizeof(mad_priv->mad),
  118. DMA_TO_DEVICE);
  119. gather_list.length = sizeof(mad_priv->mad);
  120. gather_list.lkey = mad_agent->mr->lkey;
  121. send_wr.next = NULL;
  122. send_wr.opcode = IB_WR_SEND;
  123. send_wr.sg_list = &gather_list;
  124. send_wr.num_sge = 1;
  125. send_wr.wr.ud.remote_qpn = wc->src_qp; /* DQPN */
  126. send_wr.wr.ud.timeout_ms = 0;
  127. send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
  128. ah_attr.dlid = wc->slid;
  129. ah_attr.port_num = mad_agent->port_num;
  130. ah_attr.src_path_bits = wc->dlid_path_bits;
  131. ah_attr.sl = wc->sl;
  132. ah_attr.static_rate = 0;
  133. ah_attr.ah_flags = 0; /* No GRH */
  134. if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
  135. if (wc->wc_flags & IB_WC_GRH) {
  136. ah_attr.ah_flags = IB_AH_GRH;
  137. /* Should sgid be looked up ? */
  138. ah_attr.grh.sgid_index = 0;
  139. ah_attr.grh.hop_limit = grh->hop_limit;
  140. ah_attr.grh.flow_label = be32_to_cpup(
  141. &grh->version_tclass_flow) & 0xfffff;
  142. ah_attr.grh.traffic_class = (be32_to_cpup(
  143. &grh->version_tclass_flow) >> 20) & 0xff;
  144. memcpy(ah_attr.grh.dgid.raw,
  145. grh->sgid.raw,
  146. sizeof(ah_attr.grh.dgid));
  147. }
  148. }
  149. agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr);
  150. if (IS_ERR(agent_send_wr->ah)) {
  151. printk(KERN_ERR SPFX "No memory for address handle\n");
  152. kfree(agent_send_wr);
  153. goto out;
  154. }
  155. send_wr.wr.ud.ah = agent_send_wr->ah;
  156. if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
  157. send_wr.wr.ud.pkey_index = wc->pkey_index;
  158. send_wr.wr.ud.remote_qkey = IB_QP1_QKEY;
  159. } else { /* for SMPs */
  160. send_wr.wr.ud.pkey_index = 0;
  161. send_wr.wr.ud.remote_qkey = 0;
  162. }
  163. send_wr.wr.ud.mad_hdr = &mad_priv->mad.mad.mad_hdr;
  164. send_wr.wr_id = (unsigned long)agent_send_wr;
  165. pci_unmap_addr_set(agent_send_wr, mapping, gather_list.addr);
  166. /* Send */
  167. spin_lock_irqsave(&port_priv->send_list_lock, flags);
  168. if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) {
  169. spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
  170. dma_unmap_single(mad_agent->device->dma_device,
  171. pci_unmap_addr(agent_send_wr, mapping),
  172. sizeof(mad_priv->mad),
  173. DMA_TO_DEVICE);
  174. ib_destroy_ah(agent_send_wr->ah);
  175. kfree(agent_send_wr);
  176. } else {
  177. list_add_tail(&agent_send_wr->send_list,
  178. &port_priv->send_posted_list);
  179. spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
  180. ret = 0;
  181. }
  182. out:
  183. return ret;
  184. }
  185. int agent_send(struct ib_mad_private *mad,
  186. struct ib_grh *grh,
  187. struct ib_wc *wc,
  188. struct ib_device *device,
  189. int port_num)
  190. {
  191. struct ib_agent_port_private *port_priv;
  192. struct ib_mad_agent *mad_agent;
  193. port_priv = ib_get_agent_port(device, port_num, NULL);
  194. if (!port_priv) {
  195. printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n",
  196. device->name, port_num);
  197. return 1;
  198. }
  199. /* Get mad agent based on mgmt_class in MAD */
  200. switch (mad->mad.mad.mad_hdr.mgmt_class) {
  201. case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
  202. case IB_MGMT_CLASS_SUBN_LID_ROUTED:
  203. mad_agent = port_priv->smp_agent;
  204. break;
  205. case IB_MGMT_CLASS_PERF_MGMT:
  206. mad_agent = port_priv->perf_mgmt_agent;
  207. break;
  208. default:
  209. return 1;
  210. }
  211. return agent_mad_send(mad_agent, port_priv, mad, grh, wc);
  212. }
  213. static void agent_send_handler(struct ib_mad_agent *mad_agent,
  214. struct ib_mad_send_wc *mad_send_wc)
  215. {
  216. struct ib_agent_port_private *port_priv;
  217. struct ib_agent_send_wr *agent_send_wr;
  218. unsigned long flags;
  219. /* Find matching MAD agent */
  220. port_priv = ib_get_agent_port(NULL, 0, mad_agent);
  221. if (!port_priv) {
  222. printk(KERN_ERR SPFX "agent_send_handler: no matching MAD "
  223. "agent %p\n", mad_agent);
  224. return;
  225. }
  226. agent_send_wr = (struct ib_agent_send_wr *)(unsigned long)mad_send_wc->wr_id;
  227. spin_lock_irqsave(&port_priv->send_list_lock, flags);
  228. /* Remove completed send from posted send MAD list */
  229. list_del(&agent_send_wr->send_list);
  230. spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
  231. dma_unmap_single(mad_agent->device->dma_device,
  232. pci_unmap_addr(agent_send_wr, mapping),
  233. sizeof(agent_send_wr->mad->mad),
  234. DMA_TO_DEVICE);
  235. ib_destroy_ah(agent_send_wr->ah);
  236. /* Release allocated memory */
  237. kmem_cache_free(ib_mad_cache, agent_send_wr->mad);
  238. kfree(agent_send_wr);
  239. }
  240. int ib_agent_port_open(struct ib_device *device, int port_num)
  241. {
  242. int ret;
  243. struct ib_agent_port_private *port_priv;
  244. unsigned long flags;
  245. /* First, check if port already open for SMI */
  246. port_priv = ib_get_agent_port(device, port_num, NULL);
  247. if (port_priv) {
  248. printk(KERN_DEBUG SPFX "%s port %d already open\n",
  249. device->name, port_num);
  250. return 0;
  251. }
  252. /* Create new device info */
  253. port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
  254. if (!port_priv) {
  255. printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
  256. ret = -ENOMEM;
  257. goto error1;
  258. }
  259. memset(port_priv, 0, sizeof *port_priv);
  260. port_priv->port_num = port_num;
  261. spin_lock_init(&port_priv->send_list_lock);
  262. INIT_LIST_HEAD(&port_priv->send_posted_list);
  263. /* Obtain send only MAD agent for SM class (SMI QP) */
  264. port_priv->smp_agent = ib_register_mad_agent(device, port_num,
  265. IB_QPT_SMI,
  266. NULL, 0,
  267. &agent_send_handler,
  268. NULL, NULL);
  269. if (IS_ERR(port_priv->smp_agent)) {
  270. ret = PTR_ERR(port_priv->smp_agent);
  271. goto error2;
  272. }
  273. /* Obtain send only MAD agent for PerfMgmt class (GSI QP) */
  274. port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
  275. IB_QPT_GSI,
  276. NULL, 0,
  277. &agent_send_handler,
  278. NULL, NULL);
  279. if (IS_ERR(port_priv->perf_mgmt_agent)) {
  280. ret = PTR_ERR(port_priv->perf_mgmt_agent);
  281. goto error3;
  282. }
  283. spin_lock_irqsave(&ib_agent_port_list_lock, flags);
  284. list_add_tail(&port_priv->port_list, &ib_agent_port_list);
  285. spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
  286. return 0;
  287. error3:
  288. ib_unregister_mad_agent(port_priv->smp_agent);
  289. error2:
  290. kfree(port_priv);
  291. error1:
  292. return ret;
  293. }
  294. int ib_agent_port_close(struct ib_device *device, int port_num)
  295. {
  296. struct ib_agent_port_private *port_priv;
  297. unsigned long flags;
  298. spin_lock_irqsave(&ib_agent_port_list_lock, flags);
  299. port_priv = __ib_get_agent_port(device, port_num, NULL);
  300. if (port_priv == NULL) {
  301. spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
  302. printk(KERN_ERR SPFX "Port %d not found\n", port_num);
  303. return -ENODEV;
  304. }
  305. list_del(&port_priv->port_list);
  306. spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
  307. ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
  308. ib_unregister_mad_agent(port_priv->smp_agent);
  309. kfree(port_priv);
  310. return 0;
  311. }