/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: sa_query.c 2811 2005-07-06 18:11:43Z halr $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>

#include <rdma/ib_pack.h>
#include <rdma/ib_sa.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_sa_sm_ah {
	struct ib_ah *ah;
	struct kref   ref;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	spinlock_t           ah_lock;
	u8                   port_num;
};

struct ib_sa_device {
	int                     start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port       port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_port  *port;
	struct ib_sa_mad   *mad;
	struct ib_sa_sm_ah *sm_ah;
	DECLARE_PCI_UNMAP_ADDR(mapping)
	int                 id;
};

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static spinlock_t idr_lock;
static DEFINE_IDR(query_idr);

static spinlock_t tid_lock;
static u32 tid;

enum {
	IB_SA_ATTR_CLASS_PORTINFO    = 0x01,
	IB_SA_ATTR_NOTICE            = 0x02,
	IB_SA_ATTR_INFORM_INFO       = 0x03,
	IB_SA_ATTR_NODE_REC          = 0x11,
	IB_SA_ATTR_PORT_INFO_REC     = 0x12,
	IB_SA_ATTR_SL2VL_REC         = 0x13,
	IB_SA_ATTR_SWITCH_REC        = 0x14,
	IB_SA_ATTR_LINEAR_FDB_REC    = 0x15,
	IB_SA_ATTR_RANDOM_FDB_REC    = 0x16,
	IB_SA_ATTR_MCAST_FDB_REC     = 0x17,
	IB_SA_ATTR_SM_INFO_REC       = 0x18,
	IB_SA_ATTR_LINK_REC          = 0x20,
	IB_SA_ATTR_GUID_INFO_REC     = 0x30,
	IB_SA_ATTR_SERVICE_REC       = 0x31,
	IB_SA_ATTR_PARTITION_REC     = 0x33,
	IB_SA_ATTR_RANGE_REC         = 0x34,
	IB_SA_ATTR_PATH_REC          = 0x35,
	IB_SA_ATTR_VL_ARB_REC        = 0x36,
	IB_SA_ATTR_MC_GROUP_REC      = 0x37,
	IB_SA_ATTR_MC_MEMBER_REC     = 0x38,
	IB_SA_ATTR_TRACE_REC         = 0x39,
	IB_SA_ATTR_MULTI_PATH_REC    = 0x3a,
	IB_SA_ATTR_SERVICE_ASSOC_REC = 0x3b
};

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),		\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field
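
/*
 * A note on the tables below: ib_pack()/ib_unpack() walk an array of
 * struct ib_field entries to convert between the host C structures in
 * <rdma/ib_sa.h> and the big-endian wire format of the SA attributes.
 * Each entry names a struct member (via the *_REC_FIELD() macros) and
 * gives its position on the wire as a 32-bit word offset plus a bit
 * offset and width; RESERVED entries consume wire bits without
 * touching the structure.  For example, the dlid entry below places a
 * 16-bit field at bits 0-15 of word 10 of the PathRecord.
 */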
static const struct ib_field path_rec_table[] = {
	{ RESERVED,
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ RESERVED,
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(slid),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(raw_traffic),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ RESERVED,
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits  = 12,
	  .size_bits    = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 48 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28,
	  .offset_bits  = 0,
	  .size_bits    = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32,
	  .offset_bits  = 0,
	  .size_bits    = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36,
	  .offset_bits  = 0,
	  .size_bits    = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40,
	  .offset_bits  = 0,
	  .size_bits    = 2*64 },
};

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}
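
/*
 * The address handle for the current SM is reference counted so that
 * in-flight queries keep their AH alive even if a port event replaces
 * port->sm_ah underneath them: send_mad() takes a reference that
 * send_handler() drops, and update_sm_ah() below drops the port's own
 * reference to the old AH once the new one is installed.
 */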
static void update_sm_ah(void *port_ptr)
{
	struct ib_sa_port *port = port_ptr;
	struct ib_sa_sm_ah *new_ah, *old_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr   ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		printk(KERN_WARNING "Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah) {
		printk(KERN_WARNING "Couldn't allocate new SM AH\n");
		return;
	}

	kref_init(&new_ah->ref);

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = port_attr.sm_lid;
	ah_attr.sl       = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		printk(KERN_WARNING "Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	old_ah = port->sm_ah;
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);

	if (old_ah)
		kref_put(&old_ah->ref, free_sm_ah);
}

static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE) {
		struct ib_sa_device *sa_dev =
			ib_get_client_data(event->device, &sa_client);

		schedule_work(&sa_dev->port[event->element.port_num -
					    sa_dev->start_port].update_task);
	}
}

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	spin_unlock_irqrestore(&idr_lock, flags);

	ib_cancel_mad(agent, id);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
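
/*
 * Typical usage (a minimal sketch; my_callback, my_ctx and
 * should_abort are hypothetical names): hold on to the ID returned by
 * one of the ib_sa_*_get()/_query() calls and hand it back together
 * with the query pointer if the caller loses interest before the
 * callback runs:
 *
 *	struct ib_sa_query *query;
 *	int id;
 *
 *	id = ib_sa_path_rec_get(device, port_num, &rec, comp_mask,
 *				1000, GFP_KERNEL,
 *				my_callback, my_ctx, &query);
 *	if (id >= 0 && should_abort)
 *		ib_sa_cancel_query(id, query);
 *
 * On a successful cancel the callback still runs, with status -EINTR,
 * so resources are released on the normal completion path.
 */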
static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid           =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}
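
/*
 * send_mad() allocates the query ID with the two-step idr idiom of
 * this kernel generation: idr_pre_get() preloads memory (GFP_ATOMIC,
 * since idr_get_new() is then called under idr_lock with interrupts
 * off), and idr_get_new() is retried on -EAGAIN in case a concurrent
 * allocation consumed the preloaded node.  The resulting ID doubles
 * as the send work request ID, which is how the completion handlers
 * below find the query again.
 */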
static int send_mad(struct ib_sa_query *query, int timeout_ms)
{
	struct ib_sa_port *port = query->port;
	unsigned long flags;
	int ret;
	struct ib_sge      gather_list;
	struct ib_send_wr *bad_wr, wr = {
		.opcode      = IB_WR_SEND,
		.sg_list     = &gather_list,
		.num_sge     = 1,
		.send_flags  = IB_SEND_SIGNALED,
		.wr	     = {
			 .ud = {
				 .mad_hdr     = &query->mad->mad_hdr,
				 .remote_qpn  = 1,
				 .remote_qkey = IB_QP1_QKEY,
				 .timeout_ms  = timeout_ms,
			 }
		 }
	};

retry:
	if (!idr_pre_get(&query_idr, GFP_ATOMIC))
		return -ENOMEM;
	spin_lock_irqsave(&idr_lock, flags);
	ret = idr_get_new(&query_idr, query, &query->id);
	spin_unlock_irqrestore(&idr_lock, flags);
	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		return ret;

	wr.wr_id = query->id;

	spin_lock_irqsave(&port->ah_lock, flags);
	kref_get(&port->sm_ah->ref);
	query->sm_ah = port->sm_ah;
	wr.wr.ud.ah  = port->sm_ah->ah;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	gather_list.addr   = dma_map_single(port->agent->device->dma_device,
					    query->mad,
					    sizeof (struct ib_sa_mad),
					    DMA_TO_DEVICE);
	gather_list.length = sizeof (struct ib_sa_mad);
	gather_list.lkey   = port->agent->mr->lkey;
	pci_unmap_addr_set(query, mapping, gather_list.addr);

	ret = ib_post_send_mad(port->agent, &wr, &bad_wr);
	if (ret) {
		dma_unmap_single(port->agent->device->dma_device,
				 pci_unmap_addr(query, mapping),
				 sizeof (struct ib_sa_mad),
				 DMA_TO_DEVICE);
		kref_put(&query->sm_ah->ref, free_sm_ah);
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, query->id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.  So use wr.wr_id, which has a copy of the
	 * query's id.
	 */
	return ret ? ret : wr.wr_id;
}

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(sa_query->mad);
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}
/**
 * ib_sa_path_rec_get - Start a Path get query
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, unsigned int __nocast gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port   = &sa_dev->port[port_num - sa_dev->start_port];
	struct ib_mad_agent *agent  = port->agent;
	int ret;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;
	query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
	if (!query->sa_query.mad) {
		kfree(query);
		return -ENOMEM;
	}

	query->callback = callback;
	query->context  = context;

	init_mad(query->sa_query.mad, agent);

	query->sa_query.callback              = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release               = ib_sa_path_rec_release;
	query->sa_query.port                  = port;
	query->sa_query.mad->mad_hdr.method   = IB_MGMT_METHOD_GET;
	query->sa_query.mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	query->sa_query.mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table),
		rec, query->sa_query.mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms);
	if (ret < 0) {
		*sa_query = NULL;
		kfree(query->sa_query.mad);
		kfree(query);
	}

	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
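
/*
 * Example (a minimal sketch, not taken from an in-tree caller; the
 * callback, GID values and completion are assumptions).  A consumer
 * fills in the fields it knows, sets the matching IB_SA_PATH_REC_*
 * bits from <rdma/ib_sa.h> in the component mask, and picks up the
 * rest from the callback:
 *
 *	static void path_done(int status, struct ib_sa_path_rec *resp,
 *			      void *context)
 *	{
 *		if (!status)
 *			...use resp->dlid, resp->sl, resp->mtu...
 *		complete(context);
 *	}
 *
 *	DECLARE_COMPLETION(done);
 *	struct ib_sa_path_rec rec = {
 *		.sgid      = local_gid,
 *		.dgid      = remote_gid,
 *		.numb_path = 1
 *	};
 *	struct ib_sa_query *query;
 *	int id = ib_sa_path_rec_get(device, port_num, &rec,
 *				    IB_SA_PATH_REC_SGID |
 *				    IB_SA_PATH_REC_DGID |
 *				    IB_SA_PATH_REC_NUMB_PATH,
 *				    1000, GFP_KERNEL,
 *				    path_done, &done, &query);
 */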
static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(sa_query->mad);
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}
/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, unsigned int __nocast gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port   = &sa_dev->port[port_num - sa_dev->start_port];
	struct ib_mad_agent *agent  = port->agent;
	int ret;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;
	query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
	if (!query->sa_query.mad) {
		kfree(query);
		return -ENOMEM;
	}

	query->callback = callback;
	query->context  = context;

	init_mad(query->sa_query.mad, agent);

	query->sa_query.callback              = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release               = ib_sa_service_rec_release;
	query->sa_query.port                  = port;
	query->sa_query.mad->mad_hdr.method   = method;
	query->sa_query.mad->mad_hdr.attr_id  =
		cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	query->sa_query.mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, query->sa_query.mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms);
	if (ret < 0) {
		*sa_query = NULL;
		kfree(query->sa_query.mad);
		kfree(query);
	}

	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
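
/*
 * Example (a sketch only; MY_SERVICE_ID, the GID, callback and
 * context are made up, and the IB_SA_SERVICE_REC_* component mask
 * bits are assumed to come from <rdma/ib_sa.h>).  Registering a
 * service record is a Set of the fields being advertised;
 * IB_SA_METHOD_DELETE with the same component mask unregisters it.
 * Multi-byte record fields are stored big-endian in the C structure,
 * hence the cpu_to_be64():
 *
 *	struct ib_sa_service_rec rec = {
 *		.id  = cpu_to_be64(MY_SERVICE_ID),
 *		.gid = local_gid,
 *	};
 *	memcpy(rec.name, "my-service", sizeof "my-service");
 *	id = ib_sa_service_rec_query(device, port_num,
 *				     IB_MGMT_METHOD_SET, &rec,
 *				     IB_SA_SERVICE_REC_SERVICE_ID |
 *				     IB_SA_SERVICE_REC_SERVICE_GID |
 *				     IB_SA_SERVICE_REC_SERVICE_NAME,
 *				     1000, GFP_KERNEL,
 *				     service_done, ctx, &query);
 */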
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(sa_query->mad);
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}
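
/**
 * ib_sa_mcmember_rec_query - Start MCMember Record operation
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:MCMember Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a MCMember Record set/get/delete to the SA to join, leave or
 * query a multicast group.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_mcmember_rec_query() is negative, it is
 * an error code.  Otherwise it is a request ID that can be used to
 * cancel the query.
 */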
int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, unsigned int __nocast gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port   = &sa_dev->port[port_num - sa_dev->start_port];
	struct ib_mad_agent *agent  = port->agent;
	int ret;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;
	query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
	if (!query->sa_query.mad) {
		kfree(query);
		return -ENOMEM;
	}

	query->callback = callback;
	query->context  = context;

	init_mad(query->sa_query.mad, agent);

	query->sa_query.callback              = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release               = ib_sa_mcmember_rec_release;
	query->sa_query.port                  = port;
	query->sa_query.mad->mad_hdr.method   = method;
	query->sa_query.mad->mad_hdr.attr_id  = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	query->sa_query.mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, query->sa_query.mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms);
	if (ret < 0) {
		*sa_query = NULL;
		kfree(query->sa_query.mad);
		kfree(query);
	}

	return ret;
}
EXPORT_SYMBOL(ib_sa_mcmember_rec_query);
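
/*
 * Example (a sketch; the GIDs, callback and context are assumptions,
 * and the IB_SA_MCMEMBER_REC_* component mask bits are assumed to
 * come from <rdma/ib_sa.h>).  Joining a multicast group is a Set of a
 * MCMemberRecord naming the group and the joining port, with
 * join_state 1 requesting full membership:
 *
 *	struct ib_sa_mcmember_rec rec = {
 *		.mgid       = group_gid,
 *		.port_gid   = local_gid,
 *		.join_state = 1
 *	};
 *	id = ib_sa_mcmember_rec_query(device, port_num,
 *				      IB_MGMT_METHOD_SET, &rec,
 *				      IB_SA_MCMEMBER_REC_MGID |
 *				      IB_SA_MCMEMBER_REC_PORT_GID |
 *				      IB_SA_MCMEMBER_REC_JOIN_STATE,
 *				      1000, GFP_KERNEL,
 *				      join_done, ctx, &query);
 */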
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query;
	unsigned long flags;

	spin_lock_irqsave(&idr_lock, flags);
	query = idr_find(&query_idr, mad_send_wc->wr_id);
	spin_unlock_irqrestore(&idr_lock, flags);

	if (!query)
		return;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	dma_unmap_single(agent->device->dma_device,
			 pci_unmap_addr(query, mapping),
			 sizeof (struct ib_sa_mad),
			 DMA_TO_DEVICE);
	kref_put(&query->sm_ah->ref, free_sm_ah);

	query->release(query);

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, mad_send_wc->wr_id);
	spin_unlock_irqrestore(&idr_lock, flags);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;
	unsigned long flags;

	spin_lock_irqsave(&idr_lock, flags);
	query = idr_find(&query_idr, mad_recv_wc->wc->wr_id);
	spin_unlock_irqrestore(&idr_lock, flags);

	if (query && query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;

	if (device->node_type == IB_NODE_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	sa_dev = kmalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;
		spin_lock_init(&sa_dev->port[i].ah_lock);

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task,
			  update_sm_ah, &sa_dev->port[i]);
	}

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */
	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i)
		update_sm_ah(&sa_dev->port[i]);

	return;

err:
	while (--i >= 0)
		ib_unregister_mad_agent(sa_dev->port[i].agent);

	kfree(sa_dev);

	return;
}
static void ib_sa_remove_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		ib_unregister_mad_agent(sa_dev->port[i].agent);
		/* sm_ah is still NULL if the initial update_sm_ah() failed */
		if (sa_dev->port[i].sm_ah)
			kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
	}

	kfree(sa_dev);
}
static int __init ib_sa_init(void)
{
	int ret;

	spin_lock_init(&idr_lock);
	spin_lock_init(&tid_lock);

	get_random_bytes(&tid, sizeof tid);

	ret = ib_register_client(&sa_client);
	if (ret)
		printk(KERN_ERR "Couldn't register ib_sa client\n");

	return ret;
}

static void __exit ib_sa_cleanup(void)
{
	ib_unregister_client(&sa_client);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);