sa_query.c

/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: sa_query.c 2811 2005-07-06 18:11:43Z halr $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include "sa.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand subnet administration query support");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_sa_sm_ah {
	struct ib_ah        *ah;
	struct kref          ref;
	u16                  pkey_index;
	u8                   src_path_mask;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	spinlock_t           ah_lock;
	u8                   port_num;
};

struct ib_sa_device {
	int                     start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port       port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int                     id;
};

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static spinlock_t idr_lock;
static DEFINE_IDR(query_idr);

static spinlock_t tid_lock;
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),		\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field
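
/*
 * Illustrative expansion (a sketch, not part of the original file):
 * for a field such as dlid, PATH_REC_FIELD(dlid) produces the
 * ib_field initializer
 *
 *	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, dlid),
 *	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->dlid,
 *	.field_name          = "sa_path_rec:dlid",
 *
 * and each table entry below adds the wire position (.offset_words,
 * .offset_bits, .size_bits).  ib_pack()/ib_unpack() from
 * <rdma/ib_pack.h> walk these tables to convert between the host
 * structure and the big-endian SA MAD data layout.
 */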
static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(slid),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(raw_traffic),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits  = 12,
	  .size_bits    = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 48 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28,
	  .offset_bits  = 0,
	  .size_bits    = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32,
	  .offset_bits  = 0,
	  .size_bits    = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36,
	  .offset_bits  = 0,
	  .size_bits    = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40,
	  .offset_bits  = 0,
	  .size_bits    = 2*64 },
};

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah, *old_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr   ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		printk(KERN_WARNING "Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah) {
		printk(KERN_WARNING "Couldn't allocate new SM AH\n");
		return;
	}

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		printk(KERN_ERR "Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = port_attr.sm_lid;
	ah_attr.sl       = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		printk(KERN_WARNING "Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	old_ah = port->sm_ah;
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);

	if (old_ah)
		kref_put(&old_ah->ref, free_sm_ah);
}

static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		struct ib_sa_device *sa_dev;
		sa_dev = container_of(handler, typeof(*sa_dev), event_handler);

		schedule_work(&sa_dev->port[event->element.port_num -
					    sa_dev->start_port].update_task);
	}
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
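
/*
 * Illustrative lifecycle sketch (hypothetical consumer, not part of
 * this file): a client struct is registered once, used for any number
 * of queries, then unregistered on teardown.  Because each in-flight
 * query holds a reference via ib_sa_client_get()/ib_sa_client_put(),
 * ib_sa_unregister_client() blocks on client->comp until the last
 * outstanding query completes.
 *
 *	static struct ib_sa_client my_sa_client;   // hypothetical name
 *
 *	ib_sa_register_client(&my_sa_client);
 *	...issue SA queries against &my_sa_client...
 *	ib_sa_unregister_client(&my_sa_client);
 */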
/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port   *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
				 get_src_path_mask(device, port_num);
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	if (rec->hop_limit > 1) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid(device, &rec->sgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index    = gid_index;
		ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit     = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);
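
/*
 * Illustrative use (a sketch; pd and resp are hypothetical, e.g. the
 * path record delivered to an ib_sa_path_rec_get() callback): a
 * consumer can turn a path record into an address handle like so:
 *
 *	struct ib_ah_attr ah_attr;
 *	struct ib_ah *ah = NULL;
 *
 *	if (!ib_init_ah_from_path(device, port_num, resp, &ah_attr))
 *		ah = ib_create_ah(pd, &ah_attr);
 */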
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask);
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	unsigned long flags;
	int ret, id;

retry:
	if (!idr_pre_get(&query_idr, gfp_mask))
		return -ENOMEM;
	spin_lock_irqsave(&idr_lock, flags);
	ret = idr_get_new(&query_idr, query, &id);
	spin_unlock_irqrestore(&idr_lock, flags);
	if (ret == -EAGAIN)
		goto retry;
	if (ret)
		return ret;

	query->mad_buf->timeout_ms = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}
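
/*
 * Note on the idr idiom above: idr_pre_get() only preallocates memory
 * and runs unlocked, so another caller can consume the preallocated
 * node before we take idr_lock.  In that case idr_get_new() returns
 * -EAGAIN and we simply loop back to preallocate again; any other
 * error is returned to the caller.
 */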
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path.  The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code.  Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release  = ib_sa_path_rec_release;
	mad->mad_hdr.method      = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask    = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
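
/*
 * Illustrative caller sketch (hypothetical names, not part of this
 * file): look up a path by SGID/DGID, keeping the returned id and the
 * query pointer so the request can be canceled with
 * ib_sa_cancel_query():
 *
 *	static void my_path_handler(int status, struct ib_sa_path_rec *resp,
 *				    void *context)
 *	{
 *		if (!status)
 *			...use resp...
 *	}
 *
 *	struct ib_sa_query *query;
 *	int id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *				    IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
 *				    1000, GFP_KERNEL,
 *				    my_path_handler, my_context, &query);
 *	if (id < 0)
 *		return id;
 *	...
 *	ib_sa_cancel_query(id, query);	// optional, e.g. on teardown
 */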
static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query.  The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code.  Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	mad->mad_hdr.method      = method;
	mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask    = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
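
/*
 * Illustrative caller sketch (hypothetical names): registering a
 * service record uses the SET method; IB_SA_METHOD_DELETE unregisters
 * it and IB_MGMT_METHOD_GET queries it.
 *
 *	id = ib_sa_service_rec_query(&my_sa_client, device, port_num,
 *				     IB_MGMT_METHOD_SET, &svc_rec,
 *				     IB_SA_SERVICE_REC_SERVICE_ID |
 *				     IB_SA_SERVICE_REC_SERVICE_GID,
 *				     1000, GFP_KERNEL,
 *				     my_service_handler, my_context, &query);
 */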
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kmalloc(sizeof *query, gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method      = method;
	mad->mad_hdr.attr_id     = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask    = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;
	struct ib_mad_send_buf *mad_buf;

	mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id;
	query = mad_buf->context[0];

	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;

	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
		return;

	if (device->node_type == RDMA_NODE_IB_SWITCH)
		s = e = 0;
	else {
		s = 1;
		e = device->phys_port_cnt;
	}

	sa_dev = kmalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;
		spin_lock_init(&sa_dev->port[i].ah_lock);

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
	}

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */
	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i)
		update_sm_ah(&sa_dev->port[i].update_task);

	return;

err:
	while (--i >= 0)
		ib_unregister_mad_agent(sa_dev->port[i].agent);

	kfree(sa_dev);

	return;
}
static void ib_sa_remove_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	flush_scheduled_work();

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		ib_unregister_mad_agent(sa_dev->port[i].agent);
		/* sm_ah stays NULL if update_sm_ah() never succeeded */
		if (sa_dev->port[i].sm_ah)
			kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
	}

	kfree(sa_dev);
}
static int __init ib_sa_init(void)
{
	int ret;

	spin_lock_init(&idr_lock);
	spin_lock_init(&tid_lock);

	get_random_bytes(&tid, sizeof tid);

	ret = ib_register_client(&sa_client);
	if (ret) {
		printk(KERN_ERR "Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		printk(KERN_ERR "Couldn't initialize multicast handling\n");
		goto err2;
	}

	return 0;

err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

static void __exit ib_sa_cleanup(void)
{
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}

module_init(ib_sa_init);
module_exit(ib_sa_cleanup);