/* drivers/infiniband/core/device.c — IB core device and client registration. */
  1. /*
  2. * Copyright (c) 2004 Topspin Communications. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. *
  32. * $Id: device.c 1349 2004-12-16 21:09:43Z roland $
  33. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <asm/semaphore.h>

#include "core_priv.h"
  41. MODULE_AUTHOR("Roland Dreier");
  42. MODULE_DESCRIPTION("core kernel InfiniBand API");
  43. MODULE_LICENSE("Dual BSD/GPL");
/*
 * Per-device, per-client context slot.  One of these hangs off each
 * device's client_data_list for every registered client; it holds the
 * opaque pointer managed via ib_set_client_data()/ib_get_client_data().
 */
struct ib_client_data {
	struct list_head list;    /* entry in device->client_data_list */
	struct ib_client *client; /* owning client */
	void * data;              /* client's private per-device data */
};
/* All registered devices and all registered clients. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_sem protects access to both device_list and client_list.
 * There's no real point to using multiple locks or something fancier
 * like an rwsem: we always access both lists, and we're always
 * modifying one list or the other list.  In any case this is not a
 * hot path so there's no point in trying to optimize.
 */
static DECLARE_MUTEX(device_sem);
  59. static int ib_device_check_mandatory(struct ib_device *device)
  60. {
  61. #define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
  62. static const struct {
  63. size_t offset;
  64. char *name;
  65. } mandatory_table[] = {
  66. IB_MANDATORY_FUNC(query_device),
  67. IB_MANDATORY_FUNC(query_port),
  68. IB_MANDATORY_FUNC(query_pkey),
  69. IB_MANDATORY_FUNC(query_gid),
  70. IB_MANDATORY_FUNC(alloc_pd),
  71. IB_MANDATORY_FUNC(dealloc_pd),
  72. IB_MANDATORY_FUNC(create_ah),
  73. IB_MANDATORY_FUNC(destroy_ah),
  74. IB_MANDATORY_FUNC(create_qp),
  75. IB_MANDATORY_FUNC(modify_qp),
  76. IB_MANDATORY_FUNC(destroy_qp),
  77. IB_MANDATORY_FUNC(post_send),
  78. IB_MANDATORY_FUNC(post_recv),
  79. IB_MANDATORY_FUNC(create_cq),
  80. IB_MANDATORY_FUNC(destroy_cq),
  81. IB_MANDATORY_FUNC(poll_cq),
  82. IB_MANDATORY_FUNC(req_notify_cq),
  83. IB_MANDATORY_FUNC(get_dma_mr),
  84. IB_MANDATORY_FUNC(dereg_mr)
  85. };
  86. int i;
  87. for (i = 0; i < sizeof mandatory_table / sizeof mandatory_table[0]; ++i) {
  88. if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
  89. printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
  90. device->name, mandatory_table[i].name);
  91. return -EINVAL;
  92. }
  93. }
  94. return 0;
  95. }
  96. static struct ib_device *__ib_device_get_by_name(const char *name)
  97. {
  98. struct ib_device *device;
  99. list_for_each_entry(device, &device_list, core_list)
  100. if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
  101. return device;
  102. return NULL;
  103. }
/*
 * Assign a unique name to a new device.  @name holds a printf-style
 * template such as "mthca%d"; on success it is overwritten in place
 * with the first unused instantiation of that template.
 *
 * Returns 0 on success, -ENOMEM if the scratch bitmap page can't be
 * allocated, or -ENFILE if every candidate index is taken.  Caller
 * must hold device_sem (we walk device_list).
 */
static int alloc_name(char *name)
{
	long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	/* One zeroed page used as a bitmap of PAGE_SIZE * 8 candidate indices. */
	inuse = (long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		/* Trick: use the caller's template as a *scanf* format to
		 * pull out the index an existing device already uses. */
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		/* Re-format and compare to reject lookalike matches where
		 * sscanf succeeded but the full name doesn't round-trip. */
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	/* Paranoia: make sure the chosen name really is unused. */
	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}
  130. /**
  131. * ib_alloc_device - allocate an IB device struct
  132. * @size:size of structure to allocate
  133. *
  134. * Low-level drivers should use ib_alloc_device() to allocate &struct
  135. * ib_device. @size is the size of the structure to be allocated,
  136. * including any private data used by the low-level driver.
  137. * ib_dealloc_device() must be used to free structures allocated with
  138. * ib_alloc_device().
  139. */
  140. struct ib_device *ib_alloc_device(size_t size)
  141. {
  142. void *dev;
  143. BUG_ON(size < sizeof (struct ib_device));
  144. dev = kmalloc(size, GFP_KERNEL);
  145. if (!dev)
  146. return NULL;
  147. memset(dev, 0, size);
  148. return dev;
  149. }
  150. EXPORT_SYMBOL(ib_alloc_device);
/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	/* A device that was never registered can simply be freed. */
	if (device->reg_state == IB_DEV_UNINITIALIZED) {
		kfree(device);
		return;
	}

	/* Anything else must have gone through ib_unregister_device() first. */
	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);

	/*
	 * Hand the structure back through sysfs teardown — presumably the
	 * sysfs release callback performs the final kfree; confirm against
	 * ib_device_unregister_sysfs() in sysfs.c.
	 */
	ib_device_unregister_sysfs(device);
}
EXPORT_SYMBOL(ib_dealloc_device);
  167. static int add_client_context(struct ib_device *device, struct ib_client *client)
  168. {
  169. struct ib_client_data *context;
  170. unsigned long flags;
  171. context = kmalloc(sizeof *context, GFP_KERNEL);
  172. if (!context) {
  173. printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
  174. device->name, client->name);
  175. return -ENOMEM;
  176. }
  177. context->client = client;
  178. context->data = NULL;
  179. spin_lock_irqsave(&device->client_data_lock, flags);
  180. list_add(&context->list, &device->client_data_list);
  181. spin_unlock_irqrestore(&device->client_data_lock, flags);
  182. return 0;
  183. }
  184. /**
  185. * ib_register_device - Register an IB device with IB core
  186. * @device:Device to register
  187. *
  188. * Low-level drivers use ib_register_device() to register their
  189. * devices with the IB core. All registered clients will receive a
  190. * callback for each device that is added. @device must be allocated
  191. * with ib_alloc_device().
  192. */
  193. int ib_register_device(struct ib_device *device)
  194. {
  195. int ret;
  196. down(&device_sem);
  197. if (strchr(device->name, '%')) {
  198. ret = alloc_name(device->name);
  199. if (ret)
  200. goto out;
  201. }
  202. if (ib_device_check_mandatory(device)) {
  203. ret = -EINVAL;
  204. goto out;
  205. }
  206. INIT_LIST_HEAD(&device->event_handler_list);
  207. INIT_LIST_HEAD(&device->client_data_list);
  208. spin_lock_init(&device->event_handler_lock);
  209. spin_lock_init(&device->client_data_lock);
  210. ret = ib_device_register_sysfs(device);
  211. if (ret) {
  212. printk(KERN_WARNING "Couldn't register device %s with driver model\n",
  213. device->name);
  214. goto out;
  215. }
  216. list_add_tail(&device->core_list, &device_list);
  217. device->reg_state = IB_DEV_REGISTERED;
  218. {
  219. struct ib_client *client;
  220. list_for_each_entry(client, &client_list, list)
  221. if (client->add && !add_client_context(device, client))
  222. client->add(device);
  223. }
  224. out:
  225. up(&device_sem);
  226. return ret;
  227. }
  228. EXPORT_SYMBOL(ib_register_device);
/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device. All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client *client;
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	down(&device_sem);

	/* Remove callbacks run in reverse registration order. */
	list_for_each_entry_reverse(client, &client_list, list)
		if (client->remove)
			client->remove(device);

	/* Off the global list before dropping device_sem, so no new
	 * client can find this device. */
	list_del(&device->core_list);

	up(&device_sem);

	/*
	 * Every client has been told to go away, so the context slots
	 * can be freed wholesale.  No list_del() per entry: the list
	 * head itself is going out of use with the device.
	 */
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);
  253. /**
  254. * ib_register_client - Register an IB client
  255. * @client:Client to register
  256. *
  257. * Upper level users of the IB drivers can use ib_register_client() to
  258. * register callbacks for IB device addition and removal. When an IB
  259. * device is added, each registered client's add method will be called
  260. * (in the order the clients were registered), and when a device is
  261. * removed, each client's remove method will be called (in the reverse
  262. * order that clients were registered). In addition, when
  263. * ib_register_client() is called, the client will receive an add
  264. * callback for all devices already registered.
  265. */
  266. int ib_register_client(struct ib_client *client)
  267. {
  268. struct ib_device *device;
  269. down(&device_sem);
  270. list_add_tail(&client->list, &client_list);
  271. list_for_each_entry(device, &device_list, core_list)
  272. if (client->add && !add_client_context(device, client))
  273. client->add(device);
  274. up(&device_sem);
  275. return 0;
  276. }
  277. EXPORT_SYMBOL(ib_register_client);
/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration. When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	down(&device_sem);

	list_for_each_entry(device, &device_list, core_list) {
		/* Remove callback first, then reap this client's context
		 * slot under the device's client_data_lock. */
		if (client->remove)
			client->remove(device);

		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				list_del(&context->list);
				kfree(context);
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
	}
	list_del(&client->list);

	up(&device_sem);
}
EXPORT_SYMBOL(ib_unregister_client);
  307. /**
  308. * ib_get_client_data - Get IB client context
  309. * @device:Device to get context for
  310. * @client:Client to get context for
  311. *
  312. * ib_get_client_data() returns client context set with
  313. * ib_set_client_data().
  314. */
  315. void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
  316. {
  317. struct ib_client_data *context;
  318. void *ret = NULL;
  319. unsigned long flags;
  320. spin_lock_irqsave(&device->client_data_lock, flags);
  321. list_for_each_entry(context, &device->client_data_list, list)
  322. if (context->client == client) {
  323. ret = context->data;
  324. break;
  325. }
  326. spin_unlock_irqrestore(&device->client_data_lock, flags);
  327. return ret;
  328. }
  329. EXPORT_SYMBOL(ib_get_client_data);
/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	/* No slot was created by add_client_context() — nothing to update. */
	printk(KERN_WARNING "No client context found for %s/%s\n",
	       device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
  356. /**
  357. * ib_register_event_handler - Register an IB event handler
  358. * @event_handler:Handler to register
  359. *
  360. * ib_register_event_handler() registers an event handler that will be
  361. * called back when asynchronous IB events occur (as defined in
  362. * chapter 11 of the InfiniBand Architecture Specification). This
  363. * callback may occur in interrupt context.
  364. */
  365. int ib_register_event_handler (struct ib_event_handler *event_handler)
  366. {
  367. unsigned long flags;
  368. spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
  369. list_add_tail(&event_handler->list,
  370. &event_handler->device->event_handler_list);
  371. spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
  372. return 0;
  373. }
  374. EXPORT_SYMBOL(ib_register_event_handler);
  375. /**
  376. * ib_unregister_event_handler - Unregister an event handler
  377. * @event_handler:Handler to unregister
  378. *
  379. * Unregister an event handler registered with
  380. * ib_register_event_handler().
  381. */
  382. int ib_unregister_event_handler(struct ib_event_handler *event_handler)
  383. {
  384. unsigned long flags;
  385. spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
  386. list_del(&event_handler->list);
  387. spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
  388. return 0;
  389. }
  390. EXPORT_SYMBOL(ib_unregister_event_handler);
  391. /**
  392. * ib_dispatch_event - Dispatch an asynchronous event
  393. * @event:Event to dispatch
  394. *
  395. * Low-level drivers must call ib_dispatch_event() to dispatch the
  396. * event to all registered event handlers when an asynchronous event
  397. * occurs.
  398. */
  399. void ib_dispatch_event(struct ib_event *event)
  400. {
  401. unsigned long flags;
  402. struct ib_event_handler *handler;
  403. spin_lock_irqsave(&event->device->event_handler_lock, flags);
  404. list_for_each_entry(handler, &event->device->event_handler_list, list)
  405. handler->handler(handler, event);
  406. spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
  407. }
  408. EXPORT_SYMBOL(ib_dispatch_event);
/**
 * ib_query_device - Query IB device attributes
 * @device:Device to query
 * @device_attr:Device attributes
 *
 * ib_query_device() returns the attributes of a device through the
 * @device_attr pointer.
 */
int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr)
{
	/* query_device is in the mandatory table, so it is never NULL here. */
	return device->query_device(device, device_attr);
}
EXPORT_SYMBOL(ib_query_device);
/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	/* query_port is in the mandatory table, so it is never NULL here. */
	return device->query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);
/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid)
{
	/* query_gid is in the mandatory table, so it is never NULL here. */
	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);
/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	/* query_pkey is in the mandatory table, so it is never NULL here. */
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);
  469. /**
  470. * ib_modify_device - Change IB device attributes
  471. * @device:Device to modify
  472. * @device_modify_mask:Mask of attributes to change
  473. * @device_modify:New attribute values
  474. *
  475. * ib_modify_device() changes a device's attributes as specified by
  476. * the @device_modify_mask and @device_modify structure.
  477. */
  478. int ib_modify_device(struct ib_device *device,
  479. int device_modify_mask,
  480. struct ib_device_modify *device_modify)
  481. {
  482. return device->modify_device(device, device_modify_mask,
  483. device_modify);
  484. }
  485. EXPORT_SYMBOL(ib_modify_device);
  486. /**
  487. * ib_modify_port - Modifies the attributes for the specified port.
  488. * @device: The device to modify.
  489. * @port_num: The number of the port to modify.
  490. * @port_modify_mask: Mask used to specify which attributes of the port
  491. * to change.
  492. * @port_modify: New attribute values for the port.
  493. *
  494. * ib_modify_port() changes a port's attributes as specified by the
  495. * @port_modify_mask and @port_modify structure.
  496. */
  497. int ib_modify_port(struct ib_device *device,
  498. u8 port_num, int port_modify_mask,
  499. struct ib_port_modify *port_modify)
  500. {
  501. return device->modify_port(device, port_num, port_modify_mask,
  502. port_modify);
  503. }
  504. EXPORT_SYMBOL(ib_modify_port);
  505. static int __init ib_core_init(void)
  506. {
  507. int ret;
  508. ret = ib_sysfs_setup();
  509. if (ret)
  510. printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
  511. ret = ib_cache_setup();
  512. if (ret) {
  513. printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
  514. ib_sysfs_cleanup();
  515. }
  516. return ret;
  517. }
/* Module exit: tear down in reverse order of ib_core_init(). */
static void __exit ib_core_cleanup(void)
{
	ib_cache_cleanup();
	ib_sysfs_cleanup();
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);