ipoib_main.c

/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
 */

#include "ipoib.h"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>      /* For ARPHRD_xxx */

#include <linux/ip.h>
#include <linux/in.h>

#include <net/dst.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif
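
/*
 * Because debug_level is exported via module_param() with mode 0644,
 * tracing can also be toggled at runtime through sysfs; e.g. (path
 * assumes the module is built as ib_ipoib):
 *
 *     echo 1 > /sys/module/ib_ipoib/parameters/debug_level
 */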

struct ipoib_path_iter {
        struct net_device *dev;
        struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
        0x00, 0xff, 0xff, 0xff,
        0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};
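
/*
 * An IPoIB hardware address is 20 bytes (INFINIBAND_ALEN): a 4-byte
 * field whose low 24 bits hold the destination QPN (the top byte is
 * reserved), followed by the 16-byte port GID -- hence the "ha + 4"
 * GID casts and the be32_to_cpup() QPN reads throughout this file.
 * ipv4_bcast_addr above is the broadcast/all-hosts multicast address
 * in this format; bytes 8 and 9 (the P_Key field of the multicast
 * GID) are left zero here and filled in with the port's P_Key in
 * ipoib_add_port().
 */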

struct workqueue_struct *ipoib_workqueue;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

static struct ib_client ipoib_client = {
        .name   = "ipoib",
        .add    = ipoib_add_one,
        .remove = ipoib_remove_one
};
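
/*
 * ipoib_open() brings the interface up: mark it administratively up,
 * start the IB side of the device (deferring via the P_Key poll task
 * if our P_Key is not yet present in the port's table), propagate
 * IFF_UP to any child interfaces, and finally start the TX queue.
 */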
int ipoib_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "bringing up interface\n");

        set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        if (ipoib_pkey_dev_delay_open(dev))
                return 0;

        if (ipoib_ib_dev_open(dev))
                return -EINVAL;

        if (ipoib_ib_dev_up(dev)) {
                ipoib_ib_dev_stop(dev);
                return -EINVAL;
        }

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring up any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (flags & IFF_UP)
                                continue;

                        dev_change_flags(cpriv->dev, flags | IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        netif_start_queue(dev);

        return 0;
}

static int ipoib_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "stopping interface\n");

        clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

        netif_stop_queue(dev);

        /*
         * Now flush workqueue to make sure a scheduled task doesn't
         * bring our internal state back up.
         */
        flush_workqueue(ipoib_workqueue);

        ipoib_ib_dev_down(dev, 1);
        ipoib_ib_dev_stop(dev);

        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                struct ipoib_dev_priv *cpriv;

                /* Bring down any child interfaces too */
                mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;

                        flags = cpriv->dev->flags;
                        if (!(flags & IFF_UP))
                                continue;

                        dev_change_flags(cpriv->dev, flags & ~IFF_UP);
                }
                mutex_unlock(&priv->vlan_mutex);
        }

        return 0;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
                return -EINVAL;

        priv->admin_mtu = new_mtu;

        dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

        return 0;
}
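
/*
 * Resolved path records are cached in an rb-tree (priv->path_tree)
 * keyed by a raw memcmp() of the destination GID, plus a linear list
 * (priv->path_list) used for flushing.  Both __path_find() and
 * __path_add() must be called with priv->lock held.
 */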
static struct ipoib_path *__path_find(struct net_device *dev,
                                      union ib_gid *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->path_tree.rb_node;
        struct ipoib_path *path;
        int ret;

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                ret = memcmp(gid->raw, path->pathrec.dgid.raw,
                             sizeof (union ib_gid));

                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        return path;
        }

        return NULL;
}

static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node **n = &priv->path_tree.rb_node;
        struct rb_node *pn = NULL;
        struct ipoib_path *tpath;
        int ret;

        while (*n) {
                pn = *n;
                tpath = rb_entry(pn, struct ipoib_path, rb_node);

                ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = &pn->rb_left;
                else if (ret > 0)
                        n = &pn->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&path->rb_node, pn, n);
        rb_insert_color(&path->rb_node, &priv->path_tree);

        list_add_tail(&path->list, &priv->path_list);

        return 0;
}

static void path_free(struct net_device *dev, struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh, *tn;
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = __skb_dequeue(&path->queue)))
                dev_kfree_skb_irq(skb);

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
                /*
                 * It's safe to call ipoib_put_ah() inside priv->lock
                 * here, because we know that path->ah will always
                 * hold one more reference, so ipoib_put_ah() will
                 * never do more than decrement the ref count.
                 */
                if (neigh->ah)
                        ipoib_put_ah(neigh->ah);
                *to_ipoib_neigh(neigh->neighbour) = NULL;
                kfree(neigh);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (path->ah)
                ipoib_put_ah(path->ah);

        kfree(path);
}
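
/*
 * The iterator below backs the per-device path listing exposed when
 * CONFIG_INFINIBAND_IPOIB_DEBUG is set.  Rather than pinning a tree
 * node between reads, each _next call re-walks the rb-tree under
 * priv->lock and copies out the first path whose GID sorts after the
 * one last returned, so a path vanishing mid-dump is harmless.
 */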
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
        struct ipoib_path_iter *iter;

        iter = kmalloc(sizeof *iter, GFP_KERNEL);
        if (!iter)
                return NULL;

        iter->dev = dev;
        memset(iter->path.pathrec.dgid.raw, 0, 16);

        if (ipoib_path_iter_next(iter)) {
                kfree(iter);
                return NULL;
        }

        return iter;
}

int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
        struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
        struct rb_node *n;
        struct ipoib_path *path;
        int ret = 1;

        spin_lock_irq(&priv->lock);

        n = rb_first(&priv->path_tree);

        while (n) {
                path = rb_entry(n, struct ipoib_path, rb_node);

                if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
                           sizeof (union ib_gid)) < 0) {
                        iter->path = *path;
                        ret = 0;
                        break;
                }

                n = rb_next(n);
        }

        spin_unlock_irq(&priv->lock);

        return ret;
}

void ipoib_path_iter_read(struct ipoib_path_iter *iter,
                          struct ipoib_path *path)
{
        *path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

void ipoib_flush_paths(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path, *tp;
        LIST_HEAD(remove_list);
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        list_splice(&priv->path_list, &remove_list);
        INIT_LIST_HEAD(&priv->path_list);

        list_for_each_entry(path, &remove_list, list)
                rb_erase(&path->rb_node, &priv->path_tree);

        spin_unlock_irqrestore(&priv->lock, flags);

        list_for_each_entry_safe(path, tp, &remove_list, list) {
                if (path->query)
                        ib_sa_cancel_query(path->query_id, path->query);
                wait_for_completion(&path->done);
                path_free(dev, path);
        }
}
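
/*
 * path_rec_completion() runs when the SA answers (or fails) a path
 * record query.  On success it builds an address handle from the
 * returned LID/SL, scaling the static rate when the local link is
 * faster than the path, and then, under priv->lock, points every
 * waiting neighbour at the new AH.  Queued skbs are moved to a
 * private list first and only handed back to dev_queue_xmit() after
 * the lock is dropped, since the xmit path takes priv->lock itself.
 */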
static void path_rec_completion(int status,
                                struct ib_sa_path_rec *pathrec,
                                void *path_ptr)
{
        struct ipoib_path *path = path_ptr;
        struct net_device *dev = path->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah = NULL;
        struct ipoib_neigh *neigh;
        struct sk_buff_head skqueue;
        struct sk_buff *skb;
        unsigned long flags;

        if (pathrec)
                ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
                          be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
        else
                ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
                          status, IPOIB_GID_ARG(path->pathrec.dgid));

        skb_queue_head_init(&skqueue);

        if (!status) {
                struct ib_ah_attr av = {
                        .dlid     = be16_to_cpu(pathrec->dlid),
                        .sl       = pathrec->sl,
                        .port_num = priv->port
                };
                int path_rate = ib_sa_rate_enum_to_int(pathrec->rate);

                if (path_rate > 0 && priv->local_rate > path_rate)
                        av.static_rate = (priv->local_rate - 1) / path_rate;

                ipoib_dbg(priv, "static_rate %d for local port %dX, path %dX\n",
                          av.static_rate, priv->local_rate,
                          ib_sa_rate_enum_to_int(pathrec->rate));

                ah = ipoib_create_ah(dev, priv->pd, &av);
        }

        spin_lock_irqsave(&priv->lock, flags);

        path->ah = ah;

        if (ah) {
                path->pathrec = *pathrec;

                ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
                          ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

                while ((skb = __skb_dequeue(&path->queue)))
                        __skb_queue_tail(&skqueue, skb);

                list_for_each_entry(neigh, &path->neigh_list, list) {
                        kref_get(&path->ah->ref);
                        neigh->ah = path->ah;

                        while ((skb = __skb_dequeue(&neigh->queue)))
                                __skb_queue_tail(&skqueue, skb);
                }
        }

        path->query = NULL;
        complete(&path->done);

        spin_unlock_irqrestore(&priv->lock, flags);

        while ((skb = __skb_dequeue(&skqueue))) {
                skb->dev = dev;
                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed "
                                   "to requeue packet\n");
        }
}

static struct ipoib_path *path_rec_create(struct net_device *dev,
                                          union ib_gid *gid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        path = kzalloc(sizeof *path, GFP_ATOMIC);
        if (!path)
                return NULL;

        path->dev = dev;

        skb_queue_head_init(&path->queue);

        INIT_LIST_HEAD(&path->neigh_list);

        memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
        path->pathrec.sgid      = priv->local_gid;
        path->pathrec.pkey      = cpu_to_be16(priv->pkey);
        path->pathrec.numb_path = 1;

        return path;
}

static int path_rec_start(struct net_device *dev,
                          struct ipoib_path *path)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
                  IPOIB_GID_ARG(path->pathrec.dgid));

        init_completion(&path->done);

        path->query_id =
                ib_sa_path_rec_get(priv->ca, priv->port,
                                   &path->pathrec,
                                   IB_SA_PATH_REC_DGID |
                                   IB_SA_PATH_REC_SGID |
                                   IB_SA_PATH_REC_NUMB_PATH |
                                   IB_SA_PATH_REC_PKEY,
                                   1000, GFP_ATOMIC,
                                   path_rec_completion,
                                   path, &path->query);
        if (path->query_id < 0) {
                ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
                path->query = NULL;
                return path->query_id;
        }

        return 0;
}
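
/*
 * neigh_add_path() handles the first unicast transmit to a given
 * neighbour: allocate an ipoib_neigh, hook it into the kernel
 * neighbour entry via to_ipoib_neigh(), and either send immediately
 * (path already resolved) or queue the skb and kick off a path
 * record query.  Called only from the xmit path, under priv->tx_lock.
 */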
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;
        struct ipoib_neigh *neigh;

        neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
        if (!neigh) {
                ++priv->stats.tx_dropped;
                dev_kfree_skb_any(skb);
                return;
        }

        skb_queue_head_init(&neigh->queue);
        neigh->neighbour = skb->dst->neighbour;
        *to_ipoib_neigh(skb->dst->neighbour) = neigh;

        /*
         * We can only be called from ipoib_start_xmit, so we're
         * inside tx_lock -- no need to save/restore flags.
         */
        spin_lock(&priv->lock);

        path = __path_find(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4));
        if (!path) {
                path = path_rec_create(dev,
                                       (union ib_gid *) (skb->dst->neighbour->ha + 4));
                if (!path)
                        goto err;

                __path_add(dev, path);
        }

        list_add_tail(&neigh->list, &path->neigh_list);

        if (path->ah) {
                kref_get(&path->ah->ref);
                neigh->ah = path->ah;

                ipoib_send(dev, skb, path->ah,
                           be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
        } else {
                neigh->ah = NULL;
                __skb_queue_tail(&neigh->queue, skb);

                if (!path->query && path_rec_start(dev, path))
                        goto err;
        }

        spin_unlock(&priv->lock);
        return;

err:
        *to_ipoib_neigh(skb->dst->neighbour) = NULL;
        list_del(&neigh->list);
        kfree(neigh);

        ++priv->stats.tx_dropped;
        dev_kfree_skb_any(skb);

        spin_unlock(&priv->lock);
}

static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(skb->dev);

        /* Look up path record for unicasts */
        if (skb->dst->neighbour->ha[4] != 0xff) {
                neigh_add_path(skb, dev);
                return;
        }

        /* Add in the P_Key for multicasts */
        skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
        skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
        ipoib_mcast_send(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4), skb);
}

static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                             struct ipoib_pseudoheader *phdr)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_path *path;

        /*
         * We can only be called from ipoib_start_xmit, so we're
         * inside tx_lock -- no need to save/restore flags.
         */
        spin_lock(&priv->lock);

        path = __path_find(dev, (union ib_gid *) (phdr->hwaddr + 4));
        if (!path) {
                path = path_rec_create(dev,
                                       (union ib_gid *) (phdr->hwaddr + 4));
                if (path) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof *phdr);
                        __skb_queue_tail(&path->queue, skb);

                        if (path_rec_start(dev, path)) {
                                spin_unlock(&priv->lock);
                                path_free(dev, path);
                                return;
                        } else
                                __path_add(dev, path);
                } else {
                        ++priv->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }

                spin_unlock(&priv->lock);
                return;
        }

        if (path->ah) {
                ipoib_dbg(priv, "Send unicast ARP to %04x\n",
                          be16_to_cpu(path->pathrec.dlid));

                ipoib_send(dev, skb, path->ah,
                           be32_to_cpup((__be32 *) phdr->hwaddr));
        } else if ((path->query || !path_rec_start(dev, path)) &&
                   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                /* put pseudoheader back on for next time */
                skb_push(skb, sizeof *phdr);
                __skb_queue_tail(&path->queue, skb);
        } else {
                ++priv->stats.tx_dropped;
                dev_kfree_skb_any(skb);
        }

        spin_unlock(&priv->lock);
}
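
/*
 * ipoib_start_xmit() is the hard_start_xmit hook.  Because the device
 * sets NETIF_F_LLTX, the core does not serialize callers for us: we
 * take priv->tx_lock with a trylock and return NETDEV_TX_LOCKED on
 * contention, and we must re-check netif_queue_stopped() ourselves.
 * Three cases follow: a known neighbour with a resolved AH is sent
 * directly; an unknown neighbour goes through ipoib_path_lookup();
 * and an skb without a neighbour (routed by the pseudoheader, e.g.
 * an ARP or RARP reply) is dispatched by destination GID.
 */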
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh;
        unsigned long flags;

        if (!spin_trylock_irqsave(&priv->tx_lock, flags))
                return NETDEV_TX_LOCKED;

        /*
         * Check if our queue is stopped.  Since we have the LLTX bit
         * set, we can't rely on netif_stop_queue() preventing our
         * xmit function from being called with a full queue.
         */
        if (unlikely(netif_queue_stopped(dev))) {
                spin_unlock_irqrestore(&priv->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }

        if (skb->dst && skb->dst->neighbour) {
                if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
                        ipoib_path_lookup(skb, dev);
                        goto out;
                }

                neigh = *to_ipoib_neigh(skb->dst->neighbour);

                if (likely(neigh->ah)) {
                        ipoib_send(dev, skb, neigh->ah,
                                   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
                        goto out;
                }

                if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                        spin_lock(&priv->lock);
                        __skb_queue_tail(&neigh->queue, skb);
                        spin_unlock(&priv->lock);
                } else {
                        ++priv->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }
        } else {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb->data;
                skb_pull(skb, sizeof *phdr);

                if (phdr->hwaddr[4] == 0xff) {
                        /* Add in the P_Key for multicast */
                        phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
                        phdr->hwaddr[9] = priv->pkey & 0xff;
                        ipoib_mcast_send(dev, (union ib_gid *) (phdr->hwaddr + 4), skb);
                } else {
                        /* unicast GID -- should be ARP or RARP reply */

                        if ((be16_to_cpup((__be16 *) skb->data) != ETH_P_ARP) &&
                            (be16_to_cpup((__be16 *) skb->data) != ETH_P_RARP)) {
                                ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
                                           IPOIB_GID_FMT "\n",
                                           skb->dst ? "neigh" : "dst",
                                           be16_to_cpup((__be16 *) skb->data),
                                           be32_to_cpup((__be32 *) phdr->hwaddr),
                                           IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4)));
                                dev_kfree_skb_any(skb);
                                ++priv->stats.tx_dropped;
                                goto out;
                        }

                        unicast_arp_send(skb, dev, phdr);
                }
        }

out:
        spin_unlock_irqrestore(&priv->tx_lock, flags);

        return NETDEV_TX_OK;
}

static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        return &priv->stats;
}

static void ipoib_timeout(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
                   jiffies_to_msecs(jiffies - dev->trans_start));
        ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
                   netif_queue_stopped(dev),
                   priv->tx_head, priv->tx_tail);
        /* XXX reset QP, etc. */
}
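
/*
 * ipoib_hard_header() prepends the 4-byte IPoIB encapsulation header
 * (ethertype + reserved).  If there is no neighbour entry to carry
 * the 20-byte destination hardware address, the address is pushed
 * onto the front of the skb as an ipoib_pseudoheader, which
 * ipoib_start_xmit() later pulls back off to decide where the packet
 * should go; hard_header_len is sized in ipoib_setup() to leave room
 * for this.
 */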
static int ipoib_hard_header(struct sk_buff *skb,
                             struct net_device *dev,
                             unsigned short type,
                             void *daddr, void *saddr, unsigned len)
{
        struct ipoib_header *header;

        header = (struct ipoib_header *) skb_push(skb, sizeof *header);

        header->proto = htons(type);
        header->reserved = 0;

        /*
         * If we don't have a neighbour structure, stuff the
         * destination address onto the front of the skb so we can
         * figure out where to send the packet later.
         */
        if ((!skb->dst || !skb->dst->neighbour) && daddr) {
                struct ipoib_pseudoheader *phdr =
                        (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
                memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
        }

        return 0;
}

static void ipoib_set_mcast_list(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
                ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
                return;
        }

        queue_work(ipoib_workqueue, &priv->restart_task);
}

static void ipoib_neigh_destructor(struct neighbour *n)
{
        struct ipoib_neigh *neigh;
        struct ipoib_dev_priv *priv = netdev_priv(n->dev);
        unsigned long flags;
        struct ipoib_ah *ah = NULL;

        ipoib_dbg(priv,
                  "neigh_destructor for %06x " IPOIB_GID_FMT "\n",
                  be32_to_cpup((__be32 *) n->ha),
                  IPOIB_GID_ARG(*((union ib_gid *) (n->ha + 4))));

        spin_lock_irqsave(&priv->lock, flags);

        neigh = *to_ipoib_neigh(n);
        if (neigh) {
                if (neigh->ah)
                        ah = neigh->ah;
                list_del(&neigh->list);
                *to_ipoib_neigh(n) = NULL;
                kfree(neigh);
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        if (ah)
                ipoib_put_ah(ah);
}

static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
        parms->neigh_destructor = ipoib_neigh_destructor;

        return 0;
}
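
/*
 * ipoib_dev_init() allocates the RX and TX "rings" -- zeroed arrays
 * of buffer bookkeeping structs, with the send side consumed via the
 * free-running priv->tx_head/tx_tail counters -- and then sets up the
 * per-device IB resources via ipoib_ib_dev_init().  All failures
 * unwind what was allocated and funnel to a single -ENOMEM return.
 */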
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        /* Allocate RX/TX "rings" to hold queued skbs */

        priv->rx_ring = kzalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_rx_buf),
                                GFP_KERNEL);
        if (!priv->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
                       ca->name, IPOIB_RX_RING_SIZE);
                goto out;
        }

        priv->tx_ring = kzalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_tx_buf),
                                GFP_KERNEL);
        if (!priv->tx_ring) {
                printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
                       ca->name, IPOIB_TX_RING_SIZE);
                goto out_rx_ring_cleanup;
        }

        /* priv->tx_head & tx_tail are already 0 */

        if (ipoib_ib_dev_init(dev, ca, port))
                goto out_tx_ring_cleanup;

        return 0;

out_tx_ring_cleanup:
        kfree(priv->tx_ring);

out_rx_ring_cleanup:
        kfree(priv->rx_ring);

out:
        return -ENOMEM;
}

void ipoib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;

        ipoib_delete_debug_files(dev);

        /* Delete any child interfaces first */
        list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
                unregister_netdev(cpriv->dev);
                ipoib_dev_cleanup(cpriv->dev);
                free_netdev(cpriv->dev);
        }

        ipoib_ib_dev_cleanup(dev);

        kfree(priv->rx_ring);
        kfree(priv->tx_ring);

        priv->rx_ring = NULL;
        priv->tx_ring = NULL;
}
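
/*
 * ipoib_setup() is the alloc_netdev() callback that initializes a
 * fresh IPoIB net_device: it wires up the driver entry points, marks
 * the device NETIF_F_VLAN_CHALLENGED (802.1Q does not apply; P_Key
 * child interfaces fill that role) and NETIF_F_LLTX (the driver does
 * its own TX locking), and initializes the private locks, lists, and
 * deferred-work items.
 */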
static void ipoib_setup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        dev->open               = ipoib_open;
        dev->stop               = ipoib_stop;
        dev->change_mtu         = ipoib_change_mtu;
        dev->hard_start_xmit    = ipoib_start_xmit;
        dev->get_stats          = ipoib_get_stats;
        dev->tx_timeout         = ipoib_timeout;
        dev->hard_header        = ipoib_hard_header;
        dev->set_multicast_list = ipoib_set_mcast_list;
        dev->neigh_setup        = ipoib_neigh_setup_dev;

        dev->watchdog_timeo     = HZ;

        dev->flags             |= IFF_BROADCAST | IFF_MULTICAST;

        /*
         * We add in INFINIBAND_ALEN to allow for the destination
         * address "pseudoheader" for skbs without neighbour struct.
         */
        dev->hard_header_len    = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
        dev->addr_len           = INFINIBAND_ALEN;
        dev->type               = ARPHRD_INFINIBAND;
        dev->tx_queue_len       = IPOIB_TX_RING_SIZE * 2;
        dev->features           = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

        /* MTU will be reset when mcast join happens */
        dev->mtu                = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
        priv->mcast_mtu         = priv->admin_mtu = dev->mtu;

        memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

        netif_carrier_off(dev);

        SET_MODULE_OWNER(dev);

        priv->dev = dev;

        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->tx_lock);

        mutex_init(&priv->mcast_mutex);
        mutex_init(&priv->vlan_mutex);

        INIT_LIST_HEAD(&priv->path_list);
        INIT_LIST_HEAD(&priv->child_intfs);
        INIT_LIST_HEAD(&priv->dead_ahs);
        INIT_LIST_HEAD(&priv->multicast_list);

        INIT_WORK(&priv->pkey_task,    ipoib_pkey_poll,          priv->dev);
        INIT_WORK(&priv->mcast_task,   ipoib_mcast_join_task,    priv->dev);
        INIT_WORK(&priv->flush_task,   ipoib_ib_dev_flush,       priv->dev);
        INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
        INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah,            priv->dev);
}

struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
        struct net_device *dev;

        dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
                           ipoib_setup);
        if (!dev)
                return NULL;

        return netdev_priv(dev);
}
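
/*
 * The class_device attributes below appear under
 * /sys/class/net/<ifname>/.  "pkey" is read-only; "create_child" and
 * "delete_child" take a P_Key and add or remove a child interface,
 * e.g. (interface name here is only an example):
 *
 *     echo 0x8001 > /sys/class/net/ib0/create_child
 *
 * create_child forces the full-membership bit (0x8000) on before
 * calling ipoib_vlan_add().
 */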
static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
        struct ipoib_dev_priv *priv =
                netdev_priv(container_of(cdev, struct net_device, class_dev));

        return sprintf(buf, "0x%04x\n", priv->pkey);
}
static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);

static ssize_t create_child(struct class_device *cdev,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        pkey |= 0x8000;

        ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev),
                             pkey);

        return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);

static ssize_t delete_child(struct class_device *cdev,
                            const char *buf, size_t count)
{
        int pkey;
        int ret;

        if (sscanf(buf, "%i", &pkey) != 1)
                return -EINVAL;

        if (pkey < 0 || pkey > 0xffff)
                return -EINVAL;

        ret = ipoib_vlan_delete(container_of(cdev, struct net_device, class_dev),
                                pkey);

        return ret ? ret : count;
}
static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);

int ipoib_add_pkey_attr(struct net_device *dev)
{
        return class_device_create_file(&dev->class_dev,
                                        &class_device_attr_pkey);
}

static struct net_device *ipoib_add_port(const char *format,
                                         struct ib_device *hca, u8 port)
{
        struct ipoib_dev_priv *priv;
        int result = -ENOMEM;

        priv = ipoib_intf_alloc(format);
        if (!priv)
                goto alloc_mem_failed;

        SET_NETDEV_DEV(priv->dev, hca->dma_device);

        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto alloc_mem_failed;
        }

        /*
         * Set the full membership bit, so that we join the right
         * broadcast group, etc.
         */
        priv->pkey |= 0x8000;

        priv->dev->broadcast[8] = priv->pkey >> 8;
        priv->dev->broadcast[9] = priv->pkey & 0xff;

        result = ib_query_gid(hca, port, 0, &priv->local_gid);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
                       hca->name, port, result);
                goto alloc_mem_failed;
        } else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

        result = ipoib_dev_init(priv->dev, hca, port);
        if (result < 0) {
                printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
                       hca->name, port, result);
                goto device_init_failed;
        }

        INIT_IB_EVENT_HANDLER(&priv->event_handler,
                              priv->ca, ipoib_event);
        result = ib_register_event_handler(&priv->event_handler);
        if (result < 0) {
                printk(KERN_WARNING "%s: ib_register_event_handler failed for "
                       "port %d (ret = %d)\n",
                       hca->name, port, result);
                goto event_failed;
        }

        result = register_netdev(priv->dev);
        if (result) {
                printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
                       hca->name, port, result);
                goto register_failed;
        }

        ipoib_create_debug_files(priv->dev);

        if (ipoib_add_pkey_attr(priv->dev))
                goto sysfs_failed;
        if (class_device_create_file(&priv->dev->class_dev,
                                     &class_device_attr_create_child))
                goto sysfs_failed;
        if (class_device_create_file(&priv->dev->class_dev,
                                     &class_device_attr_delete_child))
                goto sysfs_failed;

        return priv->dev;

sysfs_failed:
        ipoib_delete_debug_files(priv->dev);
        unregister_netdev(priv->dev);

register_failed:
        ib_unregister_event_handler(&priv->event_handler);
        flush_scheduled_work();

event_failed:
        ipoib_dev_cleanup(priv->dev);

device_init_failed:
        free_netdev(priv->dev);

alloc_mem_failed:
        return ERR_PTR(result);
}
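
/*
 * ipoib_add_one() is the ib_client add callback, invoked once per IB
 * device.  A switch exposes only its management port 0, while a CA
 * has ports 1..phys_port_cnt; one "ib%d" interface is created per
 * port, and the list of successfully added ports is stashed as
 * client data so ipoib_remove_one() can tear them all down later.
 */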
static void ipoib_add_one(struct ib_device *device)
{
        struct list_head *dev_list;
        struct net_device *dev;
        struct ipoib_dev_priv *priv;
        int s, e, p;

        dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
        if (!dev_list)
                return;

        INIT_LIST_HEAD(dev_list);

        if (device->node_type == IB_NODE_SWITCH) {
                s = 0;
                e = 0;
        } else {
                s = 1;
                e = device->phys_port_cnt;
        }

        for (p = s; p <= e; ++p) {
                dev = ipoib_add_port("ib%d", device, p);
                if (!IS_ERR(dev)) {
                        priv = netdev_priv(dev);
                        list_add_tail(&priv->list, dev_list);
                }
        }

        ib_set_client_data(device, &ipoib_client, dev_list);
}

static void ipoib_remove_one(struct ib_device *device)
{
        struct ipoib_dev_priv *priv, *tmp;
        struct list_head *dev_list;

        dev_list = ib_get_client_data(device, &ipoib_client);

        list_for_each_entry_safe(priv, tmp, dev_list, list) {
                ib_unregister_event_handler(&priv->event_handler);
                flush_scheduled_work();

                unregister_netdev(priv->dev);
                ipoib_dev_cleanup(priv->dev);
                free_netdev(priv->dev);
        }

        kfree(dev_list);
}

static int __init ipoib_init_module(void)
{
        int ret;

        ret = ipoib_register_debugfs();
        if (ret)
                return ret;

        /*
         * We create our own workqueue mainly because we want to be
         * able to flush it when devices are being removed.  We can't
         * use schedule_work()/flush_scheduled_work() because both
         * unregister_netdev() and linkwatch_event take the rtnl lock,
         * so flush_scheduled_work() can deadlock during device
         * removal.
         */
        ipoib_workqueue = create_singlethread_workqueue("ipoib");
        if (!ipoib_workqueue) {
                ret = -ENOMEM;
                goto err_fs;
        }

        ret = ib_register_client(&ipoib_client);
        if (ret)
                goto err_wq;

        return 0;

err_wq:
        destroy_workqueue(ipoib_workqueue);

err_fs:
        ipoib_unregister_debugfs();
        return ret;
}

static void __exit ipoib_cleanup_module(void)
{
        ib_unregister_client(&ipoib_client);
        ipoib_unregister_debugfs();
        destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);