hard-interface.c

/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "hard-interface.h"
#include "soft-interface.h"
#include "send.h"
#include "translation-table.h"
#include "routing.h"
#include "bat_sysfs.h"
#include "originator.h"
#include "hash.h"
#include "bridge_loop_avoidance.h"

#include <linux/if_arp.h>

static int batman_skb_recv(struct sk_buff *skb,
			   struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev);
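
/* RCU callback: drop the reference held on the underlying net_device and
 * free the hard_iface once the grace period has elapsed */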
void hardif_free_rcu(struct rcu_head *rcu)
{
	struct hard_iface *hard_iface;

	hard_iface = container_of(rcu, struct hard_iface, rcu);
	dev_put(hard_iface->net_dev);
	kfree(hard_iface);
}
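
/* look up the hard_iface wrapping the given net_device and return it with
 * an increased refcount, or NULL if it is not registered */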
struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev)
{
	struct hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->net_dev == net_dev &&
		    atomic_inc_not_zero(&hard_iface->refcount))
			goto out;
	}

	hard_iface = NULL;

out:
	rcu_read_unlock();
	return hard_iface;
}
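
/* returns 1 if net_dev is usable as a batman-adv hard interface: a
 * non-loopback ethernet device that is not a batman soft interface itself */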
static int is_valid_iface(const struct net_device *net_dev)
{
	if (net_dev->flags & IFF_LOOPBACK)
		return 0;

	if (net_dev->type != ARPHRD_ETHER)
		return 0;

	if (net_dev->addr_len != ETH_ALEN)
		return 0;

	/* no batman over batman */
	if (softif_is_valid(net_dev))
		return 0;

	/* Device is being bridged */
	/* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
		return 0; */

	return 1;
}
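
/* return an active hard interface attached to the given soft interface
 * with an increased refcount, or NULL if none is active */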
static struct hard_iface *hardif_get_active(const struct net_device *soft_iface)
{
	struct hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (hard_iface->if_status == IF_ACTIVE &&
		    atomic_inc_not_zero(&hard_iface->refcount))
			goto out;
	}

	hard_iface = NULL;

out:
	rcu_read_unlock();
	return hard_iface;
}
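
/* copy the address of the currently selected primary interface into the
 * local vis packet and notify the bridge loop avoidance code */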
static void primary_if_update_addr(struct bat_priv *bat_priv,
				   struct hard_iface *oldif)
{
	struct vis_packet *vis_packet;
	struct hard_iface *primary_if;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	vis_packet = (struct vis_packet *)
				bat_priv->my_vis_info->skb_packet->data;
	memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(vis_packet->sender_orig,
	       primary_if->net_dev->dev_addr, ETH_ALEN);

	bla_update_orig_address(bat_priv, primary_if, oldif);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
}
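
/* select new_hard_iface as the primary interface (may be NULL), inform the
 * routing algorithm and release the previously selected interface;
 * the caller must hold rtnl_lock */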
static void primary_if_select(struct bat_priv *bat_priv,
			      struct hard_iface *new_hard_iface)
{
	struct hard_iface *curr_hard_iface;

	ASSERT_RTNL();

	if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount))
		new_hard_iface = NULL;

	curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
	rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);

	if (!new_hard_iface)
		goto out;

	bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface);
	primary_if_update_addr(bat_priv, curr_hard_iface);

out:
	if (curr_hard_iface)
		hardif_free_ref(curr_hard_iface);
}

static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
{
	if (hard_iface->net_dev->flags & IFF_UP)
		return true;

	return false;
}
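
/* warn if another active (or soon-to-be active) hard interface already
 * uses the same mac address */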
static void check_known_mac_addr(const struct net_device *net_dev)
{
	const struct hard_iface *hard_iface;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if ((hard_iface->if_status != IF_ACTIVE) &&
		    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
			continue;

		if (hard_iface->net_dev == net_dev)
			continue;

		if (!compare_eth(hard_iface->net_dev->dev_addr,
				 net_dev->dev_addr))
			continue;

		pr_warning("The newly added mac address (%pM) already exists on: %s\n",
			   net_dev->dev_addr, hard_iface->net_dev->name);
		pr_warning("It is strongly recommended to keep mac addresses unique to avoid problems!\n");
	}
	rcu_read_unlock();
}
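
/* report the MTU the soft interface should use: ETH_DATA_LEN when
 * fragmentation is enabled, otherwise the smallest hard interface MTU minus
 * BAT_HEADER_LEN, capped at ETH_DATA_LEN */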
int hardif_min_mtu(struct net_device *soft_iface)
{
	const struct bat_priv *bat_priv = netdev_priv(soft_iface);
	const struct hard_iface *hard_iface;
	/* allow big frames if all devices are capable to do so
	 * (have MTU > 1500 + BAT_HEADER_LEN) */
	int min_mtu = ETH_DATA_LEN;

	if (atomic_read(&bat_priv->fragmentation))
		goto out;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if ((hard_iface->if_status != IF_ACTIVE) &&
		    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
			continue;

		if (hard_iface->soft_iface != soft_iface)
			continue;

		min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN,
				min_mtu);
	}
	rcu_read_unlock();
out:
	return min_mtu;
}

/* adjusts the MTU if a new interface with a smaller MTU appeared. */
void update_min_mtu(struct net_device *soft_iface)
{
	int min_mtu;

	min_mtu = hardif_min_mtu(soft_iface);
	if (soft_iface->mtu != min_mtu)
		soft_iface->mtu = min_mtu;
}
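
/* mark an inactive interface as to-be-activated, elect it as primary
 * interface if none is selected yet and re-check the soft interface MTU */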
static void hardif_activate_interface(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv;
	struct hard_iface *primary_if = NULL;

	if (hard_iface->if_status != IF_INACTIVE)
		goto out;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);
	hard_iface->if_status = IF_TO_BE_ACTIVATED;

	/**
	 * the first active interface becomes our primary interface or
	 * the next active interface after the old primary interface was removed
	 */
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		primary_if_select(bat_priv, hard_iface);

	bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
		 hard_iface->net_dev->name);

	update_min_mtu(hard_iface->soft_iface);

out:
	if (primary_if)
		hardif_free_ref(primary_if);
}
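
/* mark an active (or to-be-activated) interface as inactive and re-check
 * the soft interface MTU */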
static void hardif_deactivate_interface(struct hard_iface *hard_iface)
{
	if ((hard_iface->if_status != IF_ACTIVE) &&
	    (hard_iface->if_status != IF_TO_BE_ACTIVATED))
		return;

	hard_iface->if_status = IF_INACTIVE;

	bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
		 hard_iface->net_dev->name);

	update_min_mtu(hard_iface->soft_iface);
}
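
/* attach a hard interface to the soft interface named iface_name (creating
 * it if necessary), register the batman-adv packet type on it and start
 * scheduling OGMs; returns 0 on success or a negative error code */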
int hardif_enable_interface(struct hard_iface *hard_iface,
			    const char *iface_name)
{
	struct bat_priv *bat_priv;
	struct net_device *soft_iface;
	int ret;

	if (hard_iface->if_status != IF_NOT_IN_USE)
		goto out;

	if (!atomic_inc_not_zero(&hard_iface->refcount))
		goto out;

	/* hard-interface is part of a bridge */
	if (hard_iface->net_dev->priv_flags & IFF_BRIDGE_PORT)
		pr_err("You are about to enable batman-adv on '%s' which already is part of a bridge. Unless you know exactly what you are doing this is probably wrong and won't work the way you think it would.\n",
		       hard_iface->net_dev->name);

	soft_iface = dev_get_by_name(&init_net, iface_name);

	if (!soft_iface) {
		soft_iface = softif_create(iface_name);

		if (!soft_iface) {
			ret = -ENOMEM;
			goto err;
		}

		/* dev_get_by_name() increases the reference counter for us */
		dev_hold(soft_iface);
	}

	if (!softif_is_valid(soft_iface)) {
		pr_err("Can't create batman mesh interface %s: already exists as regular interface\n",
		       soft_iface->name);
		ret = -EINVAL;
		goto err_dev;
	}

	hard_iface->soft_iface = soft_iface;
	bat_priv = netdev_priv(hard_iface->soft_iface);

	ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface);
	if (ret < 0) {
		ret = -ENOMEM;
		goto err_dev;
	}

	hard_iface->if_num = bat_priv->num_ifaces;
	bat_priv->num_ifaces++;
	hard_iface->if_status = IF_INACTIVE;
	orig_hash_add_if(hard_iface, bat_priv->num_ifaces);

	hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
	hard_iface->batman_adv_ptype.func = batman_skb_recv;
	hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
	dev_add_pack(&hard_iface->batman_adv_ptype);

	atomic_set(&hard_iface->frag_seqno, 1);
	bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
		 hard_iface->net_dev->name);

	if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
		ETH_DATA_LEN + BAT_HEADER_LEN)
		bat_info(hard_iface->soft_iface,
			 "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
			 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
			 ETH_DATA_LEN + BAT_HEADER_LEN);

	if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
		ETH_DATA_LEN + BAT_HEADER_LEN)
		bat_info(hard_iface->soft_iface,
			 "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
			 hard_iface->net_dev->name, hard_iface->net_dev->mtu,
			 ETH_DATA_LEN + BAT_HEADER_LEN);

	if (hardif_is_iface_up(hard_iface))
		hardif_activate_interface(hard_iface);
	else
		bat_err(hard_iface->soft_iface,
			"Not using interface %s (retrying later): interface not active\n",
			hard_iface->net_dev->name);

	/* begin scheduling originator messages on that interface */
	schedule_bat_ogm(hard_iface);

out:
	return 0;

err_dev:
	dev_put(soft_iface);
err:
	hardif_free_ref(hard_iface);
	return ret;
}
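
/* detach a hard interface from its soft interface: unregister the packet
 * type, elect a new primary interface if needed, purge all references to it
 * and destroy the soft interface once no hard interface is left */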
void hardif_disable_interface(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if = NULL;

	if (hard_iface->if_status == IF_ACTIVE)
		hardif_deactivate_interface(hard_iface);

	if (hard_iface->if_status != IF_INACTIVE)
		goto out;

	bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
		 hard_iface->net_dev->name);
	dev_remove_pack(&hard_iface->batman_adv_ptype);

	bat_priv->num_ifaces--;
	orig_hash_del_if(hard_iface, bat_priv->num_ifaces);

	primary_if = primary_if_get_selected(bat_priv);
	if (hard_iface == primary_if) {
		struct hard_iface *new_if;

		new_if = hardif_get_active(hard_iface->soft_iface);
		primary_if_select(bat_priv, new_if);

		if (new_if)
			hardif_free_ref(new_if);
	}

	bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
	hard_iface->if_status = IF_NOT_IN_USE;

	/* delete all references to this hard_iface */
	purge_orig_ref(bat_priv);
	purge_outstanding_packets(bat_priv, hard_iface);
	dev_put(hard_iface->soft_iface);

	/* nobody uses this interface anymore */
	if (!bat_priv->num_ifaces)
		softif_destroy(hard_iface->soft_iface);

	hard_iface->soft_iface = NULL;
	hardif_free_ref(hard_iface);

out:
	if (primary_if)
		hardif_free_ref(primary_if);
}
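
/* allocate and initialise a hard_iface for a freshly registered net_device
 * and add it to hardif_list; returns the new entry (with an extra reference
 * for the caller) or NULL if the device cannot be used */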
static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
{
	struct hard_iface *hard_iface;
	int ret;

	ASSERT_RTNL();

	ret = is_valid_iface(net_dev);
	if (ret != 1)
		goto out;

	dev_hold(net_dev);

	hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC);
	if (!hard_iface)
		goto release_dev;

	ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
	if (ret)
		goto free_if;

	hard_iface->if_num = -1;
	hard_iface->net_dev = net_dev;
	hard_iface->soft_iface = NULL;
	hard_iface->if_status = IF_NOT_IN_USE;
	INIT_LIST_HEAD(&hard_iface->list);
	/* extra reference for return */
	atomic_set(&hard_iface->refcount, 2);

	check_known_mac_addr(hard_iface->net_dev);
	list_add_tail_rcu(&hard_iface->list, &hardif_list);

	/**
	 * This can't be called via a bat_priv callback because
	 * we have no bat_priv yet.
	 */
	atomic_set(&hard_iface->seqno, 1);
	hard_iface->packet_buff = NULL;

	return hard_iface;

free_if:
	kfree(hard_iface);
release_dev:
	dev_put(net_dev);
out:
	return NULL;
}

static void hardif_remove_interface(struct hard_iface *hard_iface)
{
	ASSERT_RTNL();

	/* first deactivate interface */
	if (hard_iface->if_status != IF_NOT_IN_USE)
		hardif_disable_interface(hard_iface);

	if (hard_iface->if_status != IF_NOT_IN_USE)
		return;

	hard_iface->if_status = IF_TO_BE_REMOVED;
	sysfs_del_hardif(&hard_iface->hardif_obj);
	hardif_free_ref(hard_iface);
}

void hardif_remove_interfaces(void)
{
	struct hard_iface *hard_iface, *hard_iface_tmp;

	rtnl_lock();
	list_for_each_entry_safe(hard_iface, hard_iface_tmp,
				 &hardif_list, list) {
		list_del_rcu(&hard_iface->list);
		hardif_remove_interface(hard_iface);
	}
	rtnl_unlock();
}
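
/* netdev notifier callback: keeps the hard interface list and state in sync
 * with the events reported by the network core */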
static int hard_if_event(struct notifier_block *this,
			 unsigned long event, void *ptr)
{
	struct net_device *net_dev = ptr;
	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
	struct hard_iface *primary_if = NULL;
	struct bat_priv *bat_priv;

	if (!hard_iface && event == NETDEV_REGISTER)
		hard_iface = hardif_add_interface(net_dev);

	if (!hard_iface)
		goto out;

	switch (event) {
	case NETDEV_UP:
		hardif_activate_interface(hard_iface);
		break;
	case NETDEV_GOING_DOWN:
	case NETDEV_DOWN:
		hardif_deactivate_interface(hard_iface);
		break;
	case NETDEV_UNREGISTER:
		list_del_rcu(&hard_iface->list);

		hardif_remove_interface(hard_iface);
		break;
	case NETDEV_CHANGEMTU:
		if (hard_iface->soft_iface)
			update_min_mtu(hard_iface->soft_iface);
		break;
	case NETDEV_CHANGEADDR:
		if (hard_iface->if_status == IF_NOT_IN_USE)
			goto hardif_put;

		check_known_mac_addr(hard_iface->net_dev);

		bat_priv = netdev_priv(hard_iface->soft_iface);
		bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface);

		primary_if = primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto hardif_put;

		if (hard_iface == primary_if)
			primary_if_update_addr(bat_priv, NULL);
		break;
	default:
		break;
	}

hardif_put:
	hardif_free_ref(hard_iface);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NOTIFY_DONE;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface */
static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct bat_priv *bat_priv;
	struct batman_ogm_packet *batman_ogm_packet;
	struct hard_iface *hard_iface;
	int ret;

	hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != IF_ACTIVE)
		goto err_free;

	batman_ogm_packet = (struct batman_ogm_packet *)skb->data;

	if (batman_ogm_packet->header.version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_ogm_packet->header.version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb. */
	switch (batman_ogm_packet->header.packet_type) {
		/* batman originator packet */
	case BAT_IV_OGM:
		ret = recv_bat_ogm_packet(skb, hard_iface);
		break;

		/* batman icmp packet */
	case BAT_ICMP:
		ret = recv_icmp_packet(skb, hard_iface);
		break;

		/* unicast packet */
	case BAT_UNICAST:
		ret = recv_unicast_packet(skb, hard_iface);
		break;

		/* fragmented unicast packet */
	case BAT_UNICAST_FRAG:
		ret = recv_ucast_frag_packet(skb, hard_iface);
		break;

		/* broadcast packet */
	case BAT_BCAST:
		ret = recv_bcast_packet(skb, hard_iface);
		break;

		/* vis packet */
	case BAT_VIS:
		ret = recv_vis_packet(skb, hard_iface);
		break;

		/* Translation table query (request or response) */
	case BAT_TT_QUERY:
		ret = recv_tt_query(skb, hard_iface);
		break;

		/* Roaming advertisement */
	case BAT_ROAM_ADV:
		ret = recv_roam_adv(skb, hard_iface);
		break;

	default:
		ret = NET_RX_DROP;
	}

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons. */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

/* This function returns true if the interface represented by ifindex is a
 * 802.11 wireless device */
bool is_wifi_iface(int ifindex)
{
	struct net_device *net_device = NULL;
	bool ret = false;

	if (ifindex == NULL_IFINDEX)
		goto out;

	net_device = dev_get_by_index(&init_net, ifindex);
	if (!net_device)
		goto out;

#ifdef CONFIG_WIRELESS_EXT
	/* pre-cfg80211 drivers have to implement WEXT, so it is possible to
	 * check for wireless_handlers != NULL */
	if (net_device->wireless_handlers)
		ret = true;
	else
#endif
		/* cfg80211 drivers have to set ieee80211_ptr */
		if (net_device->ieee80211_ptr)
			ret = true;
out:
	if (net_device)
		dev_put(net_device);
	return ret;
}

struct notifier_block hard_if_notifier = {
	.notifier_call = hard_if_event,
};