link_watch.c

/*
 * Linux network device link state notification
 *
 * Author:
 *     Stefan Rompf <sux@loplof.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <asm/types.h>

enum lw_bits {
        LW_URGENT = 0,
};

static unsigned long linkwatch_flags;
static unsigned long linkwatch_nextevent;

static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);

static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);
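
/* Derive the RFC 2863 operational state from the device's carrier and
 * dormant flags.  A device bound to a lower device (dev->ifindex !=
 * dev->iflink) reports LOWERLAYERDOWN rather than DOWN when carrier is lost.
 */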
static unsigned char default_operstate(const struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                return (dev->ifindex != dev->iflink ?
                        IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN);

        if (netif_dormant(dev))
                return IF_OPER_DORMANT;

        return IF_OPER_UP;
}
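
/* Apply the configured link mode policy to the default operstate: in
 * IF_LINK_MODE_DORMANT, a device that would otherwise be UP is reported as
 * DORMANT.  dev->operstate is updated under dev_base_lock.
 */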
static void rfc2863_policy(struct net_device *dev)
{
        unsigned char operstate = default_operstate(dev);

        if (operstate == dev->operstate)
                return;

        write_lock_bh(&dev_base_lock);

        switch(dev->link_mode) {
        case IF_LINK_MODE_DORMANT:
                if (operstate == IF_OPER_UP)
                        operstate = IF_OPER_DORMANT;
                break;

        case IF_LINK_MODE_DEFAULT:
        default:
                break;
        }

        dev->operstate = operstate;

        write_unlock_bh(&dev_base_lock);
}
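
/* An event is "urgent" (exempt from the one-per-second rate limit below)
 * when a running device has carrier but its qdisc still has to be switched
 * over, i.e. a link-up that is keeping transmission stalled.
 */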
static bool linkwatch_urgent_event(struct net_device *dev)
{
        return netif_running(dev) && netif_carrier_ok(dev) &&
               qdisc_tx_changing(dev);
}
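
/* Queue the device on the global event list, taking a reference that is
 * dropped by linkwatch_do_dev() once the event has been handled.
 */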
static void linkwatch_add_event(struct net_device *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&lweventlist_lock, flags);
        if (list_empty(&dev->link_watch_list)) {
                list_add_tail(&dev->link_watch_list, &lweventlist);
                dev_hold(dev);
        }
        spin_unlock_irqrestore(&lweventlist_lock, flags);
}
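
/* (Re)arm the delayed work: urgent events collapse the delay to zero, all
 * other events run no earlier than linkwatch_nextevent.
 */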
static void linkwatch_schedule_work(int urgent)
{
        unsigned long delay = linkwatch_nextevent - jiffies;

        if (test_bit(LW_URGENT, &linkwatch_flags))
                return;

        /* Minimise down-time: drop delay for up event. */
        if (urgent) {
                if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
                        return;
                delay = 0;
        }

        /* If we wrap around we'll delay it by at most HZ. */
        if (delay > HZ)
                delay = 0;

        /*
         * This is true if we've scheduled it immediately or if we don't
         * need an immediate execution and it's already pending.
         */
        if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
                return;

        /* Don't bother if there is nothing urgent. */
        if (!test_bit(LW_URGENT, &linkwatch_flags))
                return;

        /* It's already running which is good enough. */
        if (!cancel_delayed_work(&linkwatch_work))
                return;

        /* Otherwise we reschedule it again for immediate execution. */
        schedule_delayed_work(&linkwatch_work, 0);
}
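
/* Handle one queued event: recompute the operstate and, if the device is up,
 * (de)activate its qdisc to match the carrier and notify userspace.  Drops
 * the reference taken when the device was queued.
 */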
static void linkwatch_do_dev(struct net_device *dev)
{
        /*
         * Make sure the above read is complete since it can be
         * rewritten as soon as we clear the bit below.
         */
        smp_mb__before_clear_bit();

        /* We are about to handle this device,
         * so new events can be accepted
         */
        clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

        rfc2863_policy(dev);
        if (dev->flags & IFF_UP) {
                if (netif_carrier_ok(dev))
                        dev_activate(dev);
                else
                        dev_deactivate(dev);

                netdev_state_change(dev);
        }
        dev_put(dev);
}
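
/* Drain the pending event list.  With urgent_only set, non-urgent devices
 * are put back on the list to be handled once the rate-limit window expires.
 */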
static void __linkwatch_run_queue(int urgent_only)
{
        struct net_device *dev;
        LIST_HEAD(wrk);

        /*
         * Limit the number of linkwatch events to one
         * per second so that a runaway driver does not
         * cause a storm of messages on the netlink
         * socket.  This limit does not apply to up events
         * while the device qdisc is down.
         */
        if (!urgent_only)
                linkwatch_nextevent = jiffies + HZ;
        /* Limit wrap-around effect on delay. */
        else if (time_after(linkwatch_nextevent, jiffies + HZ))
                linkwatch_nextevent = jiffies;

        clear_bit(LW_URGENT, &linkwatch_flags);

        spin_lock_irq(&lweventlist_lock);
        list_splice_init(&lweventlist, &wrk);

        while (!list_empty(&wrk)) {
                dev = list_first_entry(&wrk, struct net_device, link_watch_list);
                list_del_init(&dev->link_watch_list);

                if (urgent_only && !linkwatch_urgent_event(dev)) {
                        list_add_tail(&dev->link_watch_list, &lweventlist);
                        continue;
                }
                spin_unlock_irq(&lweventlist_lock);
                linkwatch_do_dev(dev);
                spin_lock_irq(&lweventlist_lock);
        }

        if (!list_empty(&lweventlist))
                linkwatch_schedule_work(0);
        spin_unlock_irq(&lweventlist_lock);
}
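
/* Remove any event still queued for @dev and handle it immediately, dropping
 * the reference so the event list does not pin the device.
 */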
void linkwatch_forget_dev(struct net_device *dev)
{
        unsigned long flags;
        int clean = 0;

        spin_lock_irqsave(&lweventlist_lock, flags);
        if (!list_empty(&dev->link_watch_list)) {
                list_del_init(&dev->link_watch_list);
                clean = 1;
        }
        spin_unlock_irqrestore(&lweventlist_lock, flags);
        if (clean)
                linkwatch_do_dev(dev);
}

/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
        __linkwatch_run_queue(0);
}
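
/* Deferred work: run the queue under RTNL.  If the rate-limit window has not
 * expired yet, handle only urgent events and leave the rest queued.
 */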
static void linkwatch_event(struct work_struct *dummy)
{
        rtnl_lock();
        __linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
        rtnl_unlock();
}
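
/* Entry point for link state changes, called via netif_carrier_on(),
 * netif_carrier_off() and the dormant state helpers: mark the device as
 * having a pending event, queue it once, and kick the worker.
 */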
void linkwatch_fire_event(struct net_device *dev)
{
        bool urgent = linkwatch_urgent_event(dev);

        if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
                linkwatch_add_event(dev);
        } else if (!urgent)
                return;

        linkwatch_schedule_work(urgent);
}
EXPORT_SYMBOL(linkwatch_fire_event);
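
/*
 * Usage sketch: drivers normally do not call linkwatch_fire_event()
 * directly.  They report link changes through netif_carrier_on() /
 * netif_carrier_off() (and netif_dormant_on() / netif_dormant_off()),
 * which update dev->state and fire the event handled above.  The handler
 * below is a hypothetical illustration, not part of this file.
 */
#if 0   /* illustrative only, not compiled */
static void example_link_change(struct net_device *dev, bool link_up)
{
        if (link_up)
                netif_carrier_on(dev);  /* may be treated as an urgent event */
        else
                netif_carrier_off(dev); /* rate-limited to one event per second */
}
#endif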