link_watch.c

/*
 * Linux network device link state notification
 *
 * Author:
 *	Stefan Rompf <sux@loplof.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <asm/types.h>

enum lw_bits {
	LW_URGENT = 0,
};

static unsigned long linkwatch_flags;
static unsigned long linkwatch_nextevent;

static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);

static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);

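/*
 * Map the carrier and dormant flags to an RFC 2863 operstate.  A device
 * whose ifindex differs from iflink (a stacked device) reports
 * LOWERLAYERDOWN rather than DOWN when the carrier is off.
 */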
static unsigned char default_operstate(const struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		return (dev->ifindex != dev->iflink ?
			IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN);

	if (netif_dormant(dev))
		return IF_OPER_DORMANT;

	return IF_OPER_UP;
}

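/*
 * Apply the interface's link_mode policy on top of the default operstate
 * and publish the result in dev->operstate under dev_base_lock.
 */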
static void rfc2863_policy(struct net_device *dev)
{
	unsigned char operstate = default_operstate(dev);

	if (operstate == dev->operstate)
		return;

	write_lock_bh(&dev_base_lock);

	switch(dev->link_mode) {
	case IF_LINK_MODE_DORMANT:
		if (operstate == IF_OPER_UP)
			operstate = IF_OPER_DORMANT;
		break;

	case IF_LINK_MODE_DEFAULT:
	default:
		break;
	}

	dev->operstate = operstate;

	write_unlock_bh(&dev_base_lock);
}

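/*
 * An event is urgent when the device is running with carrier up but its
 * qdisc has not been switched over yet, i.e. traffic is being held back.
 */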
static bool linkwatch_urgent_event(struct net_device *dev)
{
	return netif_running(dev) && netif_carrier_ok(dev) &&
	       qdisc_tx_changing(dev);
}

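/*
 * Put the device on the event list; a reference is taken the first time
 * it is queued and dropped again in linkwatch_do_dev().
 */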
static void linkwatch_add_event(struct net_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&lweventlist_lock, flags);
	if (list_empty(&dev->link_watch_list)) {
		list_add_tail(&dev->link_watch_list, &lweventlist);
		dev_hold(dev);
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
}

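/*
 * Schedule the delayed work that drains the event list.  Urgent events
 * run immediately; everything else honours the one-second rate limit
 * tracked in linkwatch_nextevent.
 */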
static void linkwatch_schedule_work(int urgent)
{
	unsigned long delay = linkwatch_nextevent - jiffies;

	if (test_bit(LW_URGENT, &linkwatch_flags))
		return;

	/* Minimise down-time: drop delay for up event. */
	if (urgent) {
		if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
			return;
		delay = 0;
	}

	/* If we wrap around we'll delay it by at most HZ. */
	if (delay > HZ)
		delay = 0;

	/*
	 * This is true if we've scheduled it immediately or if we don't
	 * need an immediate execution and it's already pending.
	 */
	if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
		return;

	/* Don't bother if there is nothing urgent. */
	if (!test_bit(LW_URGENT, &linkwatch_flags))
		return;

	/* It's already running which is good enough. */
	if (!__cancel_delayed_work(&linkwatch_work))
		return;

	/* Otherwise we reschedule it again for immediate execution. */
	schedule_delayed_work(&linkwatch_work, 0);
}

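/*
 * Handle one device: clear its pending bit so new events can be queued,
 * re-evaluate the operstate, (de)activate the qdisc and send a state
 * change notification, then drop the reference taken in
 * linkwatch_add_event().
 */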
static void linkwatch_do_dev(struct net_device *dev)
{
	/*
	 * Make sure the above read is complete since it can be
	 * rewritten as soon as we clear the bit below.
	 */
	smp_mb__before_clear_bit();

	/* We are about to handle this device,
	 * so new events can be accepted
	 */
	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

	rfc2863_policy(dev);
	if (dev->flags & IFF_UP) {
		if (netif_carrier_ok(dev))
			dev_activate(dev);
		else
			dev_deactivate(dev);

		netdev_state_change(dev);
	}
	dev_put(dev);
}

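/*
 * Drain the event list.  When urgent_only is set, devices without an
 * urgent event are put back on the list and left for a later,
 * rate-limited run.
 */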
static void __linkwatch_run_queue(int urgent_only)
{
	struct net_device *dev;
	LIST_HEAD(wrk);

	/*
	 * Limit the number of linkwatch events to one
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket.  This limit does not apply to up events
	 * while the device qdisc is down.
	 */
	if (!urgent_only)
		linkwatch_nextevent = jiffies + HZ;
	/* Limit wrap-around effect on delay. */
	else if (time_after(linkwatch_nextevent, jiffies + HZ))
		linkwatch_nextevent = jiffies;

	clear_bit(LW_URGENT, &linkwatch_flags);

	spin_lock_irq(&lweventlist_lock);
	list_splice_init(&lweventlist, &wrk);

	while (!list_empty(&wrk)) {

		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
		list_del_init(&dev->link_watch_list);

		if (urgent_only && !linkwatch_urgent_event(dev)) {
			list_add_tail(&dev->link_watch_list, &lweventlist);
			continue;
		}
		spin_unlock_irq(&lweventlist_lock);
		linkwatch_do_dev(dev);
		spin_lock_irq(&lweventlist_lock);
	}

	if (!list_empty(&lweventlist))
		linkwatch_schedule_work(0);
	spin_unlock_irq(&lweventlist_lock);
}

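/*
 * Remove a device that is going away from the event list; if it was
 * queued, process it once more so the pending bit and the reference are
 * released.
 */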
void linkwatch_forget_dev(struct net_device *dev)
{
	unsigned long flags;
	int clean = 0;

	spin_lock_irqsave(&lweventlist_lock, flags);
	if (!list_empty(&dev->link_watch_list)) {
		list_del_init(&dev->link_watch_list);
		clean = 1;
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
	if (clean)
		linkwatch_do_dev(dev);
}

/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
	__linkwatch_run_queue(0);
}

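/*
 * Delayed-work handler: drain the event list under the RTNL lock, in
 * urgent-only mode while the one-second rate limit is still in effect.
 */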
static void linkwatch_event(struct work_struct *dummy)
{
	rtnl_lock();
	__linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
	rtnl_unlock();
}

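/*
 * Entry point for drivers (via netif_carrier_on/off and friends): queue
 * the device unless an event is already pending, then kick the work,
 * without delay for urgent events.
 */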
void linkwatch_fire_event(struct net_device *dev)
{
	bool urgent = linkwatch_urgent_event(dev);

	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
		linkwatch_add_event(dev);
	} else if (!urgent)
		return;

	linkwatch_schedule_work(urgent);
}
EXPORT_SYMBOL(linkwatch_fire_event);