  1. /*
  2. * Common code for low-level network console, dump, and debugger code
  3. *
  4. * Derived from netconsole, kgdb-over-ethernet, and netdump patches
  5. */
  6. #ifndef _LINUX_NETPOLL_H
  7. #define _LINUX_NETPOLL_H
  8. #include <linux/netdevice.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/rcupdate.h>
  11. #include <linux/list.h>
/*
 * Per-client netpoll state: one instance per netpoll user (netconsole,
 * kgdb-over-ethernet, netdump), describing the device it is bound to and
 * the UDP/IPv4 endpoints it sends and receives on.
 */
struct netpoll {
	struct net_device *dev;			/* device this client is attached to */
	struct net_device *real_dev;		/* presumably the underlying device when
						 * dev is a virtual one — TODO confirm */
	char dev_name[IFNAMSIZ];		/* interface name requested at setup */
	const char *name;			/* client name, for diagnostics */
	void (*rx_hook)(struct netpoll *, int, char *, int);
						/* rx callback; NOTE(review): argument
						 * meanings (port?, data, len?) are not
						 * visible here — verify against caller */
	__be32 local_ip, remote_ip;		/* IPv4 addresses, network byte order */
	u16 local_port, remote_port;		/* UDP ports */
	u8 remote_mac[ETH_ALEN];		/* peer MAC address */

	struct list_head rx; /* rx_np list element */
};
/*
 * Per-device netpoll state, shared by all netpoll clients attached to a
 * net_device (reached through skb->dev->npinfo in the inlines below).
 */
struct netpoll_info {
	atomic_t refcnt;			/* presumably counts attached clients — confirm */
	int rx_flags;				/* nonzero while rx interception is active;
						 * rechecked under rx_lock in netpoll_rx() */
	spinlock_t rx_lock;			/* protects rx_flags (see netpoll_rx) */
	struct list_head rx_np; /* netpolls that registered an rx_hook */
	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
	struct sk_buff_head txq;		/* queued packets awaiting transmit */
	struct delayed_work tx_work;		/* deferred work that drains txq — TODO confirm */

	struct netpoll *netpoll;
};
/* Poll a device's NAPI contexts directly (implemented in net/core/netpoll.c). */
void netpoll_poll_dev(struct net_device *dev);
/* Poll the device bound to @np. */
void netpoll_poll(struct netpoll *np);
/* Transmit @msg (@len bytes) as a UDP packet using @np's endpoints. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
/* Log the parsed configuration of @np. */
void netpoll_print_options(struct netpoll *np);
/* Parse a textual option string into @np; returns 0 or a negative errno — TODO confirm. */
int netpoll_parse_options(struct netpoll *np, char *opt);
/* Bind @np to its device and allocate per-device state. */
int netpoll_setup(struct netpoll *np);
/* Query/set the global trap state used while a debugger owns the NIC — TODO confirm semantics. */
int netpoll_trap(void);
void netpoll_set_trap(int trap);
/* Detach @np and release per-device state when the last client goes away. */
void netpoll_cleanup(struct netpoll *np);
/* Core rx interception; called under rx_lock from netpoll_rx(). */
int __netpoll_rx(struct sk_buff *skb);
/* Transmit @skb via @np, bypassing the normal stack. */
void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
  44. #ifdef CONFIG_NETPOLL
  45. static inline int netpoll_rx(struct sk_buff *skb)
  46. {
  47. struct netpoll_info *npinfo = skb->dev->npinfo;
  48. unsigned long flags;
  49. int ret = 0;
  50. if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
  51. return 0;
  52. spin_lock_irqsave(&npinfo->rx_lock, flags);
  53. /* check rx_flags again with the lock held */
  54. if (npinfo->rx_flags && __netpoll_rx(skb))
  55. ret = 1;
  56. spin_unlock_irqrestore(&npinfo->rx_lock, flags);
  57. return ret;
  58. }
  59. static inline int netpoll_rx_on(struct sk_buff *skb)
  60. {
  61. struct netpoll_info *npinfo = skb->dev->npinfo;
  62. return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
  63. }
  64. static inline int netpoll_receive_skb(struct sk_buff *skb)
  65. {
  66. if (!list_empty(&skb->dev->napi_list))
  67. return netpoll_rx(skb);
  68. return 0;
  69. }
  70. static inline void *netpoll_poll_lock(struct napi_struct *napi)
  71. {
  72. struct net_device *dev = napi->dev;
  73. rcu_read_lock(); /* deal with race on ->npinfo */
  74. if (dev && dev->npinfo) {
  75. spin_lock(&napi->poll_lock);
  76. napi->poll_owner = smp_processor_id();
  77. return napi;
  78. }
  79. return NULL;
  80. }
/*
 * Undo netpoll_poll_lock().  @have is the cookie it returned: a
 * napi_struct pointer when the poll_lock was taken (release it and clear
 * the owner), or NULL when it was not.  The rcu_read_unlock() is
 * unconditional because netpoll_poll_lock() always takes the RCU
 * read lock, on both of its return paths.
 */
static inline void netpoll_poll_unlock(void *have)
{
	struct napi_struct *napi = have;

	if (napi) {
		napi->poll_owner = -1;
		spin_unlock(&napi->poll_lock);
	}
	rcu_read_unlock();
}
  90. #else
/* !CONFIG_NETPOLL stub: netpoll never consumes packets. */
static inline int netpoll_rx(struct sk_buff *skb)
{
	return 0;
}
/* !CONFIG_NETPOLL stub: rx interception is never enabled. */
static inline int netpoll_rx_on(struct sk_buff *skb)
{
	return 0;
}
/* !CONFIG_NETPOLL stub: never consumes packets. */
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
/* !CONFIG_NETPOLL stub: no lock to take; cookie is always NULL. */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
/* !CONFIG_NETPOLL stub: nothing was locked. */
static inline void netpoll_poll_unlock(void *have)
{
}
/*
 * !CONFIG_NETPOLL stub: no per-device init needed.
 * NOTE(review): no counterpart is visible in the CONFIG_NETPOLL branch
 * of this header — presumably declared/defined elsewhere; verify.
 */
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
  113. #endif
  114. #endif