/* netpoll.h */
  1. /*
  2. * Common code for low-level network console, dump, and debugger code
  3. *
  4. * Derived from netconsole, kgdb-over-ethernet, and netdump patches
  5. */
  6. #ifndef _LINUX_NETPOLL_H
  7. #define _LINUX_NETPOLL_H
  8. #include <linux/netdevice.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/rcupdate.h>
  11. #include <linux/list.h>
/*
 * Per-client netpoll instance: one UDP endpoint bound to a network
 * device, used by netconsole/netdump/kgdboe style clients.
 */
struct netpoll {
	struct net_device *dev;			/* device this instance is attached to */
	char dev_name[IFNAMSIZ];		/* interface name, e.g. "eth0" */
	const char *name;			/* client name, for diagnostics */
	/* Optional receive callback; presumably (np, source port, data, len)
	 * — confirm against __netpoll_rx() in net/core/netpoll.c. */
	void (*rx_hook)(struct netpoll *, int, char *, int);
	u32 local_ip, remote_ip;		/* IPv4 addresses, network byte order assumed — verify */
	u16 local_port, remote_port;		/* UDP ports */
	u8 remote_mac[ETH_ALEN];		/* destination MAC for transmitted frames */
};
/*
 * Per-device netpoll state, hung off net_device->npinfo and shared by
 * all netpoll clients attached to that device.
 */
struct netpoll_info {
	atomic_t refcnt;		/* number of attached netpoll clients */
	int rx_flags;			/* nonzero when rx interception is active */
	spinlock_t rx_lock;		/* protects rx_flags/rx_np on the rx path */
	struct netpoll *rx_np;		/* netpoll that registered an rx_hook */
	struct sk_buff_head arp_tx;	/* list of arp requests to reply to */
	struct sk_buff_head txq;	/* packets queued when the device was busy */
	struct delayed_work tx_work;	/* retries transmission of the txq backlog */
};
/* Poll the device's NAPI handler so packets flow with interrupts off. */
void netpoll_poll(struct netpoll *np);
/* Transmit a UDP datagram carrying msg/len via np's configured endpoint. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
/* Log the parsed configuration of np. */
void netpoll_print_options(struct netpoll *np);
/* Parse the "port@src-ip/dev,port@dst-ip/mac" option string; 0 on success. */
int netpoll_parse_options(struct netpoll *np, char *opt);
/* Bind np to its device and bring the device up if needed; 0 on success. */
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
/* Detach np from its device, freeing shared state on last reference. */
void netpoll_cleanup(struct netpoll *np);
/* Low-level rx intercept; nonzero if the skb was consumed by netpoll. */
int __netpoll_rx(struct sk_buff *skb);
  39. #ifdef CONFIG_NETPOLL
  40. static inline int netpoll_rx(struct sk_buff *skb)
  41. {
  42. struct netpoll_info *npinfo = skb->dev->npinfo;
  43. unsigned long flags;
  44. int ret = 0;
  45. if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
  46. return 0;
  47. spin_lock_irqsave(&npinfo->rx_lock, flags);
  48. /* check rx_flags again with the lock held */
  49. if (npinfo->rx_flags && __netpoll_rx(skb))
  50. ret = 1;
  51. spin_unlock_irqrestore(&npinfo->rx_lock, flags);
  52. return ret;
  53. }
  54. static inline int netpoll_receive_skb(struct sk_buff *skb)
  55. {
  56. if (!list_empty(&skb->dev->napi_list))
  57. return netpoll_rx(skb);
  58. return 0;
  59. }
  60. static inline void *netpoll_poll_lock(struct napi_struct *napi)
  61. {
  62. struct net_device *dev = napi->dev;
  63. rcu_read_lock(); /* deal with race on ->npinfo */
  64. if (dev && dev->npinfo) {
  65. spin_lock(&napi->poll_lock);
  66. napi->poll_owner = smp_processor_id();
  67. return napi;
  68. }
  69. return NULL;
  70. }
  71. static inline void netpoll_poll_unlock(void *have)
  72. {
  73. struct napi_struct *napi = have;
  74. if (napi) {
  75. napi->poll_owner = -1;
  76. spin_unlock(&napi->poll_lock);
  77. }
  78. rcu_read_unlock();
  79. }
/* Initialize the per-device NAPI list during net_device setup. */
static inline void netpoll_netdev_init(struct net_device *dev)
{
	INIT_LIST_HEAD(&dev->napi_list);
}
#else /* !CONFIG_NETPOLL: no-op stubs so callers compile unchanged */
/* Netpoll disabled: never consumes packets. */
static inline int netpoll_rx(struct sk_buff *skb)
{
	return 0;
}
/* Netpoll disabled: never consumes packets. */
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
/* Netpoll disabled: nothing to lock. */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
/* Netpoll disabled: nothing to unlock. */
static inline void netpoll_poll_unlock(void *have)
{
}
/* Netpoll disabled: no per-device init needed. */
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
#endif /* CONFIG_NETPOLL */
  104. #endif