vlan_core.c

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"

/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
		      u16 vlan_tci, int polling)
{
	struct net_device *vlan_dev;
	u16 vlan_id;

	if (netpoll_rx(skb))
		return NET_RX_DROP;

	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
		skb->deliver_no_wcard = 1;

	skb->skb_iif = skb->dev->ifindex;
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	vlan_id = vlan_tci & VLAN_VID_MASK;
	vlan_dev = vlan_group_get_device(grp, vlan_id);

	if (vlan_dev)
		skb->dev = vlan_dev;
	else if (vlan_id) {
		if (!(skb->dev->flags & IFF_PROMISC))
			goto drop;
		skb->pkt_type = PACKET_OTHERHOST;
	}

	return polling ? netif_receive_skb(skb) : netif_rx(skb);

drop:
	atomic_long_inc(&skb->dev->rx_dropped);
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);
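
/*
 * Usage sketch (not part of this file): drivers normally reach this helper
 * through the vlan_hwaccel_rx()/vlan_hwaccel_receive_skb() wrappers in
 * <linux/if_vlan.h>, which pass polling=0 and polling=1 respectively.
 * The adapter/vlgrp/vlan_tag names below are hypothetical driver fields:
 *
 *	if (adapter->vlgrp && rx_desc_has_vlan_tag)
 *		vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vlan_tag);
 *	else
 *		netif_receive_skb(skb);
 */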

void vlan_hwaccel_do_receive(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct vlan_rx_stats *rx_stats;

	skb->dev = vlan_dev_real_dev(dev);
	netif_nit_deliver(skb);
	skb->dev = dev;

	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		break;
	case PACKET_MULTICAST:
		rx_stats->rx_multicast++;
		break;
	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	}
	u64_stats_update_end(&rx_stats->syncp);
}
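
/*
 * The per-CPU counters updated above are meant to be read back with the
 * u64_stats_sync retry loop.  A minimal reader-side sketch, assuming the
 * same vlan_rx_stats layout (variable names here are illustrative only):
 *
 *	for_each_possible_cpu(i) {
 *		const struct vlan_rx_stats *p =
 *			per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
 *		unsigned int start;
 *		u64 packets, bytes;
 *
 *		do {
 *			start = u64_stats_fetch_begin(&p->syncp);
 *			packets = p->rx_packets;
 *			bytes   = p->rx_bytes;
 *		} while (u64_stats_fetch_retry(&p->syncp, start));
 *
 *		stats->rx_packets += packets;
 *		stats->rx_bytes   += bytes;
 *	}
 */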

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);
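
/*
 * Illustrative only: code outside the 8021q module typically checks
 * IFF_802_1Q_VLAN before calling the two accessors above, e.g.:
 *
 *	if (dev->priv_flags & IFF_802_1Q_VLAN) {
 *		struct net_device *lower = vlan_dev_real_dev(dev);
 *		u16 vid = vlan_dev_vlan_id(dev);
 *		...
 *	}
 */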

static gro_result_t
vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
		unsigned int vlan_tci, struct sk_buff *skb)
{
	struct sk_buff *p;
	struct net_device *vlan_dev;
	u16 vlan_id;

	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
		skb->deliver_no_wcard = 1;

	skb->skb_iif = skb->dev->ifindex;
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	vlan_id = vlan_tci & VLAN_VID_MASK;
	vlan_dev = vlan_group_get_device(grp, vlan_id);

	if (vlan_dev)
		skb->dev = vlan_dev;
	else if (vlan_id) {
		if (!(skb->dev->flags & IFF_PROMISC))
			goto drop;
		skb->pkt_type = PACKET_OTHERHOST;
	}

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= compare_ether_header(skb_mac_header(p),
					      skb_gro_mac_header(skb));
		NAPI_GRO_CB(p)->same_flow = !diffs;
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);

drop:
	atomic_long_inc(&skb->dev->rx_dropped);
	return GRO_DROP;
}

gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
			      unsigned int vlan_tci, struct sk_buff *skb)
{
	if (netpoll_rx_on(skb))
		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
			? GRO_DROP : GRO_NORMAL;

	skb_gro_reset_offset(skb);

	return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
}
EXPORT_SYMBOL(vlan_gro_receive);
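
/*
 * Usage sketch: a GRO-capable NAPI driver hands hardware-stripped tags to
 * vlan_gro_receive() instead of vlan_hwaccel_receive_skb().  The adapter
 * fields below are hypothetical:
 *
 *	if (adapter->vlgrp && vlan_tag_present)
 *		vlan_gro_receive(&adapter->napi, adapter->vlgrp, vlan_tci, skb);
 *	else
 *		napi_gro_receive(&adapter->napi, skb);
 */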

gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
			    unsigned int vlan_tci)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	if (netpoll_rx_on(skb)) {
		skb->protocol = eth_type_trans(skb, skb->dev);
		return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
			? GRO_DROP : GRO_NORMAL;
	}

	return napi_frags_finish(napi, skb,
				 vlan_gro_common(napi, grp, vlan_tci, skb));
}
EXPORT_SYMBOL(vlan_gro_frags);
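
/*
 * vlan_gro_frags() pairs with napi_get_frags(): the driver fills a
 * frag-only skb owned by the napi context and does not call
 * eth_type_trans() itself.  A hedged sketch (adapter/vlan_tci are
 * assumptions, not part of this file):
 *
 *	skb = napi_get_frags(&adapter->napi);
 *	if (!skb)
 *		return;
 *	... attach page fragments, update skb->len, data_len, truesize ...
 *	if (adapter->vlgrp && vlan_tag_present)
 *		vlan_gro_frags(&adapter->napi, adapter->vlgrp, vlan_tci);
 *	else
 *		napi_gro_frags(&adapter->napi);
 */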