/* include/net/gro_cells.h */
#ifndef _NET_GRO_CELLS_H
#define _NET_GRO_CELLS_H

#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
/*
 * One GRO engine instance: a private skb receive queue drained by its
 * own NAPI context (gro_cell_poll).  Cacheline-aligned so adjacent
 * cells used from different CPUs do not false-share.
 */
struct gro_cell {
	struct sk_buff_head	napi_skbs;	/* packets awaiting GRO; its lock also guards scheduling */
	struct napi_struct	napi;		/* per-cell NAPI poller */
} ____cacheline_aligned_in_smp;
/*
 * A power-of-two-sized array of gro_cell.  gro_cells_mask is
 * (array length - 1) and is used to fold a recorded rx queue id into a
 * cell index.  cells == NULL means not initialized (or already
 * destroyed), and makes receive fall back to netif_rx().
 */
struct gro_cells {
	unsigned int		gro_cells_mask;
	struct gro_cell		*cells;
};
  14. static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
  15. {
  16. struct gro_cell *cell = gcells->cells;
  17. struct net_device *dev = skb->dev;
  18. if (!cell || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
  19. netif_rx(skb);
  20. return;
  21. }
  22. if (skb_rx_queue_recorded(skb))
  23. cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
  24. if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
  25. atomic_long_inc(&dev->rx_dropped);
  26. kfree_skb(skb);
  27. return;
  28. }
  29. /* We run in BH context */
  30. spin_lock(&cell->napi_skbs.lock);
  31. __skb_queue_tail(&cell->napi_skbs, skb);
  32. if (skb_queue_len(&cell->napi_skbs) == 1)
  33. napi_schedule(&cell->napi);
  34. spin_unlock(&cell->napi_skbs.lock);
  35. }
/*
 * NAPI poll callback for one gro_cell; called under BH context.
 * Drains up to @budget skbs from the cell's queue into the GRO engine
 * and returns the number processed.
 */
static inline int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	spin_lock(&cell->napi_skbs.lock);
	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		/* Drop the queue lock across GRO processing; it may be slow. */
		spin_unlock(&cell->napi_skbs.lock);
		napi_gro_receive(napi, skb);
		work_done++;
		spin_lock(&cell->napi_skbs.lock);
	}
	/*
	 * Complete while still holding the lock: this closes the race with
	 * gro_cells_receive(), whose "queue length == 1" napi_schedule()
	 * decision is made under the same lock.
	 */
	if (work_done < budget)
		napi_complete(napi);
	spin_unlock(&cell->napi_skbs.lock);
	return work_done;
}
  57. static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
  58. {
  59. int i;
  60. gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
  61. gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
  62. sizeof(struct gro_cell),
  63. GFP_KERNEL);
  64. if (!gcells->cells)
  65. return -ENOMEM;
  66. for (i = 0; i <= gcells->gro_cells_mask; i++) {
  67. struct gro_cell *cell = gcells->cells + i;
  68. skb_queue_head_init(&cell->napi_skbs);
  69. netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
  70. napi_enable(&cell->napi);
  71. }
  72. return 0;
  73. }
  74. static inline void gro_cells_destroy(struct gro_cells *gcells)
  75. {
  76. struct gro_cell *cell = gcells->cells;
  77. int i;
  78. if (!cell)
  79. return;
  80. for (i = 0; i <= gcells->gro_cells_mask; i++,cell++) {
  81. netif_napi_del(&cell->napi);
  82. skb_queue_purge(&cell->napi_skbs);
  83. }
  84. kfree(gcells->cells);
  85. gcells->cells = NULL;
  86. }
#endif /* _NET_GRO_CELLS_H */