
skbuff: Move rxhash and vlan_tci to consolidate holes in sk_buff

This change helps to reduce the overall size of the sk_buff by moving
rxhash and vlan_tci so that the u16 values and u8 bitfields can be better
combined to create only one hole instead of multiple.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
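
Below is a minimal user-space sketch of the padding effect the commit message describes, not the real struct sk_buff; the member names and types here are illustrative, and the exact sizes depend on the ABI. It shows how grouping the u32 and u16 members next to each other lets the compiler pack them into one aligned slot instead of leaving a separate padding hole after each one.

#include <stdio.h>
#include <stdint.h>

/* Scattered layout: each small member sits alone in front of an
 * 8-byte-aligned member, so the compiler inserts a padding hole
 * after each of them. */
struct scattered {
	uint64_t a;
	uint16_t tc_index;	/* 2 bytes used, 6 bytes of padding follow */
	uint64_t b;
	uint32_t rxhash;	/* 4 bytes used, 4 bytes of padding follow */
	uint64_t c;
	uint16_t vlan_tci;	/* 2 bytes used, 6 bytes of tail padding */
};

/* Consolidated layout: the u32 and the two u16 members together fill a
 * single 8-byte slot, so the separate holes collapse into none here. */
struct consolidated {
	uint64_t a;
	uint32_t rxhash;
	uint16_t vlan_tci;
	uint16_t tc_index;
	uint64_t b;
	uint64_t c;
};

int main(void)
{
	printf("scattered:    %zu bytes\n", sizeof(struct scattered));
	printf("consolidated: %zu bytes\n", sizeof(struct consolidated));
	return 0;
}

On a typical LP64 target this prints 48 bytes for the scattered layout and 32 for the consolidated one. The usual way to find such holes in kernel structures is to run pahole (from the dwarves package) on the built object files.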
Alexander Duyck
commit 4031ae6edb
1 changed file with 5 additions and 4 deletions

+ 5 - 4
include/linux/skbuff.h

@@ -438,6 +438,11 @@ struct sk_buff {
 #endif
 
 	int			skb_iif;
+
+	__u32			rxhash;
+
+	__u16			vlan_tci;
+
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
 #ifdef CONFIG_NET_CLS_ACT
@@ -445,8 +450,6 @@ struct sk_buff {
 #endif
 #endif
 
-	__u32			rxhash;
-
 	__u16			queue_mapping;
 	kmemcheck_bitfield_begin(flags2);
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -470,8 +473,6 @@ struct sk_buff {
 		__u32		dropcount;
 	};
 
-	__u16			vlan_tci;
-
 	sk_buff_data_t		transport_header;
 	sk_buff_data_t		network_header;
 	sk_buff_data_t		mac_header;