flow_dissector.c

#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>

/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to :	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}

/**
 * skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: buffer to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
{
	int poff = proto_ports_offset(ip_proto);

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = skb_header_pointer(skb, thoff + poff,
					   sizeof(_ports), &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(skb_flow_get_ports);
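
/*
 * Usage sketch (illustrative, not part of this file): fetch both TCP ports
 * of a packet in one load. The returned __be32 covers sport then dport in
 * network byte order; splitting it with memcpy keeps the code
 * endianness-safe. "example_tcp_ports" is a hypothetical helper.
 */
static __maybe_unused void example_tcp_ports(const struct sk_buff *skb, int thoff)
{
	__be32 ports = skb_flow_get_ports(skb, thoff, IPPROTO_TCP);
	struct {
		__be16 sport;
		__be16 dport;
	} p;

	memcpy(&p, &ports, sizeof(p));	/* sport occupies the first 16 bits on the wire */
	pr_debug("tcp ports %u -> %u\n", ntohs(p.sport), ntohs(p.dport));
}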
bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
	int nhoff = skb_network_offset(skb);
	u8 ip_proto;
	__be16 proto = skb->protocol;

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case __constant_htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		if (ip_is_fragment(iph))
			ip_proto = 0;
		else
			ip_proto = iph->protocol;
		iph_to_flow_copy_addrs(flow, iph);
		nhoff += iph->ihl * 4;
		break;
	}
	case __constant_htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
ipv6:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
		nhoff += sizeof(struct ipv6hdr);
		break;
	}
	case __constant_htons(ETH_P_8021AD):
	case __constant_htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case __constant_htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case __constant_htons(PPP_IP):
			goto ip;
		case __constant_htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	default:
		return false;
	}

	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = skb_header_pointer(skb, nhoff,
							 sizeof(_eth), &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);
			}
			goto again;
		}
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);
		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);
		goto ipv6;
	default:
		break;
	}

	flow->ip_proto = ip_proto;
	flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
	flow->thoff = (u16) nhoff;

	return true;
}
EXPORT_SYMBOL(skb_flow_dissect);
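
/*
 * Usage sketch (illustrative only): fill a struct flow_keys from an skb and
 * log the 5-tuple. For IPv6, keys.src/keys.dst hold 32-bit address hashes
 * rather than real addresses, so this only prints meaningfully for IPv4.
 * "example_log_flow" is a hypothetical helper, not a kernel symbol.
 */
static __maybe_unused void example_log_flow(const struct sk_buff *skb)
{
	struct flow_keys keys;

	if (!skb_flow_dissect(skb, &keys))
		return;		/* truncated headers or unsupported protocol */

	pr_debug("flow %pI4:%u -> %pI4:%u proto %u thoff %u\n",
		 &keys.src, ntohs(keys.port16[0]),
		 &keys.dst, ntohs(keys.port16[1]),
		 keys.ip_proto, keys.thoff);
}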
static u32 hashrnd __read_mostly;

static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
{
	__flow_hash_secret_init();
	return jhash_3words(a, b, c, hashrnd);
}

static __always_inline u32 __flow_hash_1word(u32 a)
{
	__flow_hash_secret_init();
	return jhash_1word(a, hashrnd);
}

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets rxhash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_rxhash = 1;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys.dst < (__force u32)keys.src) ||
	    (((__force u32)keys.dst == (__force u32)keys.src) &&
	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
		swap(keys.dst, keys.src);
		swap(keys.port16[0], keys.port16[1]);
	}

	hash = __flow_hash_3words((__force u32)keys.dst,
				  (__force u32)keys.src,
				  (__force u32)keys.ports);
	if (!hash)
		hash = 1;

	skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
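
/*
 * Usage sketch (illustrative only): because the tuple is canonicalized
 * before hashing, both directions of a flow map to the same rxhash, which
 * is what lets RPS/RFS steer request and reply to the same CPU. Callers
 * normally go through the lazy wrapper skb_get_rxhash() from
 * <linux/skbuff.h> rather than calling __skb_get_rxhash() directly.
 */
static __maybe_unused u32 example_flow_hash(struct sk_buff *skb)
{
	return skb_get_rxhash(skb);	/* computes and caches skb->rxhash on first use */
}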
/*
 * Returns a Tx hash based on the given packet descriptor and the number of
 * Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol;
	hash = __flow_hash_1word(hash);

	/* scale the 32-bit hash into [qoffset, qoffset + qcount) */
	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
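
/*
 * The final line above uses a multiply-and-shift instead of a modulo: for a
 * 32-bit hash h and range n, ((u64)h * n) >> 32 lands in [0, n) and avoids
 * a divide. Worked example (illustrative): h = 0x80000000, n = 8 gives
 * (0x80000000ULL * 8) >> 32 = 4, i.e. the midpoint hash lands mid-range.
 * "example_scale_hash" below is a hypothetical sketch of the same trick.
 */
static __maybe_unused u16 example_scale_hash(u32 hash, u16 n)
{
	/* maps hash uniformly onto [0, n) without a divide */
	return (u16)(((u64)hash * n) >> 32);
}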
/* __skb_get_poff() returns the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, so that we can dynamically
 * truncate packets without needing to push actual payload to the user
 * space and can analyze headers only, instead.
 */
u32 __skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 poff = 0;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	poff += keys.thoff;

	switch (keys.ip_proto) {
	case IPPROTO_TCP: {
		const struct tcphdr *tcph;
		struct tcphdr _tcph;

		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
		if (!tcph)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
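
/*
 * Usage sketch (illustrative only): a capture path could snap packets to
 * their headers by clamping the copied length to the payload offset, which
 * is how the classic BPF payload-offset extension (SKF_AD_PAY_OFFSET) uses
 * __skb_get_poff(). "example_snap_len" is a hypothetical helper.
 */
static __maybe_unused u32 example_snap_len(const struct sk_buff *skb)
{
	u32 poff = __skb_get_poff(skb);	/* 0 if the packet could not be dissected */

	return poff ? min_t(u32, skb->len, poff) : skb->len;
}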
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}
	return queue_index;
}

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;

				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = __flow_hash_1word(hash);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
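
/*
 * Note (informational): the per-CPU XPS map consulted above is configured
 * from user space, e.g. by writing a CPU mask to
 * /sys/class/net/<dev>/queues/tx-<n>/xps_cpus; get_xps_queue() then picks
 * among the queues mapped to the transmitting CPU, using the same
 * multiply-and-shift scaling as __skb_tx_hash().
 */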
u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);

		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
EXPORT_SYMBOL(__netdev_pick_tx);

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;

		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		queue_index = dev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
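
/*
 * Usage sketch (illustrative only): the core transmit path resolves the
 * queue roughly like this before handing the skb to the qdisc/driver; the
 * real caller is dev_queue_xmit() in net/core/dev.c. "example_pick_queue"
 * is a hypothetical helper.
 */
static __maybe_unused void example_pick_queue(struct net_device *dev,
					      struct sk_buff *skb)
{
	struct netdev_queue *txq = netdev_pick_tx(dev, skb);

	/* netdev_pick_tx() also recorded the choice in the skb itself */
	pr_debug("queue %u selected for %s\n",
		 skb_get_queue_mapping(skb), dev->name);
	(void)txq;
}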