flow_dissector.c

#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>
/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to:	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}
/**
 * skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: buffer to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset.
 */
__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
{
	int poff = proto_ports_offset(ip_proto);

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = skb_header_pointer(skb, thoff + poff,
					   sizeof(_ports), &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(skb_flow_get_ports);
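/* skb_flow_dissect - walk the packet headers and fill @flow
 *
 * Starting at the network header, peels VLAN, PPPoE and layer 3 tunnel
 * encapsulations, then records source/destination addresses, the final
 * IP protocol, the transport ports and the transport header offset.
 * Returns false if a needed header cannot be read from the skb.
 */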
bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
	int nhoff = skb_network_offset(skb);
	u8 ip_proto;
	__be16 proto = skb->protocol;

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case __constant_htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;
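
		/* A fragment may not carry the transport header, so do
		 * not let later stages read ports from it.
		 */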
		if (ip_is_fragment(iph))
			ip_proto = 0;
		else
			ip_proto = iph->protocol;
		iph_to_flow_copy_addrs(flow, iph);
		nhoff += iph->ihl * 4;
		break;
	}
	case __constant_htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
ipv6:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
		nhoff += sizeof(struct ipv6hdr);
		break;
	}
	case __constant_htons(ETH_P_8021AD):
	case __constant_htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case __constant_htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case __constant_htons(PPP_IP):
			goto ip;
		case __constant_htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	default:
		return false;
	}
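
	/* Peel layer 3 tunnels (GRE, IPIP, IPv6-in-IPv4) by jumping back
	 * into the protocol switch above with the inner protocol.
	 */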
	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = skb_header_pointer(skb, nhoff,
							 sizeof(_eth), &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);
			}
			goto again;
		}
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);
		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);
		goto ipv6;
	default:
		break;
	}

	flow->ip_proto = ip_proto;
	flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
	flow->thoff = (u16) nhoff;

	return true;
}
EXPORT_SYMBOL(skb_flow_dissect);
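/*
 * Example of how a caller might consume the dissected keys (an
 * illustrative sketch, not part of this file):
 *
 *	struct flow_keys keys;
 *
 *	if (skb_flow_dissect(skb, &keys))
 *		pr_debug("proto %u ports %#x thoff %u\n",
 *			 keys.ip_proto, ntohl(keys.ports), keys.thoff);
 */
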
static u32 hashrnd __read_mostly;

/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
 * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_rxhash = 1;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys.dst < (__force u32)keys.src) ||
	    (((__force u32)keys.dst == (__force u32)keys.src) &&
	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
		swap(keys.dst, keys.src);
		swap(keys.port16[0], keys.port16[1]);
	}

	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
	if (!hash)
		hash = 1;

	skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol;
	hash = jhash_1word(hash, hashrnd);
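
	/* Scale the 32-bit hash onto [0, qcount) with a multiply and
	 * shift instead of a modulo, avoiding a division.
	 */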
	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
/* __skb_get_poff() returns the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, so that we can dynamically
 * truncate packets without needing to push actual payload to the user
 * space and can analyze headers only, instead.
 */
u32 __skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 poff = 0;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	poff += keys.thoff;
	switch (keys.ip_proto) {
	case IPPROTO_TCP: {
		const struct tcphdr *tcph;
		struct tcphdr _tcph;

		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
		if (!tcph)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
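/* Clamp a queue index chosen by a driver's ndo_select_queue() to the
 * device's real queue range, warning (ratelimited) on bogus values.
 */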
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}
	return queue_index;
}
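/* Pick a TX queue from the XPS (Transmit Packet Steering) map of the
 * CPU we are currently running on, or -1 if XPS is unavailable.
 */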
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;
				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}
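/* Default queue selection: reuse the index cached on the socket when it
 * is still valid, otherwise consult XPS and fall back to the flow hash.
 * The result is cached on the socket while it holds a destination.
 */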
u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}
EXPORT_SYMBOL(__netdev_pick_tx);
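/* Resolve the TX queue for skb, preferring the driver's own
 * ndo_select_queue() when one is provided, and record the mapping in
 * the skb before handing back the queue.
 */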
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb);
		else
			queue_index = __netdev_pick_tx(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
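/* Seed hashrnd once, late in boot, so flow hashes differ between boots
 * and cannot easily be predicted by remote senders.
 */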
static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}
late_initcall_sync(initialize_hashrnd);