/*
 * Copyright (c) 2007-2011 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "flow.h"
#include "datapath.h"
#include <linux/uaccess.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/rculist.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ndisc.h>

static struct kmem_cache *flow_cache;
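
/* Verifies that the first 'len' bytes of 'skb' are available in the linear
 * data area, pulling them in if necessary.  Returns -EINVAL if the packet
 * is too short and -ENOMEM if the pull fails, so callers can distinguish a
 * malformed packet from a memory shortage. */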
static int check_header(struct sk_buff *skb, int len)
{
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;
	return 0;
}

static bool arphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_network_offset(skb) +
				  sizeof(struct arp_eth_header));
}

static int check_iphdr(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int ip_len;
	int err;

	err = check_header(skb, nh_ofs + sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	ip_len = ip_hdrlen(skb);
	if (unlikely(ip_len < sizeof(struct iphdr) ||
		     skb->len < nh_ofs + ip_len))
		return -EINVAL;

	skb_set_transport_header(skb, nh_ofs + ip_len);
	return 0;
}

static bool tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	int tcp_len;

	if (unlikely(!pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))))
		return false;

	tcp_len = tcp_hdrlen(skb);
	if (unlikely(tcp_len < sizeof(struct tcphdr) ||
		     skb->len < th_ofs + tcp_len))
		return false;

	return true;
}

static bool udphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
}

static bool icmphdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmphdr));
}
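
/* Converts a flow's last-used timestamp from jiffies to milliseconds on
 * the system clock: the time the flow has been idle is subtracted from
 * the current time. */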
u64 ovs_flow_used_time(unsigned long flow_jiffies)
{
	struct timespec cur_ts;
	u64 cur_ms, idle_ms;

	ktime_get_ts(&cur_ts);
	idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);
	cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC +
		 cur_ts.tv_nsec / NSEC_PER_MSEC;

	return cur_ms - idle_ms;
}
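
/* Number of bytes of flow key that are meaningful when 'field' is the
 * deepest field filled in, i.e. the offset of 'field' within struct
 * sw_flow_key plus its size.  For example, SW_FLOW_KEY_OFFSET(ipv4.addr)
 * covers everything up to and including the IPv4 addresses. */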
#define SW_FLOW_KEY_OFFSET(field)		\
	(offsetof(struct sw_flow_key, field) +	\
	 FIELD_SIZEOF(struct sw_flow_key, field))
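
/* Fills in the IPv6 portion of 'key' from the packet's IPv6 header,
 * walking any extension headers to find the upper-layer protocol and
 * fragment information.  Returns the length of the IPv6 header plus
 * extensions on success, or a negative errno value on failure. */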
static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
			 int *key_lenp)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	unsigned int nh_len;
	int payload_ofs;
	struct ipv6hdr *nh;
	uint8_t nexthdr;
	__be16 frag_off;
	int err;

	*key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);

	err = check_header(skb, nh_ofs + sizeof(*nh));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	nexthdr = nh->nexthdr;
	payload_ofs = (u8 *)(nh + 1) - skb->data;

	key->ip.proto = NEXTHDR_NONE;
	key->ip.tos = ipv6_get_dsfield(nh);
	key->ip.ttl = nh->hop_limit;
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	key->ipv6.addr.src = nh->saddr;
	key->ipv6.addr.dst = nh->daddr;

	payload_ofs = ipv6_skip_exthdr(skb, payload_ofs, &nexthdr, &frag_off);
	if (unlikely(payload_ofs < 0))
		return -EINVAL;

	if (frag_off) {
		if (frag_off & htons(~0x7))
			key->ip.frag = OVS_FRAG_TYPE_LATER;
		else
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
	}

	nh_len = payload_ofs - nh_ofs;
	skb_set_transport_header(skb, nh_ofs + nh_len);
	key->ip.proto = nexthdr;
	return nh_len;
}

static bool icmp6hdr_ok(struct sk_buff *skb)
{
	return pskb_may_pull(skb, skb_transport_offset(skb) +
				  sizeof(struct icmp6hdr));
}

#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
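
/* Updates 'flow''s statistics for a packet that matched it: the last-used
 * time, the packet and byte counters, and, for TCP segments, the union of
 * the TCP flags seen so far. */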
void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
	u8 tcp_flags = 0;

	if ((flow->key.eth.type == htons(ETH_P_IP) ||
	     flow->key.eth.type == htons(ETH_P_IPV6)) &&
	    flow->key.ip.proto == IPPROTO_TCP &&
	    likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
		u8 *tcp = (u8 *)tcp_hdr(skb);
		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
	}

	spin_lock(&flow->lock);
	flow->used = jiffies;
	flow->packet_count++;
	flow->byte_count += skb->len;
	flow->tcp_flags |= tcp_flags;
	spin_unlock(&flow->lock);
}

struct sw_flow_actions *ovs_flow_actions_alloc(const struct nlattr *actions)
{
	int actions_len = nla_len(actions);
	struct sw_flow_actions *sfa;

	if (actions_len > MAX_ACTIONS_BUFSIZE)
		return ERR_PTR(-EINVAL);

	sfa = kmalloc(sizeof(*sfa) + actions_len, GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->actions_len = actions_len;
	memcpy(sfa->actions, nla_data(actions), actions_len);
	return sfa;
}

struct sw_flow *ovs_flow_alloc(void)
{
	struct sw_flow *flow;

	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&flow->lock);
	flow->sf_acts = NULL;

	return flow;
}
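
/* Maps a flow hash to its bucket.  The hash is mixed with a per-table
 * random seed so bucket placement differs between tables and cannot be
 * predicted from outside, then masked by n_buckets, which must be a power
 * of two for the mask to select a valid bucket. */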
static struct hlist_head *find_bucket(struct flow_table *table, u32 hash)
{
	hash = jhash_1word(hash, table->hash_seed);
	return flex_array_get(table->buckets,
			      (hash & (table->n_buckets - 1)));
}

static struct flex_array *alloc_buckets(unsigned int n_buckets)
{
	struct flex_array *buckets;
	int i, err;

	buckets = flex_array_alloc(sizeof(struct hlist_head *),
				   n_buckets, GFP_KERNEL);
	if (!buckets)
		return NULL;

	err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL);
	if (err) {
		flex_array_free(buckets);
		return NULL;
	}

	for (i = 0; i < n_buckets; i++)
		INIT_HLIST_HEAD((struct hlist_head *)
				flex_array_get(buckets, i));

	return buckets;
}

static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}

struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
	struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return NULL;

	table->buckets = alloc_buckets(new_size);

	if (!table->buckets) {
		kfree(table);
		return NULL;
	}
	table->n_buckets = new_size;
	table->count = 0;
	table->node_ver = 0;
	table->keep_flows = false;
	get_random_bytes(&table->hash_seed, sizeof(u32));

	return table;
}

void ovs_flow_tbl_destroy(struct flow_table *table)
{
	int i;

	if (!table)
		return;

	if (table->keep_flows)
		goto skip_flows;

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *node, *n;
		int ver = table->node_ver;

		hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			ovs_flow_free(flow);
		}
	}

skip_flows:
	free_buckets(table->buckets);
	kfree(table);
}

static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
	struct flow_table *table = container_of(rcu, struct flow_table, rcu);

	ovs_flow_tbl_destroy(table);
}

void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
{
	if (!table)
		return;

	call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
}
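
/* Returns the flow at cursor position (*bucket, *last) in 'table' and
 * advances the cursor past it, or NULL once the table is exhausted.  This
 * makes iteration resumable, e.g. across successive Netlink dump calls.
 * A minimal sketch of a complete walk (visit() is illustrative):
 *
 *	u32 bucket = 0, obj = 0;
 *	struct sw_flow *flow;
 *
 *	while ((flow = ovs_flow_tbl_next(table, &bucket, &obj)) != NULL)
 *		visit(flow);
 */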
struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	struct hlist_node *n;
	int ver;
	int i;

	ver = table->node_ver;
	while (*bucket < table->n_buckets) {
		i = 0;
		head = flex_array_get(table->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, n, head, hash_node[ver]) {
			if (i < *last) {
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
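
/* Re-links every flow in 'old' into 'new'.  Each sw_flow carries two sets
 * of hash-table links, selected by node_ver; the new table uses the other
 * set, so flows can be linked in without disturbing the list pointers that
 * RCU readers of the old table may still be traversing.  Setting
 * keep_flows tells ovs_flow_tbl_destroy() not to free the flows along
 * with the old table. */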
static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;
		struct hlist_node *n;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, n, head, hash_node[old_ver])
			ovs_flow_tbl_insert(new, flow);
	}
	old->keep_flows = true;
}

static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets)
{
	struct flow_table *new_table;

	new_table = ovs_flow_tbl_alloc(n_buckets);
	if (!new_table)
		return ERR_PTR(-ENOMEM);

	flow_table_copy_flows(table, new_table);

	return new_table;
}

struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets);
}

struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets * 2);
}

void ovs_flow_free(struct sw_flow *flow)
{
	if (unlikely(!flow))
		return;

	kfree((struct sw_flow_actions __force *)flow->sf_acts);
	kmem_cache_free(flow_cache, flow);
}

/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);

	ovs_flow_free(flow);
}

/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free(struct sw_flow *flow)
{
	call_rcu(&flow->rcu, rcu_free_flow_callback);
}

/* RCU callback used by ovs_flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
	struct sw_flow_actions *sf_acts = container_of(rcu,
			struct sw_flow_actions, rcu);
	kfree(sf_acts);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}
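
/* Parses the 802.1Q TCI out of an skb whose data pointer is just past the
 * Ethernet source and destination addresses, then advances the data
 * pointer past the tag.  A frame too short to hold a complete tag is not
 * an error; it is simply treated as untagged. */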
static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	struct qtag_prefix {
		__be16 eth_type; /* ETH_P_8021Q */
		__be16 tci;
	};
	struct qtag_prefix *qp;

	if (unlikely(skb->len < sizeof(struct qtag_prefix) + sizeof(__be16)))
		return 0;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct qtag_prefix) +
					 sizeof(__be16))))
		return -ENOMEM;

	qp = (struct qtag_prefix *) skb->data;
	key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT);
	__skb_pull(skb, sizeof(struct qtag_prefix));

	return 0;
}
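
/* Returns the EtherType of the frame whose payload starts at skb->data and
 * advances skb->data past the length/type field (and past any LLC/SNAP
 * header).  Values of 1536 (0x600) and up are Ethernet II EtherTypes;
 * smaller values are 802.3 frame lengths, in which case the EtherType
 * comes from the SNAP header if one is present and ETH_P_802_2 otherwise.
 * A return value of htons(0) means that pulling up the header failed. */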
static __be16 parse_ethertype(struct sk_buff *skb)
{
	struct llc_snap_hdr {
		u8  dsap;  /* Always 0xAA */
		u8  ssap;  /* Always 0xAA */
		u8  ctrl;
		u8  oui[3];
		__be16 ethertype;
	};
	struct llc_snap_hdr *llc;
	__be16 proto;

	proto = *(__be16 *) skb->data;
	__skb_pull(skb, sizeof(__be16));

	if (ntohs(proto) >= 1536)
		return proto;

	if (skb->len < sizeof(struct llc_snap_hdr))
		return htons(ETH_P_802_2);

	if (unlikely(!pskb_may_pull(skb, sizeof(struct llc_snap_hdr))))
		return htons(0);

	llc = (struct llc_snap_hdr *) skb->data;
	if (llc->dsap != LLC_SAP_SNAP ||
	    llc->ssap != LLC_SAP_SNAP ||
	    (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
		return htons(ETH_P_802_2);

	__skb_pull(skb, sizeof(struct llc_snap_hdr));
	return llc->ethertype;
}
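
/* Fills in the ICMPv6 portion of 'key', including the target address and
 * link-layer address options of neighbour solicitation and advertisement
 * messages.  Malformed ND options are not a hard error: the ND fields are
 * cleared and the key keeps only the ICMPv6 type and code. */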
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
			int *key_lenp, int nh_len)
{
	struct icmp6hdr *icmp = icmp6_hdr(skb);
	int error = 0;
	int key_len;

	/* The ICMPv6 type and code fields use the 16-bit transport port
	 * fields, so we need to store them in 16-bit network byte order.
	 */
	key->ipv6.tp.src = htons(icmp->icmp6_type);
	key->ipv6.tp.dst = htons(icmp->icmp6_code);
	key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);

	if (icmp->icmp6_code == 0 &&
	    (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
	     icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) {
		int icmp_len = skb->len - skb_transport_offset(skb);
		struct nd_msg *nd;
		int offset;

		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		/* In order to process neighbor discovery options, we need the
		 * entire packet.
		 */
		if (unlikely(icmp_len < sizeof(*nd)))
			goto out;
		if (unlikely(skb_linearize(skb))) {
			error = -ENOMEM;
			goto out;
		}

		nd = (struct nd_msg *)skb_transport_header(skb);
		key->ipv6.nd.target = nd->target;
		key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);

		icmp_len -= sizeof(*nd);
		offset = 0;
		while (icmp_len >= 8) {
			struct nd_opt_hdr *nd_opt =
				(struct nd_opt_hdr *)(nd->opt + offset);
			int opt_len = nd_opt->nd_opt_len * 8;

			if (unlikely(!opt_len || opt_len > icmp_len))
				goto invalid;

			/* Store the link layer address if the appropriate
			 * option is provided.  It is considered an error if
			 * the same link layer option is specified twice.
			 */
			if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR
			    && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
					goto invalid;
				memcpy(key->ipv6.nd.sll,
				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR
				   && opt_len == 8) {
				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
					goto invalid;
				memcpy(key->ipv6.nd.tll,
				       &nd->opt[offset+sizeof(*nd_opt)], ETH_ALEN);
			}

			icmp_len -= opt_len;
			offset += opt_len;
		}
	}

	goto out;

invalid:
	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));

out:
	*key_lenp = key_len;
	return error;
}

/**
 * ovs_flow_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @in_port: port number on which @skb was received.
 * @key: output flow key
 * @key_lenp: length of output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
		     int *key_lenp)
{
	int error = 0;
	int key_len = SW_FLOW_KEY_OFFSET(eth);
	struct ethhdr *eth;

	memset(key, 0, sizeof(*key));

	key->phy.priority = skb->priority;
	key->phy.in_port = in_port;

	skb_reset_mac_header(skb);

	/* Link layer.  We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	memcpy(key->eth.src, eth->h_source, ETH_ALEN);
	memcpy(key->eth.dst, eth->h_dest, ETH_ALEN);

	__skb_pull(skb, 2 * ETH_ALEN);

	if (vlan_tx_tag_present(skb))
		key->eth.tci = htons(skb->vlan_tci);
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);

		error = check_iphdr(skb);
		if (unlikely(error)) {
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			goto out;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			goto out;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv4.tp.src = tcp->source;
				key->ipv4.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv4.tp.src = udp->source;
				key->ipv4.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);
				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order. */
				key->ipv4.tp.src = htons(icmp->type);
				key->ipv4.tp.dst = htons(icmp->code);
			}
		}
	} else if (key->eth.type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp->ar_hrd == htons(ARPHRD_ETHER)
		    && arp->ar_pro == htons(ETH_P_IP)
		    && arp->ar_hln == ETH_ALEN
		    && arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);

			if (key->ip.proto == ARPOP_REQUEST
			    || key->ip.proto == ARPOP_REPLY) {
				memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src));
				memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
				memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
				memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
				key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
			}
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key, &key_len);
		if (unlikely(nh_len < 0)) {
			if (nh_len == -EINVAL)
				skb->transport_header = skb->network_header;
			else
				error = nh_len;
			goto out;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			goto out;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);
				key->ipv6.tp.src = tcp->source;
				key->ipv6.tp.dst = tcp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);
				key->ipv6.tp.src = udp->source;
				key->ipv6.tp.dst = udp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, &key_len, nh_len);
				if (error < 0)
					goto out;
			}
		}
	}

out:
	*key_lenp = key_len;
	return error;
}
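
/* Hashes the first 'key_len' bytes of 'key' as an array of 32-bit words.
 * DIV_ROUND_UP can take in a few bytes of padding past 'key_len'; that is
 * harmless here because flow keys are zeroed before they are filled in. */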
u32 ovs_flow_hash(const struct sw_flow_key *key, int key_len)
{
	return jhash2((u32 *)key, DIV_ROUND_UP(key_len, sizeof(u32)), 0);
}

struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
				    struct sw_flow_key *key, int key_len)
{
	struct sw_flow *flow;
	struct hlist_node *n;
	struct hlist_head *head;
	u32 hash;

	hash = ovs_flow_hash(key, key_len);

	head = find_bucket(table, hash);
	hlist_for_each_entry_rcu(flow, n, head, hash_node[table->node_ver]) {
		if (flow->hash == hash &&
		    !memcmp(&flow->key, key, key_len)) {
			return flow;
		}
	}
	return NULL;
}

void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
	struct hlist_head *head;

	head = find_bucket(table, flow->hash);
	hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
	table->count++;
}

void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	hlist_del_rcu(&flow->hash_node[table->node_ver]);
	table->count--;
	BUG_ON(table->count < 0);
}

/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. */
const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP] = -1,
	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
};
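
/* Fills in the transport-layer portion of 'swkey' from the parsed Netlink
 * attributes in 'a', based on the IP protocol already stored in the key,
 * clearing each consumed attribute's bit in '*attrs'.  A missing required
 * attribute yields -EINVAL.  ipv6_flow_from_nlattrs() below is the IPv6
 * counterpart and additionally consumes OVS_KEY_ATTR_ND for neighbour
 * discovery messages. */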
static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u32 *attrs)
{
	const struct ovs_key_icmp *icmp_key;
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;

	switch (swkey->ip.proto) {
	case IPPROTO_TCP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		swkey->ipv4.tp.src = tcp_key->tcp_src;
		swkey->ipv4.tp.dst = tcp_key->tcp_dst;
		break;

	case IPPROTO_UDP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		swkey->ipv4.tp.src = udp_key->udp_src;
		swkey->ipv4.tp.dst = udp_key->udp_dst;
		break;

	case IPPROTO_ICMP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_ICMP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
		swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
		break;
	}

	return 0;
}

static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
				  const struct nlattr *a[], u32 *attrs)
{
	const struct ovs_key_icmpv6 *icmpv6_key;
	const struct ovs_key_tcp *tcp_key;
	const struct ovs_key_udp *udp_key;

	switch (swkey->ip.proto) {
	case IPPROTO_TCP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TCP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		swkey->ipv6.tp.src = tcp_key->tcp_src;
		swkey->ipv6.tp.dst = tcp_key->tcp_dst;
		break;

	case IPPROTO_UDP:
		if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_UDP);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		swkey->ipv6.tp.src = udp_key->udp_src;
		swkey->ipv6.tp.dst = udp_key->udp_dst;
		break;

	case IPPROTO_ICMPV6:
		if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);

		*key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
		swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);

		if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
		    swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
			const struct ovs_key_nd *nd_key;

			if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
				return -EINVAL;
			*attrs &= ~(1 << OVS_KEY_ATTR_ND);

			*key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
			nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
			memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
			       sizeof(swkey->ipv6.nd.target));
			memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
			memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
		}
		break;
	}

	return 0;
}
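
/* Scatters the attributes nested inside 'attr' into the array 'a', indexed
 * by attribute type, and reports the set of types seen as a bitmap in
 * '*attrsp'.  Duplicate or out-of-range types and wrong lengths are
 * rejected; an expected length of -1 (the nested OVS_KEY_ATTR_ENCAP)
 * skips the length check. */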
static int parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u32 *attrsp)
{
	const struct nlattr *nla;
	u32 attrs;
	int rem;

	attrs = 0;
	nla_for_each_nested(nla, attr, rem) {
		u16 type = nla_type(nla);
		int expected_len;

		if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type))
			return -EINVAL;

		expected_len = ovs_key_lens[type];
		if (nla_len(nla) != expected_len && expected_len != -1)
			return -EINVAL;

		attrs |= 1 << type;
		a[type] = nla;
	}
	if (rem)
		return -EINVAL;

	*attrsp = attrs;
	return 0;
}

/**
 * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
 * @swkey: receives the extracted flow key.
 * @key_lenp: number of bytes used in @swkey.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 */
int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
			  const struct nlattr *attr)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	const struct ovs_key_ethernet *eth_key;
	int key_len;
	u32 attrs;
	int err;

	memset(swkey, 0, sizeof(struct sw_flow_key));
	key_len = SW_FLOW_KEY_OFFSET(eth);

	err = parse_flow_nlattrs(attr, a, &attrs);
	if (err)
		return err;

	/* Metadata attributes. */
	if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
		attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
	}
	if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
		if (in_port >= DP_MAX_PORTS)
			return -EINVAL;
		swkey->phy.in_port = in_port;
		attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
	} else {
		swkey->phy.in_port = DP_MAX_PORTS;
	}

	/* Data attributes. */
	if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
		return -EINVAL;
	attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);

	eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
	memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
	memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);

	if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
	    nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
		const struct nlattr *encap;
		__be16 tci;

		if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
			      (1 << OVS_KEY_ATTR_ETHERTYPE) |
			      (1 << OVS_KEY_ATTR_ENCAP)))
			return -EINVAL;

		encap = a[OVS_KEY_ATTR_ENCAP];
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		if (tci & htons(VLAN_TAG_PRESENT)) {
			swkey->eth.tci = tci;

			err = parse_flow_nlattrs(encap, a, &attrs);
			if (err)
				return err;
		} else if (!tci) {
			/* Corner case for truncated 802.1Q header. */
			if (nla_len(encap))
				return -EINVAL;

			swkey->eth.type = htons(ETH_P_8021Q);
			*key_lenp = key_len;
			return 0;
		} else {
			return -EINVAL;
		}
	}

	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
		swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		if (ntohs(swkey->eth.type) < 1536)
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
	} else {
		swkey->eth.type = htons(ETH_P_802_2);
	}

	if (swkey->eth.type == htons(ETH_P_IP)) {
		const struct ovs_key_ipv4 *ipv4_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);

		key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
			return -EINVAL;
		swkey->ip.proto = ipv4_key->ipv4_proto;
		swkey->ip.tos = ipv4_key->ipv4_tos;
		swkey->ip.ttl = ipv4_key->ipv4_ttl;
		swkey->ip.frag = ipv4_key->ipv4_frag;
		swkey->ipv4.addr.src = ipv4_key->ipv4_src;
		swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;

		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
			if (err)
				return err;
		}
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);

		key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
			return -EINVAL;
		swkey->ipv6.label = ipv6_key->ipv6_label;
		swkey->ip.proto = ipv6_key->ipv6_proto;
		swkey->ip.tos = ipv6_key->ipv6_tclass;
		swkey->ip.ttl = ipv6_key->ipv6_hlimit;
		swkey->ip.frag = ipv6_key->ipv6_frag;
		memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
		       sizeof(swkey->ipv6.addr.src));
		memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
		       sizeof(swkey->ipv6.addr.dst));

		if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
			err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
			if (err)
				return err;
		}
	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		const struct ovs_key_arp *arp_key;

		if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
			return -EINVAL;
		attrs &= ~(1 << OVS_KEY_ATTR_ARP);

		key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		swkey->ipv4.addr.src = arp_key->arp_sip;
		swkey->ipv4.addr.dst = arp_key->arp_tip;
		if (arp_key->arp_op & htons(0xff00))
			return -EINVAL;
		swkey->ip.proto = ntohs(arp_key->arp_op);
		memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
		memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
	}

	if (attrs)
		return -EINVAL;
	*key_lenp = key_len;

	return 0;
}

/**
 * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
 * @priority: receives the extracted skb priority.
 * @in_port: receives the extracted input port.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by ovs_flow_from_nlattrs(), but only enough of
 * it to get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 */
int ovs_flow_metadata_from_nlattrs(u32 *priority, u16 *in_port,
				   const struct nlattr *attr)
{
	const struct nlattr *nla;
	int rem;

	*in_port = DP_MAX_PORTS;
	*priority = 0;

	nla_for_each_nested(nla, attr, rem) {
		int type = nla_type(nla);

		if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
			if (nla_len(nla) != ovs_key_lens[type])
				return -EINVAL;

			switch (type) {
			case OVS_KEY_ATTR_PRIORITY:
				*priority = nla_get_u32(nla);
				break;

			case OVS_KEY_ATTR_IN_PORT:
				if (nla_get_u32(nla) >= DP_MAX_PORTS)
					return -EINVAL;
				*in_port = nla_get_u32(nla);
				break;
			}
		}
	}
	if (rem)
		return -EINVAL;

	return 0;
}
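
/* Serializes 'swkey' into the nested %OVS_KEY_ATTR_* attribute form that
 * ovs_flow_from_nlattrs() accepts, appending to 'skb'.  A VLAN-tagged flow
 * is written as an ETHERTYPE/VLAN pair followed by an OVS_KEY_ATTR_ENCAP
 * nest holding the encapsulated key.  Returns 0 on success or -EMSGSIZE if
 * 'skb' has insufficient tailroom. */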
int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
{
	struct ovs_key_ethernet *eth_key;
	struct nlattr *nla, *encap;

	if (swkey->phy.priority &&
	    nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
		goto nla_put_failure;

	if (swkey->phy.in_port != DP_MAX_PORTS &&
	    nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
		goto nla_put_failure;

	nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
	if (!nla)
		goto nla_put_failure;
	eth_key = nla_data(nla);
	memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
	memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);

	if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
		if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
		    nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
			goto nla_put_failure;
		encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
		if (!swkey->eth.tci)
			goto unencap;
	} else {
		encap = NULL;
	}

	if (swkey->eth.type == htons(ETH_P_802_2))
		goto unencap;

	if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
		goto nla_put_failure;

	if (swkey->eth.type == htons(ETH_P_IP)) {
		struct ovs_key_ipv4 *ipv4_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key));
		if (!nla)
			goto nla_put_failure;
		ipv4_key = nla_data(nla);
		ipv4_key->ipv4_src = swkey->ipv4.addr.src;
		ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
		ipv4_key->ipv4_proto = swkey->ip.proto;
		ipv4_key->ipv4_tos = swkey->ip.tos;
		ipv4_key->ipv4_ttl = swkey->ip.ttl;
		ipv4_key->ipv4_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
		struct ovs_key_ipv6 *ipv6_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key));
		if (!nla)
			goto nla_put_failure;
		ipv6_key = nla_data(nla);
		memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
		       sizeof(ipv6_key->ipv6_src));
		memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
		       sizeof(ipv6_key->ipv6_dst));
		ipv6_key->ipv6_label = swkey->ipv6.label;
		ipv6_key->ipv6_proto = swkey->ip.proto;
		ipv6_key->ipv6_tclass = swkey->ip.tos;
		ipv6_key->ipv6_hlimit = swkey->ip.ttl;
		ipv6_key->ipv6_frag = swkey->ip.frag;
	} else if (swkey->eth.type == htons(ETH_P_ARP)) {
		struct ovs_key_arp *arp_key;

		nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key));
		if (!nla)
			goto nla_put_failure;
		arp_key = nla_data(nla);
		memset(arp_key, 0, sizeof(struct ovs_key_arp));
		arp_key->arp_sip = swkey->ipv4.addr.src;
		arp_key->arp_tip = swkey->ipv4.addr.dst;
		arp_key->arp_op = htons(swkey->ip.proto);
		memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
		memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
	}

	if ((swkey->eth.type == htons(ETH_P_IP) ||
	     swkey->eth.type == htons(ETH_P_IPV6)) &&
	     swkey->ip.frag != OVS_FRAG_TYPE_LATER) {

		if (swkey->ip.proto == IPPROTO_TCP) {
			struct ovs_key_tcp *tcp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key));
			if (!nla)
				goto nla_put_failure;
			tcp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				tcp_key->tcp_src = swkey->ipv4.tp.src;
				tcp_key->tcp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				tcp_key->tcp_src = swkey->ipv6.tp.src;
				tcp_key->tcp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->ip.proto == IPPROTO_UDP) {
			struct ovs_key_udp *udp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key));
			if (!nla)
				goto nla_put_failure;
			udp_key = nla_data(nla);
			if (swkey->eth.type == htons(ETH_P_IP)) {
				udp_key->udp_src = swkey->ipv4.tp.src;
				udp_key->udp_dst = swkey->ipv4.tp.dst;
			} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
				udp_key->udp_src = swkey->ipv6.tp.src;
				udp_key->udp_dst = swkey->ipv6.tp.dst;
			}
		} else if (swkey->eth.type == htons(ETH_P_IP) &&
			   swkey->ip.proto == IPPROTO_ICMP) {
			struct ovs_key_icmp *icmp_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key));
			if (!nla)
				goto nla_put_failure;
			icmp_key = nla_data(nla);
			icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
			icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
		} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
			   swkey->ip.proto == IPPROTO_ICMPV6) {
			struct ovs_key_icmpv6 *icmpv6_key;

			nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6,
					  sizeof(*icmpv6_key));
			if (!nla)
				goto nla_put_failure;
			icmpv6_key = nla_data(nla);
			icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
			icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);

			if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
			    icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
				struct ovs_key_nd *nd_key;

				nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key));
				if (!nla)
					goto nla_put_failure;
				nd_key = nla_data(nla);
				memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
				       sizeof(nd_key->nd_target));
				memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
				memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
			}
		}
	}

unencap:
	if (encap)
		nla_nest_end(skb, encap);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
				       0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	return 0;
}

/* Uninitializes the flow module. */
void ovs_flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}