datapath.c

/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

#define REHASH_FLOW_INTERVAL (10 * 60 * HZ)
static void rehash_flow_table(struct work_struct *work);
static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table);

int ovs_net_id __read_mostly;

static void ovs_notify(struct sk_buff *skb, struct genl_info *info,
                       struct genl_multicast_group *grp)
{
        genl_notify(skb, genl_info_net(info), info->snd_portid,
                    grp->id, info->nlhdr, GFP_KERNEL);
}
/**
 * DOC: Locking:
 *
 * All writes to device state (add/remove datapath or port, set operations
 * on vports, etc.) and to other state (flow table modifications,
 * miscellaneous datapath parameters, etc.) are protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */
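/* A minimal usage sketch of the discipline described above (writer side):
 *
 *        ovs_lock();
 *        ... mutate datapath, vport, or flow-table state ...
 *        ovs_unlock();
 *
 * Readers instead bracket access with rcu_read_lock()/rcu_read_unlock().
 */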
static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
        mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
        mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
        if (debug_locks)
                return lockdep_is_held(&ovs_mutex);
        else
                return 1;
}
#endif

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct net *, int dp_ifindex, struct sk_buff *,
                             const struct dp_upcall_info *);
static int queue_userspace_packet(struct net *, int dp_ifindex,
                                  struct sk_buff *,
                                  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock or ovs_mutex. */
static struct datapath *get_dp(struct net *net, int dp_ifindex)
{
        struct datapath *dp = NULL;
        struct net_device *dev;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, dp_ifindex);
        if (dev) {
                struct vport *vport = ovs_internal_dev_get_vport(dev);
                if (vport)
                        dp = vport->dp;
        }
        rcu_read_unlock();

        return dp;
}

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
        struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
        return vport->ops->get_name(vport);
}
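/* Return the ifindex of the datapath's local port, or 0 if it is gone. */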
static int get_dpifindex(struct datapath *dp)
{
        struct vport *local;
        int ifindex;

        rcu_read_lock();
        local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
                ifindex = netdev_vport_priv(local)->dev->ifindex;
        else
                ifindex = 0;
        rcu_read_unlock();

        return ifindex;
}
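/* RCU callback: frees the datapath's resources once all readers are done. */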
static void destroy_dp_rcu(struct rcu_head *rcu)
{
        struct datapath *dp = container_of(rcu, struct datapath, rcu);

        ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
        free_percpu(dp->stats_percpu);
        release_net(ovs_dp_get_net(dp));
        kfree(dp->ports);
        kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
                                            u16 port_no)
{
        return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}
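/* Must be called with rcu_read_lock or ovs_mutex; searches one hash bucket
 * for the vport with the given port number.
 */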
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
        struct vport *vport;
        struct hlist_head *head;

        head = vport_hash_bucket(dp, port_no);
        hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
                if (vport->port_no == port_no)
                        return vport;
        }
        return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = ovs_vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;
                struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

                hlist_add_head_rcu(&vport->dp_hash_node, head);
        }
        return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
        ASSERT_OVSL();

        /* First drop references to device. */
        hlist_del_rcu(&p->dp_hash_node);

        /* Then destroy it. */
        ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
        struct datapath *dp = p->dp;
        struct sw_flow *flow;
        struct dp_stats_percpu *stats;
        struct sw_flow_key key;
        u64 *stats_counter;
        int error;
        int key_len;

        stats = this_cpu_ptr(dp->stats_percpu);

        /* Extract flow from 'skb' into 'key'. */
        error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
        if (unlikely(error)) {
                kfree_skb(skb);
                return;
        }

        /* Look up flow. */
        flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
        if (unlikely(!flow)) {
                struct dp_upcall_info upcall;

                upcall.cmd = OVS_PACKET_CMD_MISS;
                upcall.key = &key;
                upcall.userdata = NULL;
                upcall.portid = p->upcall_portid;
                ovs_dp_upcall(dp, skb, &upcall);
                consume_skb(skb);
                stats_counter = &stats->n_missed;
                goto out;
        }

        OVS_CB(skb)->flow = flow;

        stats_counter = &stats->n_hit;
        ovs_flow_used(OVS_CB(skb)->flow, skb);
        ovs_execute_actions(dp, skb);

out:
        /* Update datapath statistics. */
        u64_stats_update_begin(&stats->sync);
        (*stats_counter)++;
        u64_stats_update_end(&stats->sync);
}

static struct genl_family dp_packet_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_PACKET_FAMILY,
        .version = OVS_PACKET_VERSION,
        .maxattr = OVS_PACKET_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
};
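/* Send a packet to userspace over the Netlink upcall channel, segmenting
 * GSO packets first.  On any failure the per-CPU n_lost counter is bumped.
 */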
int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
                  const struct dp_upcall_info *upcall_info)
{
        struct dp_stats_percpu *stats;
        int dp_ifindex;
        int err;

        if (upcall_info->portid == 0) {
                err = -ENOTCONN;
                goto err;
        }

        dp_ifindex = get_dpifindex(dp);
        if (!dp_ifindex) {
                err = -ENODEV;
                goto err;
        }

        if (!skb_is_gso(skb))
                err = queue_userspace_packet(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        else
                err = queue_gso_packets(ovs_dp_get_net(dp), dp_ifindex, skb, upcall_info);
        if (err)
                goto err;

        return 0;

err:
        stats = this_cpu_ptr(dp->stats_percpu);

        u64_stats_update_begin(&stats->sync);
        stats->n_lost++;
        u64_stats_update_end(&stats->sync);

        return err;
}
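/* Software-segment a GSO skb and queue each resulting segment to userspace.
 * For UDP fragmentation, segments after the first get their flow key
 * re-marked as OVS_FRAG_TYPE_LATER.
 */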
static int queue_gso_packets(struct net *net, int dp_ifindex,
                             struct sk_buff *skb,
                             const struct dp_upcall_info *upcall_info)
{
        unsigned short gso_type = skb_shinfo(skb)->gso_type;
        struct dp_upcall_info later_info;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        int err;

        segs = __skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM, false);
        if (IS_ERR(segs))
                return PTR_ERR(segs);

        /* Queue all of the segments. */
        skb = segs;
        do {
                err = queue_userspace_packet(net, dp_ifindex, skb, upcall_info);
                if (err)
                        break;

                if (skb == segs && gso_type & SKB_GSO_UDP) {
                        /* The initial flow key extracted by ovs_flow_extract()
                         * in this case is for a first fragment, so we need to
                         * properly mark later fragments.
                         */
                        later_key = *upcall_info->key;
                        later_key.ip.frag = OVS_FRAG_TYPE_LATER;

                        later_info = *upcall_info;
                        later_info.key = &later_key;
                        upcall_info = &later_info;
                }
        } while ((skb = skb->next));

        /* Free all of the segments. */
        skb = segs;
        do {
                nskb = skb->next;
                if (err)
                        kfree_skb(skb);
                else
                        consume_skb(skb);
        } while ((skb = nskb));
        return err;
}
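/* Worst-case Netlink size of a serialized flow key, summed over every
 * OVS_KEY_ATTR_* / OVS_TUNNEL_KEY_ATTR_* attribute that may appear.
 */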
static size_t key_attr_size(void)
{
        return  nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
                + nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
                  + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
                  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
                  + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
                  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
                  + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
                  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
                  + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
                + nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
                + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
                + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
                + nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
                + nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
                + nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
                + nla_total_size(28); /* OVS_KEY_ATTR_ND */
}

static size_t upcall_msg_size(const struct sk_buff *skb,
                              const struct nlattr *userdata)
{
        size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
                + nla_total_size(skb->len) /* OVS_PACKET_ATTR_PACKET */
                + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */

        /* OVS_PACKET_ATTR_USERDATA */
        if (userdata)
                size += NLA_ALIGN(userdata->nla_len);

        return size;
}
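/* Build an OVS_PACKET_* genetlink message carrying the packet data, its
 * flow key, and optional userdata, then unicast it to upcall_info->portid.
 * A VLAN-accelerated skb is cloned first so the tag can be pushed back
 * into the packet data before it is copied out.
 */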
static int queue_userspace_packet(struct net *net, int dp_ifindex,
                                  struct sk_buff *skb,
                                  const struct dp_upcall_info *upcall_info)
{
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
        struct sk_buff *user_skb; /* to be queued to userspace */
        struct nlattr *nla;
        int err;

        if (vlan_tx_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;

                nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
                if (!nskb)
                        return -ENOMEM;

                nskb->vlan_tci = 0;
                skb = nskb;
        }

        if (nla_attr_size(skb->len) > USHRT_MAX) {
                err = -EFBIG;
                goto out;
        }

        user_skb = genlmsg_new(upcall_msg_size(skb, upcall_info->userdata), GFP_ATOMIC);
        if (!user_skb) {
                err = -ENOMEM;
                goto out;
        }

        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
        upcall->dp_ifindex = dp_ifindex;

        nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
        ovs_flow_to_nlattrs(upcall_info->key, user_skb);
        nla_nest_end(user_skb, nla);

        if (upcall_info->userdata)
                __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
                          nla_len(upcall_info->userdata),
                          nla_data(upcall_info->userdata));

        nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);

        skb_copy_and_csum_dev(skb, nla_data(nla));

        genlmsg_end(user_skb, upcall);
        err = genlmsg_unicast(net, user_skb, upcall_info->portid);

out:
        kfree_skb(nskb);
        return err;
}

/* Called with ovs_mutex. */
static int flush_flows(struct datapath *dp)
{
        struct flow_table *old_table;
        struct flow_table *new_table;

        old_table = ovsl_dereference(dp->table);
        new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS);
        if (!new_table)
                return -ENOMEM;

        rcu_assign_pointer(dp->table, new_table);

        ovs_flow_tbl_deferred_destroy(old_table);
        return 0;
}
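/* Reserve attr_len bytes at the tail of the action buffer, doubling its
 * size (up to MAX_ACTIONS_BUFSIZE) when it is too small.  May replace
 * *sfa with a larger copy; returns a pointer to the reserved space.
 */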
static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, int attr_len)
{
        struct sw_flow_actions *acts;
        int new_acts_size;
        int req_size = NLA_ALIGN(attr_len);
        int next_offset = offsetof(struct sw_flow_actions, actions) +
                                        (*sfa)->actions_len;

        if (req_size <= (ksize(*sfa) - next_offset))
                goto out;

        new_acts_size = ksize(*sfa) * 2;

        if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
                if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
                        return ERR_PTR(-EMSGSIZE);
                new_acts_size = MAX_ACTIONS_BUFSIZE;
        }

        acts = ovs_flow_actions_alloc(new_acts_size);
        if (IS_ERR(acts))
                return (void *)acts;

        memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len);
        acts->actions_len = (*sfa)->actions_len;
        kfree(*sfa);
        *sfa = acts;

out:
        (*sfa)->actions_len += req_size;
        return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset);
}

static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len)
{
        struct nlattr *a;

        a = reserve_sfa_size(sfa, nla_attr_size(len));
        if (IS_ERR(a))
                return PTR_ERR(a);

        a->nla_type = attrtype;
        a->nla_len = nla_attr_size(len);

        if (data)
                memcpy(nla_data(a), data, len);
        memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len));

        return 0;
}

static inline int add_nested_action_start(struct sw_flow_actions **sfa, int attrtype)
{
        int used = (*sfa)->actions_len;
        int err;

        err = add_action(sfa, attrtype, NULL, 0);
        if (err)
                return err;

        return used;
}

static inline void add_nested_action_end(struct sw_flow_actions *sfa, int st_offset)
{
        struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + st_offset);

        a->nla_len = sfa->actions_len - st_offset;
}

static int validate_and_copy_actions(const struct nlattr *attr,
                                     const struct sw_flow_key *key, int depth,
                                     struct sw_flow_actions **sfa);
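/* Validate an OVS_ACTION_ATTR_SAMPLE attribute (a probability plus a
 * nested action list) and copy it into *sfa, recursing into the nested
 * actions at depth + 1.
 */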
static int validate_and_copy_sample(const struct nlattr *attr,
                                    const struct sw_flow_key *key, int depth,
                                    struct sw_flow_actions **sfa)
{
        const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
        const struct nlattr *probability, *actions;
        const struct nlattr *a;
        int rem, start, err, st_acts;

        memset(attrs, 0, sizeof(attrs));
        nla_for_each_nested(a, attr, rem) {
                int type = nla_type(a);
                if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
                        return -EINVAL;
                attrs[type] = a;
        }
        if (rem)
                return -EINVAL;

        probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
        if (!probability || nla_len(probability) != sizeof(u32))
                return -EINVAL;

        actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
        if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
                return -EINVAL;

        /* validation done, copy sample action. */
        start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE);
        if (start < 0)
                return start;
        err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, nla_data(probability), sizeof(u32));
        if (err)
                return err;
        st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS);
        if (st_acts < 0)
                return st_acts;

        err = validate_and_copy_actions(actions, key, depth + 1, sfa);
        if (err)
                return err;

        add_nested_action_end(*sfa, st_acts);
        add_nested_action_end(*sfa, start);

        return 0;
}

static int validate_tp_port(const struct sw_flow_key *flow_key)
{
        if (flow_key->eth.type == htons(ETH_P_IP)) {
                if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst)
                        return 0;
        } else if (flow_key->eth.type == htons(ETH_P_IPV6)) {
                if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst)
                        return 0;
        }

        return -EINVAL;
}

static int validate_and_copy_set_tun(const struct nlattr *attr,
                                     struct sw_flow_actions **sfa)
{
        struct ovs_key_ipv4_tunnel tun_key;
        int err, start;

        err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &tun_key);
        if (err)
                return err;

        start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET);
        if (start < 0)
                return start;

        err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &tun_key, sizeof(tun_key));
        add_nested_action_end(*sfa, start);

        return err;
}
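/* Validate an OVS_ACTION_ATTR_SET action against the flow key.  Tunnel
 * sets are copied into *sfa here, and *set_tun is set so that the caller
 * skips the generic copy.
 */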
static int validate_set(const struct nlattr *a,
                        const struct sw_flow_key *flow_key,
                        struct sw_flow_actions **sfa,
                        bool *set_tun)
{
        const struct nlattr *ovs_key = nla_data(a);
        int key_type = nla_type(ovs_key);

        /* There can be only one key in an action */
        if (nla_total_size(nla_len(ovs_key)) != nla_len(a))
                return -EINVAL;

        if (key_type > OVS_KEY_ATTR_MAX ||
            (ovs_key_lens[key_type] != nla_len(ovs_key) &&
             ovs_key_lens[key_type] != -1))
                return -EINVAL;
        switch (key_type) {
        const struct ovs_key_ipv4 *ipv4_key;
        const struct ovs_key_ipv6 *ipv6_key;
        int err;

        case OVS_KEY_ATTR_PRIORITY:
        case OVS_KEY_ATTR_SKB_MARK:
        case OVS_KEY_ATTR_ETHERNET:
                break;

        case OVS_KEY_ATTR_TUNNEL:
                *set_tun = true;
                err = validate_and_copy_set_tun(a, sfa);
                if (err)
                        return err;
                break;

        case OVS_KEY_ATTR_IPV4:
                if (flow_key->eth.type != htons(ETH_P_IP))
                        return -EINVAL;

                if (!flow_key->ip.proto)
                        return -EINVAL;

                ipv4_key = nla_data(ovs_key);
                if (ipv4_key->ipv4_proto != flow_key->ip.proto)
                        return -EINVAL;

                if (ipv4_key->ipv4_frag != flow_key->ip.frag)
                        return -EINVAL;

                break;

        case OVS_KEY_ATTR_IPV6:
                if (flow_key->eth.type != htons(ETH_P_IPV6))
                        return -EINVAL;

                if (!flow_key->ip.proto)
                        return -EINVAL;

                ipv6_key = nla_data(ovs_key);
                if (ipv6_key->ipv6_proto != flow_key->ip.proto)
                        return -EINVAL;

                if (ipv6_key->ipv6_frag != flow_key->ip.frag)
                        return -EINVAL;

                if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000)
                        return -EINVAL;

                break;

        case OVS_KEY_ATTR_TCP:
                if (flow_key->ip.proto != IPPROTO_TCP)
                        return -EINVAL;

                return validate_tp_port(flow_key);

        case OVS_KEY_ATTR_UDP:
                if (flow_key->ip.proto != IPPROTO_UDP)
                        return -EINVAL;

                return validate_tp_port(flow_key);

        default:
                return -EINVAL;
        }

        return 0;
}

static int validate_userspace(const struct nlattr *attr)
{
        static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
                [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
                [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
        };
        struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
        int error;

        error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
                                 attr, userspace_policy);
        if (error)
                return error;

        if (!a[OVS_USERSPACE_ATTR_PID] ||
            !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
                return -EINVAL;

        return 0;
}

static int copy_action(const struct nlattr *from,
                       struct sw_flow_actions **sfa)
{
        int totlen = NLA_ALIGN(from->nla_len);
        struct nlattr *to;

        to = reserve_sfa_size(sfa, from->nla_len);
        if (IS_ERR(to))
                return PTR_ERR(to);

        memcpy(to, from, totlen);

        return 0;
}
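/* Walk a userspace-supplied action list, checking each action's type and
 * length against the flow key and copying the validated actions into *sfa.
 * Recursion through sample actions is bounded by SAMPLE_ACTION_DEPTH.
 */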
static int validate_and_copy_actions(const struct nlattr *attr,
                                     const struct sw_flow_key *key,
                                     int depth,
                                     struct sw_flow_actions **sfa)
{
        const struct nlattr *a;
        int rem, err;

        if (depth >= SAMPLE_ACTION_DEPTH)
                return -EOVERFLOW;

        nla_for_each_nested(a, attr, rem) {
                /* Expected argument lengths, (u32)-1 for variable length. */
                static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = {
                        [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32),
                        [OVS_ACTION_ATTR_USERSPACE] = (u32)-1,
                        [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan),
                        [OVS_ACTION_ATTR_POP_VLAN] = 0,
                        [OVS_ACTION_ATTR_SET] = (u32)-1,
                        [OVS_ACTION_ATTR_SAMPLE] = (u32)-1
                };
                const struct ovs_action_push_vlan *vlan;
                int type = nla_type(a);
                bool skip_copy;

                if (type > OVS_ACTION_ATTR_MAX ||
                    (action_lens[type] != nla_len(a) &&
                     action_lens[type] != (u32)-1))
                        return -EINVAL;

                skip_copy = false;
                switch (type) {
                case OVS_ACTION_ATTR_UNSPEC:
                        return -EINVAL;

                case OVS_ACTION_ATTR_USERSPACE:
                        err = validate_userspace(a);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_OUTPUT:
                        if (nla_get_u32(a) >= DP_MAX_PORTS)
                                return -EINVAL;
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        vlan = nla_data(a);
                        if (vlan->vlan_tpid != htons(ETH_P_8021Q))
                                return -EINVAL;
                        if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT)))
                                return -EINVAL;
                        break;

                case OVS_ACTION_ATTR_SET:
                        err = validate_set(a, key, sfa, &skip_copy);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = validate_and_copy_sample(a, key, depth, sfa);
                        if (err)
                                return err;
                        skip_copy = true;
                        break;

                default:
                        return -EINVAL;
                }
                if (!skip_copy) {
                        err = copy_action(a, sfa);
                        if (err)
                                return err;
                }
        }

        if (rem > 0)
                return -EINVAL;

        return 0;
}

static void clear_stats(struct sw_flow *flow)
{
        flow->used = 0;
        flow->tcp_flags = 0;
        flow->packet_count = 0;
        flow->byte_count = 0;
}
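/* OVS_PACKET_CMD_EXECUTE handler: rebuild the packet from the Netlink
 * message, construct a temporary sw_flow for it, and run the supplied
 * actions through the datapath under RCU.
 */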
static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
        struct ovs_header *ovs_header = info->userhdr;
        struct nlattr **a = info->attrs;
        struct sw_flow_actions *acts;
        struct sk_buff *packet;
        struct sw_flow *flow;
        struct datapath *dp;
        struct ethhdr *eth;
        int len;
        int err;
        int key_len;

        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
            !a[OVS_PACKET_ATTR_ACTIONS])
                goto err;

        len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
        packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
        err = -ENOMEM;
        if (!packet)
                goto err;
        skb_reserve(packet, NET_IP_ALIGN);

        nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

        skb_reset_mac_header(packet);
        eth = eth_hdr(packet);

        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have. */
        if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);

        /* Build an sw_flow for sending this packet. */
        flow = ovs_flow_alloc();
        err = PTR_ERR(flow);
        if (IS_ERR(flow))
                goto err_kfree_skb;

        err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
        if (err)
                goto err_flow_free;

        err = ovs_flow_metadata_from_nlattrs(flow, key_len, a[OVS_PACKET_ATTR_KEY]);
        if (err)
                goto err_flow_free;
        acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
        err = PTR_ERR(acts);
        if (IS_ERR(acts))
                goto err_flow_free;

        err = validate_and_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts);
        rcu_assign_pointer(flow->sf_acts, acts);
        if (err)
                goto err_flow_free;

        OVS_CB(packet)->flow = flow;
        packet->priority = flow->key.phy.priority;
        packet->mark = flow->key.phy.skb_mark;

        rcu_read_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;

        local_bh_disable();
        err = ovs_execute_actions(dp, packet);
        local_bh_enable();
        rcu_read_unlock();

        ovs_flow_free(flow);
        return err;

err_unlock:
        rcu_read_unlock();
err_flow_free:
        ovs_flow_free(flow);
err_kfree_skb:
        kfree_skb(packet);
err:
        return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static struct genl_ops dp_packet_genl_ops[] = {
        { .cmd = OVS_PACKET_CMD_EXECUTE,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = packet_policy,
          .doit = ovs_packet_cmd_execute
        }
};
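/* Sum the per-CPU datapath counters into *stats, rereading any CPU's
 * counters that changed mid-copy (u64_stats seqcount retry loop).
 */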
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
        struct flow_table *table;
        int i;

        table = rcu_dereference_check(dp->table, lockdep_ovsl_is_held());
        stats->n_flows = ovs_flow_tbl_count(table);

        stats->n_hit = stats->n_missed = stats->n_lost = 0;
        for_each_possible_cpu(i) {
                const struct dp_stats_percpu *percpu_stats;
                struct dp_stats_percpu local_stats;
                unsigned int start;

                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

                do {
                        start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
                        local_stats = *percpu_stats;
                } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));

                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
                stats->n_lost += local_stats.n_lost;
        }
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
};

static struct genl_family dp_flow_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_FLOW_FAMILY,
        .version = OVS_FLOW_VERSION,
        .maxattr = OVS_FLOW_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
};

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
        .name = OVS_FLOW_MCGROUP
};

static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb);
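/* Serialize a copied sample action back into Netlink attribute form,
 * recursing into its nested action list via actions_to_attr().
 */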
static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb)
{
        const struct nlattr *a;
        struct nlattr *start;
        int err = 0, rem;

        start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE);
        if (!start)
                return -EMSGSIZE;

        nla_for_each_nested(a, attr, rem) {
                int type = nla_type(a);
                struct nlattr *st_sample;

                switch (type) {
                case OVS_SAMPLE_ATTR_PROBABILITY:
                        if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, sizeof(u32), nla_data(a)))
                                return -EMSGSIZE;
                        break;
                case OVS_SAMPLE_ATTR_ACTIONS:
                        st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS);
                        if (!st_sample)
                                return -EMSGSIZE;
                        err = actions_to_attr(nla_data(a), nla_len(a), skb);
                        if (err)
                                return err;
                        nla_nest_end(skb, st_sample);
                        break;
                }
        }

        nla_nest_end(skb, start);
        return err;
}

static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
{
        const struct nlattr *ovs_key = nla_data(a);
        int key_type = nla_type(ovs_key);
        struct nlattr *start;
        int err;

        switch (key_type) {
        case OVS_KEY_ATTR_IPV4_TUNNEL:
                start = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
                if (!start)
                        return -EMSGSIZE;

                err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key));
                if (err)
                        return err;
                nla_nest_end(skb, start);
                break;
        default:
                if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key))
                        return -EMSGSIZE;
                break;
        }

        return 0;
}

static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb)
{
        const struct nlattr *a;
        int rem, err;

        nla_for_each_attr(a, attr, len, rem) {
                int type = nla_type(a);

                switch (type) {
                case OVS_ACTION_ATTR_SET:
                        err = set_action_to_attr(a, skb);
                        if (err)
                                return err;
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample_action_to_attr(a, skb);
                        if (err)
                                return err;
                        break;
                default:
                        if (nla_put(skb, type, nla_len(a), nla_data(a)))
                                return -EMSGSIZE;
                        break;
                }
        }

        return 0;
}
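/* Upper bound on the Netlink message size needed to dump one flow. */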
static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
        return NLMSG_ALIGN(sizeof(struct ovs_header))
                + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
                + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
                + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
                + nla_total_size(8) /* OVS_FLOW_ATTR_USED */
                + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
}

/* Called with ovs_mutex. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
                                  struct sk_buff *skb, u32 portid,
                                  u32 seq, u32 flags, u8 cmd)
{
        const int skb_orig_len = skb->len;
        struct nlattr *start;
        struct ovs_flow_stats stats;
        struct ovs_header *ovs_header;
        struct nlattr *nla;
        unsigned long used;
        u8 tcp_flags;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
        if (!nla)
                goto nla_put_failure;
        err = ovs_flow_to_nlattrs(&flow->key, skb);
        if (err)
                goto error;
        nla_nest_end(skb, nla);

        spin_lock_bh(&flow->lock);
        used = flow->used;
        stats.n_packets = flow->packet_count;
        stats.n_bytes = flow->byte_count;
        tcp_flags = flow->tcp_flags;
        spin_unlock_bh(&flow->lock);

        if (used &&
            nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
                goto nla_put_failure;

        if (stats.n_packets &&
            nla_put(skb, OVS_FLOW_ATTR_STATS,
                    sizeof(struct ovs_flow_stats), &stats))
                goto nla_put_failure;

        if (tcp_flags &&
            nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
                goto nla_put_failure;

        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual for
         * Netlink but individual action lists can be longer than
         * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
         * The userspace caller can always fetch the actions separately if it
         * really wants them.  (Most userspace callers in fact don't care.)
         *
         * This can only fail for dump operations because the skb is always
         * properly sized for single flows.
         */
        start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
        if (start) {
                const struct sw_flow_actions *sf_acts;

                sf_acts = rcu_dereference_check(flow->sf_acts,
                                                lockdep_ovsl_is_held());

                err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
                if (!err)
                        nla_nest_end(skb, start);
                else {
                        if (skb_orig_len)
                                goto error;

                        nla_nest_cancel(skb, start);
                }
        } else if (skb_orig_len)
                goto nla_put_failure;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

static struct sk_buff *ovs_flow_cmd_alloc_info(struct sw_flow *flow)
{
        const struct sw_flow_actions *sf_acts;

        sf_acts = ovsl_dereference(flow->sf_acts);

        return genlmsg_new(ovs_flow_cmd_msg_size(sf_acts), GFP_KERNEL);
}

static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
                                               struct datapath *dp,
                                               u32 portid, u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = ovs_flow_cmd_alloc_info(flow);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_flow_cmd_fill_info(flow, dp, skb, portid, seq, 0, cmd);
        BUG_ON(retval < 0);
        return skb;
}
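/* Shared OVS_FLOW_CMD_NEW / OVS_FLOW_CMD_SET handler: validates the key
 * and actions, then either inserts a new flow (expanding the table if
 * necessary) or swaps the actions on an existing one.
 */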
static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sw_flow *flow;
        struct sk_buff *reply;
        struct datapath *dp;
        struct flow_table *table;
        struct sw_flow_actions *acts = NULL;
        int error;
        int key_len;

        /* Extract key. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY])
                goto error;
        error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (error)
                goto error;

        /* Validate actions. */
        if (a[OVS_FLOW_ATTR_ACTIONS]) {
                acts = ovs_flow_actions_alloc(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
                error = PTR_ERR(acts);
                if (IS_ERR(acts))
                        goto error;

                error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0, &acts);
                if (error)
                        goto err_kfree;
        } else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
                error = -EINVAL;
                goto error;
        }

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        error = -ENODEV;
        if (!dp)
                goto err_unlock_ovs;

        table = ovsl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow) {
                /* Bail out if we're not allowed to create a new flow. */
                error = -ENOENT;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
                        goto err_unlock_ovs;

                /* Expand table, if necessary, to make room. */
                if (ovs_flow_tbl_need_to_expand(table)) {
                        struct flow_table *new_table;

                        new_table = ovs_flow_tbl_expand(table);
                        if (!IS_ERR(new_table)) {
                                rcu_assign_pointer(dp->table, new_table);
                                ovs_flow_tbl_deferred_destroy(table);
                                table = ovsl_dereference(dp->table);
                        }
                }

                /* Allocate flow. */
                flow = ovs_flow_alloc();
                if (IS_ERR(flow)) {
                        error = PTR_ERR(flow);
                        goto err_unlock_ovs;
                }
                clear_stats(flow);

                rcu_assign_pointer(flow->sf_acts, acts);

                /* Put flow in bucket. */
                ovs_flow_tbl_insert(table, flow, &key, key_len);

                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                                info->snd_seq,
                                                OVS_FLOW_CMD_NEW);
        } else {
                /* We found a matching flow. */
                struct sw_flow_actions *old_acts;

                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
                 * because Generic Netlink treats the latter as a dump
                 * request.  We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed.
                 */
                error = -EEXIST;
                if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW &&
                    info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
                        goto err_unlock_ovs;

                /* Update actions. */
                old_acts = ovsl_dereference(flow->sf_acts);
                rcu_assign_pointer(flow->sf_acts, acts);
                ovs_flow_deferred_free_acts(old_acts);

                reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                                info->snd_seq, OVS_FLOW_CMD_NEW);

                /* Clear stats. */
                if (a[OVS_FLOW_ATTR_CLEAR]) {
                        spin_lock_bh(&flow->lock);
                        clear_stats(flow);
                        spin_unlock_bh(&flow->lock);
                }
        }
        ovs_unlock();

        if (!IS_ERR(reply))
                ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
        else
                netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
                                ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
        return 0;

err_unlock_ovs:
        ovs_unlock();
err_kfree:
        kfree(acts);
error:
        return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct flow_table *table;
        int err;
        int key_len;

        if (!a[OVS_FLOW_ATTR_KEY])
                return -EINVAL;
        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
                return err;

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                err = -ENODEV;
                goto unlock;
        }

        table = ovsl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow) {
                err = -ENOENT;
                goto unlock;
        }

        reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
                                        info->snd_seq, OVS_FLOW_CMD_NEW);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                goto unlock;
        }

        ovs_unlock();
        return genlmsg_reply(reply, info);
unlock:
        ovs_unlock();
        return err;
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct flow_table *table;
        int err;
        int key_len;

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                err = -ENODEV;
                goto unlock;
        }

        if (!a[OVS_FLOW_ATTR_KEY]) {
                err = flush_flows(dp);
                goto unlock;
        }
        err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
        if (err)
                goto unlock;

        table = ovsl_dereference(dp->table);
        flow = ovs_flow_tbl_lookup(table, &key, key_len);
        if (!flow) {
                err = -ENOENT;
                goto unlock;
        }

        reply = ovs_flow_cmd_alloc_info(flow);
        if (!reply) {
                err = -ENOMEM;
                goto unlock;
        }

        ovs_flow_tbl_remove(table, flow);

        err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
                                     info->snd_seq, 0, OVS_FLOW_CMD_DEL);
        BUG_ON(err < 0);

        ovs_flow_deferred_free(flow);
        ovs_unlock();

        ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
        return 0;
unlock:
        ovs_unlock();
        return err;
}
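/* Netlink dump callback: resumes from the (bucket, obj) cursor saved in
 * cb->args and emits one OVS_FLOW_CMD_NEW record per flow.
 */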
static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
        struct flow_table *table;

        rcu_read_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                rcu_read_unlock();
                return -ENODEV;
        }

        table = rcu_dereference(dp->table);
        for (;;) {
                struct sw_flow *flow;
                u32 bucket, obj;

                bucket = cb->args[0];
                obj = cb->args[1];
                flow = ovs_flow_tbl_next(table, &bucket, &obj);
                if (!flow)
                        break;

                if (ovs_flow_cmd_fill_info(flow, dp, skb,
                                           NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           OVS_FLOW_CMD_NEW) < 0)
                        break;

                cb->args[0] = bucket;
                cb->args[1] = obj;
        }
        rcu_read_unlock();
        return skb->len;
}

static struct genl_ops dp_flow_genl_ops[] = {
        { .cmd = OVS_FLOW_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_new_or_set
        },
        { .cmd = OVS_FLOW_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_del
        },
        { .cmd = OVS_FLOW_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_get,
          .dumpit = ovs_flow_cmd_dump
        },
        { .cmd = OVS_FLOW_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_new_or_set,
        },
};

static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
        [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
};

static struct genl_family dp_datapath_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_DATAPATH_FAMILY,
        .version = OVS_DATAPATH_VERSION,
        .maxattr = OVS_DP_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
        .name = OVS_DATAPATH_MCGROUP
};

static size_t ovs_dp_cmd_msg_size(void)
{
        size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

        msgsize += nla_total_size(IFNAMSIZ);
        msgsize += nla_total_size(sizeof(struct ovs_dp_stats));

        return msgsize;
}

static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
                                u32 portid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_dp_stats dp_stats;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                goto error;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        rcu_read_lock();
        err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
        rcu_read_unlock();
        if (err)
                goto nla_put_failure;

        get_dp_stats(dp, &dp_stats);
        if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
                goto nla_put_failure;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        genlmsg_cancel(skb, ovs_header);
error:
        return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_build_info(struct datapath *dp, u32 portid,
                                             u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_dp_cmd_fill_info(dp, skb, portid, seq, 0, cmd);
        if (retval < 0) {
                kfree_skb(skb);
                return ERR_PTR(retval);
        }
        return skb;
}

/* Called with ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
                                        struct ovs_header *ovs_header,
                                        struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
        struct datapath *dp;

        if (!a[OVS_DP_ATTR_NAME])
                dp = get_dp(net, ovs_header->dp_ifindex);
        else {
                struct vport *vport;

                rcu_read_lock();
                vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
                dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
                rcu_read_unlock();
        }
        return dp ? dp : ERR_PTR(-ENODEV);
}
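/* OVS_DP_CMD_NEW handler: allocate a datapath, its flow table, per-CPU
 * stats and vport hash table, then create the OVSP_LOCAL internal port.
 */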
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct datapath *dp;
	struct vport *vport;
	struct ovs_net *ovs_net;
	int err, i;

	err = -EINVAL;
	if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
		goto err;

	ovs_lock();

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_unlock_ovs;

	ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS));
	if (!dp->table)
		goto err_free_dp;

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_table;
	}

	dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dp->ports) {
		err = -ENOMEM;
		goto err_destroy_percpu;
	}

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
		INIT_HLIST_HEAD(&dp->ports[i]);

	/* Set up our datapath device. */
	parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
	parms.type = OVS_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = OVSP_LOCAL;
	parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		if (err == -EBUSY)
			err = -EEXIST;

		goto err_destroy_ports_array;
	}

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto err_destroy_local_port;

	ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
	list_add_tail_rcu(&dp->list_node, &ovs_net->dps);

	ovs_unlock();

	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);
	return 0;

err_destroy_local_port:
	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
err_destroy_ports_array:
	kfree(dp->ports);
err_destroy_percpu:
	free_percpu(dp->stats_percpu);
err_destroy_table:
	ovs_flow_tbl_destroy(ovsl_dereference(dp->table));
err_free_dp:
	release_net(ovs_dp_get_net(dp));
	kfree(dp);
err_unlock_ovs:
	ovs_unlock();
err:
	return err;
}

/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
	int i;

	for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;
		struct hlist_node *n;

		hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
			if (vport->port_no != OVSP_LOCAL)
				ovs_dp_detach_port(vport);
	}

	list_del_rcu(&dp->list_node);

	/* OVSP_LOCAL is the datapath's internal port.  Make sure that all
	 * other ports in the datapath are destroyed before freeing the
	 * datapath itself.
	 */
	ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));

	call_rcu(&dp->rcu, destroy_dp_rcu);
}
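
/* Handler for OVS_DP_CMD_DEL: looks up the datapath by name or ifindex and
 * builds the deletion notification before tearing the datapath down, so the
 * reply can still be filled from live state.
 */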
static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto unlock;

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto unlock;

	__dp_destroy(dp);
	ovs_unlock();

	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);

	return 0;
unlock:
	ovs_unlock();
	return err;
}
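
/* Handler for OVS_DP_CMD_SET.  The datapath has no modifiable attributes
 * here, so the handler just rebuilds its description and notifies
 * listeners; if the reply cannot be allocated, the error is reported to
 * the multicast group via netlink_set_err() rather than failing the
 * request.
 */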
static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto unlock;

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		netlink_set_err(sock_net(skb->sk)->genl_sock, 0,
				ovs_dp_datapath_multicast_group.id, err);
		err = 0;
		goto unlock;
	}

	ovs_unlock();
	ovs_notify(reply, info, &ovs_dp_datapath_multicast_group);

	return 0;
unlock:
	ovs_unlock();
	return err;
}
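
/* Handler for OVS_DP_CMD_GET: returns a single datapath's description as a
 * unicast reply to the requester.
 */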
static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *reply;
	struct datapath *dp;
	int err;

	ovs_lock();
	dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
	if (IS_ERR(dp)) {
		err = PTR_ERR(dp);
		goto unlock;
	}

	reply = ovs_dp_cmd_build_info(dp, info->snd_portid,
				      info->snd_seq, OVS_DP_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		goto unlock;
	}

	ovs_unlock();
	return genlmsg_reply(reply, info);

unlock:
	ovs_unlock();
	return err;
}
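
/* Dump callback for OVS_DP_CMD_GET: walks the per-netns datapath list under
 * the RCU read lock, resuming from the index saved in cb->args[0] across
 * successive dump invocations.
 */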
static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
	struct datapath *dp;
	int skip = cb->args[0];
	int i = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
		if (i >= skip &&
		    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 OVS_DP_CMD_NEW) < 0)
			break;
		i++;
	}
	rcu_read_unlock();

	cb->args[0] = i;

	return skb->len;
}

static struct genl_ops dp_datapath_genl_ops[] = {
	{ .cmd = OVS_DP_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_new
	},
	{ .cmd = OVS_DP_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_del
	},
	{ .cmd = OVS_DP_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_get,
	  .dumpit = ovs_dp_cmd_dump
	},
	{ .cmd = OVS_DP_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = datapath_policy,
	  .doit = ovs_dp_cmd_set,
	},
};

static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
	[OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
	[OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
	[OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static struct genl_family dp_vport_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct ovs_header),
	.name = OVS_VPORT_FAMILY,
	.version = OVS_VPORT_VERSION,
	.maxattr = OVS_VPORT_ATTR_MAX,
	.netnsok = true,
	.parallel_ops = true,
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
	.name = OVS_VPORT_MCGROUP
};

/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
				   u32 portid, u32 seq, u32 flags, u8 cmd)
{
	struct ovs_header *ovs_header;
	struct ovs_vport_stats vport_stats;
	int err;

	ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
				 flags, cmd);
	if (!ovs_header)
		return -EMSGSIZE;

	ovs_header->dp_ifindex = get_dpifindex(vport->dp);

	if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
	    nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
	    nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
		goto nla_put_failure;

	ovs_vport_get_stats(vport, &vport_stats);
	if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
		    &vport_stats))
		goto nla_put_failure;

	err = ovs_vport_get_options(vport, skb);
	if (err == -EMSGSIZE)
		goto error;

	return genlmsg_end(skb, ovs_header);

nla_put_failure:
	err = -EMSGSIZE;
error:
	genlmsg_cancel(skb, ovs_header);
	return err;
}

/* Called with ovs_mutex or RCU read lock. */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
					 u32 seq, u8 cmd)
{
	struct sk_buff *skb;
	int retval;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
	BUG_ON(retval < 0);

	return skb;
}

/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
				  struct ovs_header *ovs_header,
				  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[OVS_VPORT_ATTR_NAME]) {
		vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		if (ovs_header->dp_ifindex &&
		    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
			return ERR_PTR(-ENODEV);
		return vport;
	} else if (a[OVS_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EFBIG);

		dp = get_dp(net, ovs_header->dp_ifindex);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = ovs_vport_ovsl_rcu(dp, port_no);
		if (!vport)
			return ERR_PTR(-ENODEV);
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}
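
/* Handler for OVS_VPORT_CMD_NEW: creates a vport on an existing datapath.
 * The port number either comes from OVS_VPORT_ATTR_PORT_NO (-EBUSY if that
 * slot is taken) or is the lowest free number starting from 1, since 0 is
 * reserved for OVSP_LOCAL.
 */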
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct vport *vport;
	struct datapath *dp;
	u32 port_no;
	int err;

	err = -EINVAL;
	if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
	    !a[OVS_VPORT_ATTR_UPCALL_PID])
		goto exit;

	ovs_lock();
	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock;

		vport = ovs_vport_ovsl(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock;
			}
			vport = ovs_vport_ovsl(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
	parms.options = a[OVS_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;
	parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	err = 0;
	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,
					 OVS_VPORT_CMD_NEW);
	if (IS_ERR(reply)) {
		err = PTR_ERR(reply);
		ovs_dp_detach_port(vport);
		goto exit_unlock;
	}

	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);

exit_unlock:
	ovs_unlock();
exit:
	return err;
}
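
/* Handler for OVS_VPORT_CMD_SET: updates a vport's options and/or upcall
 * portid.  Changing the port type after creation is rejected with -EINVAL.
 */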
static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (a[OVS_VPORT_ATTR_TYPE] &&
	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!reply) {
		err = -ENOMEM;
		goto exit_unlock;
	}

	if (a[OVS_VPORT_ATTR_OPTIONS]) {
		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
		if (err)
			goto exit_free;
	}

	if (a[OVS_VPORT_ATTR_UPCALL_PID])
		vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);

	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
				      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
	BUG_ON(err < 0);

	ovs_unlock();
	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
	return 0;

exit_free:
	kfree_skb(reply);
exit_unlock:
	ovs_unlock();
	return err;
}
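
/* Handler for OVS_VPORT_CMD_DEL.  The OVSP_LOCAL internal port cannot be
 * deleted on its own; it only goes away when the whole datapath does.
 */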
static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	ovs_lock();
	vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	if (vport->port_no == OVSP_LOCAL) {
		err = -EINVAL;
		goto exit_unlock;
	}

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_DEL);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	err = 0;
	ovs_dp_detach_port(vport);

	ovs_notify(reply, info, &ovs_dp_vport_multicast_group);

exit_unlock:
	ovs_unlock();
	return err;
}
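
/* Handler for OVS_VPORT_CMD_GET: looks the vport up under the RCU read lock
 * only, so it can run without taking ovs_mutex.
 */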
static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr **a = info->attrs;
	struct ovs_header *ovs_header = info->userhdr;
	struct sk_buff *reply;
	struct vport *vport;
	int err;

	rcu_read_lock();
	vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	reply = ovs_vport_cmd_build_info(vport, info->snd_portid,
					 info->snd_seq, OVS_VPORT_CMD_NEW);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	rcu_read_unlock();

	return genlmsg_reply(reply, info);

exit_unlock:
	rcu_read_unlock();
	return err;
}
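
/* Dump callback for OVS_VPORT_CMD_GET: iterates the datapath's vport hash
 * table, saving the current bucket and in-bucket offset in cb->args[] so
 * the dump can resume where it left off.
 */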
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
	struct datapath *dp;
	int bucket = cb->args[0], skip = cb->args[1];
	int i, j = 0;

	dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
	if (!dp)
		return -ENODEV;

	rcu_read_lock();
	for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
		struct vport *vport;

		j = 0;
		hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
			if (j >= skip &&
			    ovs_vport_cmd_fill_info(vport, skb,
						    NETLINK_CB(cb->skb).portid,
						    cb->nlh->nlmsg_seq,
						    NLM_F_MULTI,
						    OVS_VPORT_CMD_NEW) < 0)
				goto out;

			j++;
		}
		skip = 0;
	}
out:
	rcu_read_unlock();

	cb->args[0] = i;
	cb->args[1] = j;

	return skb->len;
}

static struct genl_ops dp_vport_genl_ops[] = {
	{ .cmd = OVS_VPORT_CMD_NEW,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_new
	},
	{ .cmd = OVS_VPORT_CMD_DEL,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_del
	},
	{ .cmd = OVS_VPORT_CMD_GET,
	  .flags = 0,		    /* OK for unprivileged users. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_get,
	  .dumpit = ovs_vport_cmd_dump
	},
	{ .cmd = OVS_VPORT_CMD_SET,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = vport_policy,
	  .doit = ovs_vport_cmd_set,
	},
};

struct genl_family_and_ops {
	struct genl_family *family;
	struct genl_ops *ops;
	int n_ops;
	struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
	{ &dp_datapath_genl_family,
	  dp_datapath_genl_ops, ARRAY_SIZE(dp_datapath_genl_ops),
	  &ovs_dp_datapath_multicast_group },
	{ &dp_vport_genl_family,
	  dp_vport_genl_ops, ARRAY_SIZE(dp_vport_genl_ops),
	  &ovs_dp_vport_multicast_group },
	{ &dp_flow_genl_family,
	  dp_flow_genl_ops, ARRAY_SIZE(dp_flow_genl_ops),
	  &ovs_dp_flow_multicast_group },
	{ &dp_packet_genl_family,
	  dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
	  NULL },
};

static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++)
		genl_unregister_family(dp_genl_families[i].family);
}
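
/* Registers each generic netlink family in dp_genl_families together with
 * its multicast group, unregistering the families registered so far if any
 * step fails.
 */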
static int dp_register_genl(void)
{
	int n_registered;
	int err;
	int i;

	n_registered = 0;
	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		const struct genl_family_and_ops *f = &dp_genl_families[i];

		err = genl_register_family_with_ops(f->family, f->ops,
						    f->n_ops);
		if (err)
			goto error;
		n_registered++;

		if (f->group) {
			err = genl_register_mc_group(f->family, f->group);
			if (err)
				goto error;
		}
	}

	return 0;

error:
	dp_unregister_genl(n_registered);
	return err;
}
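
/* Periodic work item: swaps each datapath's flow table for a rehashed copy
 * built by ovs_flow_tbl_rehash(), freeing the old table after an RCU grace
 * period, then reschedules itself.  Presumably the rehash re-seeds the hash
 * so bucket chains stay balanced over time.
 */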
static void rehash_flow_table(struct work_struct *work)
{
	struct datapath *dp;
	struct net *net;

	ovs_lock();
	rtnl_lock();

	for_each_net(net) {
		struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

		list_for_each_entry(dp, &ovs_net->dps, list_node) {
			struct flow_table *old_table = ovsl_dereference(dp->table);
			struct flow_table *new_table;

			new_table = ovs_flow_tbl_rehash(old_table);
			if (!IS_ERR(new_table)) {
				rcu_assign_pointer(dp->table, new_table);
				ovs_flow_tbl_deferred_destroy(old_table);
			}
		}
	}

	rtnl_unlock();
	ovs_unlock();

	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);
}

static int __net_init ovs_init_net(struct net *net)
{
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	INIT_LIST_HEAD(&ovs_net->dps);
	INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
	return 0;
}

static void __net_exit ovs_exit_net(struct net *net)
{
	struct datapath *dp, *dp_next;
	struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

	ovs_lock();
	list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
		__dp_destroy(dp);
	ovs_unlock();

	cancel_work_sync(&ovs_net->dp_notify_work);
}

static struct pernet_operations ovs_net_ops = {
	.init = ovs_init_net,
	.exit = ovs_exit_net,
	.id   = &ovs_net_id,
	.size = sizeof(struct ovs_net),
};
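
/* Module init: brings up the flow and vport subsystems, the per-netns
 * state, the netdevice notifier and the generic netlink families, then
 * kicks off the periodic flow-table rehash.  Failures unwind in reverse
 * order of initialization.
 */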
static int __init dp_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));

	pr_info("Open vSwitch switching datapath\n");

	err = ovs_flow_init();
	if (err)
		goto error;

	err = ovs_vport_init();
	if (err)
		goto error_flow_exit;

	err = register_pernet_device(&ovs_net_ops);
	if (err)
		goto error_vport_exit;

	err = register_netdevice_notifier(&ovs_dp_device_notifier);
	if (err)
		goto error_netns_exit;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_notifier;

	schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL);

	return 0;

error_unreg_notifier:
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
	unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
	ovs_vport_exit();
error_flow_exit:
	ovs_flow_exit();
error:
	return err;
}
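
/* Module exit: undoes dp_init() in reverse order.  The rcu_barrier() waits
 * for outstanding RCU callbacks (such as the deferred datapath and
 * flow-table frees) to finish before the vport and flow subsystems are
 * torn down.
 */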
static void dp_cleanup(void)
{
	cancel_delayed_work_sync(&rehash_flow_wq);
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	unregister_netdevice_notifier(&ovs_dp_device_notifier);
	unregister_pernet_device(&ovs_net_ops);
	rcu_barrier();
	ovs_vport_exit();
	ovs_flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");