/*
 *  TUN - Universal TUN/TAP device driver.
 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 *  Changes:
 *
 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 *    Add TUNSETLINK ioctl to set the link encapsulation
 *
 *  Mark Smith <markzzzsmith@yahoo.com.au>
 *    Use eth_random_addr() for tap MAC address.
 *
 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
 *    Fixes in packet dropping, queue length setting and queue wakeup.
 *    Increased default tx queue length.
 *    Added ethtool API.
 *    Minor cleanups
 *
 *  Daniel Podlejski <underley@underley.eu.org>
 *    Modifications for 2.3.99-pre5 kernel.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>

#include <asm/uaccess.h>

/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */

#ifdef TUN_DEBUG
static int debug;

#define tun_debug(level, tun, fmt, args...) \
do { \
	if (tun->debug) \
		netdev_printk(level, tun->dev, fmt, ##args); \
} while (0)
#define DBG1(level, fmt, args...) \
do { \
	if (debug == 2) \
		printk(level fmt, ##args); \
} while (0)
#else
#define tun_debug(level, tun, fmt, args...) \
do { \
	if (0) \
		netdev_printk(level, tun->dev, fmt, ##args); \
} while (0)
#define DBG1(level, fmt, args...) \
do { \
	if (0) \
		printk(level fmt, ##args); \
} while (0)
#endif

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int count;	/* Number of addrs. Zero means disabled */
	u32 mask[2];		/* Mask of the hashed addrs */
	unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* DEFAULT_MAX_NUM_RSS_QUEUES was chosen so that the rx/tx queues allocated
 * for the netdevice fit in one page, which keeps the memory allocation
 * reliable.  TODO: increase the limit. */
#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) and serves as one transmit queue for the tuntap device. The
 * sock_fprog and tap_filter are kept in tun_struct since they are used for
 * filtering on the netdevice, not for a specific queue (at least I didn't
 * see the requirement for this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to
 * the other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;
	struct net *net;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	u16 queue_index;
	struct list_head next;
	struct tun_struct *detached;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	int queue_index;
	unsigned long updated;
};

#define TUN_NUM_FLOW_ENTRIES 1024

/* Since the socket was moved into tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when a file is attached to a persistent device.
 */
struct tun_struct {
	struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
	unsigned int numqueues;
	unsigned int flags;
	kuid_t owner;
	kgid_t group;

	struct net_device *dev;
	netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			   NETIF_F_TSO6|NETIF_F_UFO)

	int vnet_hdr_sz;
	int sndbuf;
	struct tap_filter txflt;
	struct sock_fprog fprog;
	/* protected by rtnl lock */
	bool filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
};

static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & 0x3ff;
}
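
/* Note on the mask above: 0x3ff is TUN_NUM_FLOW_ENTRIES - 1, so the hash
 * function simply keeps the low ten bits of rxhash to pick one of the 1024
 * flow buckets; e.g. rxhash 0x12345 lands in bucket 0x345.
 */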

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_cleanup(unsigned long data)
{
	struct tun_struct *tun = (struct tun_struct *)data;
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;
			count++;
			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies))
				tun_flow_delete(tun, e);
			else if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	/* There is a small possibility of out-of-order delivery while a flow
	 * switches queues; not worth optimizing. */
	if (tun->numqueues == 1 || tfile->detached)
		goto unlock;

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

unlock:
	rcu_read_unlock();
}

/* We try to identify a flow through its rxhash first. The reason that
 * we do not check the rxq no. is because some cards (e.g. the 82599)
 * choose the rxq based on the txq where the last packet of the flow went
 * out. As the userspace application moves between processors, we may get
 * a different rxq no. here. If we could not get an rxhash, then we would
 * hope the rxq no. may help here.
 */
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	rcu_read_lock();
	numqueues = tun->numqueues;

	txq = skb_get_rxhash(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e)
			txq = e->queue_index;
		else
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	rcu_read_unlock();
	return txq;
}
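
/* The multiply-and-shift fallback above maps a 32-bit hash uniformly onto
 * [0, numqueues) without a division: for example, hash 0x80000000 with four
 * queues gives ((u64)0x80000000 * 4) >> 32 == 2.
 */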

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;
	struct net_device *dev;

	tun = rtnl_dereference(tfile->tun);

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);
		dev = tun->dev;

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			rcu_assign_pointer(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		skb_queue_purge(&tfile->sk.sk_receive_queue);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & TUN_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}

		BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
				 &tfile->socket.flags));
		sk_release_kernel(&tfile->sk);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	rtnl_lock();
	__tun_detach(tfile, clean);
	rtnl_unlock();
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		wake_up_all(&tfile->wq.wait);
		rcu_assign_pointer(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		wake_up_all(&tfile->wq.wait);
		rcu_assign_pointer(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		/* Drop read queue */
		skb_queue_purge(&tfile->sk.sk_receive_queue);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		skb_queue_purge(&tfile->sk.sk_receive_queue);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & TUN_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device */
	if (tun->filter_attached == true) {
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		if (err)
			goto out;
	}
	tfile->queue_index = tun->numqueues;
	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached)
		tun_enable_queue(tfile);
	else
		sock_hold(&tfile->sk);

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}

static struct tun_struct *__tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static struct tun_struct *tun_get(struct file *file)
{
	return __tun_get(file->private_data);
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = kmalloc(alen, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	if (copy_from_user(addr, arg + sizeof(uf), alen)) {
		err = -EFAULT;
		goto done;
	}

	/* The filter is updated without holding any locks, which is
	 * perfectly safe: we disable it first, and in the worst case
	 * we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto done;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;

done:
	kfree(addr);
	return err;
}
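
/* Illustrative userspace use of update_filter() (a sketch, not part of this
 * file; tap_fd, mac0 and mac1 are hypothetical): TUNSETTXFILTER takes a
 * struct tun_filter immediately followed by uf.count Ethernet addresses.
 * The first FLT_EXACT_COUNT of them become exact-match entries; any further
 * ones must be multicast and are folded into the hash mask.
 *
 *	struct { struct tun_filter f; __u8 a[2][ETH_ALEN]; } req = { 0 };
 *	req.f.count = 2;
 *	memcpy(req.a[0], mac0, ETH_ALEN);
 *	memcpy(req.a[1], mac1, ETH_ALEN);
 *	ioctl(tap_fd, TUNSETTXFILTER, &req);
 */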

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);
	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (txq >= tun->numqueues)
		goto drop;

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	/* Limit the number of packets queued by dividing txq length with the
	 * number of queues.
	 */
	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
	    >= dev->tx_queue_len / tun->numqueues)
		goto drop;

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time. */
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		goto drop;
	skb_orphan(skb);

	nf_reset(skb);

	/* Enqueue packet */
	skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
				   POLLRDNORM | POLLRDBAND);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NETDEV_TX_OK;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

#define MIN_MTU 68
#define MAX_MTU 65535

static int
tun_net_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tun_poll_controller(struct net_device *dev)
{
	/*
	 * Tun only receives frames when:
	 * 1) the char device endpoint gets data from user space
	 * 2) the tun socket gets a sendmsg call from user space
	 * Since both of those are synchronous operations, we are guaranteed
	 * never to have pending data when we poll for it, so there is
	 * nothing to do here but return.
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole.
	 */
	return;
}
#endif

static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_change_mtu		= tun_net_change_mtu,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
};

static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_change_mtu		= tun_net_change_mtu,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
};

static int tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));

	return 0;
}
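
/* The garbage-collection timer is armed once here, re-armed from
 * tun_flow_cleanup() only while entries remain, and kicked again from
 * tun_flow_update() when a new entry is created, so an idle flow table
 * stops firing timers.
 */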

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}

/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		dev->netdev_ops = &tun_netdev_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
		break;

	case TUN_TAP_DEV:
		dev->netdev_ops = &tap_netdev_ops;

		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
		break;
	}
}

/* Character device part */

/* Poll */
static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
	struct sock *sk;
	unsigned int mask = 0;

	if (!tun)
		return POLLERR;

	sk = tfile->socket.sk;

	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");

	poll_wait(file, &tfile->wq.wait, wait);

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     sock_writeable(sk)))
		mask |= POLLOUT | POLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = POLLERR;

	tun_put(tun);
	return mask;
}

/* prepad is the amount to reserve at front.  len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
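
/* Example of the sizing above (assuming 4 KiB pages): a 9000-byte write
 * with a 128-byte linear hint yields a paged skb with 128 linear bytes and
 * 8872 bytes in page frags, while a 500-byte write falls under PAGE_SIZE
 * and is allocated fully linear.
 */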

/* set skb frags from iovec, this can move to core network code for reuse */
static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
				  int offset, size_t count)
{
	int len = iov_length(from, count) - offset;
	int copy = skb_headlen(skb);
	int size, offset1 = 0;
	int i = 0;

	/* Skip over from offset */
	while (count && (offset >= from->iov_len)) {
		offset -= from->iov_len;
		++from;
		--count;
	}

	/* copy up to skb headlen */
	while (count && (copy > 0)) {
		size = min_t(unsigned int, copy, from->iov_len - offset);
		if (copy_from_user(skb->data + offset1, from->iov_base + offset,
				   size))
			return -EFAULT;
		if (copy > size) {
			++from;
			--count;
			offset = 0;
		} else
			offset += size;
		copy -= size;
		offset1 += size;
	}

	if (len == offset1)
		return 0;

	while (count--) {
		struct page *page[MAX_SKB_FRAGS];
		int num_pages;
		unsigned long base;
		unsigned long truesize;

		len = from->iov_len - offset;
		if (!len) {
			offset = 0;
			++from;
			continue;
		}
		base = (unsigned long)from->iov_base + offset;
		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
		if (i + size > MAX_SKB_FRAGS)
			return -EMSGSIZE;
		num_pages = get_user_pages_fast(base, size, 0, &page[i]);
		if (num_pages != size) {
			for (i = 0; i < num_pages; i++)
				put_page(page[i]);
			return -EFAULT;
		}
		truesize = size * PAGE_SIZE;
		skb->data_len += len;
		skb->len += len;
		skb->truesize += truesize;
		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
		while (len) {
			int off = base & ~PAGE_MASK;
			int size = min_t(int, len, PAGE_SIZE - off);
			__skb_fill_page_desc(skb, i, page[i], off, size);
			skb_shinfo(skb)->nr_frags++;
			/* increase sk_wmem_alloc */
			base += size;
			len -= size;
			i++;
		}
		offset = 0;
		++from;
	}
	return 0;
}
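
/* Worked example of the page math above (assuming 4 KiB pages): an iovec
 * segment whose base sits at in-page offset 0xff0 with len 0x30 needs
 * ((0xff0 + 0x30 + 0xfff) >> 12) == 2 pinned pages, so two frags are
 * filled even though len is far below PAGE_SIZE.
 */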

/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, const struct iovec *iv,
			    size_t total_len, size_t count, int noblock)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t len = total_len, align = NET_SKB_PAD;
	struct virtio_net_hdr gso = { 0 };
	int offset = 0;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash;

	if (!(tun->flags & TUN_NO_PI)) {
		if ((len -= sizeof(pi)) > total_len)
			return -EINVAL;

		if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
			return -EFAULT;
		offset += sizeof(pi);
	}

	if (tun->flags & TUN_VNET_HDR) {
		if ((len -= tun->vnet_hdr_sz) > total_len)
			return -EINVAL;

		if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
			gso.hdr_len = gso.csum_start + gso.csum_offset + 2;

		if (gso.hdr_len > len)
			return -EINVAL;
		offset += tun->vnet_hdr_sz;
	}

	if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
			return -EINVAL;
	}

	if (msg_control)
		zerocopy = true;

	if (zerocopy) {
		/* Userspace may produce vectors with count greater than
		 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
		 * to let the rest of the data fit in the frags.
		 */
		if (count > MAX_SKB_FRAGS) {
			copylen = iov_length(iv, count - MAX_SKB_FRAGS);
			if (copylen < offset)
				copylen = 0;
			else
				copylen -= offset;
		} else
			copylen = 0;
		/* Enough bytes are copied into the linear part of the skb
		 * to leave room for a later expand head if it is needed.
		 * The rest of the buffer is mapped from userspace.
		 */
		if (copylen < gso.hdr_len)
			copylen = gso.hdr_len;
		if (!copylen)
			copylen = GOODCOPY_LEN;
	} else
		copylen = len;

	skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EAGAIN)
			tun->dev->stats.rx_dropped++;
		return PTR_ERR(skb);
	}

	if (zerocopy)
		err = zerocopy_sg_from_iovec(skb, iv, offset, count);
	else
		err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);

	if (err) {
		tun->dev->stats.rx_dropped++;
		kfree_skb(skb);
		return -EFAULT;
	}

	if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, gso.csum_start,
					  gso.csum_offset)) {
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		if (tun->flags & TUN_NO_PI) {
			switch (skb->data[0] & 0xf0) {
			case 0x40:
				pi.proto = htons(ETH_P_IP);
				break;
			case 0x60:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				tun->dev->stats.rx_dropped++;
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case TUN_TAP_DEV:
		skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		default:
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}

		if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = gso.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}

	skb_reset_network_header(skb);
	rxhash = skb_get_rxhash(skb);
	netif_rx_ni(skb);

	tun->dev->stats.rx_packets++;
	tun->dev->stats.rx_bytes += len;

	tun_flow_update(tun, rxhash, tfile);
	return total_len;
}
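
/* Summary of the parsing above: a write() into the device is laid out as
 *
 *	[struct tun_pi]          unless IFF_NO_PI is set
 *	[struct virtio_net_hdr]  if IFF_VNET_HDR is set (vnet_hdr_sz bytes)
 *	[packet payload]
 *
 * so offset is advanced past each optional header before the payload is
 * copied (or pinned, for zerocopy) into the skb.
 */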

static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
				 unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct tun_struct *tun = tun_get(file);
	struct tun_file *tfile = file->private_data;
	ssize_t result;

	if (!tun)
		return -EBADFD;

	tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);

	result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
			      count, file->f_flags & O_NONBLOCK);

	tun_put(tun);
	return result;
}

/* Put packet to the user space buffer */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    const struct iovec *iv, int len)
{
	struct tun_pi pi = { 0, skb->protocol };
	ssize_t total = 0;

	if (!(tun->flags & TUN_NO_PI)) {
		if ((len -= sizeof(pi)) < 0)
			return -EINVAL;

		if (len < skb->len) {
			/* Packet will be stripped (truncated to fit the
			 * buffer); tell userspace via TUN_PKT_STRIP. */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
			return -EFAULT;
		total += sizeof(pi);
	}

	if (tun->flags & TUN_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 }; /* no info leak */
		if ((len -= tun->vnet_hdr_sz) < 0)
			return -EINVAL;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			gso.hdr_len = skb_headlen(skb);
			gso.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else {
				pr_err("unexpected GSO type: "
				       "0x%x, gso_size %d, hdr_len %d\n",
				       sinfo->gso_type, gso.gso_size,
				       gso.hdr_len);
				print_hex_dump(KERN_ERR, "tun: ",
					       DUMP_PREFIX_NONE,
					       16, 1, skb->head,
					       min((int)gso.hdr_len, 64), true);
				WARN_ON_ONCE(1);
				return -EINVAL;
			}
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			gso.csum_start = skb_checksum_start_offset(skb);
			gso.csum_offset = skb->csum_offset;
		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
		} /* else everything is zero */

		if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
					       sizeof(gso))))
			return -EFAULT;
		total += tun->vnet_hdr_sz;
	}

	len = min_t(int, skb->len, len);

	skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
	total += skb->len;

	tun->dev->stats.tx_packets++;
	tun->dev->stats.tx_bytes += len;

	return total;
}
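
/* A read() from the device is framed symmetrically to tun_get_user():
 * an optional struct tun_pi, an optional virtio_net_hdr (vnet_hdr_sz
 * bytes), then as much of the packet as fits; TUN_PKT_STRIP in pi.flags
 * signals to userspace that the packet was truncated.
 */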

static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
			   struct kiocb *iocb, const struct iovec *iv,
			   ssize_t len, int noblock)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sk_buff *skb;
	ssize_t ret = 0;

	tun_debug(KERN_INFO, tun, "tun_do_read\n");

	if (unlikely(!noblock))
		add_wait_queue(&tfile->wq.wait, &wait);
	while (len) {
		current->state = TASK_INTERRUPTIBLE;

		/* Read frames from the queue */
		if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
			if (noblock) {
				ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			if (tun->dev->reg_state != NETREG_REGISTERED) {
				ret = -EIO;
				break;
			}

			/* Nothing to read, let's sleep */
			schedule();
			continue;
		}

		ret = tun_put_user(tun, tfile, skb, iv, len);
		kfree_skb(skb);
		break;
	}

	current->state = TASK_RUNNING;
	if (unlikely(!noblock))
		remove_wait_queue(&tfile->wq.wait, &wait);

	return ret;
}

static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
				unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
	ssize_t len, ret;

	if (!tun)
		return -EBADFD;
	len = iov_length(iv, count);
	if (len < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = tun_do_read(tun, tfile, iocb, iv, len,
			  file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
out:
	tun_put(tun);
	return ret;
}

static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	BUG_ON(!(list_empty(&tun->disabled)));
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
	free_netdev(dev);
}

static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->owner = INVALID_UID;
	tun->group = INVALID_GID;

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->destructor = tun_free_netdev;
}

/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
{
	return -EINVAL;
}

static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
};

static void tun_sock_write_space(struct sock *sk)
{
	struct tun_file *tfile;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
						POLLWRNORM | POLLWRBAND);

	tfile = container_of(sk, struct tun_file, sk);
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}

static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len)
{
	int ret;
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = __tun_get(tfile);

	if (!tun)
		return -EBADFD;
	ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
			   m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
	tun_put(tun);
	return ret;
}

static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = __tun_get(tfile);
	int ret;

	if (!tun)
		return -EBADFD;

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
			  flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	tun_put(tun);
	return ret;
}

static int tun_release(struct socket *sock)
{
	if (sock->sk)
		sock_put(sock->sk);
	return 0;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
	.release = tun_release,
};

static struct proto tun_proto = {
	.name		= "tun",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tun_file),
};

static int tun_flags(struct tun_struct *tun)
{
	int flags = 0;

	if (tun->flags & TUN_TUN_DEV)
		flags |= IFF_TUN;
	else
		flags |= IFF_TAP;

	if (tun->flags & TUN_NO_PI)
		flags |= IFF_NO_PI;

	/* This flag has no real effect.  We track the value for backwards
	 * compatibility.
	 */
	if (tun->flags & TUN_ONE_QUEUE)
		flags |= IFF_ONE_QUEUE;

	if (tun->flags & TUN_VNET_HDR)
		flags |= IFF_VNET_HDR;

	if (tun->flags & TUN_TAP_MQ)
		flags |= IFF_MULTI_QUEUE;

	return flags;
}

static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return sprintf(buf, "0x%x\n", tun_flags(tun));
}

static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return uid_valid(tun->owner) ?
		sprintf(buf, "%u\n",
			from_kuid_munged(current_user_ns(), tun->owner)) :
		sprintf(buf, "-1\n");
}

static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return gid_valid(tun->group) ?
		sprintf(buf, "%u\n",
			from_kgid_munged(current_user_ns(), tun->group)) :
		sprintf(buf, "-1\n");
}

static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;

	if (tfile->detached)
		return -EINVAL;

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;

		err = tun_attach(tun, file);
		if (err < 0)
			return err;

		if (tun->flags & TUN_TAP_MQ &&
		    (tun->numqueues + tun->numdisabled > 1))
			return err;
	} else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;

		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= TUN_TUN_DEV;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= TUN_TAP_DEV;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       tun_setup, queues, queues);
		if (!dev)
			return -ENOMEM;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;

		spin_lock_init(&tun->lock);

		err = security_tun_dev_alloc_security(&tun->security);
		if (err < 0)
			goto err_free_dev;

		tun_net_init(dev);

		err = tun_flow_init(tun);
		if (err < 0)
			goto err_free_dev;

		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
			TUN_USER_FEATURES;
		dev->features = dev->hw_features;

		INIT_LIST_HEAD(&tun->disabled);
		err = tun_attach(tun, file);
		if (err < 0)
			goto err_free_dev;

		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_free_dev;

		if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
		    device_create_file(&tun->dev->dev, &dev_attr_group))
			pr_err("Failed to create tun sysfs files\n");
	}

	netif_carrier_on(tun->dev);

	tun_debug(KERN_INFO, tun, "tun_set_iff\n");

	if (ifr->ifr_flags & IFF_NO_PI)
		tun->flags |= TUN_NO_PI;
	else
		tun->flags &= ~TUN_NO_PI;

	/* This flag has no real effect.  We track the value for backwards
	 * compatibility.
	 */
	if (ifr->ifr_flags & IFF_ONE_QUEUE)
		tun->flags |= TUN_ONE_QUEUE;
	else
		tun->flags &= ~TUN_ONE_QUEUE;

	if (ifr->ifr_flags & IFF_VNET_HDR)
		tun->flags |= TUN_VNET_HDR;
	else
		tun->flags &= ~TUN_VNET_HDR;

	if (ifr->ifr_flags & IFF_MULTI_QUEUE)
		tun->flags |= TUN_TAP_MQ;
	else
		tun->flags &= ~TUN_TAP_MQ;

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;

err_free_dev:
	free_netdev(dev);
	return err;
}

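/*
 * Editor's note (not part of the driver): a minimal userspace sketch of the
 * TUNSETIFF path implemented above, following the long-standing pattern from
 * Documentation/networking/tuntap.txt. Error handling is illustrative only;
 * "dev" is an IFNAMSIZ-sized buffer that may carry a requested name or be
 * empty, in which case the kernel picks a "tun%d"/"tap%d" name as above and
 * writes it back.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int tun_alloc(char *dev)
 *	{
 *		struct ifreq ifr;
 *		int fd, err;
 *
 *		fd = open("/dev/net/tun", O_RDWR);
 *		if (fd < 0)
 *			return fd;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
 *		if (*dev)
 *			strncpy(ifr.ifr_name, dev, IFNAMSIZ);
 *
 *		err = ioctl(fd, TUNSETIFF, &ifr);
 *		if (err < 0) {
 *			close(fd);
 *			return err;
 *		}
 *		strcpy(dev, ifr.ifr_name);
 *		return fd;
 *	}
 */
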
static void tun_get_iff(struct net *net, struct tun_struct *tun,
			struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");

	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);
}

/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		if (arg & TUN_F_UFO) {
			features |= NETIF_F_UFO;
			arg &= ~TUN_F_UFO;
		}
	}

	/* This gives the user a way to test for new features in future by
	 * trying to set them. */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	netdev_update_features(tun->dev);

	return 0;
}

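/*
 * Editor's note (not part of the driver): a sketch of how userspace might
 * drive TUNSETOFFLOAD, relying on the probe-by-setting convention described
 * in the comment above (any unknown TUN_F_* bit makes the call fail with
 * -EINVAL). The fd is assumed to be a tap fd already configured with
 * IFF_VNET_HDR; the fallback choice is illustrative.
 *
 *	unsigned int offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
 *				TUN_F_TSO_ECN;
 *
 *	if (ioctl(fd, TUNSETOFFLOAD, offloads) != 0)
 *		ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM);
 *
 * Note that the argument is passed by value, not by pointer: set_offload()
 * above receives it as the raw "unsigned long arg" of the ioctl.
 */
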
static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		sk_detach_filter(tfile->socket.sk);
	}

	tun->filter_attached = false;
}

static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		if (ret) {
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}

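/*
 * Editor's note (not part of the driver): TUNATTACHFILTER (handled in the
 * ioctl switch below) copies a struct sock_fprog into tun->fprog and then
 * calls tun_attach_filter() to attach it to every queue's socket. A minimal
 * userspace sketch using a one-instruction classic BPF program
 * (BPF_RET|BPF_K: accept up to 0x40000 bytes of every packet):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/if_tun.h>
 *	#include <linux/filter.h>
 *
 *	struct sock_filter code[] = {
 *		{ 0x06, 0, 0, 0x00040000 },
 *	};
 *	struct sock_fprog prog = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	if (ioctl(fd, TUNATTACHFILTER, &prog) != 0)
 *		perror("TUNATTACHFILTER");
 */
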
static void tun_set_sndbuf(struct tun_struct *tun)
{
	struct tun_file *tfile;
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}

static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;

	rtnl_lock();

	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else
		ret = -EINVAL;

unlock:
	rtnl_unlock();
	return ret;
}

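/*
 * Editor's note (not part of the driver): the userspace side of
 * tun_set_queue(), as documented for multiqueue tuntap. The fd is assumed to
 * be one queue of a device created with IFF_MULTI_QUEUE; ifr_name may stay
 * empty because the queue is identified by the fd itself.
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	ioctl(fd, TUNSETQUEUE, &ifr);		- park this queue
 *
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(fd, TUNSETQUEUE, &ifr);		- re-attach it later
 */
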
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	void __user *argp = (void __user *)arg;
	struct ifreq ifr;
	kuid_t owner;
	kgid_t group;
	int sndbuf;
	int vnet_hdr_sz;
	int ret;

	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
	} else {
		memset(&ifr, 0, sizeof(ifr));
	}
	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF. */
		return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
				IFF_VNET_HDR | IFF_MULTI_QUEUE,
				(unsigned int __user *)argp);
	} else if (cmd == TUNSETQUEUE)
		return tun_set_queue(file, &ifr);

	ret = 0;
	rtnl_lock();

	tun = __tun_get(tfile);
	if (cmd == TUNSETIFF && !tun) {
		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(tfile->net, file, &ifr);
		if (ret)
			goto unlock;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}

	ret = -EBADFD;
	if (!tun)
		goto unlock;

	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);

	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
			  arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode. Keep an extra reference to the
		 * module to prevent it from being unloaded while a persistent
		 * device exists.
		 */
		if (arg && !(tun->flags & TUN_PERSIST)) {
			tun->flags |= TUN_PERSIST;
			__module_get(THIS_MODULE);
		}
		if (!arg && (tun->flags & TUN_PERSIST)) {
			tun->flags &= ~TUN_PERSIST;
			module_put(THIS_MODULE);
		}

		tun_debug(KERN_INFO, tun, "persist %s\n",
			  arg ? "enabled" : "disabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		owner = make_kuid(current_user_ns(), arg);
		if (!uid_valid(owner)) {
			ret = -EINVAL;
			break;
		}
		tun->owner = owner;
		tun_debug(KERN_INFO, tun, "owner set to %u\n",
			  from_kuid(&init_user_ns, tun->owner));
		break;

	case TUNSETGROUP:
		/* Set group of the device */
		group = make_kgid(current_user_ns(), arg);
		if (!gid_valid(group)) {
			ret = -EINVAL;
			break;
		}
		tun->group = group;
		tun_debug(KERN_INFO, tun, "group set to %u\n",
			  from_kgid(&init_user_ns, tun->group));
		break;

	case TUNSETLINK:
		/* Only allow setting the type when the interface is down */
		if (tun->dev->flags & IFF_UP) {
			tun_debug(KERN_INFO, tun,
				  "Linktype set failed because interface is up\n");
			ret = -EBUSY;
		} else {
			tun->dev->type = (int) arg;
			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
				  tun->dev->type);
			ret = 0;
		}
		break;

#ifdef TUN_DEBUG
	case TUNSETDEBUG:
		tun->debug = arg;
		break;
#endif
	case TUNSETOFFLOAD:
		ret = set_offload(tun, arg);
		break;

	case TUNSETTXFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
			break;
		ret = update_filter(&tun->txflt, (void __user *)arg);
		break;

	case SIOCGIFHWADDR:
		/* Get hw address */
		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
		ifr.ifr_hwaddr.sa_family = tun->dev->type;
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case SIOCSIFHWADDR:
		/* Set hw address */
		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
			  ifr.ifr_hwaddr.sa_data);

		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
		break;

	case TUNGETSNDBUF:
		sndbuf = tfile->socket.sk->sk_sndbuf;
		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
			ret = -EFAULT;
		break;

	case TUNSETSNDBUF:
		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
			ret = -EFAULT;
			break;
		}

		tun->sndbuf = sndbuf;
		tun_set_sndbuf(tun);
		break;

	case TUNGETVNETHDRSZ:
		vnet_hdr_sz = tun->vnet_hdr_sz;
		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
			ret = -EFAULT;
		break;

	case TUNSETVNETHDRSZ:
		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
			ret = -EFAULT;
			break;
		}
		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
			ret = -EINVAL;
			break;
		}

		tun->vnet_hdr_sz = vnet_hdr_sz;
		break;

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
			break;
		ret = -EFAULT;
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	default:
		ret = -EINVAL;
		break;
	}

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}

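/*
 * Editor's note (not part of the driver): because TUNSETIFF historically
 * never rejected unknown flags, the TUNGETFEATURES branch above is the only
 * reliable way for userspace to discover what this kernel supports. A
 * hedged sketch of the probe, before building the TUNSETIFF request:
 *
 *	unsigned int features = 0;
 *
 *	if (ioctl(fd, TUNGETFEATURES, &features) == 0 &&
 *	    (features & IFF_MULTI_QUEUE))
 *		ifr.ifr_flags |= IFF_MULTI_QUEUE;
 */
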
static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
}

#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */

static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
		goto out;

	if (on) {
		ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
		if (ret)
			goto out;
		tfile->flags |= TUN_FASYNC;
	} else
		tfile->flags &= ~TUN_FASYNC;
	ret = 0;
out:
	return ret;
}

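/*
 * Editor's note (not part of the driver): tun_chr_fasync() is reached via
 * the generic fcntl() machinery, so signal-driven I/O on a tun fd is enabled
 * the conventional way ("handler" is a user-supplied SIGIO handler):
 *
 *	#include <fcntl.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	signal(SIGIO, handler);
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 *
 * After this, SIGIO is delivered whenever a packet becomes readable on the
 * queue.
 */
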
static int tun_chr_open(struct inode *inode, struct file *file)
{
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto);
	if (!tfile)
		return -ENOMEM;
	rcu_assign_pointer(tfile->tun, NULL);
	tfile->net = get_net(current->nsproxy->net_ns);
	tfile->flags = 0;

	rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
	init_waitqueue_head(&tfile->wq.wait);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);
	sk_change_net(&tfile->sk, tfile->net);

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
	INIT_LIST_HEAD(&tfile->next);

	return 0;
}

static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct net *net = tfile->net;

	tun_detach(tfile, true);
	put_net(net);

	return 0;
}

static const struct file_operations tun_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= do_sync_read,
	.aio_read	= tun_chr_aio_read,
	.write		= do_sync_write,
	.aio_write	= tun_chr_aio_write,
	.poll		= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= tun_chr_compat_ioctl,
#endif
	.open		= tun_chr_open,
	.release	= tun_chr_close,
	.fasync		= tun_chr_fasync
};

static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};

/* ethtool interface */

static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported		= 0;
	cmd->advertising	= 0;
	ethtool_cmd_speed_set(cmd, SPEED_10);
	cmd->duplex		= DUPLEX_FULL;
	cmd->port		= PORT_TP;
	cmd->phy_address	= 0;
	cmd->transceiver	= XCVR_INTERNAL;
	cmd->autoneg		= AUTONEG_DISABLE;
	cmd->maxtxpkt		= 0;
	cmd->maxrxpkt		= 0;
	return 0;
}

static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case TUN_TAP_DEV:
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}

static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	return tun->debug;
#else
	return -EOPNOTSUPP;
#endif
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	tun->debug = value;
#endif
}

static const struct ethtool_ops tun_ethtool_ops = {
	.get_settings	= tun_get_settings,
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
};

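/*
 * Editor's note (not part of the driver): these ops answer the standard
 * SIOCETHTOOL ioctl, so e.g. the driver/bus strings set by tun_get_drvinfo()
 * can be read back from userspace. The interface name "tap0" is illustrative;
 * any UDP socket works as the query handle.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <linux/if.h>
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *
 *	struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
 *	struct ifreq ifr;
 *	int sock = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&info;
 *	ioctl(sock, SIOCETHTOOL, &ifr);
 *	- info.bus_info is now "tun" or "tap", info.driver is DRV_NAME
 */
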
static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
	pr_info("%s\n", DRV_COPYRIGHT);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}
	return 0;
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
}

/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;
	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);

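/*
 * Editor's note (not part of the driver): a sketch of an in-kernel consumer
 * of tun_get_socket(), modeled loosely on what drivers/vhost/net.c does with
 * a tun fd handed over from userspace. The surrounding function and error
 * handling are hypothetical.
 *
 *	struct file *f = fget(fd);
 *	struct socket *sock;
 *
 *	if (!f)
 *		return -EBADF;
 *	sock = tun_get_socket(f);
 *	if (IS_ERR(sock)) {
 *		fput(f);
 *		return PTR_ERR(sock);
 *	}
 *	- sock can now drive sock_sendmsg()/sock_recvmsg(); per the comment
 *	  above, hold the file reference until done, then fput(f)
 */
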
module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");