/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

static atomic_t trapped;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)

static void zap_completion_queue(void);
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
static void netpoll_async_cleanup(struct work_struct *work);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
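
/*
 * Drain the deferred transmit queue from process context.  Frames that
 * were queued because the device was busy are retried here under the
 * tx queue lock; if the queue is still frozen or the driver refuses
 * the frame, it is put back and the work is rescheduled.
 */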
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}
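
/*
 * Verify the UDP checksum of a received frame.  Returns 0 if the
 * checksum is absent, has already been validated by hardware, or
 * checks out; otherwise falls back to a full software verification.
 */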
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
					       napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_neigh_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->neigh_tx)))
			netpoll_neigh_reply(skb, npi);
	}
}
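
/*
 * Poll the device for pending work: run ->ndo_poll_controller() and
 * any scheduled NAPI contexts, answer queued neighbour requests, and
 * reap the tx completion queue.  A slave device first hands its
 * queued neighbour traffic to its bonding master.
 */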
static void netpoll_poll_dev(struct net_device *dev)
{
	const struct net_device_ops *ops;
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);

	/* Don't do any rx activity if the dev_lock mutex is held
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state
	 */
	if (down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (!ops->ndo_poll_controller) {
		up(&ni->dev_lock);
		return;
	}

	/* Process pending work on NIC */
	ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	if (dev->flags & IFF_SLAVE) {
		if (ni) {
			struct net_device *bond_dev;
			struct sk_buff *skb;
			struct netpoll_info *bond_ni;

			bond_dev = netdev_master_upper_dev_get_rcu(dev);
			bond_ni = rcu_dereference_bh(bond_dev->npinfo);
			while ((skb = skb_dequeue(&ni->neigh_tx))) {
				skb->dev = bond_dev;
				skb_queue_tail(&bond_ni->neigh_tx, skb);
			}
		}
	}

	service_neigh_queue(ni);

	zap_completion_queue();
}
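
/*
 * Block netpoll rx/poll activity around device state changes:
 * netpoll_rx_disable() takes the per-device semaphore that
 * netpoll_poll_dev() trylocks, and netpoll_rx_enable() releases it.
 * The SRCU read section keeps npinfo valid while we may sleep on the
 * semaphore.
 */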
int netpoll_rx_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
	return 0;
}
EXPORT_SYMBOL(netpoll_rx_disable);

void netpoll_rx_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_rx_enable);
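
/*
 * Top up the static skb pool so a message can still be sent when the
 * allocator is under pressure; called opportunistically from the
 * transmit path.
 */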
static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}
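
/*
 * Drain this CPU's tx completion queue, freeing skbs directly so that
 * buffers are recycled even when the softirq that would normally free
 * them cannot run.  Skbs with a destructor are handed back to
 * dev_kfree_skb_any() rather than destroyed here.
 */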
static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}
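
/*
 * Get an skb for transmission: try a fresh atomic allocation first,
 * fall back to the reserve pool, and as a last resort poll the device
 * a few times in the hope that completions free up memory.
 */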
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	const struct net_device_ops *ops = dev->netdev_ops;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	WARN_ON_ONCE(!irqs_disabled());

	npinfo = rcu_dereference_bh(np->dev->npinfo);
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_pick_tx(dev, skb);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (__netif_tx_trylock(txq)) {
				if (!netif_xmit_stopped(txq)) {
					if (vlan_tx_tag_present(skb) &&
					    !vlan_hw_offload_capable(netif_skb_features(skb),
								     skb->vlan_proto)) {
						skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
						if (unlikely(!skb))
							break;
						skb->vlan_tci = 0;
					}

					status = ops->ndo_start_xmit(skb, dev);
					if (status == NETDEV_TX_OK)
						txq_trans_update(txq);
				}
				__netif_tx_unlock(txq);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
			  dev->name, ops->ndo_start_xmit);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
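
/*
 * Build an Ethernet/IP(v6)/UDP frame around the caller's message and
 * transmit it via netpoll_send_skb().  All headers are filled in by
 * hand since no socket or routing state is involved.
 */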
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		put_unaligned(0x60, (unsigned char *)ip6h);
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		put_unaligned(0x45, (unsigned char *)iph);
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
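
/*
 * Answer ARP requests and IPv6 neighbour solicitations on behalf of a
 * netpoll client.  Replies are built and sent directly through
 * netpoll_send_skb(), bypassing the regular neighbour subsystem, so
 * the console keeps working while normal rx processing is trapped.
 */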
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int size, type = ARPOP_REPLY;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np, *tmp;
	unsigned long flags;
	int hlen, tlen;
	int hits = 0, proto;

	if (list_empty(&npinfo->rx_np))
		return;

	/* Before checking the packet, we do some early
	   inspection whether this is interesting at all */
	spin_lock_irqsave(&npinfo->rx_lock, flags);
	list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
		if (np->dev == skb->dev)
			hits++;
	}
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	/* No netpoll struct is using this dev */
	if (!hits)
		return;

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto == ETH_P_IP) {
		struct arphdr *arp;
		unsigned char *arp_ptr;
		/* No arp on this interface */
		if (skb->dev->flags & IFF_NOARP)
			return;

		if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
			return;

		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		arp = arp_hdr(skb);

		if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
		     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
		    arp->ar_pro != htons(ETH_P_IP) ||
		    arp->ar_op != htons(ARPOP_REQUEST))
			return;

		arp_ptr = (unsigned char *)(arp+1);
		/* save the location of the src hw addr */
		sha = arp_ptr;
		arp_ptr += skb->dev->addr_len;
		memcpy(&sip, arp_ptr, 4);
		arp_ptr += 4;
		/* If we actually cared about dst hw addr,
		   it would get copied here */
		arp_ptr += skb->dev->addr_len;
		memcpy(&tip, arp_ptr, 4);

		/* Should we ignore arp? */
		if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
			return;

		size = arp_hdr_len(skb->dev);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (tip != np->local_ip.ip)
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			skb_reset_network_header(send_skb);
			arp = (struct arphdr *) skb_put(send_skb, size);
			send_skb->dev = skb->dev;
			send_skb->protocol = htons(ETH_P_ARP);

			/* Fill the device header for the ARP frame */
			if (dev_hard_header(send_skb, skb->dev, ETH_P_ARP,
					    sha, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			/*
			 * Fill out the arp protocol part.
			 *
			 * we only support ethernet device type,
			 * which (according to RFC 1390) should
			 * always equal 1 (Ethernet).
			 */

			arp->ar_hrd = htons(np->dev->type);
			arp->ar_pro = htons(ETH_P_IP);
			arp->ar_hln = np->dev->addr_len;
			arp->ar_pln = 4;
			arp->ar_op = htons(type);

			arp_ptr = (unsigned char *)(arp + 1);
			memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &tip, 4);
			arp_ptr += 4;
			memcpy(arp_ptr, sha, np->dev->addr_len);
			arp_ptr += np->dev->addr_len;
			memcpy(arp_ptr, &sip, 4);

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_hooks for the same address,
			   we're fine by sending a single reply */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	} else if (proto == ETH_P_IPV6) {
#if IS_ENABLED(CONFIG_IPV6)
		struct nd_msg *msg;
		u8 *lladdr = NULL;
		struct ipv6hdr *hdr;
		struct icmp6hdr *icmp6h;
		const struct in6_addr *saddr;
		const struct in6_addr *daddr;
		struct inet6_dev *in6_dev = NULL;
		struct in6_addr *target;

		in6_dev = in6_dev_get(skb->dev);
		if (!in6_dev || !in6_dev->cnf.accept_ra)
			return;

		if (!pskb_may_pull(skb, skb->len))
			return;

		msg = (struct nd_msg *)skb_transport_header(skb);

		__skb_push(skb, skb->data - skb_transport_header(skb));

		if (ipv6_hdr(skb)->hop_limit != 255)
			return;
		if (msg->icmph.icmp6_code != 0)
			return;
		if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
			return;

		saddr = &ipv6_hdr(skb)->saddr;
		daddr = &ipv6_hdr(skb)->daddr;

		size = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);

		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (!ipv6_addr_equal(daddr, &np->local_ip.in6))
				continue;

			hlen = LL_RESERVED_SPACE(np->dev);
			tlen = np->dev->needed_tailroom;
			send_skb = find_skb(np, size + hlen + tlen, hlen);
			if (!send_skb)
				continue;

			send_skb->protocol = htons(ETH_P_IPV6);
			send_skb->dev = skb->dev;

			skb_reset_network_header(send_skb);
			skb_put(send_skb, sizeof(struct ipv6hdr));
			hdr = ipv6_hdr(send_skb);

			*(__be32 *)hdr = htonl(0x60000000);

			hdr->payload_len = htons(size);
			hdr->nexthdr = IPPROTO_ICMPV6;
			hdr->hop_limit = 255;
			hdr->saddr = *saddr;
			hdr->daddr = *daddr;

			send_skb->transport_header = send_skb->tail;
			skb_put(send_skb, size);

			/* the advertisement is built in send_skb */
			icmp6h = (struct icmp6hdr *)skb_transport_header(send_skb);
			icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
			icmp6h->icmp6_router = 0;
			icmp6h->icmp6_solicited = 1;
			target = (struct in6_addr *)(skb_transport_header(send_skb) + sizeof(struct icmp6hdr));
			*target = msg->target;
			icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
							      IPPROTO_ICMPV6,
							      csum_partial(icmp6h,
									   size, 0));

			if (dev_hard_header(send_skb, skb->dev, ETH_P_IPV6,
					    lladdr, np->dev->dev_addr,
					    send_skb->len) < 0) {
				kfree_skb(send_skb);
				continue;
			}

			netpoll_send_skb(np, send_skb);

			/* If there are several rx_hooks for the same address,
			   we're fine by sending a single reply */
			break;
		}
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
#endif
	}
}
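
/*
 * Return true if the frame is an IPv6 neighbour solicitation that the
 * trapped rx path should answer itself.
 */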
static bool pkt_is_ns(struct sk_buff *skb)
{
	struct nd_msg *msg;
	struct ipv6hdr *hdr;

	/* neighbour solicitations are ICMPv6, not ARP */
	if (skb->protocol != htons(ETH_P_IPV6))
		return false;
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
		return false;

	msg = (struct nd_msg *)skb_transport_header(skb);
	__skb_push(skb, skb->data - skb_transport_header(skb));
	hdr = ipv6_hdr(skb);

	if (hdr->nexthdr != IPPROTO_ICMPV6)
		return false;
	if (hdr->hop_limit != 255)
		return false;
	if (msg->icmph.icmp6_code != 0)
		return false;
	if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
		return false;

	return true;
}
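
/*
 * Receive-path hook: deliver UDP packets that match a registered
 * netpoll client to its rx_hook.  Returns 1 if the packet was
 * consumed (including every packet while traps are active), 0 to
 * hand it back to the regular network stack.
 */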
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
{
	int proto, len, ulen;
	int hits = 0;
	const struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll *np, *tmp;

	if (list_empty(&npinfo->rx_np))
		goto out;

	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) && atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	} else if (pkt_is_ns(skb) && atomic_read(&trapped)) {
		skb_queue_tail(&npinfo->neigh_tx, skb);
		return 1;
	}

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP && proto != ETH_P_IPV6)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	if (proto == ETH_P_IP) {
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (iph->ihl < 5 || iph->version != 4)
			goto out;
		if (!pskb_may_pull(skb, iph->ihl*4))
			goto out;
		iph = (struct iphdr *)skb->data;
		if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
			goto out;

		len = ntohs(iph->tot_len);
		if (skb->len < len || len < iph->ihl*4)
			goto out;

		/*
		 * Our transport medium may have padded the buffer out.
		 * Now we trim to the true length of the frame.
		 */
		if (pskb_trim_rcsum(skb, len))
			goto out;

		iph = (struct iphdr *)skb->data;
		if (iph->protocol != IPPROTO_UDP)
			goto out;

		len -= iph->ihl*4;
		uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
		ulen = ntohs(uh->len);

		if (ulen != len)
			goto out;
		if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
			goto out;

		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (np->local_ip.ip && np->local_ip.ip != iph->daddr)
				continue;
			if (np->remote_ip.ip && np->remote_ip.ip != iph->saddr)
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_hook(np, ntohs(uh->source),
				    (char *)(uh+1),
				    ulen - sizeof(struct udphdr));
			hits++;
		}
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		const struct ipv6hdr *ip6h;

		if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
			goto out;
		ip6h = (struct ipv6hdr *)skb->data;
		if (ip6h->version != 6)
			goto out;
		len = ntohs(ip6h->payload_len);
		if (!len)
			goto out;
		if (len + sizeof(struct ipv6hdr) > skb->len)
			goto out;
		if (pskb_trim_rcsum(skb, len + sizeof(struct ipv6hdr)))
			goto out;
		ip6h = ipv6_hdr(skb);
		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
			goto out;
		uh = udp_hdr(skb);
		ulen = ntohs(uh->len);
		if (ulen != skb->len)
			goto out;
		if (udp6_csum_init(skb, uh, IPPROTO_UDP))
			goto out;
		list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
			if (!ipv6_addr_equal(&np->local_ip.in6, &ip6h->daddr))
				continue;
			if (!ipv6_addr_equal(&np->remote_ip.in6, &ip6h->saddr))
				continue;
			if (np->local_port && np->local_port != ntohs(uh->dest))
				continue;

			np->rx_hook(np, ntohs(uh->source),
				    (char *)(uh+1),
				    ulen - sizeof(struct udphdr));
			hits++;
		}
#endif
	}

	if (!hits)
		goto out;

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
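
/*
 * Parse an IPv4 or IPv6 address.  Returns 0 for IPv4, 1 for IPv6 (so
 * the caller can set np->ipv6), and -1 on error or when IPv6 support
 * is not built in.
 */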
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}
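
/*
 * Parse a netconsole-style configuration string:
 *
 *	[local_port]@[local_ip]/[dev_name],[remote_port]@<remote_ip>/[remote_mac]
 *
 * Omitted fields keep their current values; the local and remote
 * addresses must agree on the IP version.
 */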
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
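
/*
 * Attach a netpoll instance to an already-validated device: allocate
 * (or share) the per-device netpoll_info, register the rx hook if
 * any, and finally publish npinfo on the device.  Called under RTNL.
 */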
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), gfp);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->neigh_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name) {
		struct net *net = current->nsproxy->net_ns;
		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			in_dev = __in_dev_get_rtnl(ndev);

			if (!in_dev || !in_dev->ifa_list) {
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = in_dev->ifa_list->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev, GFP_KERNEL);
	if (err)
		goto put;

	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
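
/*
 * Final teardown of a netpoll_info, run from an RCU-bh callback once
 * no reader can still see the pointer: purge the queues, stop the tx
 * work, and free the structure.
 */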
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->neigh_tx);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);

	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);

	kfree(npinfo);
}

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	/* rtnl_dereference would be preferable here but
	 * rcu_cleanup_netpoll path can put us in here safely without
	 * holding the rtnl, so plain rcu_dereference it is
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		rcu_assign_pointer(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
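
/*
 * Asynchronous cleanup: __netpoll_free_async() schedules the work so
 * the instance can be torn down (and freed) later from process
 * context with the rtnl lock held.
 */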
static void netpoll_async_cleanup(struct work_struct *work)
{
	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();
	kfree(np);
}

void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);

void netpoll_cleanup(struct netpoll *np)
{
	if (!np->dev)
		return;

	rtnl_lock();
	__netpoll_cleanup(np);
	rtnl_unlock();

	dev_put(np->dev);
	np->dev = NULL;
}
EXPORT_SYMBOL(netpoll_cleanup);
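
/*
 * netpoll_trap()/netpoll_set_trap() let a netpoll client (e.g. a
 * debugger) claim all incoming traffic: while the trap count is
 * non-zero, __netpoll_rx() consumes every packet instead of passing
 * it up the stack.
 */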
int netpoll_trap(void)
{
	return atomic_read(&trapped);
}
EXPORT_SYMBOL(netpoll_trap);

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}
EXPORT_SYMBOL(netpoll_set_trap);