netpoll.c

/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL		50
#define NETPOLL_RX_ENABLED	1
#define NETPOLL_RX_DROP		2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
		 sizeof(struct iphdr) + sizeof(struct ethhdr))

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
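
/*
 * Deferred transmit path: drain skbs that netpoll_send_skb() could not
 * send directly, retrying later if the device queue is still busy.
 */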
static void queue_process(struct work_struct *work)
{
        struct netpoll_info *npinfo =
                container_of(work, struct netpoll_info, tx_work.work);
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = skb_dequeue(&npinfo->txq))) {
                struct net_device *dev = skb->dev;
                const struct net_device_ops *ops = dev->netdev_ops;
                struct netdev_queue *txq;

                if (!netif_device_present(dev) || !netif_running(dev)) {
                        __kfree_skb(skb);
                        continue;
                }

                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

                local_irq_save(flags);
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) ||
                    netif_tx_queue_frozen(txq) ||
                    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
                        skb_queue_head(&npinfo->txq, skb);
                        __netif_tx_unlock(txq);
                        local_irq_restore(flags);

                        schedule_delayed_work(&npinfo->tx_work, HZ/10);
                        return;
                }
                __netif_tx_unlock(txq);
                local_irq_restore(flags);
        }
}
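
/* Verify a received UDP checksum; returns 0 if the packet checks out. */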
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
                            unsigned short ulen, __be32 saddr, __be32 daddr)
{
        __wsum psum;

        if (uh->check == 0 || skb_csum_unnecessary(skb))
                return 0;

        psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

        if (skb->ip_summed == CHECKSUM_COMPLETE &&
            !csum_fold(csum_add(psum, skb->csum)))
                return 0;

        skb->csum = psum;

        return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
                         struct napi_struct *napi, int budget)
{
        int work;

        /* net_rx_action's ->poll() invocations and ours are
         * synchronized by this test which is only made while
         * holding the napi->poll_lock.
         */
        if (!test_bit(NAPI_STATE_SCHED, &napi->state))
                return budget;

        npinfo->rx_flags |= NETPOLL_RX_DROP;
        atomic_inc(&trapped);
        set_bit(NAPI_STATE_NPSVC, &napi->state);

        work = napi->poll(napi, budget);
        trace_napi_poll(napi);

        clear_bit(NAPI_STATE_NPSVC, &napi->state);
        atomic_dec(&trapped);
        npinfo->rx_flags &= ~NETPOLL_RX_DROP;

        return budget - work;
}
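
/* Poll every NAPI context on the device, sharing one receive budget. */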
static void poll_napi(struct net_device *dev)
{
        struct napi_struct *napi;
        int budget = 16;

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner != smp_processor_id() &&
                    spin_trylock(&napi->poll_lock)) {
                        budget = poll_one_napi(dev->npinfo, napi, budget);
                        spin_unlock(&napi->poll_lock);

                        if (!budget)
                                break;
                }
        }
}
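
/* Answer any ARP requests queued while the receive path was trapped. */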
static void service_arp_queue(struct netpoll_info *npi)
{
        if (npi) {
                struct sk_buff *skb;

                while ((skb = skb_dequeue(&npi->arp_tx)))
                        arp_reply(skb);
        }
}
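
/*
 * Manually pump the device with interrupts disabled: ask the driver to
 * process pending work, run its NAPI handlers, answer queued ARPs, and
 * free completed tx skbs.
 */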
void netpoll_poll_dev(struct net_device *dev)
{
        const struct net_device_ops *ops;

        if (!dev || !netif_running(dev))
                return;

        ops = dev->netdev_ops;
        if (!ops->ndo_poll_controller)
                return;

        /* Process pending work on NIC */
        ops->ndo_poll_controller(dev);

        poll_napi(dev);

        service_arp_queue(dev->npinfo);

        zap_completion_queue();
}

void netpoll_poll(struct netpoll *np)
{
        netpoll_poll_dev(np->dev);
}
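
/* Top the emergency skb pool back up to MAX_SKBS entries. */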
static void refill_skbs(void)
{
        struct sk_buff *skb;
        unsigned long flags;

        spin_lock_irqsave(&skb_pool.lock, flags);
        while (skb_pool.qlen < MAX_SKBS) {
                skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
                if (!skb)
                        break;

                __skb_queue_tail(&skb_pool, skb);
        }
        spin_unlock_irqrestore(&skb_pool.lock, flags);
}
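
/*
 * Free this CPU's tx completion queue by hand, since the softirq that
 * normally does it may never run while we poll with interrupts off.
 */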
static void zap_completion_queue(void)
{
        unsigned long flags;
        struct softnet_data *sd = &get_cpu_var(softnet_data);

        if (sd->completion_queue) {
                struct sk_buff *clist;

                local_irq_save(flags);
                clist = sd->completion_queue;
                sd->completion_queue = NULL;
                local_irq_restore(flags);

                while (clist != NULL) {
                        struct sk_buff *skb = clist;
                        clist = clist->next;
                        if (skb->destructor) {
                                atomic_inc(&skb->users);
                                dev_kfree_skb_any(skb); /* put this one back */
                        } else {
                                __kfree_skb(skb);
                        }
                }
        }

        put_cpu_var(softnet_data);
}
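
/*
 * Get an skb for transmit: try a fresh allocation, fall back to the
 * preallocated pool, and poll the device a few times to free memory
 * before giving up.
 */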
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
        int count = 0;
        struct sk_buff *skb;

        zap_completion_queue();
        refill_skbs();
repeat:

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                skb = skb_dequeue(&skb_pool);

        if (!skb) {
                if (++count < 10) {
                        netpoll_poll(np);
                        goto repeat;
                }
                return NULL;
        }

        atomic_set(&skb->users, 1);
        skb_reserve(skb, reserve);
        return skb;
}
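
/* Return 1 if this CPU is already inside one of the device's NAPI polls. */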
static int netpoll_owner_active(struct net_device *dev)
{
        struct napi_struct *napi;

        list_for_each_entry(napi, &dev->napi_list, dev_list) {
                if (napi->poll_owner == smp_processor_id())
                        return 1;
        }
        return 0;
}
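
/*
 * Transmit an skb immediately, spinning for up to one clock tick if the
 * tx queue is busy; anything still unsent is handed to the delayed-work
 * queue drained by queue_process().
 */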
void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
        int status = NETDEV_TX_BUSY;
        unsigned long tries;
        struct net_device *dev = np->dev;
        const struct net_device_ops *ops = dev->netdev_ops;
        struct netpoll_info *npinfo = np->dev->npinfo;

        if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
                __kfree_skb(skb);
                return;
        }

        /* don't get messages out of order, and no recursion */
        if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
                struct netdev_queue *txq;
                unsigned long flags;

                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

                local_irq_save(flags);
                /* try until next clock tick */
                for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
                     tries > 0; --tries) {
                        if (__netif_tx_trylock(txq)) {
                                if (!netif_tx_queue_stopped(txq)) {
                                        dev->priv_flags |= IFF_IN_NETPOLL;
                                        status = ops->ndo_start_xmit(skb, dev);
                                        dev->priv_flags &= ~IFF_IN_NETPOLL;
                                        if (status == NETDEV_TX_OK)
                                                txq_trans_update(txq);
                                }
                                __netif_tx_unlock(txq);

                                if (status == NETDEV_TX_OK)
                                        break;
                        }

                        /* tickle the device; maybe there is some cleanup to do */
                        netpoll_poll(np);

                        udelay(USEC_PER_POLL);
                }

                WARN_ONCE(!irqs_disabled(),
                        "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
                        dev->name, ops->ndo_start_xmit);

                local_irq_restore(flags);
        }

        if (status != NETDEV_TX_OK) {
                skb_queue_tail(&npinfo->txq, skb);
                schedule_delayed_work(&npinfo->tx_work, 0);
        }
}
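
/*
 * Build a complete UDP/IP/Ethernet frame around the message by hand,
 * working outward from the payload, then hand it to netpoll_send_skb().
 */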
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
        int total_len, eth_len, ip_len, udp_len;
        struct sk_buff *skb;
        struct udphdr *udph;
        struct iphdr *iph;
        struct ethhdr *eth;

        udp_len = len + sizeof(*udph);
        ip_len = eth_len = udp_len + sizeof(*iph);
        total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

        skb = find_skb(np, total_len, total_len - len);
        if (!skb)
                return;

        skb_copy_to_linear_data(skb, msg, len);
        skb->len += len;

        skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
        udph = udp_hdr(skb);
        udph->source = htons(np->local_port);
        udph->dest = htons(np->remote_port);
        udph->len = htons(udp_len);
        udph->check = 0;
        udph->check = csum_tcpudp_magic(np->local_ip,
                                        np->remote_ip,
                                        udp_len, IPPROTO_UDP,
                                        csum_partial(udph, udp_len, 0));
        if (udph->check == 0)
                udph->check = CSUM_MANGLED_0;

        skb_push(skb, sizeof(*iph));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);

        /* iph->version = 4; iph->ihl = 5; */
        put_unaligned(0x45, (unsigned char *)iph);
        iph->tos      = 0;
        put_unaligned(htons(ip_len), &(iph->tot_len));
        iph->id       = 0;
        iph->frag_off = 0;
        iph->ttl      = 64;
        iph->protocol = IPPROTO_UDP;
        iph->check    = 0;
        put_unaligned(np->local_ip, &(iph->saddr));
        put_unaligned(np->remote_ip, &(iph->daddr));
        iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

        eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
        skb_reset_mac_header(skb);
        skb->protocol = eth->h_proto = htons(ETH_P_IP);
        memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
        memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

        skb->dev = np->dev;

        netpoll_send_skb(np, skb);
}
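
/*
 * Answer an ARP request aimed at one of our netpoll addresses. The
 * normal ARP code may never run while receive is trapped, so we build
 * the reply ourselves.
 */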
static void arp_reply(struct sk_buff *skb)
{
        struct netpoll_info *npinfo = skb->dev->npinfo;
        struct arphdr *arp;
        unsigned char *arp_ptr;
        int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
        __be32 sip, tip;
        unsigned char *sha;
        struct sk_buff *send_skb;
        struct netpoll *np, *tmp;
        unsigned long flags;
        int hits = 0;

        if (list_empty(&npinfo->rx_np))
                return;

        /* Before checking the packet, do some early inspection to see
         * whether it is interesting to us at all.
         */
        spin_lock_irqsave(&npinfo->rx_lock, flags);
        list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
                if (np->dev == skb->dev)
                        hits++;
        }
        spin_unlock_irqrestore(&npinfo->rx_lock, flags);

        /* No netpoll struct is using this dev */
        if (!hits)
                return;

        /* No arp on this interface */
        if (skb->dev->flags & IFF_NOARP)
                return;

        if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
                return;

        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        arp = arp_hdr(skb);

        if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
             arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
            arp->ar_pro != htons(ETH_P_IP) ||
            arp->ar_op != htons(ARPOP_REQUEST))
                return;

        arp_ptr = (unsigned char *)(arp+1);
        /* save the location of the src hw addr */
        sha = arp_ptr;
        arp_ptr += skb->dev->addr_len;
        memcpy(&sip, arp_ptr, 4);
        arp_ptr += 4;
        /* If we actually cared about dst hw addr,
           it would get copied here */
        arp_ptr += skb->dev->addr_len;
        memcpy(&tip, arp_ptr, 4);

        /* Should we ignore arp? */
        if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
                return;

        size = arp_hdr_len(skb->dev);

        spin_lock_irqsave(&npinfo->rx_lock, flags);
        list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
                if (tip != np->local_ip)
                        continue;

                send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
                                    LL_RESERVED_SPACE(np->dev));
                if (!send_skb)
                        continue;

                skb_reset_network_header(send_skb);
                arp = (struct arphdr *) skb_put(send_skb, size);
                send_skb->dev = skb->dev;
                send_skb->protocol = htons(ETH_P_ARP);

                /* Fill the device header for the ARP frame */
                if (dev_hard_header(send_skb, skb->dev, ptype,
                                    sha, np->dev->dev_addr,
                                    send_skb->len) < 0) {
                        kfree_skb(send_skb);
                        continue;
                }

                /*
                 * Fill out the arp protocol part.
                 *
                 * we only support ethernet device type,
                 * which (according to RFC 1390) should
                 * always equal 1 (Ethernet).
                 */
                arp->ar_hrd = htons(np->dev->type);
                arp->ar_pro = htons(ETH_P_IP);
                arp->ar_hln = np->dev->addr_len;
                arp->ar_pln = 4;
                arp->ar_op = htons(type);

                arp_ptr = (unsigned char *)(arp + 1);
                memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
                arp_ptr += np->dev->addr_len;
                memcpy(arp_ptr, &tip, 4);
                arp_ptr += 4;
                memcpy(arp_ptr, sha, np->dev->addr_len);
                arp_ptr += np->dev->addr_len;
                memcpy(arp_ptr, &sip, 4);

                netpoll_send_skb(np, send_skb);

                /* If there are several rx_hooks for the same address,
                   we're fine by sending a single reply */
                break;
        }
        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
}
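
/*
 * Receive-path hook: validate an incoming frame and, if it is a UDP
 * packet matching a registered netpoll client, deliver it to that
 * client's rx_hook. Returns 1 if the skb was consumed.
 */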
int __netpoll_rx(struct sk_buff *skb)
{
        int proto, len, ulen;
        int hits = 0;
        struct iphdr *iph;
        struct udphdr *uh;
        struct netpoll_info *npinfo = skb->dev->npinfo;
        struct netpoll *np, *tmp;

        if (list_empty(&npinfo->rx_np))
                goto out;

        if (skb->dev->type != ARPHRD_ETHER)
                goto out;

        /* check if netpoll clients need ARP */
        if (skb->protocol == htons(ETH_P_ARP) &&
            atomic_read(&trapped)) {
                skb_queue_tail(&npinfo->arp_tx, skb);
                return 1;
        }

        proto = ntohs(eth_hdr(skb)->h_proto);
        if (proto != ETH_P_IP)
                goto out;
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto out;
        if (skb_shared(skb))
                goto out;

        iph = (struct iphdr *)skb->data;
        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto out;
        if (iph->ihl < 5 || iph->version != 4)
                goto out;
        if (!pskb_may_pull(skb, iph->ihl*4))
                goto out;
        if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
                goto out;

        len = ntohs(iph->tot_len);
        if (skb->len < len || len < iph->ihl*4)
                goto out;

        /*
         * Our transport medium may have padded the buffer out.
         * Now we trim to the true length of the frame.
         */
        if (pskb_trim_rcsum(skb, len))
                goto out;

        if (iph->protocol != IPPROTO_UDP)
                goto out;

        len -= iph->ihl*4;
        uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
        ulen = ntohs(uh->len);

        if (ulen != len)
                goto out;
        if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
                goto out;

        list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
                if (np->local_ip && np->local_ip != iph->daddr)
                        continue;
                if (np->remote_ip && np->remote_ip != iph->saddr)
                        continue;
                if (np->local_port && np->local_port != ntohs(uh->dest))
                        continue;

                np->rx_hook(np, ntohs(uh->source),
                            (char *)(uh+1),
                            ulen - sizeof(struct udphdr));
                hits++;
        }

        if (!hits)
                goto out;

        kfree_skb(skb);
        return 1;

out:
        if (atomic_read(&trapped)) {
                kfree_skb(skb);
                return 1;
        }

        return 0;
}
void netpoll_print_options(struct netpoll *np)
{
        printk(KERN_INFO "%s: local port %d\n",
                         np->name, np->local_port);
        printk(KERN_INFO "%s: local IP %pI4\n",
                         np->name, &np->local_ip);
        printk(KERN_INFO "%s: interface '%s'\n",
                         np->name, np->dev_name);
        printk(KERN_INFO "%s: remote port %d\n",
                         np->name, np->remote_port);
        printk(KERN_INFO "%s: remote IP %pI4\n",
                         np->name, &np->remote_ip);
        printk(KERN_INFO "%s: remote ethernet address %pM\n",
                         np->name, np->remote_mac);
}
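
/*
 * Parse a netconsole-style configuration string of the form
 * [src-port]@[src-ip]/[dev],[tgt-port]@<tgt-ip>/[tgt-macaddr].
 */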
int netpoll_parse_options(struct netpoll *np, char *opt)
{
        char *cur = opt, *delim;

        if (*cur != '@') {
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;

        if (*cur != '/') {
                if ((delim = strchr(cur, '/')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->local_ip = in_aton(cur);
                cur = delim;
        }
        cur++;

        if (*cur != ',') {
                /* parse out dev name */
                if ((delim = strchr(cur, ',')) == NULL)
                        goto parse_failed;
                *delim = 0;
                strlcpy(np->dev_name, cur, sizeof(np->dev_name));
                cur = delim;
        }
        cur++;

        if (*cur != '@') {
                /* dst port */
                if ((delim = strchr(cur, '@')) == NULL)
                        goto parse_failed;
                *delim = 0;
                if (*cur == ' ' || *cur == '\t')
                        printk(KERN_INFO "%s: warning: whitespace "
                                         "is not allowed\n", np->name);
                np->remote_port = simple_strtol(cur, NULL, 10);
                cur = delim;
        }
        cur++;

        /* dst ip */
        if ((delim = strchr(cur, '/')) == NULL)
                goto parse_failed;
        *delim = 0;
        np->remote_ip = in_aton(cur);
        cur = delim + 1;

        if (*cur != 0) {
                /* MAC address */
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[0] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[1] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[2] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[3] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                if ((delim = strchr(cur, ':')) == NULL)
                        goto parse_failed;
                *delim = 0;
                np->remote_mac[4] = simple_strtol(cur, NULL, 16);
                cur = delim + 1;
                np->remote_mac[5] = simple_strtol(cur, NULL, 16);
        }

        netpoll_print_options(np);

        return 0;

 parse_failed:
        printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
               np->name, cur);
        return -1;
}
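
/*
 * Attach a netpoll client to its device: look up the interface, bring
 * it up if needed, wait for carrier, pick a local IP if none was given,
 * and register the rx hook.
 */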
int netpoll_setup(struct netpoll *np)
{
        struct net_device *ndev = NULL;
        struct in_device *in_dev;
        struct netpoll_info *npinfo;
        struct netpoll *npe, *tmp;
        unsigned long flags;
        int err;

        if (np->dev_name)
                ndev = dev_get_by_name(&init_net, np->dev_name);
        if (!ndev) {
                printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
                       np->name, np->dev_name);
                return -ENODEV;
        }

        np->dev = ndev;
        if (!ndev->npinfo) {
                npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
                if (!npinfo) {
                        err = -ENOMEM;
                        goto put;
                }

                npinfo->rx_flags = 0;
                INIT_LIST_HEAD(&npinfo->rx_np);

                spin_lock_init(&npinfo->rx_lock);
                skb_queue_head_init(&npinfo->arp_tx);
                skb_queue_head_init(&npinfo->txq);
                INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

                atomic_set(&npinfo->refcnt, 1);
        } else {
                npinfo = ndev->npinfo;
                atomic_inc(&npinfo->refcnt);
        }

        npinfo->netpoll = np;

        if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
            !ndev->netdev_ops->ndo_poll_controller) {
                printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
                       np->name, np->dev_name);
                err = -ENOTSUPP;
                goto release;
        }

        if (!netif_running(ndev)) {
                unsigned long atmost, atleast;

                printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
                       np->name, np->dev_name);

                rtnl_lock();
                err = dev_open(ndev);
                rtnl_unlock();

                if (err) {
                        printk(KERN_ERR "%s: failed to open %s\n",
                               np->name, ndev->name);
                        goto release;
                }

                atleast = jiffies + HZ/10;
                atmost = jiffies + carrier_timeout * HZ;
                while (!netif_carrier_ok(ndev)) {
                        if (time_after(jiffies, atmost)) {
                                printk(KERN_NOTICE
                                       "%s: timeout waiting for carrier\n",
                                       np->name);
                                break;
                        }
                        msleep(1);
                }

                /* If carrier appears to come up instantly, we don't
                 * trust it and pause so that we don't pump all our
                 * queued console messages into the bitbucket.
                 */
                if (time_before(jiffies, atleast)) {
                        printk(KERN_NOTICE "%s: carrier detect appears"
                               " untrustworthy, waiting 4 seconds\n",
                               np->name);
                        msleep(4000);
                }
        }

        if (!np->local_ip) {
                rcu_read_lock();
                in_dev = __in_dev_get_rcu(ndev);

                if (!in_dev || !in_dev->ifa_list) {
                        rcu_read_unlock();
                        printk(KERN_ERR "%s: no IP address for %s, aborting\n",
                               np->name, np->dev_name);
                        err = -EDESTADDRREQ;
                        goto release;
                }

                np->local_ip = in_dev->ifa_list->ifa_local;
                rcu_read_unlock();
                printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
        }

        if (np->rx_hook) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                npinfo->rx_flags |= NETPOLL_RX_ENABLED;
                list_add_tail(&np->rx, &npinfo->rx_np);
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
        }

        /* fill up the skb queue */
        refill_skbs();

        /* last thing to do is link it to the net device structure */
        ndev->npinfo = npinfo;

        /* avoid racing with NAPI reading npinfo */
        synchronize_rcu();

        return 0;

 release:
        if (!ndev->npinfo) {
                spin_lock_irqsave(&npinfo->rx_lock, flags);
                list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
                        npe->dev = NULL;
                }
                spin_unlock_irqrestore(&npinfo->rx_lock, flags);

                kfree(npinfo);
        }
put:
        dev_put(ndev);
        return err;
}
static int __init netpoll_init(void)
{
        skb_queue_head_init(&skb_pool);
        return 0;
}
core_initcall(netpoll_init);
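
/*
 * Detach a netpoll client: unhook its rx entry and, when the last
 * reference goes away, purge the queues and free the shared npinfo.
 */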
void netpoll_cleanup(struct netpoll *np)
{
        struct netpoll_info *npinfo;
        unsigned long flags;

        if (np->dev) {
                npinfo = np->dev->npinfo;
                if (npinfo) {
                        if (!list_empty(&npinfo->rx_np)) {
                                spin_lock_irqsave(&npinfo->rx_lock, flags);
                                list_del(&np->rx);
                                if (list_empty(&npinfo->rx_np))
                                        npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
                                spin_unlock_irqrestore(&npinfo->rx_lock, flags);
                        }

                        if (atomic_dec_and_test(&npinfo->refcnt)) {
                                const struct net_device_ops *ops;

                                skb_queue_purge(&npinfo->arp_tx);
                                skb_queue_purge(&npinfo->txq);
                                cancel_rearming_delayed_work(&npinfo->tx_work);

                                /* clean after last, unfinished work */
                                __skb_queue_purge(&npinfo->txq);
                                kfree(npinfo);

                                ops = np->dev->netdev_ops;
                                if (ops->ndo_netpoll_cleanup)
                                        ops->ndo_netpoll_cleanup(np->dev);
                                else
                                        np->dev->npinfo = NULL;
                        }
                }

                dev_put(np->dev);
        }

        np->dev = NULL;
}
int netpoll_trap(void)
{
        return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
        if (trap)
                atomic_inc(&trapped);
        else
                atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_send_skb);
EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll_dev);
EXPORT_SYMBOL(netpoll_poll);