netpoll.c
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <asm/unaligned.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM situations.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32
#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)

static struct sk_buff_head skb_pool;

static atomic_t trapped;

#define USEC_PER_POLL	50
#define NETPOLL_RX_ENABLED  1
#define NETPOLL_RX_DROP     2

#define MAX_SKB_SIZE \
		(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
				sizeof(struct iphdr) + sizeof(struct ethhdr))
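
/*
 * A quick sanity check on the arithmetic (the on-wire header sizes for
 * Ethernet/IPv4/UDP are 14/20/8 bytes): each pooled skb can carry
 * MAX_UDP_CHUNK + 8 + 20 = 1488 bytes of IP packet, which fits a
 * standard 1500-byte Ethernet MTU without fragmentation; with the
 * 14-byte link header, MAX_SKB_SIZE comes to 1502 bytes.
 */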

static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		netif_tx_lock(dev);
		if ((netif_queue_stopped(dev) ||
		     netif_subqueue_stopped(dev, skb)) ||
		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
			skb_queue_head(&npinfo->txq, skb);
			netif_tx_unlock(dev);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		netif_tx_unlock(dev);
		local_irq_restore(flags);
	}
}

static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum psum;

	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	if (skb->ip_summed == CHECKSUM_COMPLETE &&
	    !csum_fold(csum_add(psum, skb->csum)))
		return 0;

	skb->csum = psum;

	return __skb_checksum_complete(skb);
}

/*
 * Check whether delayed processing was scheduled for our NIC. If so,
 * we attempt to grab the poll lock and use ->poll() to pump the card.
 * If this fails, either we've recursed in ->poll() or it's already
 * running on another CPU.
 *
 * Note: we don't mask interrupts with this lock because we're using
 * trylock here and interrupts are already disabled in the softirq
 * case. Further, we test the poll_owner to avoid recursion on UP
 * systems where the lock doesn't exist.
 *
 * In cases where there is bi-directional communication, reading only
 * one message at a time can lead to packets being dropped by the
 * network adapter, forcing superfluous retries and possibly timeouts.
 * Thus, we set our budget to greater than 1.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and ours are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);

	work = napi->poll(napi, budget);

	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int budget = 16;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner != smp_processor_id() &&
		    spin_trylock(&napi->poll_lock)) {
			budget = poll_one_napi(dev->npinfo, napi, budget);
			spin_unlock(&napi->poll_lock);

			if (!budget)
				break;
		}
	}
}

static void service_arp_queue(struct netpoll_info *npi)
{
	if (npi) {
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&npi->arp_tx)))
			arp_reply(skb);
	}
}

void netpoll_poll(struct netpoll *np)
{
	struct net_device *dev = np->dev;

	if (!dev || !netif_running(dev) || !dev->poll_controller)
		return;

	/* Process pending work on NIC */
	dev->poll_controller(dev);

	poll_napi(dev);

	service_arp_queue(dev->npinfo);

	zap_completion_queue();
}
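
/*
 * A driver's ->poll_controller typically just re-runs its interrupt
 * handler with the IRQ masked. A minimal sketch, assuming a
 * hypothetical driver "foo" with interrupt handler foo_interrupt():
 *
 *	static void foo_netpoll(struct net_device *dev)
 *	{
 *		disable_irq(dev->irq);
 *		foo_interrupt(dev->irq, dev);
 *		enable_irq(dev->irq);
 *	}
 */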

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (skb->destructor) {
				atomic_inc(&skb->users);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll(np);
			goto repeat;
		}
		return NULL;
	}

	atomic_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	int status = NETDEV_TX_BUSY;
	unsigned long tries;
	struct net_device *dev = np->dev;
	struct netpoll_info *npinfo = np->dev->npinfo;

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		__kfree_skb(skb);
		return;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		unsigned long flags;

		local_irq_save(flags);
		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (netif_tx_trylock(dev)) {
				if (!netif_queue_stopped(dev) &&
				    !netif_subqueue_stopped(dev, skb))
					status = dev->hard_start_xmit(skb, dev);
				netif_tx_unlock(dev);

				if (status == NETDEV_TX_OK)
					break;
			}

			/* tickle the device; maybe there is some cleanup to do */
			netpoll_poll(np);

			udelay(USEC_PER_POLL);
		}
		local_irq_restore(flags);
	}

	if (status != NETDEV_TX_OK) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
}
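
/*
 * On the "try until next clock tick" loop above: jiffies_to_usecs(1)
 * is 1000000/HZ, so with HZ=1000 the loop makes 1000/50 = 20 attempts
 * of 50 us each, and with HZ=250 it makes 4000/50 = 80 attempts;
 * roughly one jiffy of busy-waiting either way before the skb is
 * deferred to the txq/workqueue path.
 */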

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, eth_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;

	udp_len = len + sizeof(*udph);
	ip_len = eth_len = udp_len + sizeof(*iph);
	total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;

	skb = find_skb(np, total_len, total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb->len += len;

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);
	udph->check = 0;
	udph->check = csum_tcpudp_magic(htonl(np->local_ip),
					htonl(np->remote_ip),
					udp_len, IPPROTO_UDP,
					csum_partial((unsigned char *)udph, udp_len, 0));
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	/* iph->version = 4; iph->ihl = 5; */
	put_unaligned(0x45, (unsigned char *)iph);
	iph->tos      = 0;
	put_unaligned(htons(ip_len), &(iph->tot_len));
	iph->id       = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->protocol = IPPROTO_UDP;
	iph->check    = 0;
	put_unaligned(htonl(np->local_ip), &(iph->saddr));
	put_unaligned(htonl(np->remote_ip), &(iph->daddr));
	iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

	eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth->h_proto = htons(ETH_P_IP);
	memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
	memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
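
/*
 * Typical caller sketch (loosely modeled on netconsole; treat as
 * approximate, since the real write path also chunks long messages to
 * MAX_UDP_CHUNK before handing them to netpoll_send_udp()):
 *
 *	static void write_msg(struct console *con, const char *msg,
 *			      unsigned int len)
 *	{
 *		netpoll_send_udp(&np, msg, len);
 *	}
 */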

static void arp_reply(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
	__be32 sip, tip;
	unsigned char *sha;
	struct sk_buff *send_skb;
	struct netpoll *np = NULL;

	if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
		np = npinfo->rx_np;
	if (!np)
		return;

	/* No arp on this interface */
	if (skb->dev->flags & IFF_NOARP)
		return;

	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
		return;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	arp = arp_hdr(skb);

	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    arp->ar_pro != htons(ETH_P_IP) ||
	    arp->ar_op != htons(ARPOP_REQUEST))
		return;

	arp_ptr = (unsigned char *)(arp+1);
	/* save the location of the src hw addr */
	sha = arp_ptr;
	arp_ptr += skb->dev->addr_len;
	memcpy(&sip, arp_ptr, 4);
	arp_ptr += 4;
	/* if we actually cared about dst hw addr, it would get copied here */
	arp_ptr += skb->dev->addr_len;
	memcpy(&tip, arp_ptr, 4);

	/* Should we ignore arp? */
	if (tip != htonl(np->local_ip) ||
	    ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
		return;

	size = arp_hdr_len(skb->dev);
	send_skb = find_skb(np, size + LL_ALLOCATED_SPACE(np->dev),
			    LL_RESERVED_SPACE(np->dev));

	if (!send_skb)
		return;

	skb_reset_network_header(send_skb);
	arp = (struct arphdr *) skb_put(send_skb, size);
	send_skb->dev = skb->dev;
	send_skb->protocol = htons(ETH_P_ARP);

	/* Fill the device header for the ARP frame */
	if (dev_hard_header(send_skb, skb->dev, ptype,
			    sha, np->dev->dev_addr,
			    send_skb->len) < 0) {
		kfree_skb(send_skb);
		return;
	}

	/*
	 * Fill out the arp protocol part.
	 *
	 * we only support ethernet device type,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */

	arp->ar_hrd = htons(np->dev->type);
	arp->ar_pro = htons(ETH_P_IP);
	arp->ar_hln = np->dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);
	memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &tip, 4);
	arp_ptr += 4;
	memcpy(arp_ptr, sha, np->dev->addr_len);
	arp_ptr += np->dev->addr_len;
	memcpy(arp_ptr, &sip, 4);

	netpoll_send_skb(np, send_skb);
}
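
/*
 * Layout of the ARP payload built above, for an Ethernet/IPv4 request
 * (addr_len == 6). The reply swaps the roles: we answer with our own
 * hw address and the requested IP as the sender pair, and echo the
 * requester's hw address and IP as the target pair:
 *
 *	[ sender hw: np->dev->dev_addr ][ sender ip: tip ]
 *	[ target hw: sha (requester)   ][ target ip: sip ]
 */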

int __netpoll_rx(struct sk_buff *skb)
{
	int proto, len, ulen;
	struct iphdr *iph;
	struct udphdr *uh;
	struct netpoll_info *npi = skb->dev->npinfo;
	struct netpoll *np = npi->rx_np;

	if (!np)
		goto out;
	if (skb->dev->type != ARPHRD_ETHER)
		goto out;

	/* check if netpoll clients need ARP */
	if (skb->protocol == htons(ETH_P_ARP) &&
	    atomic_read(&trapped)) {
		skb_queue_tail(&npi->arp_tx, skb);
		return 1;
	}

	proto = ntohs(eth_hdr(skb)->h_proto);
	if (proto != ETH_P_IP)
		goto out;
	if (skb->pkt_type == PACKET_OTHERHOST)
		goto out;
	if (skb_shared(skb))
		goto out;

	iph = (struct iphdr *)skb->data;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out;
	if (iph->ihl < 5 || iph->version != 4)
		goto out;
	if (!pskb_may_pull(skb, iph->ihl*4))
		goto out;
	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
		goto out;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < iph->ihl*4)
		goto out;

	/*
	 * Our transport medium may have padded the buffer out.
	 * Now we trim to the true length of the frame.
	 */
	if (pskb_trim_rcsum(skb, len))
		goto out;

	if (iph->protocol != IPPROTO_UDP)
		goto out;

	len -= iph->ihl*4;
	uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
	ulen = ntohs(uh->len);

	if (ulen != len)
		goto out;
	if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
		goto out;
	if (np->local_ip && np->local_ip != ntohl(iph->daddr))
		goto out;
	if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
		goto out;
	if (np->local_port && np->local_port != ntohs(uh->dest))
		goto out;

	np->rx_hook(np, ntohs(uh->source),
		    (char *)(uh+1),
		    ulen - sizeof(struct udphdr));

	kfree_skb(skb);
	return 1;

out:
	if (atomic_read(&trapped)) {
		kfree_skb(skb);
		return 1;
	}

	return 0;
}
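
/*
 * __netpoll_rx() returns 1 when the packet was consumed (or trapped)
 * and 0 to let the normal stack see it. A rough sketch of the inline
 * netpoll_rx() wrapper in <linux/netpoll.h> that gates the call (from
 * memory of this era's header, so treat as approximate):
 *
 *	static inline int netpoll_rx(struct sk_buff *skb)
 *	{
 *		struct netpoll_info *npinfo = skb->dev->npinfo;
 *		unsigned long flags;
 *		int ret = 0;
 *
 *		if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
 *			return 0;
 *
 *		spin_lock_irqsave(&npinfo->rx_lock, flags);
 *		if (npinfo->rx_flags && __netpoll_rx(skb))
 *			ret = 1;
 *		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 *
 *		return ret;
 *	}
 */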

void netpoll_print_options(struct netpoll *np)
{
	DECLARE_MAC_BUF(mac);
	printk(KERN_INFO "%s: local port %d\n",
	       np->name, np->local_port);
	printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->local_ip));
	printk(KERN_INFO "%s: interface %s\n",
	       np->name, np->dev_name);
	printk(KERN_INFO "%s: remote port %d\n",
	       np->name, np->remote_port);
	printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
	       np->name, HIPQUAD(np->remote_ip));
	printk(KERN_INFO "%s: remote ethernet address %s\n",
	       np->name, print_mac(mac, np->remote_mac));
}

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;

	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->local_ip = ntohl(in_aton(cur));
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_port = simple_strtol(cur, NULL, 10);
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	np->remote_ip = ntohl(in_aton(cur));
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[0] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[1] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[2] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[3] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		if ((delim = strchr(cur, ':')) == NULL)
			goto parse_failed;
		*delim = 0;
		np->remote_mac[4] = simple_strtol(cur, NULL, 16);
		cur = delim + 1;
		np->remote_mac[5] = simple_strtol(cur, NULL, 16);
	}

	netpoll_print_options(np);

	return 0;

parse_failed:
	printk(KERN_INFO "%s: couldn't parse config at %s!\n",
	       np->name, cur);
	return -1;
}
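
/*
 * Example of the config string this parses, in the format
 * local_port@local_ip/dev,remote_port@remote_ip/remote_mac:
 *
 *	"6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55"
 *
 * Any field before the remote IP may be left empty to keep the
 * caller's default; e.g. "@/,@10.0.0.2/" relies on defaults for the
 * ports, local IP, interface, and remote MAC, while the remote IP
 * itself is mandatory (the parser unconditionally requires its
 * trailing '/').
 */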

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	struct netpoll_info *npinfo;
	unsigned long flags;
	int err;

	if (np->dev_name)
		ndev = dev_get_by_name(&init_net, np->dev_name);
	if (!ndev) {
		printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
		       np->name, np->dev_name);
		return -ENODEV;
	}

	np->dev = ndev;
	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto release;
		}

		npinfo->rx_flags = 0;
		npinfo->rx_np = NULL;

		spin_lock_init(&npinfo->rx_lock);
		skb_queue_head_init(&npinfo->arp_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);
	} else {
		npinfo = ndev->npinfo;
		atomic_inc(&npinfo->refcnt);
	}

	if (!ndev->poll_controller) {
		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
		       np->name, np->dev_name);
		err = -ENOTSUPP;
		goto release;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		printk(KERN_INFO "%s: device %s not up yet, forcing it\n",
		       np->name, np->dev_name);

		rtnl_lock();
		err = dev_open(ndev);
		rtnl_unlock();

		if (err) {
			printk(KERN_ERR "%s: failed to open %s\n",
			       np->name, ndev->name);
			goto release;
		}

		atleast = jiffies + HZ/10;
		atmost = jiffies + 4*HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				printk(KERN_NOTICE
				       "%s: timeout waiting for carrier\n",
				       np->name);
				break;
			}
			cond_resched();
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */

		if (time_before(jiffies, atleast)) {
			printk(KERN_NOTICE "%s: carrier detect appears"
			       " untrustworthy, waiting 4 seconds\n",
			       np->name);
			msleep(4000);
		}
	}

	if (!np->local_ip) {
		rcu_read_lock();
		in_dev = __in_dev_get_rcu(ndev);

		if (!in_dev || !in_dev->ifa_list) {
			rcu_read_unlock();
			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
			       np->name, np->dev_name);
			err = -EDESTADDRREQ;
			goto release;
		}

		np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
		rcu_read_unlock();
		printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
		       np->name, HIPQUAD(np->local_ip));
	}

	if (np->rx_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		npinfo->rx_np = np;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* fill up the skb queue */
	refill_skbs();

	/* last thing to do is link it to the net device structure */
	ndev->npinfo = npinfo;

	/* avoid racing with NAPI reading npinfo */
	synchronize_rcu();

	return 0;

release:
	if (!ndev->npinfo)
		kfree(npinfo);
	np->dev = NULL;
	dev_put(ndev);
	return err;
}
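
/*
 * A minimal client sketch (names and config string are hypothetical;
 * the fields shown match this version's struct netpoll, with IPs kept
 * in host byte order):
 *
 *	static struct netpoll np = {
 *		.name        = "myclient",
 *		.dev_name    = "eth0",
 *		.local_port  = 6665,
 *		.remote_port = 6666,
 *		.remote_mac  = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
 *	};
 *
 *	err = netpoll_parse_options(&np, config);	(optional)
 *	if (!err)
 *		err = netpoll_setup(&np);
 *	...
 *	netpoll_cleanup(&np);
 */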

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

void netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	if (np->dev) {
		npinfo = np->dev->npinfo;
		if (npinfo) {
			if (npinfo->rx_np == np) {
				spin_lock_irqsave(&npinfo->rx_lock, flags);
				npinfo->rx_np = NULL;
				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
			}

			if (atomic_dec_and_test(&npinfo->refcnt)) {
				skb_queue_purge(&npinfo->arp_tx);
				skb_queue_purge(&npinfo->txq);
				cancel_rearming_delayed_work(&npinfo->tx_work);

				/* clean after last, unfinished work */
				__skb_queue_purge(&npinfo->txq);
				kfree(npinfo);
				np->dev->npinfo = NULL;
			}
		}

		dev_put(np->dev);
	}

	np->dev = NULL;
}

int netpoll_trap(void)
{
	return atomic_read(&trapped);
}

void netpoll_set_trap(int trap)
{
	if (trap)
		atomic_inc(&trapped);
	else
		atomic_dec(&trapped);
}

EXPORT_SYMBOL(netpoll_set_trap);
EXPORT_SYMBOL(netpoll_trap);
EXPORT_SYMBOL(netpoll_print_options);
EXPORT_SYMBOL(netpoll_parse_options);
EXPORT_SYMBOL(netpoll_setup);
EXPORT_SYMBOL(netpoll_cleanup);
EXPORT_SYMBOL(netpoll_send_udp);
EXPORT_SYMBOL(netpoll_poll);