u_ether.c

/*
 * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
 *
 * Copyright (C) 2003-2005,2008 David Brownell
 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
 * Copyright (C) 2008 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/* #define VERBOSE_DEBUG */

#include <linux/kernel.h>
#include <linux/utsname.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include "u_ether.h"

/*
 * This component encapsulates the Ethernet link glue needed to provide
 * one (!) network link through the USB gadget stack, normally "usb0".
 *
 * The control and data models are handled by the function driver which
 * connects to this code; such as CDC Ethernet, "CDC Subset", or RNDIS.
 * That includes all descriptor and endpoint management.
 *
 * Link level addressing is handled by this component using module
 * parameters; if no such parameters are provided, random link level
 * addresses are used.  Each end of the link uses one address.  The
 * host end address is exported in various ways, and is often recorded
 * in configuration databases.
 *
 * The driver which assembles each configuration using such a link is
 * responsible for ensuring that each configuration includes at most one
 * instance of this network link.  (The network layer provides ways for
 * this single "physical" link to be used by multiple virtual links.)
 */
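
/*
 * Minimal usage sketch (an illustration only, not code from this file):
 * a gadget function driver calls gether_setup() once at bind time,
 * gether_connect()/gether_disconnect() as the host activates and
 * deactivates the data interface, and gether_cleanup() at unbind.
 * Names such as "func->port" below are hypothetical, and error
 * handling is elided.
 *
 *	u8 host_mac[ETH_ALEN];
 *	struct net_device *net;
 *
 *	status = gether_setup(gadget, host_mac);	at bind time
 *	...
 *	net = gether_connect(&func->port);		data interface goes live
 *	if (IS_ERR(net))
 *		handle the error
 *	...
 *	gether_disconnect(&func->port);			host dropped the link
 *	gether_cleanup();				at unbind time
 */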

#define UETH__VERSION	"29-May-2008"

struct eth_dev {
	/* lock is held while accessing port_usb
	 * or updating its backlink port_usb->ioport
	 */
	spinlock_t		lock;
	struct gether		*port_usb;

	struct net_device	*net;
	struct usb_gadget	*gadget;

	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
	struct list_head	tx_reqs, rx_reqs;
	atomic_t		tx_qlen;

	unsigned		header_len;
	struct sk_buff		*(*wrap)(struct sk_buff *skb);
	int			(*unwrap)(struct sk_buff *skb);

	struct work_struct	work;

	unsigned long		todo;
#define	WORK_RX_MEMORY		0

	bool			zlp;
	u8			host_mac[ETH_ALEN];
};

/*-------------------------------------------------------------------------*/

#define RX_EXTRA	20	/* bytes guarding against rx overflows */

#define DEFAULT_QLEN	2	/* double buffering by default */

#ifdef CONFIG_USB_GADGET_DUALSPEED

static unsigned qmult = 5;
module_param(qmult, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(qmult, "queue length multiplier at high speed");

#else	/* full speed (low speed doesn't do bulk) */
#define qmult		1
#endif

/* for dual-speed hardware, use deeper queues at highspeed */
static inline int qlen(struct usb_gadget *gadget)
{
	if (gadget_is_dualspeed(gadget) && gadget->speed == USB_SPEED_HIGH)
		return qmult * DEFAULT_QLEN;
	else
		return DEFAULT_QLEN;
}
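
/* For example, with the default qmult of 5, a dual-speed gadget running
 * at high speed pre-allocates qlen() == 5 * 2 == 10 requests per
 * direction, while at full speed only the 2-deep double buffer is used.
 */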

/*-------------------------------------------------------------------------*/

/* REVISIT there must be a better way than having two sets
 * of debug calls ...
 */

#undef DBG
#undef VDBG
#undef ERROR
#undef INFO

#define xprintk(d, level, fmt, args...) \
	printk(level "%s: " fmt , (d)->net->name , ## args)

#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDBG	DBG
#else
#define VDBG(dev, fmt, args...) \
	do { } while (0)
#endif /* VERBOSE_DEBUG */

#define ERROR(dev, fmt, args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev, fmt, args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)

/*-------------------------------------------------------------------------*/

/* NETWORK DRIVER HOOKUP (to the layer above this driver) */

static int ueth_change_mtu(struct net_device *net, int new_mtu)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;
	int		status = 0;

	/* don't change MTU on "live" link (peer won't know) */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		status = -EBUSY;
	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
		status = -ERANGE;
	else
		net->mtu = new_mtu;
	spin_unlock_irqrestore(&dev->lock, flags);

	return status;
}

static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
{
	struct eth_dev	*dev = netdev_priv(net);

	strlcpy(p->driver, "g_ether", sizeof p->driver);
	strlcpy(p->version, UETH__VERSION, sizeof p->version);
	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
}

/* REVISIT can also support:
 *   - WOL (by tracking suspends and issuing remote wakeup)
 *   - msglevel (implies updated messaging)
 *   - ... probably more ethtool ops
 */

static struct ethtool_ops ops = {
	.get_drvinfo = eth_get_drvinfo,
	.get_link = ethtool_op_get_link,
};

static void defer_kevent(struct eth_dev *dev, int flag)
{
	if (test_and_set_bit(flag, &dev->todo))
		return;
	if (!schedule_work(&dev->work))
		ERROR(dev, "kevent %d may have been dropped\n", flag);
	else
		DBG(dev, "kevent %d scheduled\n", flag);
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req);

static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;

	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;
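
	/* Illustrative arithmetic (not from this file): with mtu 1500,
	 * header_len 0 and a 512-byte high speed bulk maxpacket, size is
	 * 14 + 1500 + 20 = 1534, which the two lines above round up to
	 * 1536 (3 * 512), so each rx buffer spans whole packets.
	 */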

	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}

static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);
		if (dev->unwrap)
			status = dev->unwrap(skb);
		if (status < 0
				|| ETH_HLEN > skb->len
				|| skb->len > ETH_FRAME_LEN) {
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			DBG(dev, "rx length %d\n", skb->len);
			break;
		}

		skb->protocol = eth_type_trans(skb, dev->net);
		dev->net->stats.rx_packets++;
		dev->net->stats.rx_bytes += skb->len;

		/* no buffer copies needed, unless hardware can't
		 * use skb buffers.
		 */
		status = netif_rx(skb);
		skb = NULL;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	if (!netif_running(dev->net)) {
clean:
		spin_lock(&dev->req_lock);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock(&dev->req_lock);
		req = NULL;
	}
	if (req)
		rx_submit(dev, req, GFP_ATOMIC);
}

static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
{
	unsigned		i;
	struct usb_request	*req;

	if (!n)
		return -ENOMEM;

	/* queue/recycle up to N requests */
	i = n;
	list_for_each_entry(req, list, list) {
		if (i-- == 0)
			goto extra;
	}
	while (i--) {
		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
		if (!req)
			return list_empty(list) ? -ENOMEM : 0;
		list_add(&req->list, list);
	}
	return 0;

extra:
	/* free extras */
	for (;;) {
		struct list_head	*next;

		next = req->list.next;
		list_del(&req->list);
		usb_ep_free_request(ep, req);

		if (next == list)
			break;

		req = container_of(next, struct usb_request, list);
	}
	return 0;
}

static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
{
	int	status;

	spin_lock(&dev->req_lock);
	status = prealloc(&dev->tx_reqs, link->in_ep, n);
	if (status < 0)
		goto fail;
	status = prealloc(&dev->rx_reqs, link->out_ep, n);
	if (status < 0)
		goto fail;
	goto done;
fail:
	DBG(dev, "can't alloc requests\n");
done:
	spin_unlock(&dev->req_lock);
	return status;
}

static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
{
	struct usb_request	*req;
	unsigned long		flags;

	/* fill unused rxq slots with some skb */
	spin_lock_irqsave(&dev->req_lock, flags);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
				struct usb_request, list);
		list_del_init(&req->list);
		spin_unlock_irqrestore(&dev->req_lock, flags);

		if (rx_submit(dev, req, gfp_flags) < 0) {
			defer_kevent(dev, WORK_RX_MEMORY);
			return;
		}

		spin_lock_irqsave(&dev->req_lock, flags);
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);
}

static void eth_work(struct work_struct *work)
{
	struct eth_dev	*dev = container_of(work, struct eth_dev, work);

	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
		if (netif_running(dev->net))
			rx_fill(dev, GFP_KERNEL);
	}

	if (dev->todo)
		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
}

static void tx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;

	switch (req->status) {
	default:
		dev->net->stats.tx_errors++;
		VDBG(dev, "tx err %d\n", req->status);
		/* FALLTHROUGH */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		break;
	case 0:
		dev->net->stats.tx_bytes += skb->len;
	}
	dev->net->stats.tx_packets++;

	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->tx_reqs);
	spin_unlock(&dev->req_lock);
	dev_kfree_skb_any(skb);

	atomic_dec(&dev->tx_qlen);
	if (netif_carrier_ok(dev->net))
		netif_wake_queue(dev->net);
}

static inline int is_promisc(u16 cdc_filter)
{
	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
}

static int eth_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return 0;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		struct sk_buff	*skb_new;

		skb_new = dev->wrap(skb);
		if (!skb_new)
			goto drop;

		dev_kfree_skb_any(skb);
		skb = skb_new;
		length = skb->len;
	}
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	req->zero = 1;
	if (!dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	/* throttle highspeed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
drop:
		dev->net->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return 0;
}

/*-------------------------------------------------------------------------*/

static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
{
	DBG(dev, "%s\n", __func__);

	/* fill the rx queue */
	rx_fill(dev, gfp_flags);

	/* and open the tx floodgates */
	atomic_set(&dev->tx_qlen, 0);
	netif_wake_queue(dev->net);
}

static int eth_open(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	struct gether	*link;

	DBG(dev, "%s\n", __func__);
	if (netif_carrier_ok(dev->net))
		eth_start(dev, GFP_KERNEL);

	spin_lock_irq(&dev->lock);
	link = dev->port_usb;
	if (link && link->open)
		link->open(link);
	spin_unlock_irq(&dev->lock);

	return 0;
}

static int eth_stop(struct net_device *net)
{
	struct eth_dev	*dev = netdev_priv(net);
	unsigned long	flags;

	VDBG(dev, "%s\n", __func__);
	netif_stop_queue(net);

	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
		dev->net->stats.rx_errors, dev->net->stats.tx_errors
		);

	/* ensure there are no more active requests */
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		struct gether	*link = dev->port_usb;

		if (link->close)
			link->close(link);

		/* NOTE:  we have no abort-queue primitive we could use
		 * to cancel all pending I/O.  Instead, we disable then
		 * reenable the endpoints ... this idiom may leave toggle
		 * wrong, but that's a self-correcting error.
		 *
		 * REVISIT:  we *COULD* just let the transfers complete at
		 * their own pace; the network stack can handle old packets.
		 * For the moment we leave this here, since it works.
		 */
		usb_ep_disable(link->in_ep);
		usb_ep_disable(link->out_ep);
		if (netif_carrier_ok(net)) {
			DBG(dev, "host still using in/out endpoints\n");
			usb_ep_enable(link->in_ep, link->in);
			usb_ep_enable(link->out_ep, link->out);
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
static char *dev_addr;
module_param(dev_addr, charp, S_IRUGO);
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");

/* this address is invisible to ifconfig */
static char *host_addr;
module_param(host_addr, charp, S_IRUGO);
MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
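
/* For example (an illustration only; the exact module name depends on
 * which gadget driver links in this component, e.g. g_ether):
 *
 *	modprobe g_ether dev_addr=aa:bb:cc:dd:ee:01 host_addr=aa:bb:cc:dd:ee:02
 *
 * get_ether_addr() below accepts twelve hex digits, optionally separated
 * by ':' or '.'; if the result is not a valid unicast address, a random
 * one is generated instead.
 */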

static u8 __init nibble(unsigned char c)
{
	if (isdigit(c))
		return c - '0';
	c = toupper(c);
	if (isxdigit(c))
		return 10 + c - 'A';
	return 0;
}

static int __init get_ether_addr(const char *str, u8 *dev_addr)
{
	if (str) {
		unsigned	i;

		for (i = 0; i < 6; i++) {
			unsigned char num;

			if ((*str == '.') || (*str == ':'))
				str++;
			num = nibble(*str++) << 4;
			num |= (nibble(*str++));
			dev_addr[i] = num;
		}
		if (is_valid_ether_addr(dev_addr))
			return 0;
	}
	random_ether_addr(dev_addr);
	return 1;
}

static struct eth_dev *the_dev;

static const struct net_device_ops eth_netdev_ops = {
	.ndo_open		= eth_open,
	.ndo_stop		= eth_stop,
	.ndo_start_xmit		= eth_start_xmit,
	.ndo_change_mtu		= ueth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

/**
 * gether_setup - initialize one ethernet-over-usb link
 * @g: gadget to associate with these links
 * @ethaddr: NULL, or a buffer in which the ethernet address of the
 *	host side of the link is recorded
 * Context: may sleep
 *
 * This sets up the single network link that may be exported by a
 * gadget driver using this framework.  The link layer addresses are
 * set up using module parameters.
 *
 * Returns negative errno, or zero on success
 */
int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
{
	struct eth_dev		*dev;
	struct net_device	*net;
	int			status;

	if (the_dev)
		return -EBUSY;

	net = alloc_etherdev(sizeof *dev);
	if (!net)
		return -ENOMEM;

	dev = netdev_priv(net);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->req_lock);
	INIT_WORK(&dev->work, eth_work);
	INIT_LIST_HEAD(&dev->tx_reqs);
	INIT_LIST_HEAD(&dev->rx_reqs);

	/* network device setup */
	dev->net = net;
	strcpy(net->name, "usb%d");

	if (get_ether_addr(dev_addr, net->dev_addr))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "self");
	if (get_ether_addr(host_addr, dev->host_mac))
		dev_warn(&g->dev,
			"using random %s ethernet address\n", "host");

	if (ethaddr)
		memcpy(ethaddr, dev->host_mac, ETH_ALEN);

	net->netdev_ops = &eth_netdev_ops;

	SET_ETHTOOL_OPS(net, &ops);

	/* two kinds of host-initiated state changes:
	 *  - iff DATA transfer is active, carrier is "on"
	 *  - tx queueing enabled if open *and* carrier is "on"
	 */
	netif_stop_queue(net);
	netif_carrier_off(net);

	dev->gadget = g;
	SET_NETDEV_DEV(net, &g->dev);

	status = register_netdev(net);
	if (status < 0) {
		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
		free_netdev(net);
	} else {
		INFO(dev, "MAC %pM\n", net->dev_addr);
		INFO(dev, "HOST MAC %pM\n", dev->host_mac);

		the_dev = dev;
	}

	return status;
}

/**
 * gether_cleanup - remove Ethernet-over-USB device
 * Context: may sleep
 *
 * This is called to free all resources allocated by @gether_setup().
 */
void gether_cleanup(void)
{
	if (!the_dev)
		return;

	unregister_netdev(the_dev->net);
	free_netdev(the_dev->net);

	/* assuming we used keventd, it must quiesce too */
	flush_scheduled_work();

	the_dev = NULL;
}

/**
 * gether_connect - notify network layer that USB link is active
 * @link: the USB link, set up with endpoints, descriptors matching
 *	current device speed, and any framing wrapper(s) set up.
 * Context: irqs blocked
 *
 * This is called to activate endpoints and let the network layer know
 * the connection is active ("carrier detect").  It may cause the I/O
 * queues to open and start letting network packets flow, but will in
 * any case activate the endpoints so that they respond properly to the
 * USB host.
 *
 * Verify net_device pointer returned using IS_ERR().  If it doesn't
 * indicate some error code (negative errno), ep->driver_data values
 * have been overwritten.
 */
struct net_device *gether_connect(struct gether *link)
{
	struct eth_dev		*dev = the_dev;
	int			result = 0;

	if (!dev)
		return ERR_PTR(-EINVAL);

	link->in_ep->driver_data = dev;
	result = usb_ep_enable(link->in_ep, link->in);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->in_ep->name, result);
		goto fail0;
	}

	link->out_ep->driver_data = dev;
	result = usb_ep_enable(link->out_ep, link->out);
	if (result != 0) {
		DBG(dev, "enable %s --> %d\n",
			link->out_ep->name, result);
		goto fail1;
	}

	if (result == 0)
		result = alloc_requests(dev, link, qlen(dev->gadget));

	if (result == 0) {
		dev->zlp = link->is_zlp_ok;
		DBG(dev, "qlen %d\n", qlen(dev->gadget));

		dev->header_len = link->header_len;
		dev->unwrap = link->unwrap;
		dev->wrap = link->wrap;

		spin_lock(&dev->lock);
		dev->port_usb = link;
		link->ioport = dev;
		if (netif_running(dev->net)) {
			if (link->open)
				link->open(link);
		} else {
			if (link->close)
				link->close(link);
		}
		spin_unlock(&dev->lock);

		netif_carrier_on(dev->net);
		if (netif_running(dev->net))
			eth_start(dev, GFP_ATOMIC);

	/* on error, disable any endpoints  */
	} else {
		(void) usb_ep_disable(link->out_ep);
fail1:
		(void) usb_ep_disable(link->in_ep);
	}
fail0:
	/* caller is responsible for cleanup on error */
	if (result < 0)
		return ERR_PTR(result);
	return dev->net;
}

/**
 * gether_disconnect - notify network layer that USB link is inactive
 * @link: the USB link, on which gether_connect() was called
 * Context: irqs blocked
 *
 * This is called to deactivate endpoints and let the network layer know
 * the connection went inactive ("no carrier").
 *
 * On return, the state is as if gether_connect() had never been called.
 * The endpoints are inactive, and accordingly without active USB I/O.
 * Pointers to endpoint descriptors and endpoint private data are nulled.
 */
void gether_disconnect(struct gether *link)
{
	struct eth_dev		*dev = link->ioport;
	struct usb_request	*req;

	WARN_ON(!dev);
	if (!dev)
		return;

	DBG(dev, "%s\n", __func__);

	netif_stop_queue(dev->net);
	netif_carrier_off(dev->net);

	/* disable endpoints, forcing (synchronous) completion
	 * of all pending i/o.  then free the request objects
	 * and forget about the endpoints.
	 */
	usb_ep_disable(link->in_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->tx_reqs)) {
		req = container_of(dev->tx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->in_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->in_ep->driver_data = NULL;
	link->in = NULL;

	usb_ep_disable(link->out_ep);
	spin_lock(&dev->req_lock);
	while (!list_empty(&dev->rx_reqs)) {
		req = container_of(dev->rx_reqs.next,
					struct usb_request, list);
		list_del(&req->list);

		spin_unlock(&dev->req_lock);
		usb_ep_free_request(link->out_ep, req);
		spin_lock(&dev->req_lock);
	}
	spin_unlock(&dev->req_lock);
	link->out_ep->driver_data = NULL;
	link->out = NULL;

	/* finish forgetting about this USB link episode */
	dev->header_len = 0;
	dev->unwrap = NULL;
	dev->wrap = NULL;

	spin_lock(&dev->lock);
	dev->port_usb = NULL;
	link->ioport = NULL;
	spin_unlock(&dev->lock);
}