  1. /*
  2. * WUSB Wire Adapter: WLP interface
  3. * Deal with TX (massaging data to transmit, handling it)
  4. *
  5. * Copyright (C) 2005-2006 Intel Corporation
  6. * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License version
  10. * 2 as published by the Free Software Foundation.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  20. * 02110-1301, USA.
  21. *
  22. *
  23. * Transmission engine. Get an skb, create from that a WLP transmit
  24. * context, add a WLP TX header (which we keep prefilled in the
  25. * device's instance), fill out the target-specific fields and
  26. * fire it.
  27. *
  28. * ROADMAP:
  29. *
  30. * Entry points:
  31. *
  32. * i1480u_tx_release(): called by i1480u_disconnect() to release
  33. * pending tx contexts.
  34. *
  35. * i1480u_tx_cb(): callback for TX contexts (USB URBs)
  36. * i1480u_tx_destroy():
  37. *
  38. * i1480u_tx_timeout(): called for timeout handling from the
  39. * network stack.
  40. *
  41. * i1480u_hard_start_xmit(): called for transmitting an skb from
  42. * the network stack. Will interact with WLP
  43. * substack to verify and prepare frame.
  44. * i1480u_xmit_frame(): actual transmission on hardware
  45. *
  46. * i1480u_tx_create() Creates TX context
  47. * i1480u_tx_create_1() For packets in 1 fragment
  48. * i1480u_tx_create_n() For packets in >1 fragments
  49. *
  50. * TODO:
  51. *
  52. * - FIXME: rewrite using usb_sg_*(), add asynch support to
  53. * usb_sg_*(). It might not make too much sense as most of
  54. * the times the MTU will be smaller than one page...
  55. */
  56. #include <linux/slab.h>
  57. #include "i1480u-wlp.h"
enum {
	/* This is only for Next and Last TX packets: the max payload
	 * that fits in one fragment once the untd_hdr_rst header is
	 * accounted for. */
	i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_rst),
};
  63. /* Free resources allocated to a i1480u tx context. */
  64. static
  65. void i1480u_tx_free(struct i1480u_tx *wtx)
  66. {
  67. kfree(wtx->buf);
  68. if (wtx->skb)
  69. dev_kfree_skb_irq(wtx->skb);
  70. usb_free_urb(wtx->urb);
  71. kfree(wtx);
  72. }
/*
 * Unlink a TX context from the device's pending list and free it.
 *
 * Done under tx_list_lock so it cannot race with i1480u_tx_unlink_urbs()
 * or i1480u_tx_release() walking the same list.
 */
static
void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
{
	unsigned long flags;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);	/* not active any more */
	list_del(&wtx->list_node);
	i1480u_tx_free(wtx);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}
  82. static
  83. void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
  84. {
  85. unsigned long flags;
  86. struct i1480u_tx *wtx, *next;
  87. spin_lock_irqsave(&i1480u->tx_list_lock, flags);
  88. list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
  89. usb_unlink_urb(wtx->urb);
  90. }
  91. spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
  92. }
/*
 * Callback for a completed TX USB URB (interrupt context).
 *
 * On success, account the packet/bytes in the netdev stats; on
 * unlink/shutdown, stop the queue; on any other error, count it and
 * reset the whole device once too many errors accumulate in the
 * timeframe.  In every case the TX context is destroyed, and the queue
 * is restarted if the in-flight count drops back to the threshold.
 *
 * TODO:
 *
 * - FIXME: recover errors more gracefully
 * - FIXME: handle NAKs (I don't think they come here) for flow ctl
 */
static
void i1480u_tx_cb(struct urb *urb)
{
	struct i1480u_tx *wtx = urb->context;
	struct i1480u *i1480u = wtx->i1480u;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;

	switch (urb->status) {
	case 0:
		/* Completed OK: update TX stats under the device lock */
		spin_lock_irqsave(&i1480u->lock, flags);
		net_dev->stats.tx_packets++;
		net_dev->stats.tx_bytes += urb->actual_length;
		spin_unlock_irqrestore(&i1480u->lock, flags);
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
		dev_dbg(dev, "notif endp: reset/noent %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "notif endp: down %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	default:
		dev_err(dev, "TX: unknown URB status %d\n", urb->status);
		/* Too many errors in the EDC timeframe -> full reset */
		if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "TX: max acceptable errors exceeded."
				"Reset device.\n");
			netif_stop_queue(net_dev);
			i1480u_tx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
		}
		break;
	}
	i1480u_tx_destroy(i1480u, wtx);
	/* Restart the queue once the in-flight count drops back to the
	 * configured threshold (threshold == 0 disables restarting). */
	if (atomic_dec_return(&i1480u->tx_inflight.count)
	    <= i1480u->tx_inflight.threshold
	    && netif_queue_stopped(net_dev)
	    && i1480u->tx_inflight.threshold != 0) {
		netif_start_queue(net_dev);
		atomic_inc(&i1480u->tx_inflight.restart_count);
	}
	return;
}
  147. /*
  148. * Given a buffer that doesn't fit in a single fragment, create an
  149. * scatter/gather structure for delivery to the USB pipe.
  150. *
  151. * Implements functionality of i1480u_tx_create().
  152. *
  153. * @wtx: tx descriptor
  154. * @skb: skb to send
  155. * @gfp_mask: gfp allocation mask
  156. * @returns: Pointer to @wtx if ok, NULL on error.
  157. *
  158. * Sorry, TOO LONG a function, but breaking it up is kind of hard
  159. *
  160. * This will break the buffer in chunks smaller than
  161. * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
  162. * to each:
  163. *
  164. * 1st header \
  165. * i1480 tx header | fragment 1
  166. * fragment data /
  167. * nxt header \ fragment 2
  168. * fragment data /
  169. * ..
  170. * ..
  171. * last header \ fragment 3
  172. * last fragment data /
  173. *
  174. * This does not fill the i1480 TX header, it is left up to the
  175. * caller to do that; you can get it from @wtx->wlp_tx_hdr.
  176. *
  177. * This function consumes the skb unless there is an error.
  178. */
  179. static
  180. int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
  181. gfp_t gfp_mask)
  182. {
  183. int result;
  184. void *pl;
  185. size_t pl_size;
  186. void *pl_itr, *buf_itr;
  187. size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
  188. struct untd_hdr_1st *untd_hdr_1st;
  189. struct wlp_tx_hdr *wlp_tx_hdr;
  190. struct untd_hdr_rst *untd_hdr_rst;
  191. wtx->skb = NULL;
  192. pl = skb->data;
  193. pl_itr = pl;
  194. pl_size = skb->len;
  195. pl_size_left = pl_size; /* payload size */
  196. /* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
  197. * the headers */
  198. pl_size_1st = i1480u_MAX_FRG_SIZE
  199. - sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
  200. BUG_ON(pl_size_1st > pl_size);
  201. pl_size_left -= pl_size_1st;
  202. /* The rest have an smaller header (no i1480 TX header). We
  203. * need to break up the payload in blocks smaller than
  204. * i1480u_MAX_PL_SIZE (payload excluding header). */
  205. frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
  206. /* Allocate space for the new buffer. In this new buffer we'll
  207. * place the headers followed by the data fragment, headers,
  208. * data fragments, etc..
  209. */
  210. result = -ENOMEM;
  211. wtx->buf_size = sizeof(*untd_hdr_1st)
  212. + sizeof(*wlp_tx_hdr)
  213. + frgs * sizeof(*untd_hdr_rst)
  214. + pl_size;
  215. wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
  216. if (wtx->buf == NULL)
  217. goto error_buf_alloc;
  218. buf_itr = wtx->buf; /* We got the space, let's fill it up */
  219. /* Fill 1st fragment */
  220. untd_hdr_1st = buf_itr;
  221. buf_itr += sizeof(*untd_hdr_1st);
  222. untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
  223. untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
  224. untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
  225. untd_hdr_1st->fragment_len =
  226. cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
  227. memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
  228. /* Set up i1480 header info */
  229. wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
  230. buf_itr += sizeof(*wlp_tx_hdr);
  231. /* Copy the first fragment */
  232. memcpy(buf_itr, pl_itr, pl_size_1st);
  233. pl_itr += pl_size_1st;
  234. buf_itr += pl_size_1st;
  235. /* Now do each remaining fragment */
  236. result = -EINVAL;
  237. while (pl_size_left > 0) {
  238. if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
  239. > wtx->buf_size) {
  240. printk(KERN_ERR "BUG: no space for header\n");
  241. goto error_bug;
  242. }
  243. untd_hdr_rst = buf_itr;
  244. buf_itr += sizeof(*untd_hdr_rst);
  245. if (pl_size_left > i1480u_MAX_PL_SIZE) {
  246. frg_pl_size = i1480u_MAX_PL_SIZE;
  247. untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
  248. } else {
  249. frg_pl_size = pl_size_left;
  250. untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
  251. }
  252. untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
  253. untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
  254. untd_hdr_rst->padding = 0;
  255. if (buf_itr + frg_pl_size - wtx->buf
  256. > wtx->buf_size) {
  257. printk(KERN_ERR "BUG: no space for payload\n");
  258. goto error_bug;
  259. }
  260. memcpy(buf_itr, pl_itr, frg_pl_size);
  261. buf_itr += frg_pl_size;
  262. pl_itr += frg_pl_size;
  263. pl_size_left -= frg_pl_size;
  264. }
  265. dev_kfree_skb_irq(skb);
  266. return 0;
  267. error_bug:
  268. printk(KERN_ERR
  269. "BUG: skb %u bytes\n"
  270. "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n"
  271. "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n",
  272. skb->len,
  273. frg_pl_size, i1480u_MAX_FRG_SIZE,
  274. buf_itr - wtx->buf, wtx->buf_size, pl_size_left);
  275. kfree(wtx->buf);
  276. error_buf_alloc:
  277. return result;
  278. }
  279. /*
  280. * Given a buffer that fits in a single fragment, fill out a @wtx
  281. * struct for transmitting it down the USB pipe.
  282. *
  283. * Uses the fact that we have space reserved in front of the skbuff
  284. * for hardware headers :]
  285. *
  286. * This does not fill the i1480 TX header, it is left up to the
  287. * caller to do that; you can get it from @wtx->wlp_tx_hdr.
  288. *
  289. * @pl: pointer to payload data
  290. * @pl_size: size of the payuload
  291. *
  292. * This function does not consume the @skb.
  293. */
  294. static
  295. int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
  296. gfp_t gfp_mask)
  297. {
  298. struct untd_hdr_cmp *untd_hdr_cmp;
  299. struct wlp_tx_hdr *wlp_tx_hdr;
  300. wtx->buf = NULL;
  301. wtx->skb = skb;
  302. BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
  303. wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
  304. wtx->wlp_tx_hdr = wlp_tx_hdr;
  305. BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
  306. untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));
  307. untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
  308. untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
  309. untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
  310. untd_hdr_cmp->padding = 0;
  311. return 0;
  312. }
  313. /*
  314. * Given a skb to transmit, massage it to become palatable for the TX pipe
  315. *
  316. * This will break the buffer in chunks smaller than
  317. * i1480u_MAX_FRG_SIZE and add proper headers to each.
  318. *
  319. * 1st header \
  320. * i1480 tx header | fragment 1
  321. * fragment data /
  322. * nxt header \ fragment 2
  323. * fragment data /
  324. * ..
  325. * ..
  326. * last header \ fragment 3
  327. * last fragment data /
  328. *
  329. * Each fragment will be always smaller or equal to i1480u_MAX_FRG_SIZE.
  330. *
  331. * If the first fragment is smaller than i1480u_MAX_FRG_SIZE, then the
  332. * following is composed:
  333. *
  334. * complete header \
  335. * i1480 tx header | single fragment
  336. * packet data /
  337. *
  338. * We were going to use s/g support, but because the interface is
  339. * synch and at the end there is plenty of overhead to do it, it
  340. * didn't seem that worth for data that is going to be smaller than
  341. * one page.
  342. */
  343. static
  344. struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
  345. struct sk_buff *skb, gfp_t gfp_mask)
  346. {
  347. int result;
  348. struct usb_endpoint_descriptor *epd;
  349. int usb_pipe;
  350. unsigned long flags;
  351. struct i1480u_tx *wtx;
  352. const size_t pl_max_size =
  353. i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
  354. - sizeof(struct wlp_tx_hdr);
  355. wtx = kmalloc(sizeof(*wtx), gfp_mask);
  356. if (wtx == NULL)
  357. goto error_wtx_alloc;
  358. wtx->urb = usb_alloc_urb(0, gfp_mask);
  359. if (wtx->urb == NULL)
  360. goto error_urb_alloc;
  361. epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
  362. usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
  363. /* Fits in a single complete packet or need to split? */
  364. if (skb->len > pl_max_size) {
  365. result = i1480u_tx_create_n(wtx, skb, gfp_mask);
  366. if (result < 0)
  367. goto error_create;
  368. usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
  369. wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
  370. } else {
  371. result = i1480u_tx_create_1(wtx, skb, gfp_mask);
  372. if (result < 0)
  373. goto error_create;
  374. usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
  375. skb->data, skb->len, i1480u_tx_cb, wtx);
  376. }
  377. spin_lock_irqsave(&i1480u->tx_list_lock, flags);
  378. list_add(&wtx->list_node, &i1480u->tx_list);
  379. spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
  380. return wtx;
  381. error_create:
  382. kfree(wtx->urb);
  383. error_urb_alloc:
  384. kfree(wtx);
  385. error_wtx_alloc:
  386. return NULL;
  387. }
/*
 * Actual fragmentation and transmission of frame
 *
 * @wlp: WLP substack data structure
 * @skb: To be transmitted
 * @dst: Device address of destination
 * @returns: 0 on success, <0 on failure
 *
 * This function can also be called directly (not just from
 * hard_start_xmit), so we also check here if the interface is up
 * before sending anything.
 */
int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
		      struct uwb_dev_addr *dst)
{
	int result = -ENXIO;
	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct i1480u_tx *wtx;
	struct wlp_tx_hdr *wlp_tx_hdr;
	static unsigned char dev_bcast[2] = { 0xff, 0xff };

	BUG_ON(i1480u->wlp.rc == NULL);
	if ((net_dev->flags & IFF_UP) == 0)
		goto out;
	result = -EBUSY;
	/* Simple flow control: stop the queue when too many URBs are in
	 * flight; i1480u_tx_cb() restarts it below the threshold. */
	if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
		netif_stop_queue(net_dev);
		goto error_max_inflight;
	}
	result = -ENOMEM;
	wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
	if (unlikely(wtx == NULL)) {
		if (printk_ratelimit())
			dev_err(dev, "TX: no memory for WLP TX URB,"
				"dropping packet (in flight %d)\n",
				atomic_read(&i1480u->tx_inflight.count));
		netif_stop_queue(net_dev);
		goto error_wtx_alloc;
	}
	wtx->i1480u = i1480u;
	/* Fill out the i1480 header; @i1480u->def_tx_hdr read without
	 * locking. We do so because they are kind of orthogonal to
	 * each other (and thus not changed in an atomic batch).
	 * The ETH header is right after the WLP TX header. */
	wlp_tx_hdr = wtx->wlp_tx_hdr;
	*wlp_tx_hdr = i1480u->options.def_tx_hdr;
	wlp_tx_hdr->dstaddr = *dst;
	if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
	    && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
		/* Broadcast message directed to DRP host. Send as best
		 * effort on PCA. */
		wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority);
	}
	result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */
	if (result < 0) {
		dev_err(dev, "TX: cannot submit URB: %d\n", result);
		/* We leave the freeing of skb to the calling function
		 * (clearing wtx->skb keeps tx_destroy from freeing it). */
		wtx->skb = NULL;
		goto error_tx_urb_submit;
	}
	atomic_inc(&i1480u->tx_inflight.count);
	net_dev->trans_start = jiffies;
	return result;

error_tx_urb_submit:
	i1480u_tx_destroy(i1480u, wtx);
error_wtx_alloc:
error_max_inflight:
out:
	return result;
}
  459. /*
  460. * Transmit an skb Called when an skbuf has to be transmitted
  461. *
  462. * The skb is first passed to WLP substack to ensure this is a valid
  463. * frame. If valid the device address of destination will be filled and
  464. * the WLP header prepended to the skb. If this step fails we fake sending
  465. * the frame, if we return an error the network stack will just keep trying.
  466. *
  467. * Broadcast frames inside a WSS needs to be treated special as multicast is
  468. * not supported. A broadcast frame is sent as unicast to each member of the
  469. * WSS - this is done by the WLP substack when it finds a broadcast frame.
  470. * So, we test if the WLP substack took over the skb and only transmit it
  471. * if it has not (been taken over).
  472. *
  473. * @net_dev->xmit_lock is held
  474. */
  475. netdev_tx_t i1480u_hard_start_xmit(struct sk_buff *skb,
  476. struct net_device *net_dev)
  477. {
  478. int result;
  479. struct i1480u *i1480u = netdev_priv(net_dev);
  480. struct device *dev = &i1480u->usb_iface->dev;
  481. struct uwb_dev_addr dst;
  482. if ((net_dev->flags & IFF_UP) == 0)
  483. goto error;
  484. result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
  485. if (result < 0) {
  486. dev_err(dev, "WLP verification of TX frame failed (%d). "
  487. "Dropping packet.\n", result);
  488. goto error;
  489. } else if (result == 1) {
  490. /* trans_start time will be set when WLP actually transmits
  491. * the frame */
  492. goto out;
  493. }
  494. result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
  495. if (result < 0) {
  496. dev_err(dev, "Frame TX failed (%d).\n", result);
  497. goto error;
  498. }
  499. return NETDEV_TX_OK;
  500. error:
  501. dev_kfree_skb_any(skb);
  502. net_dev->stats.tx_dropped++;
  503. out:
  504. return NETDEV_TX_OK;
  505. }
/*
 * Called by the network stack when a packet transmission doesn't
 * complete in a reasonable period.
 *
 * Device reset may sleep - do it outside of interrupt context (delayed)
 */
void i1480u_tx_timeout(struct net_device *net_dev)
{
	struct i1480u *i1480u = netdev_priv(net_dev);

	wlp_reset_all(&i1480u->wlp);
}
/*
 * Cancel and reap all pending TX contexts.
 *
 * Called by i1480u_disconnect(): unlink every queued URB, then poll
 * until the completion callbacks have emptied tx_list.
 */
void i1480u_tx_release(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;
	int count = 0, empty;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		count++;
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	count = count*10; /* wait up to 200ms per unlinked urb (intervals of 20ms) */
	/*
	 * We don't like this solution too much (dirty as it is), but
	 * it is cheaper than putting a refcount on each i1480u_tx and
	 * waiting for all of them to go away...
	 *
	 * Called when no more packets can be added to tx_list
	 * so we can wait for it to be empty.
	 */
	while (1) {
		spin_lock_irqsave(&i1480u->tx_list_lock, flags);
		empty = list_empty(&i1480u->tx_list);
		spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
		if (empty)
			break;
		count--;
		BUG_ON(count == 0);	/* callbacks never drained the list */
		msleep(20);
	}
}