/*
 * WUSB Wire Adapter: WLP interface
 * Deal with TX (massaging data to transmit, handling it)
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * Transmission engine. Get an skb, create from that a WLP transmit
 * context, add a WLP TX header (which we keep prefilled in the
 * device's instance), fill out the target-specific fields and
 * fire it.
 *
 * ROADMAP:
 *
 *   Entry points:
 *
 *     i1480u_tx_release(): called by i1480u_disconnect() to release
 *                          pending tx contexts.
 *
 *     i1480u_tx_cb():      callback for TX contexts (USB URBs)
 *       i1480u_tx_destroy()
 *
 *     i1480u_tx_timeout(): called for timeout handling from the
 *                          network stack.
 *
 *     i1480u_hard_start_xmit(): called for transmitting an skb from
 *                          the network stack. Will interact with WLP
 *                          substack to verify and prepare frame.
 *       i1480u_xmit_frame(): actual transmission on hardware
 *
 *         i1480u_tx_create()        Creates TX context
 *            i1480u_tx_create_1()   For packets in 1 fragment
 *            i1480u_tx_create_n()   For packets in >1 fragments
 *
 * TODO:
 *
 * - FIXME: rewrite using usb_sg_*(), add asynch support to
 *   usb_sg_*(). It might not make too much sense as most of
 *   the times the MTU will be smaller than one page...
 */
#include "i1480u-wlp.h"

#define D_LOCAL 5
#include <linux/uwb/debug.h>

enum {
	/* This is only for Next and Last TX packets */
	i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_rst),
};
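
/*
 * Worked size example (a sketch; the real constants live in
 * i1480u-wlp.h and may differ): if i1480u_MAX_FRG_SIZE were 512
 * bytes and struct untd_hdr_rst 4 bytes, each Next/Last fragment
 * would carry i1480u_MAX_PL_SIZE = 512 - 4 = 508 payload bytes.
 */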

/** Free resources allocated to an i1480u tx context. */
static
void i1480u_tx_free(struct i1480u_tx *wtx)
{
	kfree(wtx->buf);
	if (wtx->skb)
		dev_kfree_skb_irq(wtx->skb);
	usb_free_urb(wtx->urb);
	kfree(wtx);
}

static
void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
{
	unsigned long flags;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);	/* not active any more */
	list_del(&wtx->list_node);
	i1480u_tx_free(wtx);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}

static
void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}
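
/*
 * Note: usb_unlink_urb() is asynchronous; every unlinked URB still
 * completes through i1480u_tx_cb(), which is what finally removes
 * the context from tx_list and frees it.
 */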

/**
 * Callback for a completed tx USB URB.
 *
 * TODO:
 *
 * - FIXME: recover errors more gracefully
 * - FIXME: handle NAKs (I don't think they come here) for flow ctl
 */
static
void i1480u_tx_cb(struct urb *urb)
{
	struct i1480u_tx *wtx = urb->context;
	struct i1480u *i1480u = wtx->i1480u;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&i1480u->lock, flags);
		i1480u->stats.tx_packets++;
		i1480u->stats.tx_bytes += urb->actual_length;
		spin_unlock_irqrestore(&i1480u->lock, flags);
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
		dev_dbg(dev, "notif endp: reset/noent %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "notif endp: down %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	default:
		dev_err(dev, "TX: unknown URB status %d\n", urb->status);
		if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "TX: max acceptable errors exceeded. "
				"Resetting device.\n");
			netif_stop_queue(net_dev);
			i1480u_tx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
		}
		break;
	}
	i1480u_tx_destroy(i1480u, wtx);
	if (atomic_dec_return(&i1480u->tx_inflight.count)
		    <= i1480u->tx_inflight.threshold
	    && netif_queue_stopped(net_dev)
	    && i1480u->tx_inflight.threshold != 0) {
		if (d_test(2) && printk_ratelimit())
			d_printf(2, dev, "Restart queue.\n");
		netif_start_queue(net_dev);
		atomic_inc(&i1480u->tx_inflight.restart_count);
	}
	return;
}
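
/*
 * Flow-control summary (from the code above and i1480u_xmit_frame()
 * below): tx_inflight.count is incremented for every submitted URB
 * and decremented here on completion.  The queue is stopped when the
 * count reaches tx_inflight.max and restarted once it drains back to
 * tx_inflight.threshold; a threshold of 0 disables the restart from
 * this path.
 */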

/**
 * Given a buffer that doesn't fit in a single fragment, create a
 * scatter/gather structure for delivery to the USB pipe.
 *
 * Implements functionality of i1480u_tx_create().
 *
 * @wtx:      tx descriptor
 * @skb:      skb to send
 * @gfp_mask: gfp allocation mask
 * @returns:  0 if ok, < 0 errno code on error.
 *
 * Sorry, TOO LONG a function, but breaking it up is kind of hard
 *
 * This will break the buffer in chunks smaller than
 * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
 * to each:
 *
 *   1st header           \
 *   i1480 tx header      |  fragment 1
 *   fragment data        /
 *   nxt header           \  fragment 2
 *   fragment data        /
 *   ..
 *   ..
 *   last header          \  fragment 3
 *   last fragment data   /
 *
 * This does not fill the i1480 TX header, it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * This function consumes the skb unless there is an error.
 */
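/*
 * Worked example (a sketch; the constants are illustrative, the real
 * values come from i1480u-wlp.h): assume i1480u_MAX_FRG_SIZE is 512
 * and the first fragment's two headers take 8 bytes together.  A
 * 1500 byte payload then puts 504 bytes in the first fragment and
 * splits the remaining 996 bytes into ceil(996 / i1480u_MAX_PL_SIZE)
 * Next/Last fragments, each prefixed by its own untd_hdr_rst; that
 * rounded-up division is exactly the frgs computation below.
 */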
static
int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	int result;
	void *pl;
	size_t pl_size;

	void *pl_itr, *buf_itr;
	size_t pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
	struct untd_hdr_1st *untd_hdr_1st;
	struct wlp_tx_hdr *wlp_tx_hdr;
	struct untd_hdr_rst *untd_hdr_rst;

	wtx->skb = NULL;
	pl = skb->data;
	pl_itr = pl;
	pl_size = skb->len;
	pl_size_left = pl_size;	/* payload size */
	/* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
	 * the headers */
	pl_size_1st = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
	BUG_ON(pl_size_1st > pl_size);
	pl_size_left -= pl_size_1st;
	/* The rest have a smaller header (no i1480 TX header). We
	 * need to break up the payload in blocks smaller than
	 * i1480u_MAX_PL_SIZE (payload excluding header). */
	frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
	/* Allocate space for the new buffer. In this new buffer we'll
	 * place the headers followed by the data fragment, headers,
	 * data fragments, etc..
	 */
	result = -ENOMEM;
	wtx->buf_size = sizeof(*untd_hdr_1st)
		+ sizeof(*wlp_tx_hdr)
		+ frgs * sizeof(*untd_hdr_rst)
		+ pl_size;
	wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
	if (wtx->buf == NULL)
		goto error_buf_alloc;
	buf_itr = wtx->buf;	/* We got the space, let's fill it up */
	/* Fill 1st fragment */
	untd_hdr_1st = buf_itr;
	buf_itr += sizeof(*untd_hdr_1st);
	untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
	untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
	untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
	untd_hdr_1st->fragment_len =
		cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
	memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
	/* Set up i1480 header info */
	wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
	buf_itr += sizeof(*wlp_tx_hdr);
	/* Copy the first fragment */
	memcpy(buf_itr, pl_itr, pl_size_1st);
	pl_itr += pl_size_1st;
	buf_itr += pl_size_1st;

	/* Now do each remaining fragment */
	result = -EINVAL;
	while (pl_size_left > 0) {
		d_printf(5, NULL, "ITR HDR: pl_size_left %zu buf_itr %zu\n",
			 pl_size_left, buf_itr - wtx->buf);
		if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for header\n");
			goto error_bug;
		}
		d_printf(5, NULL, "ITR HDR 2: pl_size_left %zu buf_itr %zu\n",
			 pl_size_left, buf_itr - wtx->buf);
		untd_hdr_rst = buf_itr;
		buf_itr += sizeof(*untd_hdr_rst);
		if (pl_size_left > i1480u_MAX_PL_SIZE) {
			frg_pl_size = i1480u_MAX_PL_SIZE;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
		} else {
			frg_pl_size = pl_size_left;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
		}
		d_printf(5, NULL,
			 "ITR PL: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
			 pl_size_left, buf_itr - wtx->buf, frg_pl_size);
		untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
		untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
		untd_hdr_rst->padding = 0;
		if (buf_itr + frg_pl_size - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for payload\n");
			goto error_bug;
		}
		memcpy(buf_itr, pl_itr, frg_pl_size);
		buf_itr += frg_pl_size;
		pl_itr += frg_pl_size;
		pl_size_left -= frg_pl_size;
		d_printf(5, NULL,
			 "ITR PL 2: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
			 pl_size_left, buf_itr - wtx->buf, frg_pl_size);
	}
	dev_kfree_skb_irq(skb);
	return 0;

error_bug:
	printk(KERN_ERR
	       "BUG: skb %u bytes\n"
	       "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n"
	       "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n",
	       skb->len,
	       frg_pl_size, i1480u_MAX_FRG_SIZE,
	       buf_itr - wtx->buf, wtx->buf_size, pl_size_left);
	kfree(wtx->buf);
error_buf_alloc:
	return result;
}

/**
 * Given a buffer that fits in a single fragment, fill out a @wtx
 * struct for transmitting it down the USB pipe.
 *
 * Uses the fact that we have space reserved in front of the skbuff
 * for hardware headers :]
 *
 * This does not fill the i1480 TX header, it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * @wtx:      tx descriptor
 * @skb:      skb to send (holds the payload)
 * @gfp_mask: gfp allocation mask
 *
 * This function does not consume the @skb.
 */
static
int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	struct untd_hdr_cmp *untd_hdr_cmp;
	struct wlp_tx_hdr *wlp_tx_hdr;

	wtx->buf = NULL;
	wtx->skb = skb;
	BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
	wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
	wtx->wlp_tx_hdr = wlp_tx_hdr;
	BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
	untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));

	untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
	untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
	untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
	untd_hdr_cmp->padding = 0;
	return 0;
}
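
/*
 * Resulting layout (follows from the two __skb_push() calls above):
 *
 *   skb->data --> [untd_hdr_cmp][wlp_tx_hdr][original payload]
 *
 * The whole frame ends up as one contiguous buffer that
 * i1480u_tx_create() can hand straight to usb_fill_bulk_urb().
 */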

/**
 * Given an skb to transmit, massage it to become palatable for the TX pipe
 *
 * This will break the buffer in chunks smaller than
 * i1480u_MAX_FRG_SIZE and add proper headers to each:
 *
 *   1st header           \
 *   i1480 tx header      |  fragment 1
 *   fragment data        /
 *   nxt header           \  fragment 2
 *   fragment data        /
 *   ..
 *   ..
 *   last header          \  fragment 3
 *   last fragment data   /
 *
 * Each fragment will always be smaller than or equal to i1480u_MAX_FRG_SIZE.
 *
 * If the whole payload fits in a single fragment, the following is
 * composed instead:
 *
 *   complete header      \
 *   i1480 tx header      |  single fragment
 *   packet data          /
 *
 * We were going to use s/g support, but because the interface is
 * synchronous and in the end there is plenty of overhead to do it,
 * it didn't seem worth it for data that is going to be smaller than
 * one page.
 */
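/*
 * Dispatch note (from the code below): pl_max_size is
 * i1480u_MAX_FRG_SIZE minus the complete-packet and WLP TX headers.
 * An skb at or under that size goes through i1480u_tx_create_1() and
 * is transmitted from the skb itself; anything bigger goes through
 * i1480u_tx_create_n(), which copies it into a freshly allocated,
 * fragmented buffer.
 */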
static
struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
				   struct sk_buff *skb, gfp_t gfp_mask)
{
	int result;
	struct usb_endpoint_descriptor *epd;
	int usb_pipe;
	unsigned long flags;

	struct i1480u_tx *wtx;
	const size_t pl_max_size =
		i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
		- sizeof(struct wlp_tx_hdr);

	wtx = kmalloc(sizeof(*wtx), gfp_mask);
	if (wtx == NULL)
		goto error_wtx_alloc;
	wtx->urb = usb_alloc_urb(0, gfp_mask);
	if (wtx->urb == NULL)
		goto error_urb_alloc;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
	usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
	/* Fits in a single complete packet or need to split? */
	if (skb->len > pl_max_size) {
		result = i1480u_tx_create_n(wtx, skb, gfp_mask);
		if (result < 0)
			goto error_create;
		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
				  wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
	} else {
		result = i1480u_tx_create_1(wtx, skb, gfp_mask);
		if (result < 0)
			goto error_create;
		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
				  skb->data, skb->len, i1480u_tx_cb, wtx);
	}
	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_add(&wtx->list_node, &i1480u->tx_list);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	return wtx;

error_create:
	usb_free_urb(wtx->urb);	/* URBs must be released with usb_free_urb(), not kfree() */
error_urb_alloc:
	kfree(wtx);
error_wtx_alloc:
	return NULL;
}

/**
 * Actual fragmentation and transmission of frame
 *
 * @wlp:     WLP substack data structure
 * @skb:     To be transmitted
 * @dst:     Device address of destination
 * @returns: 0 on success, <0 on failure
 *
 * This function can also be called directly (not just from
 * hard_start_xmit), so we also check here if the interface is up
 * before sending anything.
 */
int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
		      struct uwb_dev_addr *dst)
{
	int result = -ENXIO;
	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct i1480u_tx *wtx;
	struct wlp_tx_hdr *wlp_tx_hdr;
	static unsigned char dev_bcast[2] = { 0xff, 0xff };
#if 0
	int lockup = 50;
#endif

	d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len,
		  net_dev);
	BUG_ON(i1480u->wlp.rc == NULL);
	if ((net_dev->flags & IFF_UP) == 0)
		goto out;
	result = -EBUSY;
	if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
		if (d_test(2) && printk_ratelimit())
			d_printf(2, dev, "Max frames in flight, "
				 "stopping queue.\n");
		netif_stop_queue(net_dev);
		goto error_max_inflight;
	}
	result = -ENOMEM;
	wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
	if (unlikely(wtx == NULL)) {
		if (printk_ratelimit())
			dev_err(dev, "TX: no memory for WLP TX URB, "
				"dropping packet (in flight %d)\n",
				atomic_read(&i1480u->tx_inflight.count));
		netif_stop_queue(net_dev);
		goto error_wtx_alloc;
	}
	wtx->i1480u = i1480u;
	/* Fill out the i1480 header; @i1480u->def_tx_hdr read without
	 * locking. We do so because they are kind of orthogonal to
	 * each other (and thus not changed in an atomic batch).
	 * The ETH header is right after the WLP TX header. */
	wlp_tx_hdr = wtx->wlp_tx_hdr;
	*wlp_tx_hdr = i1480u->options.def_tx_hdr;
	wlp_tx_hdr->dstaddr = *dst;
	if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
	    && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
		/* Broadcast message directed to DRP host. Send as best
		 * effort on PCA. */
		wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr,
						i1480u->options.pca_base_priority);
	}

#if 0
	dev_info(dev, "TX delivering skb -> USB, %u bytes\n", skb->len);
	dump_bytes(dev, skb->data, skb->len > 72 ? 72 : skb->len);
#endif
#if 0
	/* simulates a device lockup after every lockup# packets */
	if (lockup && ((i1480u->stats.tx_packets + 1) % lockup) == 0) {
		/* Simulate a dropped transmit interrupt */
		net_dev->trans_start = jiffies;
		netif_stop_queue(net_dev);
		dev_err(dev, "Simulate lockup at %ld\n", jiffies);
		return result;
	}
#endif

	result = usb_submit_urb(wtx->urb, GFP_ATOMIC);	/* Go baby */
	if (result < 0) {
		dev_err(dev, "TX: cannot submit URB: %d\n", result);
		/* We leave the freeing of skb to the calling function */
		wtx->skb = NULL;
		goto error_tx_urb_submit;
	}
	atomic_inc(&i1480u->tx_inflight.count);
	net_dev->trans_start = jiffies;
	d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
		net_dev, result);
	return result;

error_tx_urb_submit:
	i1480u_tx_destroy(i1480u, wtx);
error_wtx_alloc:
error_max_inflight:
out:
	d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
		net_dev, result);
	return result;
}

/**
 * Transmit an skb.  Called when an skbuff has to be transmitted.
 *
 * The skb is first passed to the WLP substack to ensure this is a valid
 * frame. If valid, the device address of the destination will be filled in
 * and the WLP header prepended to the skb. If this step fails we fake
 * sending the frame; if we returned an error instead, the network stack
 * would just keep retrying.
 *
 * Broadcast frames inside a WSS need special treatment as multicast is
 * not supported. A broadcast frame is sent as unicast to each member of
 * the WSS - this is done by the WLP substack when it finds a broadcast
 * frame. So we test whether the WLP substack took over the skb and only
 * transmit it ourselves if it has not.
 *
 * @net_dev->xmit_lock is held
 */
int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
	int result = -ENXIO;	/* initialized for the early error path's d_fnend() */
	struct i1480u *i1480u = netdev_priv(net_dev);
	struct device *dev = &i1480u->usb_iface->dev;
	struct uwb_dev_addr dst;

	d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len,
		  net_dev);
	BUG_ON(i1480u->wlp.rc == NULL);
	if ((net_dev->flags & IFF_UP) == 0)
		goto error;
	result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
	if (result < 0) {
		dev_err(dev, "WLP verification of TX frame failed (%d). "
			"Dropping packet.\n", result);
		goto error;
	} else if (result == 1) {
		d_printf(6, dev, "WLP will transmit frame.\n");
		/* trans_start time will be set when WLP actually transmits
		 * the frame */
		goto out;
	}
	d_printf(6, dev, "Transmitting frame.\n");
	result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
	if (result < 0) {
		dev_err(dev, "Frame TX failed (%d).\n", result);
		goto error;
	}
	d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
		net_dev, result);
	return NETDEV_TX_OK;

error:
	dev_kfree_skb_any(skb);
	i1480u->stats.tx_dropped++;
out:
	d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
		net_dev, result);
	return NETDEV_TX_OK;
}

/**
 * Called when a packet transmission doesn't complete in a reasonable period.
 * Device reset may sleep - do it outside of interrupt context (delayed).
 */
void i1480u_tx_timeout(struct net_device *net_dev)
{
	struct i1480u *i1480u = netdev_priv(net_dev);

	wlp_reset_all(&i1480u->wlp);
}

void i1480u_tx_release(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;
	int count = 0, empty;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		count++;
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	count = count*10;	/* wait up to 200ms per unlinked urb (intervals of 20ms) */
	/*
	 * We don't like this solution too much (dirty as it is), but
	 * it is cheaper than putting a refcount on each i1480u_tx and
	 * waiting for all of them to go away...
	 *
	 * Called when no more packets can be added to tx_list, so we
	 * can wait for it to become empty.
	 */
	while (1) {
		spin_lock_irqsave(&i1480u->tx_list_lock, flags);
		empty = list_empty(&i1480u->tx_list);
		spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
		if (empty)
			break;
		count--;
		BUG_ON(count == 0);
		msleep(20);
	}
}