/*
 * WUSB Wire Adapter: WLP interface
 * Driver for the Linux Network stack.
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * i1480u's RX handling is simple. The i1480u will send the received
 * network packets broken up in fragments; 1 to N fragments make a
 * packet, we assemble them together and deliver the packet with netif_rx().
 *
 * Because each USB transfer is a *single* fragment (except when the
 * transfer contains a first fragment), each completed URB contains one
 * or two fragments. So we queue N URBs, each with its own fragment
 * buffer. When a URB is done, we process it (adding to the current skb
 * from the fragment buffer until complete). Once processed, we requeue
 * the URB. There is always a bunch of URBs ready to take data, so the
 * gap between transfers should be minimal.
 *
 * A URB's transfer buffer is the data field of a socket buffer. This
 * reduces copying, as data can be passed directly to the network layer.
 * If a complete packet or a 1st fragment is received, the URB's transfer
 * buffer is taken away from it and used to send data to the network
 * layer. In this case a new transfer buffer is allocated to the URB
 * before it is requeued. If a "NEXT" or "LAST" fragment is received, the
 * fragment contents are appended to the RX packet under construction and
 * the transfer buffer is reused. To be able to use this buffer to
 * assemble complete packets we set each buffer's size to that of the
 * largest Ethernet packet that can be received. There is thus room for
 * improvement in memory usage. (How each fragment type is laid out in a
 * transfer buffer is sketched right after this comment block.)
 *
 * When the max tx fragment size increases, we should be able to read
 * data into the skbs directly with very simple code.
 *
 * ROADMAP:
 *
 *   ENTRY POINTS:
 *
 *     i1480u_rx_setup(): setup RX context [from i1480u_open()]
 *
 *     i1480u_rx_release(): release RX context [from i1480u_stop()]
 *
 *     i1480u_rx_cb(): called when the RX USB URB receives a
 *                     packet. It removes the header and pushes it up
 *                     the Linux netdev stack with netif_rx().
 *
 *       i1480u_rx_buffer()
 *         i1480u_drop() and i1480u_fix()
 *         i1480u_skb_deliver()
 *
 */
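
/*
 * How one RX transfer buffer is laid out, as this driver parses it. This
 * sketch is inferred from i1480u_rx_buffer() below, not from a hardware
 * spec; the struct names are the ones declared in i1480u-wlp.h.
 *
 *   1ST fragment:   [struct untd_hdr_1st][struct wlp_rx_hdr][payload...]
 *       untd_hdr->len              = wlp_rx_hdr + complete packet payload
 *       untd_hdr_1st->fragment_len = payload carried by this fragment
 *       The backing skb becomes the packet under construction, so
 *       rx_buf->data is cleared and a replacement skb is allocated.
 *
 *   NXT/LST fragment:  [struct untd_hdr_rst][payload...]
 *       untd_hdr->len              = payload carried by this fragment
 *       The payload is copied into the skb under construction and the
 *       transfer buffer is reused (LST also completes the packet).
 *
 *   CMP fragment:   [struct untd_hdr_cmp][struct wlp_rx_hdr][payload...]
 *       untd_hdr->len              = wlp_rx_hdr + payload
 *       Handled like a 1ST fragment that also completes the packet.
 */
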
#include <linux/gfp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "i1480u-wlp.h"

/*
 * Setup the RX context
 *
 * Each URB is provided with a transfer_buffer that is the data field
 * of a new socket buffer.
 */
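/*
 * Note: skb_reserve(skb, 2) below offsets the skb data pointer by two
 * bytes, and the URB transfer length is shortened by the same two bytes
 * (i1480u_MAX_RX_PKT_SIZE - 2) so the headroom is never overrun; the
 * replacement buffers allocated in i1480u_rx_buffer() get the same
 * treatment. The reason for the 2-byte offset is not documented here
 * (it looks like the usual IP-header alignment trick, but that is an
 * inference).
 */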
int i1480u_rx_setup(struct i1480u *i1480u)
{
	int result, cnt;
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct usb_endpoint_descriptor *epd;
	struct sk_buff *skb;

	/* Alloc RX stuff */
	i1480u->rx_skb = NULL;	/* not in process of receiving packet */
	result = -ENOMEM;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
		rx_buf->i1480u = i1480u;
		skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!skb) {
			dev_err(dev,
				"RX: cannot allocate RX buffer %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		skb->dev = net_dev;
		skb->ip_summed = CHECKSUM_NONE;
		skb_reserve(skb, 2);
		rx_buf->data = skb;
		rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (unlikely(rx_buf->urb == NULL)) {
			dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
			usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
			rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
			i1480u_rx_cb, rx_buf);
		result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
		if (unlikely(result < 0)) {
			dev_err(dev, "RX: cannot submit URB %d: %d\n",
				cnt, result);
			goto error;
		}
	}
	return 0;

error:
	i1480u_rx_release(i1480u);
	return result;
}

/* Release the resources associated with the RX context */
void i1480u_rx_release(struct i1480u *i1480u)
{
	int cnt;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		if (i1480u->rx_buf[cnt].data)
			dev_kfree_skb(i1480u->rx_buf[cnt].data);
		if (i1480u->rx_buf[cnt].urb) {
			usb_kill_urb(i1480u->rx_buf[cnt].urb);
			usb_free_urb(i1480u->rx_buf[cnt].urb);
		}
	}
	if (i1480u->rx_skb != NULL)
		dev_kfree_skb(i1480u->rx_skb);
}

static
void i1480u_rx_unlink_urbs(struct i1480u *i1480u)
{
	int cnt;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		if (i1480u->rx_buf[cnt].urb)
			usb_unlink_urb(i1480u->rx_buf[cnt].urb);
	}
}

/* Fix an out-of-sequence packet */
#define i1480u_fix(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	dev_kfree_skb_irq(i1480u->rx_skb);		\
	i1480u->rx_skb = NULL;				\
	i1480u->rx_untd_pkt_size = 0;			\
} while (0)

/* Drop an out-of-sequence packet */
#define i1480u_drop(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	i1480u->net_dev->stats.rx_dropped++;		\
} while (0)
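
/*
 * The difference between the two: i1480u_fix() throws away a partially
 * reassembled packet (frees i1480u->rx_skb and resets the reassembly
 * state) so the offending fragment can start a new one; i1480u_drop()
 * only logs (ratelimited) and bumps the rx_dropped counter, after which
 * the callers in i1480u_rx_buffer() abandon the rest of the buffer.
 */
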
/* Finalizes setting up the SKB and delivers it
 *
 * We first pass the incoming frame to the WLP substack for verification.
 * It may also be a WLP association frame, in which case WLP will take
 * over its processing. If WLP does not take it over, it will still
 * verify it; if the frame is invalid, the skb will be freed by WLP and
 * we will not continue parsing.
 */
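/*
 * Ownership note: when wlp_receive_frame() returns 0 the skb has been
 * taken over (or freed) by the WLP substack, so all we do is forget our
 * reference; when it returns non-zero the skb is still ours and
 * netif_rx() takes ownership. Either way the reassembly state is reset.
 */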
static
void i1480u_skb_deliver(struct i1480u *i1480u)
{
	int should_parse;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;

	should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
					 &i1480u->rx_srcaddr);
	if (!should_parse)
		goto out;
	i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += i1480u->rx_untd_pkt_size;
	netif_rx(i1480u->rx_skb);		/* deliver */
out:
	i1480u->rx_skb = NULL;
	i1480u->rx_untd_pkt_size = 0;
}

/*
 * Process a buffer of data received from the USB RX endpoint
 *
 * A first fragment may arrive together with a next or last fragment;
 * all other fragments arrive alone.
 *
 * /me hates long functions.
 */
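/*
 * In outline: a 1ST or CMP fragment takes over the URB's skb as the packet
 * under construction (rx_buf->data is set to NULL so the completion handler
 * allocates a replacement), a NXT or LST fragment is memmove()d into that
 * skb, and a LST or CMP fragment marks the packet complete so that it is
 * handed to i1480u_skb_deliver().
 */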
static
void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
{
	unsigned pkt_completed = 0;	/* !0 when we got all pkt fragments */
	size_t untd_hdr_size, untd_frg_size;
	size_t i1480u_hdr_size;
	struct wlp_rx_hdr *i1480u_hdr = NULL;
	struct i1480u *i1480u = rx_buf->i1480u;
	struct sk_buff *skb = rx_buf->data;
	int size_left = rx_buf->urb->actual_length;
	void *ptr = rx_buf->urb->transfer_buffer; /* also rx_buf->data->data */
	struct untd_hdr *untd_hdr;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	struct sk_buff *new_skb;

#if 0
	dev_fnstart(dev,
		    "(i1480u %p ptr %p size_left %zu)\n", i1480u, ptr, size_left);
	dev_err(dev, "RX packet, %zu bytes\n", size_left);
	dump_bytes(dev, ptr, size_left);
#endif
	i1480u_hdr_size = sizeof(struct wlp_rx_hdr);
	while (size_left > 0) {
		if (pkt_completed) {
			i1480u_drop(i1480u, "RX: fragment follows completed "
				    "packet in same buffer. Dropping\n");
			break;
		}
		untd_hdr = ptr;
		if (size_left < sizeof(*untd_hdr)) {	/* Check the UNTD header */
			i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
			goto out;
		}
		if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) {	/* Paranoia: TX set? */
			i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
			goto out;
		}
		switch (untd_hdr_type(untd_hdr)) {	/* Check the UNTD header type */
		case i1480u_PKT_FRAG_1ST: {
			struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
			dev_dbg(dev, "1st fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_1st);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: 1st fragment out of "
					   "sequence! Fixing\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short 1st fragment! "
					    "Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
						 - i1480u_hdr_size;
			untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			rx_buf->data = NULL;	/* need to create new buffer */
			break;
		}
		case i1480u_PKT_FRAG_NXT: {
			dev_dbg(dev, "nxt fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: next fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short NXT fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
				ptr + untd_hdr_size, untd_frg_size);
			break;
		}
		case i1480u_PKT_FRAG_LST: {
			dev_dbg(dev, "Lst fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: last fragment out of "
					    "sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short LST fragment! "
					    "Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_frg_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
				ptr + untd_hdr_size, untd_frg_size);
			pkt_completed = 1;
			break;
		}
		case i1480u_PKT_FRAG_CMP: {
			dev_dbg(dev, "cmp fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_cmp);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: fix out-of-sequence CMP"
					   " fragment!\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short CMP fragment! "
					    "Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
			untd_frg_size = i1480u->rx_untd_pkt_size;
			if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			rx_buf->data = NULL;	/* for hand off skb to network stack */
			pkt_completed = 1;
			i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */
			break;
		}
		default:
			i1480u_drop(i1480u, "RX: unknown packet type %u! "
				    "Dropping\n", untd_hdr_type(untd_hdr));
			goto out;
		}
		size_left -= untd_hdr_size + untd_frg_size;
		if (size_left > 0)
			ptr += untd_hdr_size + untd_frg_size;
	}
	if (pkt_completed)
		i1480u_skb_deliver(i1480u);
out:
	/* recreate needed RX buffers */
	if (rx_buf->data == NULL) {
		/* buffer is being used to receive packet, create new */
		new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!new_skb) {
			if (printk_ratelimit())
				dev_err(dev,
					"RX: cannot allocate RX buffer\n");
		} else {
			new_skb->dev = net_dev;
			new_skb->ip_summed = CHECKSUM_NONE;
			skb_reserve(new_skb, 2);
			rx_buf->data = new_skb;
		}
	}
	return;
}

/*
 * Called when an RX URB has finished receiving or has found some kind
 * of error condition.
 *
 * LIMITATIONS:
 *
 *  - We read USB transfers; each transfer contains a SINGLE fragment
 *    (it can contain a complete packet, or a 1st, next, or last fragment
 *    of a packet).
 *    Looks like a transfer can contain more than one fragment (07/18/06)
 *
 *  - Each transfer buffer is the size of the maximum packet size (minus
 *    headroom), i1480u_MAX_RX_PKT_SIZE - 2
 *
 *  - We always read the full USB transfer, no partials.
 *
 *  - Each transfer is read directly into an skb. This skb will be used
 *    to send data to the upper layers if it is the first fragment or a
 *    complete packet. In the other cases the data will be copied from
 *    the skb to another skb that is being prepared for the upper layers
 *    from a previous first fragment.
 *
 * It is simply too much of a pain. Gosh, there should be a unified
 * SG infrastructure for *everything* [so that I could declare a SG
 * buffer, pass it to USB for receiving, append some space to it if
 * I wish, receive more until I have the whole chunk, adapt
 * pointers on each fragment to remove hardware headers and then
 * attach that to an skbuff and netif_rx()].
 */
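/*
 * Error policy, as implemented below: -ECONNRESET/-ENOENT/-ESHUTDOWN mean
 * the URB was killed on purpose, so we bail out without resubmitting. Any
 * other status is counted with edc_inc(); once EDC_MAX_ERRORS are seen
 * within EDC_ERROR_TIMEFRAME the RX URBs are unlinked and the stack is
 * reset via wlp_reset_all(). On success the buffer is parsed and, provided
 * it still has a transfer buffer, the URB is resubmitted with GFP_ATOMIC.
 */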
void i1480u_rx_cb(struct urb *urb)
{
	int result;
	int do_parse_buffer = 1;
	struct i1480u_rx_buf *rx_buf = urb->context;
	struct i1480u *i1480u = rx_buf->i1480u;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;
	u8 rx_buf_idx = rx_buf - i1480u->rx_buf;

	switch (urb->status) {
	case 0:
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_err(dev, "RX URB[%u]: going down %d\n",
			rx_buf_idx, urb->status);
		goto error;
	default:
		dev_err(dev, "RX URB[%u]: unknown status %d\n",
			rx_buf_idx, urb->status);
		if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "RX: max acceptable errors exceeded,"
				" resetting device.\n");
			i1480u_rx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
			goto error;
		}
		do_parse_buffer = 0;
		break;
	}
	spin_lock_irqsave(&i1480u->lock, flags);
	/* chew the data fragments, extract network packets */
	if (do_parse_buffer) {
		i1480u_rx_buffer(rx_buf);
		if (rx_buf->data) {
			rx_buf->urb->transfer_buffer = rx_buf->data->data;
			result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC);
			if (result < 0) {
				dev_err(dev, "RX URB[%u]: cannot submit %d\n",
					rx_buf_idx, result);
			}
		}
	}
	spin_unlock_irqrestore(&i1480u->lock, flags);
error:
	return;
}