/*
 * WUSB Wire Adapter: WLP interface
 * Driver for the Linux Network stack.
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * i1480u's RX handling is simple. The i1480u sends received network
 * packets broken up into fragments; 1 to N fragments make a packet.
 * We assemble them and deliver the packet with netif_rx().
 *
 * Because each USB transfer is a *single* fragment (except when the
 * transfer contains a first fragment, which arrives together with the
 * following fragment), each URB completion handles one or two
 * fragments. So we queue N URBs, each with its own fragment buffer.
 * When a URB is done, we process it (adding to the current skb from
 * the fragment buffer until complete). Once processed, we requeue the
 * URB. There is always a bunch of URBs ready to take data, so the gap
 * between transfers should be minimal.
 *
 * A URB's transfer buffer is the data field of a socket buffer. This
 * reduces copying, as data can be passed directly to the network
 * layer. If a complete packet or a 1st fragment is received, the URB's
 * transfer buffer is taken away from it and used to send data to the
 * network layer. In this case a new transfer buffer is allocated for
 * the URB before it is requeued. If a "NEXT" or "LAST" fragment is
 * received, the fragment contents are appended to the RX packet under
 * construction and the transfer buffer is reused. To be able to use
 * this buffer to assemble complete packets we set each buffer's size
 * to that of the MAX ethernet packet that can be received. There is
 * thus room for improvement in memory usage.
 *
 * When the max tx fragment size increases, we should be able to read
 * data into the skbs directly with very simple code.
 *
 * ROADMAP:
 *
 * ENTRY POINTS:
 *
 *   i1480u_rx_setup(): setup RX context [from i1480u_open()]
 *
 *   i1480u_rx_release(): release RX context [from i1480u_stop()]
 *
 *   i1480u_rx_cb(): called when the RX USB URB receives a
 *                   packet. It removes the header and pushes it up
 *                   the Linux netdev stack with netif_rx().
 *
 *     i1480u_rx_buffer()
 *       i1480u_drop() and i1480u_fix()
 *       i1480u_skb_deliver()
 *
 */
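/*
 * Note on fragment framing (a summary derived from i1480u_rx_buffer()
 * below; the authoritative field definitions live in i1480u-wlp.h):
 *
 * Every fragment starts with an UNTD header whose type is one of
 * i1480u_PKT_FRAG_1ST, _NXT, _LST or _CMP (complete packet). Roughly,
 * the layouts handled below are:
 *
 *   1ST / CMP:  [untd_hdr_1st or untd_hdr_cmp][wlp_rx_hdr][payload ...]
 *   NXT / LST:  [untd_hdr_rst][payload ...]
 *
 * Only the first/complete fragment carries the WLP RX header (source
 * address, LQI, RSSI); NXT/LST fragments are raw payload that gets
 * appended to the packet under construction in i1480u->rx_skb.
 */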
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "i1480u-wlp.h"

/*
 * Setup the RX context
 *
 * Each URB is provided with a transfer_buffer that is the data field
 * of a new socket buffer.
 */
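/*
 * Buffer sizing note (not from the original comments, but implied by
 * the code below): each RX skb is i1480u_MAX_RX_PKT_SIZE bytes with
 * skb_reserve(skb, 2) applied, the usual 2-byte headroom so that the
 * IP header lands on a 4-byte boundary after the 14-byte Ethernet
 * header; the URB is therefore filled with at most
 * i1480u_MAX_RX_PKT_SIZE - 2 bytes. URBs are submitted with GFP_NOIO
 * here, presumably to avoid recursing into I/O while the interface is
 * being brought up.
 */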
int i1480u_rx_setup(struct i1480u *i1480u)
{
	int result, cnt;
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct usb_endpoint_descriptor *epd;
	struct sk_buff *skb;

	/* Alloc RX stuff */
	i1480u->rx_skb = NULL;	/* not in process of receiving packet */
	result = -ENOMEM;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[1].desc;
	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		struct i1480u_rx_buf *rx_buf = &i1480u->rx_buf[cnt];
		rx_buf->i1480u = i1480u;
		skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!skb) {
			dev_err(dev,
				"RX: cannot allocate RX buffer %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		skb->dev = net_dev;
		skb->ip_summed = CHECKSUM_NONE;
		skb_reserve(skb, 2);
		rx_buf->data = skb;
		rx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (unlikely(rx_buf->urb == NULL)) {
			dev_err(dev, "RX: cannot allocate URB %d\n", cnt);
			result = -ENOMEM;
			goto error;
		}
		usb_fill_bulk_urb(rx_buf->urb, i1480u->usb_dev,
			usb_rcvbulkpipe(i1480u->usb_dev, epd->bEndpointAddress),
			rx_buf->data->data, i1480u_MAX_RX_PKT_SIZE - 2,
			i1480u_rx_cb, rx_buf);
		result = usb_submit_urb(rx_buf->urb, GFP_NOIO);
		if (unlikely(result < 0)) {
			dev_err(dev, "RX: cannot submit URB %d: %d\n",
				cnt, result);
			goto error;
		}
	}
	return 0;

error:
	i1480u_rx_release(i1480u);
	return result;
}
/* Release resources associated to the rx context */
void i1480u_rx_release(struct i1480u *i1480u)
{
	int cnt;

	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		/* Kill the URB before freeing the skb that its
		 * transfer_buffer points into. */
		if (i1480u->rx_buf[cnt].urb) {
			usb_kill_urb(i1480u->rx_buf[cnt].urb);
			usb_free_urb(i1480u->rx_buf[cnt].urb);
		}
		if (i1480u->rx_buf[cnt].data)
			dev_kfree_skb(i1480u->rx_buf[cnt].data);
	}
	if (i1480u->rx_skb != NULL)
		dev_kfree_skb(i1480u->rx_skb);
}
static
void i1480u_rx_unlink_urbs(struct i1480u *i1480u)
{
	int cnt;

	for (cnt = 0; cnt < i1480u_RX_BUFS; cnt++) {
		if (i1480u->rx_buf[cnt].urb)
			usb_unlink_urb(i1480u->rx_buf[cnt].urb);
	}
}
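/*
 * Out-of-sequence handling (summary of the two macros below):
 * i1480u_fix() throws away the partially reassembled packet in
 * i1480u->rx_skb and resets the reassembly state, so a fresh packet
 * can be started from the offending fragment; i1480u_drop() only bumps
 * the rx_dropped counter. Both ratelimit their error message, and both
 * run in URB completion context, hence the dev_kfree_skb_irq() in
 * i1480u_fix().
 */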
/* Fix an out-of-sequence packet */
#define i1480u_fix(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	dev_kfree_skb_irq(i1480u->rx_skb);		\
	i1480u->rx_skb = NULL;				\
	i1480u->rx_untd_pkt_size = 0;			\
} while (0)


/* Drop an out-of-sequence packet */
#define i1480u_drop(i1480u, msg...)			\
do {							\
	if (printk_ratelimit())				\
		dev_err(&i1480u->usb_iface->dev, msg);	\
	i1480u->stats.rx_dropped++;			\
} while (0)
/* Finalize setting up the SKB and deliver it
 *
 * We first pass the incoming frame to the WLP substack for
 * verification. It may also be a WLP association frame, in which case
 * WLP will take over the processing. If WLP does not take it over, it
 * will still verify it; if the frame is invalid, the skb will be freed
 * by WLP and we will not continue parsing.
 */
static
void i1480u_skb_deliver(struct i1480u *i1480u)
{
	int should_parse;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;

	should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb,
					 &i1480u->rx_srcaddr);
	if (!should_parse)
		goto out;
	i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev);
	i1480u->stats.rx_packets++;
	i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size;
	net_dev->last_rx = jiffies;
	/* FIXME: flow control: check netif_rx() retval */
	netif_rx(i1480u->rx_skb);		/* deliver */
out:
	i1480u->rx_skb = NULL;
	i1480u->rx_untd_pkt_size = 0;
}
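/*
 * Context note (not in the original comments): delivery happens from
 * the URB completion path, i.e. in interrupt/softirq context with
 * i1480u->lock held by i1480u_rx_cb(). That is why the code uses
 * netif_rx() (safe from interrupt context) rather than
 * netif_receive_skb(), and why i1480u_fix() above frees the partial
 * skb with dev_kfree_skb_irq().
 */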
/*
 * Process a buffer of data received from the USB RX endpoint
 *
 * First fragment arrives with next or last fragment. All other fragments
 * arrive alone.
 *
 * /me hates long functions.
 */
static
void i1480u_rx_buffer(struct i1480u_rx_buf *rx_buf)
{
	unsigned pkt_completed = 0;	/* !0 when we got all pkt fragments */
	size_t untd_hdr_size, untd_frg_size;
	size_t i1480u_hdr_size;
	struct wlp_rx_hdr *i1480u_hdr = NULL;

	struct i1480u *i1480u = rx_buf->i1480u;
	struct sk_buff *skb = rx_buf->data;
	int size_left = rx_buf->urb->actual_length;
	void *ptr = rx_buf->urb->transfer_buffer; /* also rx_buf->data->data */
	struct untd_hdr *untd_hdr;

	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	struct sk_buff *new_skb;

#if 0
	dev_fnstart(dev,
		"(i1480u %p ptr %p size_left %zu)\n", i1480u, ptr, size_left);
	dev_err(dev, "RX packet, %zu bytes\n", size_left);
	dump_bytes(dev, ptr, size_left);
#endif
	i1480u_hdr_size = sizeof(struct wlp_rx_hdr);

	while (size_left > 0) {
		if (pkt_completed) {
			i1480u_drop(i1480u, "RX: fragment follows completed "
				"packet in same buffer. Dropping\n");
			break;
		}
		untd_hdr = ptr;
		if (size_left < sizeof(*untd_hdr)) {	/* Check the UNTD header */
			i1480u_drop(i1480u, "RX: short UNTD header! Dropping\n");
			goto out;
		}
		if (unlikely(untd_hdr_rx_tx(untd_hdr) == 0)) {	/* Paranoia: TX set? */
			i1480u_drop(i1480u, "RX: TX bit set! Dropping\n");
			goto out;
		}
		switch (untd_hdr_type(untd_hdr)) {	/* Check the UNTD header type */
		case i1480u_PKT_FRAG_1ST: {
			struct untd_hdr_1st *untd_hdr_1st = (void *) untd_hdr;
			dev_dbg(dev, "1st fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_1st);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: 1st fragment out of "
					"sequence! Fixing\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short 1st fragment! "
					"Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len)
						 - i1480u_hdr_size;
			untd_frg_size = le16_to_cpu(untd_hdr_1st->fragment_len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr_1st + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			skb_put(i1480u->rx_skb, untd_hdr_size + untd_frg_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			rx_buf->data = NULL;	/* need to create new buffer */
			break;
		}
		case i1480u_PKT_FRAG_NXT: {
			dev_dbg(dev, "nxt fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: next fragment out of "
					"sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short NXT fragment! "
					"Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_hdr_size + untd_frg_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
				ptr + untd_hdr_size, untd_frg_size);
			break;
		}
		case i1480u_PKT_FRAG_LST: {
			dev_dbg(dev, "Lst fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_rst);
			if (i1480u->rx_skb == NULL) {
				i1480u_drop(i1480u, "RX: last fragment out of "
					"sequence! Dropping\n");
				goto out;
			}
			if (size_left < untd_hdr_size) {
				i1480u_drop(i1480u, "RX: short LST fragment! "
					"Dropping\n");
				goto out;
			}
			untd_frg_size = le16_to_cpu(untd_hdr->len);
			if (size_left < untd_frg_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			memmove(skb_put(i1480u->rx_skb, untd_frg_size),
				ptr + untd_hdr_size, untd_frg_size);
			pkt_completed = 1;
			break;
		}
		case i1480u_PKT_FRAG_CMP: {
			dev_dbg(dev, "cmp fragment\n");
			untd_hdr_size = sizeof(struct untd_hdr_cmp);
			if (i1480u->rx_skb != NULL)
				i1480u_fix(i1480u, "RX: fix out-of-sequence CMP"
					" fragment!\n");
			if (size_left < untd_hdr_size + i1480u_hdr_size) {
				i1480u_drop(i1480u, "RX: short CMP fragment! "
					"Dropping\n");
				goto out;
			}
			i1480u->rx_untd_pkt_size = le16_to_cpu(untd_hdr->len);
			untd_frg_size = i1480u->rx_untd_pkt_size;
			if (size_left < i1480u->rx_untd_pkt_size + untd_hdr_size) {
				i1480u_drop(i1480u,
					    "RX: short payload! Dropping\n");
				goto out;
			}
			i1480u->rx_skb = skb;
			i1480u_hdr = (void *) untd_hdr + untd_hdr_size;
			i1480u->rx_srcaddr = i1480u_hdr->srcaddr;
			stats_add_sample(&i1480u->lqe_stats, (s8) i1480u_hdr->LQI - 7);
			stats_add_sample(&i1480u->rssi_stats, i1480u_hdr->RSSI + 18);
			skb_put(i1480u->rx_skb, untd_hdr_size + i1480u->rx_untd_pkt_size);
			skb_pull(i1480u->rx_skb, untd_hdr_size + i1480u_hdr_size);
			rx_buf->data = NULL;	/* for hand off skb to network stack */
			pkt_completed = 1;
			i1480u->rx_untd_pkt_size -= i1480u_hdr_size; /* accurate stat */
			break;
		}
		default:
			i1480u_drop(i1480u, "RX: unknown packet type %u! "
				"Dropping\n", untd_hdr_type(untd_hdr));
			goto out;
		}
		size_left -= untd_hdr_size + untd_frg_size;
		if (size_left > 0)
			ptr += untd_hdr_size + untd_frg_size;
	}
	if (pkt_completed)
		i1480u_skb_deliver(i1480u);
out:
	/* recreate needed RX buffers */
	if (rx_buf->data == NULL) {
		/* buffer is being used to receive packet, create new */
		new_skb = dev_alloc_skb(i1480u_MAX_RX_PKT_SIZE);
		if (!new_skb) {
			if (printk_ratelimit())
				dev_err(dev,
					"RX: cannot allocate RX buffer\n");
		} else {
			new_skb->dev = net_dev;
			new_skb->ip_summed = CHECKSUM_NONE;
			skb_reserve(new_skb, 2);
			rx_buf->data = new_skb;
		}
	}
	return;
}
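/*
 * Buffer hand-off note (a summary of the interplay between
 * i1480u_rx_buffer() above and i1480u_rx_cb() below): when a 1ST or
 * CMP fragment claims the URB's skb, rx_buf->data is set to NULL and a
 * replacement skb is allocated at "out:" so the URB can be refilled
 * and resubmitted. If that allocation fails, rx_buf->data stays NULL
 * and i1480u_rx_cb() simply does not resubmit this URB, so the RX URB
 * pool shrinks by one until the interface is stopped and reopened.
 */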
/*
 * Called when an RX URB has finished receiving or has found some kind
 * of error condition.
 *
 * LIMITATIONS:
 *
 *  - We read USB-transfers, each transfer contains a SINGLE fragment
 *    (which can be a complete packet, or a 1st, next, or last fragment
 *    of a packet).
 *    Looks like a transfer can contain more than one fragment (07/18/06)
 *
 *  - Each transfer buffer is the size of the maximum packet size (minus
 *    headroom), i1480u_MAX_RX_PKT_SIZE - 2
 *
 *  - We always read the full USB-transfer, no partials.
 *
 *  - Each transfer is read directly into a skb. This skb will be used to
 *    send data to the upper layers if it is the first fragment or a
 *    complete packet. In the other cases the data will be copied from the
 *    skb to another skb that is being prepared for the upper layers from
 *    a previous first fragment.
 *
 * It is simply too much of a pain. Gosh, there should be a unified
 * SG infrastructure for *everything* [so that I could declare a SG
 * buffer, pass it to USB for receiving, append some space to it if
 * I wish, receive more until I have the whole chunk, adapt
 * pointers on each fragment to remove hardware headers and then
 * attach that to an skbuff and netif_rx()].
 */
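/*
 * Status handling note (derived from the switch below): -ECONNRESET,
 * -ENOENT and -ESHUTDOWN mean the URB was unlinked or the device is
 * going away, so the URB is not resubmitted. Any other error is fed
 * into edc_inc(); once more than EDC_MAX_ERRORS accumulate within
 * EDC_ERROR_TIMEFRAME, the remaining RX URBs are unlinked and
 * wlp_reset_all() is asked to reset the device. Below that threshold
 * the buffer is just skipped (do_parse_buffer is cleared).
 */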
void i1480u_rx_cb(struct urb *urb)
{
	int result;
	int do_parse_buffer = 1;
	struct i1480u_rx_buf *rx_buf = urb->context;
	struct i1480u *i1480u = rx_buf->i1480u;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;
	u8 rx_buf_idx = rx_buf - i1480u->rx_buf;

	switch (urb->status) {
	case 0:
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_err(dev, "RX URB[%u]: going down %d\n",
			rx_buf_idx, urb->status);
		goto error;
	default:
		dev_err(dev, "RX URB[%u]: unknown status %d\n",
			rx_buf_idx, urb->status);
		if (edc_inc(&i1480u->rx_errors, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "RX: max acceptable errors exceeded,"
				" resetting device.\n");
			i1480u_rx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
			goto error;
		}
		do_parse_buffer = 0;
		break;
	}
	spin_lock_irqsave(&i1480u->lock, flags);
	/* chew the data fragments, extract network packets */
	if (do_parse_buffer) {
		i1480u_rx_buffer(rx_buf);
		if (rx_buf->data) {
			rx_buf->urb->transfer_buffer = rx_buf->data->data;
			result = usb_submit_urb(rx_buf->urb, GFP_ATOMIC);
			if (result < 0) {
				dev_err(dev, "RX URB[%u]: cannot submit %d\n",
					rx_buf_idx, result);
			}
		}
	}
	spin_unlock_irqrestore(&i1480u->lock, flags);
error:
	return;
}