/*
 * Intel Wireless WiMAX Connection 2400m
 * USB RX handling
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 *  - Initial implementation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *  - Use skb_clone(), break up processing in chunks
 *  - Split transport/device specific
 *  - Make buffer size dynamic to exert less memory pressure
 *
 *
 * This handles the RX path on USB.
 *
 * When a notification is received that says 'there is RX data ready',
 * we call i2400mu_rx_kick(); that wakes up the RX kthread, which
 * reads a buffer from USB and passes it to i2400m_rx() in the generic
 * handling code. The RX buffer has a specific format that is
 * described in rx.c.
 *
 * We use a kernel thread in a loop because:
 *
 *   - we want to be able to call the USB power management get/put
 *     functions (blocking) before each transaction.
 *
 *   - We might get a lot of notifications and we don't want to submit
 *     a zillion reads; by serializing, we are throttling.
 *
 *   - RX data processing can get heavy enough so that it is not
 *     appropriate for doing it in the USB callback; thus we run it in
 *     process context.
 *
 * We provide a read buffer of an arbitrary size (short of a page); if
 * the callback reports -EOVERFLOW, it means it was too small, so we
 * just double the size and retry (being careful to append, as
 * sometimes the device provided some data). Every now and then we
 * check if the average message size is smaller than half of the
 * current buffer size and, if so, we halve it. Over time, the size of
 * the preallocated buffer should track the average received
 * transaction size, adapting dynamically to it.
 *
 * ROADMAP
 *
 * i2400mu_rx_kick()            Called from notif.c when we get a
 *                              'data ready' notification
 * i2400mu_rxd()                Kernel RX daemon
 *   i2400mu_rx()               Receive USB data
 *   i2400m_rx()                Send data to generic i2400m RX handling
 *
 * i2400mu_rx_setup()           called from i2400mu_bus_dev_start()
 *
 * i2400mu_rx_release()         called from i2400mu_bus_dev_stop()
 */
#include <linux/workqueue.h>
#include <linux/usb.h>
#include "i2400m-usb.h"


#define D_SUBMODULE rx
#include "usb-debug-levels.h"

/*
 * Dynamic RX size
 *
 * We can't let the rx_size be a multiple of 512 bytes (the RX
 * endpoint's max packet size). On some USB host controllers (we
 * haven't been able to fully characterize which), if the device is
 * about to send (for example) X bytes and we only post a buffer to
 * receive n*512, it will fail to mark that as babble (so that
 * i2400mu_rx() [case -EOVERFLOW] can resize the buffer and get the
 * rest).
 *
 * So on growing or shrinking, if the new size is a multiple of the
 * max packet size, we remove a few bytes (instead of adding some, so
 * that a buddy allocator wastes less space).
 *
 * Note we also need a hook for this in i2400mu_rx() -- when we do the
 * first read, we are sure we won't hit this spot because
 * i2400mu->rx_size has been set properly. However, if we have to
 * double because of -EOVERFLOW, when we launch the read to get the
 * rest of the data, we *have* to make sure that also is not a
 * multiple of the max_pkt_size.
 */

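/*
 * Worked example of the adaptive sizing (a sketch with made-up
 * numbers, not values taken from the device spec): assume rx_size
 * starts at 4600 bytes.
 *
 *   - A ~9000 byte message arrives; usb_bulk_msg() returns -EOVERFLOW
 *     after filling the 4600 byte buffer, so i2400mu_rx_size_grow()
 *     doubles the size to 9200 (not a multiple of 512, so no -8
 *     adjustment), the skb is expanded, the partial data is kept and
 *     the read is retried for the rest.
 *
 *   - Had the doubled size been, say, 8192 (16 * 512), it would have
 *     been trimmed to 8184 so the host controller still flags an
 *     overlong transfer as babble instead of silently accepting it.
 *
 *   - Later, once 100 messages averaging ~2000 bytes have been
 *     accounted in rx_size_acc/rx_size_cnt, the average is below half
 *     of 9200, so i2400mu_rx_size_maybe_shrink() halves rx_size back
 *     to 4600.
 */
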
static
size_t i2400mu_rx_size_grow(struct i2400mu *i2400mu)
{
        struct device *dev = &i2400mu->usb_iface->dev;
        size_t rx_size;
        const size_t max_pkt_size = 512;

        rx_size = 2 * i2400mu->rx_size;
        if (rx_size % max_pkt_size == 0) {
                rx_size -= 8;
                d_printf(1, dev,
                         "RX: expected size grew to %zu [adjusted -8] "
                         "from %zu\n",
                         rx_size, i2400mu->rx_size);
        } else
                d_printf(1, dev,
                         "RX: expected size grew to %zu from %zu\n",
                         rx_size, i2400mu->rx_size);
        return rx_size;
}

static
void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
{
        const size_t max_pkt_size = 512;
        struct device *dev = &i2400mu->usb_iface->dev;

        if (unlikely(i2400mu->rx_size_cnt >= 100
                     && i2400mu->rx_size_auto_shrink)) {
                size_t avg_rx_size =
                        i2400mu->rx_size_acc / i2400mu->rx_size_cnt;
                size_t new_rx_size = i2400mu->rx_size / 2;
                if (avg_rx_size < new_rx_size) {
                        if (new_rx_size % max_pkt_size == 0) {
                                new_rx_size -= 8;
                                d_printf(1, dev,
                                         "RX: expected size shrank to %zu "
                                         "[adjusted -8] from %zu\n",
                                         new_rx_size, i2400mu->rx_size);
                        } else
                                d_printf(1, dev,
                                         "RX: expected size shrank to %zu "
                                         "from %zu\n",
                                         new_rx_size, i2400mu->rx_size);
                        i2400mu->rx_size = new_rx_size;
                        i2400mu->rx_size_cnt = 0;
                        i2400mu->rx_size_acc = i2400mu->rx_size;
                }
        }
}

/*
 * Receive a message with payloads from the USB bus into an skb
 *
 * @i2400mu: USB device descriptor
 * @rx_skb: skb where to place the received message
 *
 * Deals with all the USB-specifics of receiving, dynamically
 * increasing the buffer size if so needed. Returns the payload in the
 * skb, ready to process. On a zero-length packet, we retry.
 *
 * On soft USB errors, we retry (until they become too frequent and
 * then are promoted to hard); on hard USB errors, we reset the
 * device. On other errors (e.g., failure to reallocate the skb), we
 * just drop the message and hope the next invocation solves it.
 *
 * Returns: pointer to the skb if ok, ERR_PTR on error.
 *   NOTE: this function might realloc the skb (if it is too small),
 *   so always update with the one returned.
 *   ERR_PTR() is < 0 on error.
 *   Will return NULL if it cannot reallocate -- this can be
 *   considered a transient retryable error.
 */
static
struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
{
        int result = 0;
        struct device *dev = &i2400mu->usb_iface->dev;
        int usb_pipe, read_size, rx_size, do_autopm;
        struct usb_endpoint_descriptor *epd;
        const size_t max_pkt_size = 512;

        d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
        do_autopm = atomic_read(&i2400mu->do_autopm);
        result = do_autopm ?
                usb_autopm_get_interface(i2400mu->usb_iface) : 0;
        if (result < 0) {
                dev_err(dev, "RX: can't get autopm: %d\n", result);
                do_autopm = 0;
        }
        epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
        usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
        rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
        if (unlikely(rx_size % max_pkt_size == 0)) {
                rx_size -= 8;
                d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
        }
        result = usb_bulk_msg(
                i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
                rx_size, &read_size, 200);
        usb_mark_last_busy(i2400mu->usb_dev);
        switch (result) {
        case 0:
                if (read_size == 0)
                        goto retry;     /* ZLP, just resubmit */
                skb_put(rx_skb, read_size);
                break;
        case -EPIPE:
                /*
                 * Stall -- maybe the device is choking on our
                 * requests. Clear it and give it some time. If stalls
                 * happen too often, it might be another symptom, so we
                 * reset.
                 *
                 * No error handling for usb_clear_halt(); if it
                 * works, the retry works; if it fails, this switch
                 * does the error handling for us.
                 */
                if (edc_inc(&i2400mu->urb_edc,
                            10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
                        dev_err(dev, "RX: too many stalls in "
                                "URB; resetting device\n");
                        goto do_reset;
                }
                usb_clear_halt(i2400mu->usb_dev, usb_pipe);
                msleep(10);     /* give the device some time */
                goto retry;
        case -EINVAL:                   /* while removing driver */
        case -ENODEV:                   /* dev disconnect ... */
        case -ENOENT:                   /* just ignore it */
        case -ESHUTDOWN:
        case -ECONNRESET:
                break;
        case -EOVERFLOW: {              /* too small, reallocate */
                struct sk_buff *new_skb;
                rx_size = i2400mu_rx_size_grow(i2400mu);
                if (rx_size <= (1 << 16))       /* cap it */
                        i2400mu->rx_size = rx_size;
                else {
                        if (printk_ratelimit())
                                dev_err(dev, "BUG? rx_size up to %d\n",
                                        rx_size);
                        result = -EINVAL;
                        goto out;
                }
                skb_put(rx_skb, read_size);
                new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
                                          GFP_KERNEL);
                if (new_skb == NULL) {
                        if (printk_ratelimit())
                                dev_err(dev, "RX: Can't reallocate skb to %d; "
                                        "RX dropped\n", rx_size);
                        kfree_skb(rx_skb);
                        rx_skb = NULL;
                        goto out;       /* drop it...*/
                }
                kfree_skb(rx_skb);
                rx_skb = new_skb;
                i2400mu->rx_size_cnt = 0;
                i2400mu->rx_size_acc = i2400mu->rx_size;
                d_printf(1, dev, "RX: size changed to %d, received %d, "
                         "copied %d, capacity %ld\n",
                         rx_size, read_size, rx_skb->len,
                         (long) (skb_end_pointer(new_skb) - new_skb->head));
                goto retry;
        }
                /* In most cases, it happens due to the hardware scheduling a
                 * read when there was no data - unfortunately, we have no way
                 * to tell this timeout from a USB timeout. So we just ignore
                 * it. */
        case -ETIMEDOUT:
                dev_err(dev, "RX: timeout: %d\n", result);
                result = 0;
                break;
        default:                        /* Any error */
                if (edc_inc(&i2400mu->urb_edc,
                            EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
                        goto error_reset;
                dev_err(dev, "RX: error receiving URB: %d, retrying\n", result);
                goto retry;
        }
out:
        if (do_autopm)
                usb_autopm_put_interface(i2400mu->usb_iface);
        d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
        return rx_skb;

error_reset:
        dev_err(dev, "RX: maximum errors in URB exceeded; "
                "resetting device\n");
do_reset:
        usb_queue_reset_device(i2400mu->usb_iface);
        rx_skb = ERR_PTR(result);
        goto out;
}

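/*
 * A minimal sketch (not part of the driver) of how a caller is
 * expected to handle i2400mu_rx()'s three-way return contract, as
 * described above and as done by i2400mu_rxd() below; 'skb' here is
 * a freshly allocated receive skb:
 *
 *      skb = i2400mu_rx(i2400mu, skb);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);    // hard error, device was reset
 *      if (skb == NULL || skb->len == 0) {
 *              kfree_skb(skb);         // transient / ignorable condition
 *              return 0;               // retry on the next kick
 *      }
 *      // skb may have been reallocated; only use the returned pointer
 */
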
/*
 * Kernel thread for USB reception of data
 *
 * This thread waits for a kick; once kicked, it will allocate an skb
 * and receive a single message to it from USB (using
 * i2400mu_rx()). Once received, it is passed to the generic i2400m RX
 * code for processing.
 *
 * When done processing, it runs some quick statistics to check if the
 * average size of the last 100 messages received is smaller than half
 * of the current RX buffer size; in that case, the RX buffer size is
 * halved. This helps lower the pressure on the memory allocator.
 *
 * Hard errors force the thread to exit.
 */
static
int i2400mu_rxd(void *_i2400mu)
{
        int result = 0;
        struct i2400mu *i2400mu = _i2400mu;
        struct i2400m *i2400m = &i2400mu->i2400m;
        struct device *dev = &i2400mu->usb_iface->dev;
        struct net_device *net_dev = i2400m->wimax_dev.net_dev;
        size_t pending;
        int rx_size;
        struct sk_buff *rx_skb;
        unsigned long flags;

        d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
        spin_lock_irqsave(&i2400m->rx_lock, flags);
        BUG_ON(i2400mu->rx_kthread != NULL);
        i2400mu->rx_kthread = current;
        spin_unlock_irqrestore(&i2400m->rx_lock, flags);
        while (1) {
                d_printf(2, dev, "RX: waiting for messages\n");
                pending = 0;
                wait_event_interruptible(
                        i2400mu->rx_wq,
                        (kthread_should_stop()  /* check this first! */
                         || (pending = atomic_read(&i2400mu->rx_pending_count)))
                        );
                if (kthread_should_stop())
                        break;
                if (pending == 0)
                        continue;
                rx_size = i2400mu->rx_size;
                d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
                rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
                if (rx_skb == NULL) {
                        dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
                                rx_size);
                        msleep(50);     /* give it some time? */
                        continue;
                }

                /* Receive the message with the payloads */
                rx_skb = i2400mu_rx(i2400mu, rx_skb);
                result = PTR_ERR(rx_skb);
                if (IS_ERR(rx_skb))
                        goto out;
                atomic_dec(&i2400mu->rx_pending_count);
                if (rx_skb == NULL || rx_skb->len == 0) {
                        /* some "ignorable" condition */
                        kfree_skb(rx_skb);
                        continue;
                }

                /* Deliver the message to the generic i2400m code */
                i2400mu->rx_size_cnt++;
                i2400mu->rx_size_acc += rx_skb->len;
                result = i2400m_rx(i2400m, rx_skb);
                if (result == -EIO
                    && edc_inc(&i2400mu->urb_edc,
                               EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
                        goto error_reset;
                }

                /* Maybe adjust RX buffer size */
                i2400mu_rx_size_maybe_shrink(i2400mu);
        }
        result = 0;
out:
        spin_lock_irqsave(&i2400m->rx_lock, flags);
        i2400mu->rx_kthread = NULL;
        spin_unlock_irqrestore(&i2400m->rx_lock, flags);
        d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
        return result;

error_reset:
        dev_err(dev, "RX: maximum errors in received buffer exceeded; "
                "resetting device\n");
        usb_queue_reset_device(i2400mu->usb_iface);
        goto out;
}

/*
 * Start reading from the device
 *
 * @i2400mu: device instance
 *
 * Notify the RX thread that there is data pending.
 */
void i2400mu_rx_kick(struct i2400mu *i2400mu)
{
        struct i2400m *i2400m = &i2400mu->i2400m;
        struct device *dev = &i2400mu->usb_iface->dev;

        d_fnstart(3, dev, "(i2400mu %p)\n", i2400mu);
        atomic_inc(&i2400mu->rx_pending_count);
        wake_up_all(&i2400mu->rx_wq);
        d_fnend(3, dev, "(i2400mu %p) = void\n", i2400mu);
}

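/*
 * Illustrative sketch only (the real caller lives in notif.c and does
 * more work): when the notification endpoint reports that RX data is
 * ready, the handler just kicks this RX path and returns quickly,
 * leaving the actual (blocking) USB read to the kthread above. The
 * function name below is hypothetical, used only to show where
 * i2400mu_rx_kick() fits:
 *
 *      static void example_notif_data_ready(struct i2400mu *i2400mu)
 *      {
 *              // Cheap and non-blocking: bump the pending count and
 *              // wake the RX kthread.
 *              i2400mu_rx_kick(i2400mu);
 *      }
 */
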
int i2400mu_rx_setup(struct i2400mu *i2400mu)
{
        int result = 0;
        struct i2400m *i2400m = &i2400mu->i2400m;
        struct device *dev = &i2400mu->usb_iface->dev;
        struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
        struct task_struct *kthread;

        kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
                              wimax_dev->name);
        /* the kthread function sets i2400mu->rx_kthread */
        if (IS_ERR(kthread)) {
                result = PTR_ERR(kthread);
                dev_err(dev, "RX: cannot start thread: %d\n", result);
        }
        return result;
}

void i2400mu_rx_release(struct i2400mu *i2400mu)
{
        unsigned long flags;
        struct i2400m *i2400m = &i2400mu->i2400m;
        struct device *dev = i2400m_dev(i2400m);
        struct task_struct *kthread;

        spin_lock_irqsave(&i2400m->rx_lock, flags);
        kthread = i2400mu->rx_kthread;
        i2400mu->rx_kthread = NULL;
        spin_unlock_irqrestore(&i2400m->rx_lock, flags);
        if (kthread)
                kthread_stop(kthread);
        else
                d_printf(1, dev, "RX: kthread had already exited\n");
}

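/*
 * For context, a simplified sketch (assumed shape, not the actual bus
 * glue code) of how these entry points pair up, per the roadmap at the
 * top of this file: i2400mu_rx_setup() runs from i2400mu_bus_dev_start()
 * and i2400mu_rx_release() from i2400mu_bus_dev_stop(), so the RX
 * kthread exists exactly while the device is started:
 *
 *      int i2400mu_bus_dev_start(struct i2400m *i2400m)
 *      {
 *              struct i2400mu *i2400mu =
 *                      container_of(i2400m, struct i2400mu, i2400m);
 *              // ... other bring-up steps elided ...
 *              return i2400mu_rx_setup(i2400mu);
 *      }
 *
 *      void i2400mu_bus_dev_stop(struct i2400m *i2400m)
 *      {
 *              struct i2400mu *i2400mu =
 *                      container_of(i2400m, struct i2400mu, i2400m);
 *              i2400mu_rx_release(i2400mu);
 *              // ... other teardown steps elided ...
 *      }
 */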