/*
 * Intel Wireless WiMAX Connection 2400m
 * Handle incoming traffic and deliver it to the control or data planes
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 *  - Initial implementation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *  - Use skb_clone(), break up processing in chunks
 *  - Split transport/device specific
 *  - Make buffer size dynamic to exert less memory pressure
 *
 *
 * This handles the RX path.
 *
 * We receive an RX message from the bus-specific driver, which
 * contains one or more payloads that have potentially different
 * destinations (the data or the control path).
 *
 * So we just take that payload from the transport-specific code in
 * the form of an skb, break it up in chunks (a cloned skb each in the
 * case of network packets) and pass it to netdev or to the
 * command/ack handler (and from there to the WiMAX stack).
 *
 * PROTOCOL FORMAT
 *
 * The format of the buffer is:
 *
 *   HEADER                (struct i2400m_msg_hdr)
 *   PAYLOAD DESCRIPTOR 0  (struct i2400m_pld)
 *   PAYLOAD DESCRIPTOR 1
 *   ...
 *   PAYLOAD DESCRIPTOR N
 *   PAYLOAD 0             (raw bytes)
 *   PAYLOAD 1
 *   ...
 *   PAYLOAD N
 *
 * See tx.c for a deeper description of the alignment requirements and
 * other fun facts of this format; an illustrative offset walk follows
 * the includes below.
 *
 * ROADMAP
 *
 * i2400m_rx
 *   i2400m_rx_msg_hdr_check
 *   i2400m_rx_pl_descr_check
 *   i2400m_rx_payload
 *     i2400m_net_rx
 *     i2400m_rx_ctl
 *       i2400m_msg_size_check
 *       i2400m_report_hook_work    [in a workqueue]
 *         i2400m_report_hook
 *       wimax_msg_to_user
 *       i2400m_rx_ctl_ack
 *         wimax_msg_to_user_alloc
 *     i2400m_rx_trace
 *       i2400m_msg_size_check
 *       wimax_msg
 */

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>
#include "i2400m.h"


#define D_SUBMODULE rx
#include "debug-levels.h"
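
/*
 * Illustrative sketch of the buffer layout described in the file
 * header: given a message header, find where payload @idx starts.
 * This mirrors what i2400m_rx() below does incrementally; it is not
 * called by the driver and the function name is made up for the
 * example, hence the #if 0 guard.
 */
#if 0
static const void *i2400m_example_payload_ptr(
	const struct i2400m_msg_hdr *msg_hdr, unsigned idx)
{
	unsigned i, num_pls = le16_to_cpu(msg_hdr->num_pls);
	/* Payload data starts after the header and the payload
	 * descriptor table, rounded up to the padding boundary. */
	size_t itr = ALIGN(sizeof(*msg_hdr)
			   + num_pls * sizeof(msg_hdr->pld[0]),
			   I2400M_PL_PAD);

	for (i = 0; i < idx; i++)	/* skip the preceding payloads */
		itr += ALIGN(i2400m_pld_size(&msg_hdr->pld[i]),
			     I2400M_PL_PAD);
	return (const void *) msg_hdr + itr;
}
#endif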

struct i2400m_report_hook_args {
	struct sk_buff *skb_rx;
	const struct i2400m_l3l4_hdr *l3l4_hdr;
	size_t size;
};

/*
 * Execute i2400m_report_hook in a workqueue
 *
 * Unpacks arguments from the deferred call, executes it and then
 * drops the references.
 *
 * Obvious NOTE: References are needed because we are a separate
 * thread; otherwise the buffer changes under us because it is
 * released by the original caller.
 */
static
void i2400m_report_hook_work(struct work_struct *ws)
{
	struct i2400m_work *iw =
		container_of(ws, struct i2400m_work, ws);
	struct i2400m_report_hook_args *args = (void *) iw->pl;
	i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size);
	kfree_skb(args->skb_rx);
	i2400m_put(iw->i2400m);
	kfree(iw);
}

/*
 * Process an ack to a command
 *
 * @i2400m: device descriptor
 * @payload: pointer to message
 * @size: size of the message
 *
 * Pass the acknowledgment (in an skb) to the thread that is waiting
 * for it in i2400m->msg_completion.
 *
 * We need to coordinate properly with the thread waiting for the
 * ack. Check if it is waiting or if it is gone. We drop the spinlock
 * to avoid allocating in atomic context (yeah, we could use
 * GFP_ATOMIC, but this is not so speed critical).
 */
static
void i2400m_rx_ctl_ack(struct i2400m *i2400m,
		       const void *payload, size_t size)
{
	struct device *dev = i2400m_dev(i2400m);
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	unsigned long flags;
	struct sk_buff *ack_skb;

	/* Anyone waiting for an answer? */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
		dev_err(dev, "Huh? reply to command with no waiters\n");
		goto error_no_waiter;
	}
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);

	ack_skb = wimax_msg_alloc(wimax_dev, NULL, payload, size, GFP_KERNEL);

	/* Check the waiter didn't time out waiting for the answer... */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	if (i2400m->ack_skb != ERR_PTR(-EINPROGRESS)) {
		d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
		goto error_waiter_cancelled;
	}
	if (ack_skb == NULL) {
		dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
		i2400m->ack_skb = ERR_PTR(-ENOMEM);
	} else
		i2400m->ack_skb = ack_skb;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	complete(&i2400m->msg_completion);
	return;

error_waiter_cancelled:
	if (ack_skb)
		kfree_skb(ack_skb);
error_no_waiter:
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	return;
}
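
/*
 * Sketch of the waiter side of the ack handshake above, for
 * illustration only: the real waiter is i2400m_msg_to_dev() in
 * control.c; this simplified, hypothetical helper is #if 0'ed out.
 * The waiter marks i2400m->ack_skb as "in progress", sends the
 * command through the bus driver and sleeps on msg_completion until
 * i2400m_rx_ctl_ack() fills in the skb (or an ERR_PTR on failure).
 */
#if 0
static struct sk_buff *i2400m_example_wait_for_ack(struct i2400m *i2400m)
{
	unsigned long flags;
	struct sk_buff *ack_skb;

	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400m->ack_skb = ERR_PTR(-EINPROGRESS);	/* we are waiting */
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);

	/* ... hand the command buffer to the bus-specific TX path ... */

	wait_for_completion(&i2400m->msg_completion);

	spin_lock_irqsave(&i2400m->rx_lock, flags);
	ack_skb = i2400m->ack_skb;	/* skb, or ERR_PTR() on failure */
	i2400m->ack_skb = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	return ack_skb;
}
#endif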

/*
 * Receive and process a control payload
 *
 * @i2400m: device descriptor
 * @skb_rx: skb that contains the payload (for reference counting)
 * @payload: pointer to message
 * @size: size of the message
 *
 * There are two types of control RX messages: reports (asynchronous,
 * like your every day interrupts) and 'acks' (responses to a command,
 * get or set request).
 *
 * If it is a report, we run hooks on it (to extract information for
 * things we need to do in the driver) and then pass it over to the
 * WiMAX stack to send it to user space.
 *
 * NOTE: report processing is done in a workqueue specific to the
 * generic driver, to avoid deadlocks in the system.
 *
 * If it is not a report, it is an ack to a previously executed
 * command, set or get, so wake up whoever is waiting for it from
 * i2400m_msg_to_dev(). i2400m_rx_ctl_ack() takes care of that.
 *
 * Note that the sizes we pass to other functions from here are the
 * sizes of the _l3l4_hdr + payload, not full buffer sizes, as we have
 * verified in _msg_size_check() that they are congruent.
 *
 * For reports: We can't clone the original skb where the data is
 * because we need to send this up via netlink; netlink has to add
 * headers and we can't overwrite what's preceding the payload...as
 * it is another message. So we just dup them.
 */
static
void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
		   const void *payload, size_t size)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
	unsigned msg_type;

	result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
	if (result < 0) {
		dev_err(dev, "HW BUG? device sent a bad message: %d\n",
			result);
		goto error_check;
	}
	msg_type = le16_to_cpu(l3l4_hdr->type);
	d_printf(1, dev, "%s 0x%04x: %zu bytes\n",
		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
		 msg_type, size);
	d_dump(2, dev, l3l4_hdr, size);
	if (msg_type & I2400M_MT_REPORT_MASK) {
		/* These hooks have to be run serialized; as well, the
		 * handling might force the execution of commands, and
		 * that might cause reentrancy issues with
		 * bus-specific subdrivers and workqueues. So we run
		 * it in a separate workqueue. */
		struct i2400m_report_hook_args args = {
			.skb_rx = skb_rx,
			.l3l4_hdr = l3l4_hdr,
			.size = size
		};
		if (unlikely(i2400m->ready == 0))	/* only send if up */
			return;
		skb_get(skb_rx);
		i2400m_queue_work(i2400m, i2400m_report_hook_work,
				  GFP_KERNEL, &args, sizeof(args));
		result = wimax_msg(&i2400m->wimax_dev, NULL, l3l4_hdr, size,
				   GFP_KERNEL);
		if (result < 0)
			dev_err(dev, "error sending report to userspace: %d\n",
				result);
	} else		/* an ack to a CMD, GET or SET */
		i2400m_rx_ctl_ack(i2400m, payload, size);
error_check:
	return;
}

/*
 * Receive and send up a trace
 *
 * @i2400m: device descriptor
 * @payload: pointer to trace message inside the skb
 * @size: size of the message
 *
 * The i2400m might produce trace information (diagnostics) and we
 * send it through a different kernel-to-user pipe (to avoid
 * clogging the main one).
 *
 * As in i2400m_rx_ctl(), we can't clone the original skb where the
 * data is because we need to send this up via netlink; netlink has to
 * add headers and we can't overwrite what's preceding the
 * payload...as it is another message. So we just dup them.
 */
static
void i2400m_rx_trace(struct i2400m *i2400m,
		     const void *payload, size_t size)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	const struct i2400m_l3l4_hdr *l3l4_hdr = payload;
	unsigned msg_type;

	result = i2400m_msg_size_check(i2400m, l3l4_hdr, size);
	if (result < 0) {
		dev_err(dev, "HW BUG? device sent a bad trace message: %d\n",
			result);
		goto error_check;
	}
	msg_type = le16_to_cpu(l3l4_hdr->type);
	d_printf(1, dev, "Trace %s 0x%04x: %zu bytes\n",
		 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
		 msg_type, size);
	d_dump(2, dev, l3l4_hdr, size);
	if (unlikely(i2400m->ready == 0))	/* only send if up */
		return;
	result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
	if (result < 0)
		dev_err(dev, "error sending trace to userspace: %d\n",
			result);
error_check:
	return;
}

/*
 * Act on a received payload
 *
 * @i2400m: device instance
 * @skb_rx: skb where the transaction was received
 * @single: 1 if there is only one payload, 0 otherwise
 * @pld: payload descriptor
 * @payload: payload data
 *
 * Upon reception of a payload, look at its guts in the payload
 * descriptor and decide what to do with it.
 */
static
void i2400m_rx_payload(struct i2400m *i2400m, struct sk_buff *skb_rx,
		       unsigned single, const struct i2400m_pld *pld,
		       const void *payload)
{
	struct device *dev = i2400m_dev(i2400m);
	size_t pl_size = i2400m_pld_size(pld);
	enum i2400m_pt pl_type = i2400m_pld_type(pld);

	switch (pl_type) {
	case I2400M_PT_DATA:
		d_printf(3, dev, "RX: data payload %zu bytes\n", pl_size);
		i2400m_net_rx(i2400m, skb_rx, single, payload, pl_size);
		break;
	case I2400M_PT_CTRL:
		i2400m_rx_ctl(i2400m, skb_rx, payload, pl_size);
		break;
	case I2400M_PT_TRACE:
		i2400m_rx_trace(i2400m, payload, pl_size);
		break;
	default:	/* Anything else shouldn't come to the host */
		if (printk_ratelimit())
			dev_err(dev, "RX: HW BUG? unexpected payload type %u\n",
				pl_type);
	}
}

/*
 * Check a received transaction's message header
 *
 * @i2400m: device descriptor
 * @msg_hdr: message header
 * @buf_size: size of the received buffer
 *
 * Check that the declarations done by an RX buffer message header are
 * sane and consistent with the amount of data that was received.
 */
static
int i2400m_rx_msg_hdr_check(struct i2400m *i2400m,
			    const struct i2400m_msg_hdr *msg_hdr,
			    size_t buf_size)
{
	int result = -EIO;
	struct device *dev = i2400m_dev(i2400m);

	if (buf_size < sizeof(*msg_hdr)) {
		dev_err(dev, "RX: HW BUG? message with short header (%zu "
			"vs %zu bytes expected)\n", buf_size, sizeof(*msg_hdr));
		goto error;
	}
	if (msg_hdr->barker != cpu_to_le32(I2400M_D2H_MSG_BARKER)) {
		dev_err(dev, "RX: HW BUG? message received with unknown "
			"barker 0x%08x (buf_size %zu bytes)\n",
			le32_to_cpu(msg_hdr->barker), buf_size);
		goto error;
	}
	if (msg_hdr->num_pls == 0) {
		dev_err(dev, "RX: HW BUG? zero payload packets in message\n");
		goto error;
	}
	if (le16_to_cpu(msg_hdr->num_pls) > I2400M_MAX_PLS_IN_MSG) {
		dev_err(dev, "RX: HW BUG? message contains more payloads "
			"than the maximum; ignoring\n");
		goto error;
	}
	result = 0;
error:
	return result;
}

/*
 * Check a payload descriptor against the received data
 *
 * @i2400m: device descriptor
 * @pld: payload descriptor
 * @pl_itr: offset (in bytes) within the received buffer where the
 *     payload is located
 * @buf_size: size of the received buffer
 *
 * Given a payload descriptor (part of an RX buffer), check that it is
 * sane and that the data it declares fits in the buffer.
 */
static
int i2400m_rx_pl_descr_check(struct i2400m *i2400m,
			     const struct i2400m_pld *pld,
			     size_t pl_itr, size_t buf_size)
{
	int result = -EIO;
	struct device *dev = i2400m_dev(i2400m);
	size_t pl_size = i2400m_pld_size(pld);
	enum i2400m_pt pl_type = i2400m_pld_type(pld);

	if (pl_size > i2400m->bus_pl_size_max) {
		dev_err(dev, "RX: HW BUG? payload @%zu: size %zu is "
			"bigger than maximum %zu; ignoring message\n",
			pl_itr, pl_size, i2400m->bus_pl_size_max);
		goto error;
	}
	if (pl_itr + pl_size > buf_size) {	/* enough? */
		dev_err(dev, "RX: HW BUG? payload @%zu: size %zu "
			"goes beyond the received buffer "
			"size (%zu bytes); ignoring message\n",
			pl_itr, pl_size, buf_size);
		goto error;
	}
	if (pl_type >= I2400M_PT_ILLEGAL) {
		dev_err(dev, "RX: HW BUG? illegal payload type %u; "
			"ignoring message\n", pl_type);
		goto error;
	}
	result = 0;
error:
	return result;
}

/**
 * i2400m_rx - Receive a buffer of data from the device
 *
 * @i2400m: device descriptor
 * @skb: skbuff where the data has been received
 *
 * Parse in a buffer of data that contains an RX message sent from the
 * device. See the file header for the format. Run all checks on the
 * buffer header, then run over each payload's descriptors, verify
 * their consistency and act on each payload's contents. If
 * everything is successful, update the device's statistics.
 *
 * Note: You need to set the skb to contain only the length of the
 * received buffer; for that, use skb_trim(skb, RECEIVED_SIZE).
 *
 * Returns:
 *
 * 0 if ok, < 0 errno on error
 *
 * If ok, this function now owns the skb and the caller DOESN'T have
 * to run kfree_skb() on it. However, on error, the caller still owns
 * the skb and it is responsible for releasing it.
 */
int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb)
{
	int i, result;
	struct device *dev = i2400m_dev(i2400m);
	const struct i2400m_msg_hdr *msg_hdr;
	size_t pl_itr, pl_size, skb_len;
	unsigned long flags;
	unsigned num_pls;

	skb_len = skb->len;
	d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n",
		  i2400m, skb, skb_len);
	result = -EIO;
	msg_hdr = (void *) skb->data;
	result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len);
	if (result < 0)
		goto error_msg_hdr_check;
	result = -EIO;
	num_pls = le16_to_cpu(msg_hdr->num_pls);
	pl_itr = sizeof(*msg_hdr) +	/* Check payload descriptor(s) */
		num_pls * sizeof(msg_hdr->pld[0]);
	pl_itr = ALIGN(pl_itr, I2400M_PL_PAD);
	if (pl_itr > skb->len) {	/* got all the payload descriptors? */
		dev_err(dev, "RX: HW BUG? message too short (%u bytes) for "
			"%u payload descriptors (%zu each, total %zu)\n",
			skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr);
		goto error_pl_descr_short;
	}
	/* Walk each payload--check we really got it */
	for (i = 0; i < num_pls; i++) {
		/* work around old gcc warnings */
		pl_size = i2400m_pld_size(&msg_hdr->pld[i]);
		result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i],
						  pl_itr, skb->len);
		if (result < 0)
			goto error_pl_descr_check;
		i2400m_rx_payload(i2400m, skb, num_pls == 1, &msg_hdr->pld[i],
				  skb->data + pl_itr);
		pl_itr += ALIGN(pl_size, I2400M_PL_PAD);
		cond_resched();		/* Don't monopolize */
	}
	kfree_skb(skb);
	/* Update device statistics; use the length saved before the skb
	 * was freed, not skb->len. */
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400m->rx_pl_num += i;
	if (i > i2400m->rx_pl_max)
		i2400m->rx_pl_max = i;
	if (i < i2400m->rx_pl_min)
		i2400m->rx_pl_min = i;
	i2400m->rx_num++;
	i2400m->rx_size_acc += skb_len;
	if (skb_len < i2400m->rx_size_min)
		i2400m->rx_size_min = skb_len;
	if (skb_len > i2400m->rx_size_max)
		i2400m->rx_size_max = skb_len;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
error_pl_descr_check:
error_pl_descr_short:
error_msg_hdr_check:
	d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n",
		i2400m, skb, skb_len, result);
	return result;
}
EXPORT_SYMBOL_GPL(i2400m_rx);
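
/*
 * Usage sketch for i2400m_rx() from a bus-specific RX path.  This is
 * not part of the driver (the real callers live in the bus-specific
 * subdrivers); the helper name and the calling context are made up
 * for the example, hence the #if 0 guard.  Key point from the
 * kernel-doc above: on success i2400m_rx() consumes the skb, on error
 * the caller still owns it and must free it.
 */
#if 0
static void i2400m_example_bus_rx(struct i2400m *i2400m,
				  const void *buf, size_t size)
{
	struct sk_buff *skb;
	int result;

	skb = alloc_skb(size, GFP_KERNEL);
	if (skb == NULL)
		return;
	memcpy(skb_put(skb, size), buf, size);	/* skb->len == received size */
	result = i2400m_rx(i2400m, skb);
	if (result < 0)
		kfree_skb(skb);		/* on error we still own the skb */
}
#endif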