6lowpan.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468
  1. /*
  2. * Copyright 2011, Siemens AG
  3. * written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
  4. */
  5. /*
  6. * Based on patches from Jon Smirl <jonsmirl@gmail.com>
  7. * Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2
  11. * as published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License along
  19. * with this program; if not, write to the Free Software Foundation, Inc.,
  20. * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  21. */
  22. /* Jon's code is based on 6lowpan implementation for Contiki which is:
  23. * Copyright (c) 2008, Swedish Institute of Computer Science.
  24. * All rights reserved.
  25. *
  26. * Redistribution and use in source and binary forms, with or without
  27. * modification, are permitted provided that the following conditions
  28. * are met:
  29. * 1. Redistributions of source code must retain the above copyright
  30. * notice, this list of conditions and the following disclaimer.
  31. * 2. Redistributions in binary form must reproduce the above copyright
  32. * notice, this list of conditions and the following disclaimer in the
  33. * documentation and/or other materials provided with the distribution.
  34. * 3. Neither the name of the Institute nor the names of its contributors
  35. * may be used to endorse or promote products derived from this software
  36. * without specific prior written permission.
  37. *
  38. * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
  39. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  40. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  41. * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
  42. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  43. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  44. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  45. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  46. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  47. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  48. * SUCH DAMAGE.
  49. */
  50. #include <linux/bitops.h>
  51. #include <linux/if_arp.h>
  52. #include <linux/module.h>
  53. #include <linux/moduleparam.h>
  54. #include <linux/netdevice.h>
  55. #include <net/af_ieee802154.h>
  56. #include <net/ieee802154.h>
  57. #include <net/ieee802154_netdev.h>
  58. #include <net/ipv6.h>
  59. #include "6lowpan.h"
/* TTL uncompression values, indexed by the 2-bit IPHC hop-limit code */
static const u8 lowpan_ttl_values[] = {0, 1, 64, 255};

/* list of all registered 6lowpan devices, used for packet delivery */
static LIST_HEAD(lowpan_devices);

/*
 * Uncompression of linklocal:
 *   0 -> 16 bytes from packet
 *   1 -> 2 bytes from prefix - bunch of zeroes and 8 from packet
 *   2 -> 2 bytes from prefix - zeroes + 2 from packet
 *   3 -> 2 bytes from prefix - infer 8 bytes from lladdr
 *
 *  NOTE: => the uncompress function does change 0xf to 0x10
 *  NOTE: 0x00 => no-autoconfig => unspecified
 */
static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20};

/*
 * Uncompression of ctx-based:
 *   0 -> 0 bits from packet [unspecified / reserved]
 *   1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet
 *   2 -> 8 bytes from prefix - zeroes + 2 from packet
 *   3 -> 8 bytes from prefix - infer 8 bytes from lladdr
 */
static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80};

/* Link local prefix */
static const u8 lowpan_llprefix[] = {0xfe, 0x80};

/* private device info, stored in netdev_priv() of the 6lowpan device */
struct lowpan_dev_info {
	struct net_device	*real_dev; /* real WPAN device ptr */
	struct mutex		dev_list_mtx; /* mutex for list ops */
	unsigned short		fragment_tag; /* tag for outgoing fragments */
};

/* entry tying one 6lowpan net_device into the global lowpan_devices list */
struct lowpan_dev_record {
	struct net_device *ldev;
	struct list_head list;
};

/* state of one in-progress fragment reassembly */
struct lowpan_fragment {
	struct sk_buff		*skb;		/* skb to be assembled */
	u16			length;		/* length to be assembled */
	u32			bytes_rcv;	/* bytes received */
	u16			tag;		/* current fragment tag */
	struct timer_list	timer;		/* assembling timer */
	struct list_head	list;		/* fragments list */
};

/* list of in-progress reassemblies, protected by flist_lock */
static LIST_HEAD(lowpan_fragments);
static DEFINE_SPINLOCK(flist_lock);
/* return the 6lowpan private data attached to a 6lowpan net_device */
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
	return netdev_priv(dev);
}
  109. static inline void lowpan_address_flip(u8 *src, u8 *dest)
  110. {
  111. int i;
  112. for (i = 0; i < IEEE802154_ADDR_LEN; i++)
  113. (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
  114. }
/*
 * Dump a buffer as one hex line, prefixed with the caller name and an
 * optional message.  Compiles to a no-op unless DEBUG is defined.
 */
static inline void lowpan_raw_dump_inline(const char *caller, char *msg,
				   unsigned char *buf, int len)
{
#ifdef DEBUG
	if (msg)
		pr_debug("(%s) %s: ", caller, msg);
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE,
			16, 1, buf, len, false);
#endif /* DEBUG */
}
/*
 * print data in a table format:
 *
 * addr: xx xx xx xx xx xx
 * addr: xx xx xx xx xx xx
 * ...
 *
 * Compiles to a no-op unless DEBUG is defined.
 */
static inline void lowpan_raw_dump_table(const char *caller, char *msg,
				   unsigned char *buf, int len)
{
#ifdef DEBUG
	if (msg)
		pr_debug("(%s) %s:\n", caller, msg);
	print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET,
			16, 1, buf, len, false);
#endif /* DEBUG */
}
/*
 * Compress the 64-bit IID of an address into the HC06 stream.  Writes
 * 0, 2 or 8 bytes at *hc06_ptr (advancing it) and returns the 2-bit
 * SAM/DAM mode rotated into position with rol8(val, shift), ready to be
 * OR-ed into the IPHC encoding byte.
 */
static u8
lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
		 const unsigned char *lladdr)
{
	u8 val = 0;

	if (is_addr_mac_addr_based(ipaddr, lladdr))
		val = 3; /* 0-bits: IID fully derivable from lladdr */
	else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
		/* compress IID to 16 bits xxxx::XXXX */
		memcpy(*hc06_ptr, &ipaddr->s6_addr16[7], 2);
		*hc06_ptr += 2;
		val = 2; /* 16-bits */
	} else {
		/* do not compress IID => xxxx::IID */
		memcpy(*hc06_ptr, &ipaddr->s6_addr16[4], 8);
		*hc06_ptr += 8;
		val = 1; /* 64-bits */
	}

	return rol8(val, shift);
}
  164. static void
  165. lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
  166. {
  167. memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN);
  168. /* second bit-flip (Universe/Local) is done according RFC2464 */
  169. ipaddr->s6_addr[8] ^= 0x02;
  170. }
/*
 * Uncompress addresses based on a prefix and a postfix with zeroes in
 * between. If the postfix is zero in length it will use the link address
 * to configure the IP address (autoconf style).
 * pref_post_count takes a byte where the first nibble specify prefix count
 * and the second postfix count (NOTE: 15/0xf => 16 bytes copy).
 *
 * Returns 0 on success, -EINVAL when an IID must be derived but no
 * link-layer address was supplied.
 */
static int
lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
	u8 const *prefix, u8 pref_post_count, unsigned char *lladdr)
{
	u8 prefcount = pref_post_count >> 4;
	u8 postcount = pref_post_count & 0x0f;

	/* full nibble 15 => 16 */
	prefcount = (prefcount == 15 ? 16 : prefcount);
	postcount = (postcount == 15 ? 16 : postcount);

	if (lladdr)
		lowpan_raw_dump_inline(__func__, "linklocal address",
						lladdr, IEEE802154_ADDR_LEN);
	if (prefcount > 0)
		memcpy(ipaddr, prefix, prefcount);

	if (postcount > 0) {
		/* the postfix is right-aligned in the 16-byte address */
		memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount);
		skb_pull(skb, postcount);
	} else if (prefcount > 0) {
		if (lladdr == NULL)
			return -EINVAL;

		/* no IID based configuration if no prefix and no data */
		lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
	}

	pr_debug("uncompressing %d + %d => ", prefcount, postcount);
	lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);

	return 0;
}
/* Uncompress function for multicast destination address,
 * when M bit is set.
 *
 * Rebuilds the full ff.. multicast address from the DAM-encoded short
 * form.  Returns 0 on success, -EINVAL for an unknown DAM code, -EIO
 * when the skb runs out of data.
 *
 * NOTE(review): bytes not explicitly written here are assumed to have
 * been zero-initialized by the caller (lowpan_process_data() starts
 * from a zeroed ipv6hdr) — keep that invariant.
 */
static int
lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
		struct in6_addr *ipaddr,
		const u8 dam)
{
	bool fail;

	switch (dam) {
	case LOWPAN_IPHC_DAM_00:
		/* 00: 128 bits. The full address
		 * is carried in-line.
		 */
		fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
		break;
	case LOWPAN_IPHC_DAM_01:
		/* 01: 48 bits. The address takes
		 * the form ffXX::00XX:XXXX:XXXX.
		 */
		ipaddr->s6_addr[0] = 0xFF;
		fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
		fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5);
		break;
	case LOWPAN_IPHC_DAM_10:
		/* 10: 32 bits. The address takes
		 * the form ffXX::00XX:XXXX.
		 */
		ipaddr->s6_addr[0] = 0xFF;
		fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
		fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[13], 3);
		break;
	case LOWPAN_IPHC_DAM_11:
		/* 11: 8 bits. The address takes
		 * the form ff02::00XX.
		 */
		ipaddr->s6_addr[0] = 0xFF;
		ipaddr->s6_addr[1] = 0x02;
		fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
		break;
	default:
		pr_debug("DAM value has a wrong value: 0x%x\n", dam);
		return -EINVAL;
	}

	if (fail) {
		pr_debug("Failed to fetch skb data\n");
		return -EIO;
	}

	lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is:\n",
			ipaddr->s6_addr, 16);

	return 0;
}
  257. static void
  258. lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
  259. {
  260. struct udphdr *uh = udp_hdr(skb);
  261. if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) ==
  262. LOWPAN_NHC_UDP_4BIT_PORT) &&
  263. ((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) ==
  264. LOWPAN_NHC_UDP_4BIT_PORT)) {
  265. pr_debug("UDP header: both ports compression to 4 bits\n");
  266. **hc06_ptr = LOWPAN_NHC_UDP_CS_P_11;
  267. **(hc06_ptr + 1) = /* subtraction is faster */
  268. (u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) +
  269. ((uh->source & LOWPAN_NHC_UDP_4BIT_PORT) << 4));
  270. *hc06_ptr += 2;
  271. } else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) ==
  272. LOWPAN_NHC_UDP_8BIT_PORT) {
  273. pr_debug("UDP header: remove 8 bits of dest\n");
  274. **hc06_ptr = LOWPAN_NHC_UDP_CS_P_01;
  275. memcpy(*hc06_ptr + 1, &uh->source, 2);
  276. **(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT);
  277. *hc06_ptr += 4;
  278. } else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) ==
  279. LOWPAN_NHC_UDP_8BIT_PORT) {
  280. pr_debug("UDP header: remove 8 bits of source\n");
  281. **hc06_ptr = LOWPAN_NHC_UDP_CS_P_10;
  282. memcpy(*hc06_ptr + 1, &uh->dest, 2);
  283. **(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT);
  284. *hc06_ptr += 4;
  285. } else {
  286. pr_debug("UDP header: can't compress\n");
  287. **hc06_ptr = LOWPAN_NHC_UDP_CS_P_00;
  288. memcpy(*hc06_ptr + 1, &uh->source, 2);
  289. memcpy(*hc06_ptr + 3, &uh->dest, 2);
  290. *hc06_ptr += 5;
  291. }
  292. /* checksum is always inline */
  293. memcpy(*hc06_ptr, &uh->check, 2);
  294. *hc06_ptr += 2;
  295. /* skip the UDP header */
  296. skb_pull(skb, sizeof(struct udphdr));
  297. }
  298. static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
  299. {
  300. if (unlikely(!pskb_may_pull(skb, 1)))
  301. return -EINVAL;
  302. *val = skb->data[0];
  303. skb_pull(skb, 1);
  304. return 0;
  305. }
  306. static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val)
  307. {
  308. if (unlikely(!pskb_may_pull(skb, 2)))
  309. return -EINVAL;
  310. *val = (skb->data[0] << 8) | skb->data[1];
  311. skb_pull(skb, 2);
  312. return 0;
  313. }
  314. static int
  315. lowpan_uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
  316. {
  317. u8 tmp;
  318. if (!uh)
  319. goto err;
  320. if (lowpan_fetch_skb_u8(skb, &tmp))
  321. goto err;
  322. if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
  323. pr_debug("UDP header uncompression\n");
  324. switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
  325. case LOWPAN_NHC_UDP_CS_P_00:
  326. memcpy(&uh->source, &skb->data[0], 2);
  327. memcpy(&uh->dest, &skb->data[2], 2);
  328. skb_pull(skb, 4);
  329. break;
  330. case LOWPAN_NHC_UDP_CS_P_01:
  331. memcpy(&uh->source, &skb->data[0], 2);
  332. uh->dest =
  333. skb->data[2] + LOWPAN_NHC_UDP_8BIT_PORT;
  334. skb_pull(skb, 3);
  335. break;
  336. case LOWPAN_NHC_UDP_CS_P_10:
  337. uh->source = skb->data[0] + LOWPAN_NHC_UDP_8BIT_PORT;
  338. memcpy(&uh->dest, &skb->data[1], 2);
  339. skb_pull(skb, 3);
  340. break;
  341. case LOWPAN_NHC_UDP_CS_P_11:
  342. uh->source =
  343. LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] >> 4);
  344. uh->dest =
  345. LOWPAN_NHC_UDP_4BIT_PORT + (skb->data[0] & 0x0f);
  346. skb_pull(skb, 1);
  347. break;
  348. default:
  349. pr_debug("ERROR: unknown UDP format\n");
  350. goto err;
  351. break;
  352. }
  353. pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
  354. uh->source, uh->dest);
  355. /* copy checksum */
  356. memcpy(&uh->check, &skb->data[0], 2);
  357. skb_pull(skb, 2);
  358. /*
  359. * UDP lenght needs to be infered from the lower layers
  360. * here, we obtain the hint from the remaining size of the
  361. * frame
  362. */
  363. uh->len = htons(skb->len + sizeof(struct udphdr));
  364. pr_debug("uncompressed UDP length: src = %d", uh->len);
  365. } else {
  366. pr_debug("ERROR: unsupported NH format\n");
  367. goto err;
  368. }
  369. return 0;
  370. err:
  371. return -EINVAL;
  372. }
  373. static int lowpan_header_create(struct sk_buff *skb,
  374. struct net_device *dev,
  375. unsigned short type, const void *_daddr,
  376. const void *_saddr, unsigned int len)
  377. {
  378. u8 tmp, iphc0, iphc1, *hc06_ptr;
  379. struct ipv6hdr *hdr;
  380. const u8 *saddr = _saddr;
  381. const u8 *daddr = _daddr;
  382. u8 head[100];
  383. struct ieee802154_addr sa, da;
  384. /* TODO:
  385. * if this package isn't ipv6 one, where should it be routed?
  386. */
  387. if (type != ETH_P_IPV6)
  388. return 0;
  389. hdr = ipv6_hdr(skb);
  390. hc06_ptr = head + 2;
  391. pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
  392. "\tnexthdr = 0x%02x\n\thop_lim = %d\n", hdr->version,
  393. ntohs(hdr->payload_len), hdr->nexthdr, hdr->hop_limit);
  394. lowpan_raw_dump_table(__func__, "raw skb network header dump",
  395. skb_network_header(skb), sizeof(struct ipv6hdr));
  396. if (!saddr)
  397. saddr = dev->dev_addr;
  398. lowpan_raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
  399. /*
  400. * As we copy some bit-length fields, in the IPHC encoding bytes,
  401. * we sometimes use |=
  402. * If the field is 0, and the current bit value in memory is 1,
  403. * this does not work. We therefore reset the IPHC encoding here
  404. */
  405. iphc0 = LOWPAN_DISPATCH_IPHC;
  406. iphc1 = 0;
  407. /* TODO: context lookup */
  408. lowpan_raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);
  409. /*
  410. * Traffic class, flow label
  411. * If flow label is 0, compress it. If traffic class is 0, compress it
  412. * We have to process both in the same time as the offset of traffic
  413. * class depends on the presence of version and flow label
  414. */
  415. /* hc06 format of TC is ECN | DSCP , original one is DSCP | ECN */
  416. tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
  417. tmp = ((tmp & 0x03) << 6) | (tmp >> 2);
  418. if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
  419. (hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
  420. /* flow label can be compressed */
  421. iphc0 |= LOWPAN_IPHC_FL_C;
  422. if ((hdr->priority == 0) &&
  423. ((hdr->flow_lbl[0] & 0xF0) == 0)) {
  424. /* compress (elide) all */
  425. iphc0 |= LOWPAN_IPHC_TC_C;
  426. } else {
  427. /* compress only the flow label */
  428. *hc06_ptr = tmp;
  429. hc06_ptr += 1;
  430. }
  431. } else {
  432. /* Flow label cannot be compressed */
  433. if ((hdr->priority == 0) &&
  434. ((hdr->flow_lbl[0] & 0xF0) == 0)) {
  435. /* compress only traffic class */
  436. iphc0 |= LOWPAN_IPHC_TC_C;
  437. *hc06_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
  438. memcpy(hc06_ptr + 1, &hdr->flow_lbl[1], 2);
  439. hc06_ptr += 3;
  440. } else {
  441. /* compress nothing */
  442. memcpy(hc06_ptr, &hdr, 4);
  443. /* replace the top byte with new ECN | DSCP format */
  444. *hc06_ptr = tmp;
  445. hc06_ptr += 4;
  446. }
  447. }
  448. /* NOTE: payload length is always compressed */
  449. /* Next Header is compress if UDP */
  450. if (hdr->nexthdr == UIP_PROTO_UDP)
  451. iphc0 |= LOWPAN_IPHC_NH_C;
  452. if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
  453. *hc06_ptr = hdr->nexthdr;
  454. hc06_ptr += 1;
  455. }
  456. /*
  457. * Hop limit
  458. * if 1: compress, encoding is 01
  459. * if 64: compress, encoding is 10
  460. * if 255: compress, encoding is 11
  461. * else do not compress
  462. */
  463. switch (hdr->hop_limit) {
  464. case 1:
  465. iphc0 |= LOWPAN_IPHC_TTL_1;
  466. break;
  467. case 64:
  468. iphc0 |= LOWPAN_IPHC_TTL_64;
  469. break;
  470. case 255:
  471. iphc0 |= LOWPAN_IPHC_TTL_255;
  472. break;
  473. default:
  474. *hc06_ptr = hdr->hop_limit;
  475. hc06_ptr += 1;
  476. break;
  477. }
  478. /* source address compression */
  479. if (is_addr_unspecified(&hdr->saddr)) {
  480. pr_debug("source address is unspecified, setting SAC\n");
  481. iphc1 |= LOWPAN_IPHC_SAC;
  482. /* TODO: context lookup */
  483. } else if (is_addr_link_local(&hdr->saddr)) {
  484. pr_debug("source address is link-local\n");
  485. iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
  486. LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
  487. } else {
  488. pr_debug("send the full source address\n");
  489. memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
  490. hc06_ptr += 16;
  491. }
  492. /* destination address compression */
  493. if (is_addr_mcast(&hdr->daddr)) {
  494. pr_debug("destination address is multicast: ");
  495. iphc1 |= LOWPAN_IPHC_M;
  496. if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
  497. pr_debug("compressed to 1 octet\n");
  498. iphc1 |= LOWPAN_IPHC_DAM_11;
  499. /* use last byte */
  500. *hc06_ptr = hdr->daddr.s6_addr[15];
  501. hc06_ptr += 1;
  502. } else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
  503. pr_debug("compressed to 4 octets\n");
  504. iphc1 |= LOWPAN_IPHC_DAM_10;
  505. /* second byte + the last three */
  506. *hc06_ptr = hdr->daddr.s6_addr[1];
  507. memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[13], 3);
  508. hc06_ptr += 4;
  509. } else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
  510. pr_debug("compressed to 6 octets\n");
  511. iphc1 |= LOWPAN_IPHC_DAM_01;
  512. /* second byte + the last five */
  513. *hc06_ptr = hdr->daddr.s6_addr[1];
  514. memcpy(hc06_ptr + 1, &hdr->daddr.s6_addr[11], 5);
  515. hc06_ptr += 6;
  516. } else {
  517. pr_debug("using full address\n");
  518. iphc1 |= LOWPAN_IPHC_DAM_00;
  519. memcpy(hc06_ptr, &hdr->daddr.s6_addr[0], 16);
  520. hc06_ptr += 16;
  521. }
  522. } else {
  523. /* TODO: context lookup */
  524. if (is_addr_link_local(&hdr->daddr)) {
  525. pr_debug("dest address is unicast and link-local\n");
  526. iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
  527. LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr);
  528. } else {
  529. pr_debug("dest address is unicast: using full one\n");
  530. memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
  531. hc06_ptr += 16;
  532. }
  533. }
  534. /* UDP header compression */
  535. if (hdr->nexthdr == UIP_PROTO_UDP)
  536. lowpan_compress_udp_header(&hc06_ptr, skb);
  537. head[0] = iphc0;
  538. head[1] = iphc1;
  539. skb_pull(skb, sizeof(struct ipv6hdr));
  540. memcpy(skb_push(skb, hc06_ptr - head), head, hc06_ptr - head);
  541. lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
  542. skb->len);
  543. /*
  544. * NOTE1: I'm still unsure about the fact that compression and WPAN
  545. * header are created here and not later in the xmit. So wait for
  546. * an opinion of net maintainers.
  547. */
  548. /*
  549. * NOTE2: to be absolutely correct, we must derive PANid information
  550. * from MAC subif of the 'dev' and 'real_dev' network devices, but
  551. * this isn't implemented in mainline yet, so currently we assign 0xff
  552. */
  553. {
  554. mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
  555. mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
  556. /* prepare wpan address data */
  557. sa.addr_type = IEEE802154_ADDR_LONG;
  558. sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
  559. memcpy(&(sa.hwaddr), saddr, 8);
  560. /* intra-PAN communications */
  561. da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
  562. /*
  563. * if the destination address is the broadcast address, use the
  564. * corresponding short address
  565. */
  566. if (lowpan_is_addr_broadcast(daddr)) {
  567. da.addr_type = IEEE802154_ADDR_SHORT;
  568. da.short_addr = IEEE802154_ADDR_BROADCAST;
  569. } else {
  570. da.addr_type = IEEE802154_ADDR_LONG;
  571. memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN);
  572. /* request acknowledgment */
  573. mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
  574. }
  575. return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
  576. type, (void *)&da, (void *)&sa, skb->len);
  577. }
  578. }
  579. static int lowpan_give_skb_to_devices(struct sk_buff *skb)
  580. {
  581. struct lowpan_dev_record *entry;
  582. struct sk_buff *skb_cp;
  583. int stat = NET_RX_SUCCESS;
  584. rcu_read_lock();
  585. list_for_each_entry_rcu(entry, &lowpan_devices, list)
  586. if (lowpan_dev_info(entry->ldev)->real_dev == skb->dev) {
  587. skb_cp = skb_copy(skb, GFP_ATOMIC);
  588. if (!skb_cp) {
  589. stat = -ENOMEM;
  590. break;
  591. }
  592. skb_cp->dev = entry->ldev;
  593. stat = netif_rx(skb_cp);
  594. }
  595. rcu_read_unlock();
  596. return stat;
  597. }
/*
 * Prepend the uncompressed IPv6 header @hdr to the payload in @skb and
 * deliver the result to all stacked 6lowpan devices.  Always consumes
 * @skb (it works on an expanded copy).  Returns a netif_rx() status or
 * -ENOMEM.
 */
static int lowpan_skb_deliver(struct sk_buff *skb, struct ipv6hdr *hdr)
{
	struct sk_buff *new;
	int stat = NET_RX_SUCCESS;

	/* copy with enough headroom for the full ipv6 header */
	new = skb_copy_expand(skb, sizeof(struct ipv6hdr), skb_tailroom(skb),
								GFP_ATOMIC);
	kfree_skb(skb);

	if (!new)
		return -ENOMEM;

	skb_push(new, sizeof(struct ipv6hdr));
	skb_reset_network_header(new);
	skb_copy_to_linear_data(new, hdr, sizeof(struct ipv6hdr));

	new->protocol = htons(ETH_P_IPV6);
	new->pkt_type = PACKET_HOST;

	/* lowpan_give_skb_to_devices() hands out copies, so free ours */
	stat = lowpan_give_skb_to_devices(new);

	kfree_skb(new);

	return stat;
}
/*
 * Reassembly timeout (LOWPAN_FRAG_TIMEOUT, 60 s like IPv6): discard a
 * partially assembled frame.
 *
 * NOTE(review): the entry is unlinked without taking flist_lock, which
 * looks racy against lowpan_process_data(); but that path calls
 * del_timer_sync() while holding flist_lock, so taking the lock here
 * would deadlock — confirm the lifetime reasoning before changing.
 */
static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
	struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;

	pr_debug("timer expired for frame with tag %d\n", entry->tag);

	list_del(&entry->list);
	dev_kfree_skb(entry->skb);
	kfree(entry);
}
/*
 * Allocate and register a reassembly context for fragment stream @tag
 * of total datagram length @len.  The assembly skb is sized for the
 * full datagram plus headroom for the uncompressed IPv6 header, the
 * 60 s expiry timer is armed, and the frame is linked into
 * lowpan_fragments (the caller holds flist_lock).
 * Returns the new frame, or NULL on allocation failure.
 */
static struct lowpan_fragment *
lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
{
	struct lowpan_fragment *frame;

	frame = kzalloc(sizeof(struct lowpan_fragment),
			GFP_ATOMIC);
	if (!frame)
		goto frame_err;

	INIT_LIST_HEAD(&frame->list);

	frame->length = len;
	frame->tag = tag;

	/* allocate buffer for frame assembling */
	frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length +
					       sizeof(struct ipv6hdr));
	if (!frame->skb)
		goto skb_err;

	frame->skb->priority = skb->priority;
	frame->skb->dev = skb->dev;

	/* reserve headroom for uncompressed ipv6 header */
	skb_reserve(frame->skb, sizeof(struct ipv6hdr));
	skb_put(frame->skb, frame->length);

	/* copy the first control block to keep a
	 * trace of the link-layer addresses in case
	 * of a link-local compressed address
	 */
	memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));

	init_timer(&frame->timer);
	/* time out is the same as for ipv6 - 60 sec */
	frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
	frame->timer.data = (unsigned long)frame;
	frame->timer.function = lowpan_fragment_timer_expired;

	add_timer(&frame->timer);

	list_add_tail(&frame->list, &lowpan_fragments);

	return frame;

skb_err:
	kfree(frame);
frame_err:
	return NULL;
}
  663. static int
  664. lowpan_process_data(struct sk_buff *skb)
  665. {
  666. struct ipv6hdr hdr = {};
  667. u8 tmp, iphc0, iphc1, num_context = 0;
  668. u8 *_saddr, *_daddr;
  669. int err;
  670. lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
  671. skb->len);
  672. /* at least two bytes will be used for the encoding */
  673. if (skb->len < 2)
  674. goto drop;
  675. if (lowpan_fetch_skb_u8(skb, &iphc0))
  676. goto drop;
  677. /* fragments assembling */
  678. switch (iphc0 & LOWPAN_DISPATCH_MASK) {
  679. case LOWPAN_DISPATCH_FRAG1:
  680. case LOWPAN_DISPATCH_FRAGN:
  681. {
  682. struct lowpan_fragment *frame;
  683. /* slen stores the rightmost 8 bits of the 11 bits length */
  684. u8 slen, offset = 0;
  685. u16 len, tag;
  686. bool found = false;
  687. if (lowpan_fetch_skb_u8(skb, &slen) || /* frame length */
  688. lowpan_fetch_skb_u16(skb, &tag)) /* fragment tag */
  689. goto drop;
  690. /* adds the 3 MSB to the 8 LSB to retrieve the 11 bits length */
  691. len = ((iphc0 & 7) << 8) | slen;
  692. if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) {
  693. pr_debug("%s received a FRAG1 packet (tag: %d, "
  694. "size of the entire IP packet: %d)",
  695. __func__, tag, len);
  696. } else { /* FRAGN */
  697. if (lowpan_fetch_skb_u8(skb, &offset))
  698. goto unlock_and_drop;
  699. pr_debug("%s received a FRAGN packet (tag: %d, "
  700. "size of the entire IP packet: %d, "
  701. "offset: %d)", __func__, tag, len, offset * 8);
  702. }
  703. /*
  704. * check if frame assembling with the same tag is
  705. * already in progress
  706. */
  707. spin_lock_bh(&flist_lock);
  708. list_for_each_entry(frame, &lowpan_fragments, list)
  709. if (frame->tag == tag) {
  710. found = true;
  711. break;
  712. }
  713. /* alloc new frame structure */
  714. if (!found) {
  715. pr_debug("%s first fragment received for tag %d, "
  716. "begin packet reassembly", __func__, tag);
  717. frame = lowpan_alloc_new_frame(skb, len, tag);
  718. if (!frame)
  719. goto unlock_and_drop;
  720. }
  721. /* if payload fits buffer, copy it */
  722. if (likely((offset * 8 + skb->len) <= frame->length))
  723. skb_copy_to_linear_data_offset(frame->skb, offset * 8,
  724. skb->data, skb->len);
  725. else
  726. goto unlock_and_drop;
  727. frame->bytes_rcv += skb->len;
  728. /* frame assembling complete */
  729. if ((frame->bytes_rcv == frame->length) &&
  730. frame->timer.expires > jiffies) {
  731. /* if timer haven't expired - first of all delete it */
  732. del_timer_sync(&frame->timer);
  733. list_del(&frame->list);
  734. spin_unlock_bh(&flist_lock);
  735. pr_debug("%s successfully reassembled fragment "
  736. "(tag %d)", __func__, tag);
  737. dev_kfree_skb(skb);
  738. skb = frame->skb;
  739. kfree(frame);
  740. if (lowpan_fetch_skb_u8(skb, &iphc0))
  741. goto drop;
  742. break;
  743. }
  744. spin_unlock_bh(&flist_lock);
  745. return kfree_skb(skb), 0;
  746. }
  747. default:
  748. break;
  749. }
  750. if (lowpan_fetch_skb_u8(skb, &iphc1))
  751. goto drop;
  752. _saddr = mac_cb(skb)->sa.hwaddr;
  753. _daddr = mac_cb(skb)->da.hwaddr;
  754. pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1);
  755. /* another if the CID flag is set */
  756. if (iphc1 & LOWPAN_IPHC_CID) {
  757. pr_debug("CID flag is set, increase header with one\n");
  758. if (lowpan_fetch_skb_u8(skb, &num_context))
  759. goto drop;
  760. }
  761. hdr.version = 6;
  762. /* Traffic Class and Flow Label */
  763. switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
  764. /*
  765. * Traffic Class and FLow Label carried in-line
  766. * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
  767. */
  768. case 0: /* 00b */
  769. if (lowpan_fetch_skb_u8(skb, &tmp))
  770. goto drop;
  771. memcpy(&hdr.flow_lbl, &skb->data[0], 3);
  772. skb_pull(skb, 3);
  773. hdr.priority = ((tmp >> 2) & 0x0f);
  774. hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
  775. (hdr.flow_lbl[0] & 0x0f);
  776. break;
  777. /*
  778. * Traffic class carried in-line
  779. * ECN + DSCP (1 byte), Flow Label is elided
  780. */
  781. case 1: /* 10b */
  782. if (lowpan_fetch_skb_u8(skb, &tmp))
  783. goto drop;
  784. hdr.priority = ((tmp >> 2) & 0x0f);
  785. hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
  786. break;
  787. /*
  788. * Flow Label carried in-line
  789. * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
  790. */
  791. case 2: /* 01b */
  792. if (lowpan_fetch_skb_u8(skb, &tmp))
  793. goto drop;
  794. hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
  795. memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
  796. skb_pull(skb, 2);
  797. break;
  798. /* Traffic Class and Flow Label are elided */
  799. case 3: /* 11b */
  800. break;
  801. default:
  802. break;
  803. }
  804. /* Next Header */
  805. if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
  806. /* Next header is carried inline */
  807. if (lowpan_fetch_skb_u8(skb, &(hdr.nexthdr)))
  808. goto drop;
  809. pr_debug("NH flag is set, next header carried inline: %02x\n",
  810. hdr.nexthdr);
  811. }
  812. /* Hop Limit */
  813. if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
  814. hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
  815. else {
  816. if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit)))
  817. goto drop;
  818. }
  819. /* Extract SAM to the tmp variable */
  820. tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
  821. /* Source address uncompression */
  822. pr_debug("source address stateless compression\n");
  823. err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
  824. lowpan_unc_llconf[tmp], skb->data);
  825. if (err)
  826. goto drop;
  827. /* Extract DAM to the tmp variable */
  828. tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;
  829. /* check for Multicast Compression */
  830. if (iphc1 & LOWPAN_IPHC_M) {
  831. if (iphc1 & LOWPAN_IPHC_DAC) {
  832. pr_debug("dest: context-based mcast compression\n");
  833. /* TODO: implement this */
  834. } else {
  835. err = lowpan_uncompress_multicast_daddr(
  836. skb, &hdr.daddr, tmp);
  837. if (err)
  838. goto drop;
  839. }
  840. } else {
  841. pr_debug("dest: stateless compression\n");
  842. err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
  843. lowpan_unc_llconf[tmp], skb->data);
  844. if (err)
  845. goto drop;
  846. }
  847. /* UDP data uncompression */
  848. if (iphc0 & LOWPAN_IPHC_NH_C) {
  849. struct udphdr uh;
  850. struct sk_buff *new;
  851. if (lowpan_uncompress_udp_header(skb, &uh))
  852. goto drop;
  853. /*
  854. * replace the compressed UDP head by the uncompressed UDP
  855. * header
  856. */
  857. new = skb_copy_expand(skb, sizeof(struct udphdr),
  858. skb_tailroom(skb), GFP_ATOMIC);
  859. kfree_skb(skb);
  860. if (!new)
  861. return -ENOMEM;
  862. skb = new;
  863. skb_push(skb, sizeof(struct udphdr));
  864. skb_reset_transport_header(skb);
  865. skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
  866. lowpan_raw_dump_table(__func__, "raw UDP header dump",
  867. (u8 *)&uh, sizeof(uh));
  868. hdr.nexthdr = UIP_PROTO_UDP;
  869. }
  870. /* Not fragmented package */
  871. hdr.payload_len = htons(skb->len);
  872. pr_debug("skb headroom size = %d, data length = %d\n",
  873. skb_headroom(skb), skb->len);
  874. pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
  875. "nexthdr = 0x%02x\n\thop_lim = %d\n", hdr.version,
  876. ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit);
  877. lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
  878. sizeof(hdr));
  879. return lowpan_skb_deliver(skb, &hdr);
  880. unlock_and_drop:
  881. spin_unlock_bh(&flist_lock);
  882. drop:
  883. kfree_skb(skb);
  884. return -EINVAL;
  885. }
  886. static int lowpan_set_address(struct net_device *dev, void *p)
  887. {
  888. struct sockaddr *sa = p;
  889. if (netif_running(dev))
  890. return -EBUSY;
  891. /* TODO: validate addr */
  892. memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
  893. return 0;
  894. }
  895. static int lowpan_get_mac_header_length(struct sk_buff *skb)
  896. {
  897. /*
  898. * Currently long addressing mode is supported only, so the overall
  899. * header size is 21:
  900. * FC SeqNum DPAN DA SA Sec
  901. * 2 + 1 + 2 + 8 + 8 + 0 = 21
  902. */
  903. return 21;
  904. }
  905. static int
  906. lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
  907. int mlen, int plen, int offset, int type)
  908. {
  909. struct sk_buff *frag;
  910. int hlen, ret;
  911. hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
  912. LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;
  913. lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
  914. frag = dev_alloc_skb(hlen + mlen + plen + IEEE802154_MFR_SIZE);
  915. if (!frag)
  916. return -ENOMEM;
  917. frag->priority = skb->priority;
  918. frag->dev = skb->dev;
  919. /* copy header, MFR and payload */
  920. memcpy(skb_put(frag, mlen), skb->data, mlen);
  921. memcpy(skb_put(frag, hlen), head, hlen);
  922. if (plen)
  923. skb_copy_from_linear_data_offset(skb, offset + mlen,
  924. skb_put(frag, plen), plen);
  925. lowpan_raw_dump_table(__func__, " raw fragment dump", frag->data,
  926. frag->len);
  927. ret = dev_queue_xmit(frag);
  928. return ret;
  929. }
/*
 * Split a too-large frame into a FRAG1 fragment followed by FRAGN
 * fragments and queue each of them on the real device.  The MAC header
 * (the first header_length bytes of skb) is replicated in front of
 * every fragment; "head" holds the 4/5-byte 6LoWPAN fragment header.
 * Returns 0 on success or the first transmit error.
 */
static int
lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
{
	int err, header_length, payload_length, tag, offset = 0;
	u8 head[5];

	header_length = lowpan_get_mac_header_length(skb);
	payload_length = skb->len - header_length;
	/* per-device datagram tag, incremented for every fragmented frame */
	tag = lowpan_dev_info(dev)->fragment_tag++;

	/* first fragment header: dispatch + 11-bit size + 16-bit tag */
	head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7);
	head[1] = payload_length & 0xff;
	head[2] = tag >> 8;
	head[3] = tag & 0xff;

	err = lowpan_fragment_xmit(skb, head, header_length, LOWPAN_FRAG_SIZE,
				   0, LOWPAN_DISPATCH_FRAG1);
	if (err) {
		pr_debug("%s unable to send FRAG1 packet (tag: %d)",
			 __func__, tag);
		goto exit;
	}

	offset = LOWPAN_FRAG_SIZE;

	/* next fragment header: switch dispatch to FRAGN, offset in head[4] */
	head[0] &= ~LOWPAN_DISPATCH_FRAG1;
	head[0] |= LOWPAN_DISPATCH_FRAGN;

	/* NOTE(review): the loop guard tests err >= 0 while the body aborts
	 * on any non-zero err; dev_queue_xmit() may return positive codes —
	 * confirm which error convention is intended. */
	while ((payload_length - offset > 0) && (err >= 0)) {
		int len = LOWPAN_FRAG_SIZE;

		/* offset is carried in units of 8 octets */
		head[4] = offset / 8;

		if (payload_length - offset < len)	/* last fragment */
			len = payload_length - offset;

		err = lowpan_fragment_xmit(skb, head, header_length,
					   len, offset, LOWPAN_DISPATCH_FRAGN);
		if (err) {
			pr_debug("%s unable to send a subsequent FRAGN packet "
				 "(tag: %d, offset: %d", __func__, tag, offset);
			goto exit;
		}

		offset += len;
	}

exit:
	return err;
}
/*
 * Transmit entry point (ndo_start_xmit) for the 6LoWPAN interface.
 * Frames that fit in one IEEE 802.15.4 frame are queued directly on
 * the real WPAN device; larger ones go through fragmentation.
 */
static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int err = -1;

	pr_debug("package xmit\n");

	skb->dev = lowpan_dev_info(dev)->real_dev;
	if (skb->dev == NULL) {
		pr_debug("ERROR: no real wpan device found\n");
		goto error;
	}

	/* Send directly if less than the MTU minus the 2 checksum bytes. */
	if (skb->len <= IEEE802154_MTU - IEEE802154_MFR_SIZE) {
		/* dev_queue_xmit() consumes skb, so bypass the kfree below */
		err = dev_queue_xmit(skb);
		goto out;
	}

	pr_debug("frame is too big, fragmentation is needed\n");
	err = lowpan_skb_fragmentation(skb, dev);
error:
	/* fragments (if any) are independent copies; drop the original */
	dev_kfree_skb(skb);
out:
	if (err)
		pr_debug("ERROR: xmit failed\n");

	return (err < 0) ? NET_XMIT_DROP : err;
}
  994. static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
  995. {
  996. struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
  997. return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
  998. }
  999. static u16 lowpan_get_pan_id(const struct net_device *dev)
  1000. {
  1001. struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
  1002. return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
  1003. }
  1004. static u16 lowpan_get_short_addr(const struct net_device *dev)
  1005. {
  1006. struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
  1007. return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
  1008. }
  1009. static u8 lowpan_get_dsn(const struct net_device *dev)
  1010. {
  1011. struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
  1012. return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
  1013. }
/* Link-layer header operations; only header creation is provided. */
static struct header_ops lowpan_header_ops = {
	.create	= lowpan_header_create,
};
/* Network device operations for the 6LoWPAN virtual interface. */
static const struct net_device_ops lowpan_netdev_ops = {
	.ndo_start_xmit		= lowpan_xmit,
	.ndo_set_mac_address	= lowpan_set_address,
};
/* MLME ops table: every request is forwarded to the real WPAN device. */
static struct ieee802154_mlme_ops lowpan_mlme = {
	.get_pan_id = lowpan_get_pan_id,
	.get_phy = lowpan_get_phy,
	.get_short_addr = lowpan_get_short_addr,
	.get_dsn = lowpan_get_dsn,
};
/*
 * One-time initialisation of a freshly allocated 6LoWPAN net_device
 * (installed as rtnl_link_ops.setup).
 */
static void lowpan_setup(struct net_device *dev)
{
	dev->addr_len		= IEEE802154_ADDR_LEN;
	memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
	dev->type		= ARPHRD_IEEE802154;
	/* Frame Control + Sequence Number + Address fields + Security Header */
	dev->hard_header_len	= 2 + 1 + 20 + 14;
	dev->needed_tailroom	= 2; /* FCS */
	/* NOTE(review): IPv6's minimum MTU is 1280; confirm whether the
	 * extra byte in 1281 is intentional. */
	dev->mtu		= 1281;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
	dev->watchdog_timeo	= 0;

	dev->netdev_ops		= &lowpan_netdev_ops;
	dev->header_ops		= &lowpan_header_ops;
	dev->ml_priv		= &lowpan_mlme;
	/* free the device memory when the last reference is dropped */
	dev->destructor		= free_netdev;
}
  1044. static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
  1045. {
  1046. if (tb[IFLA_ADDRESS]) {
  1047. if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
  1048. return -EINVAL;
  1049. }
  1050. return 0;
  1051. }
/*
 * Receive handler for ETH_P_IEEE802154 frames.  Uncompressed IPv6
 * datagrams (dispatch LOWPAN_DISPATCH_IPV6) are copied for alignment
 * and handed to the attached 6LoWPAN devices; IPHC datagrams and
 * FRAG1/FRAGN fragments are cloned and passed to lowpan_process_data().
 * Any other dispatch value is ignored (skb is neither consumed nor
 * delivered on that path).
 */
static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sk_buff *local_skb;

	if (!netif_running(dev))
		goto drop;

	if (dev->type != ARPHRD_IEEE802154)
		goto drop;

	/* check that it's our buffer */
	if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
		/* Copy the packet so that the IPv6 header is
		 * properly aligned.
		 */
		local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
					    skb_tailroom(skb), GFP_ATOMIC);
		if (!local_skb)
			goto drop;

		local_skb->protocol = htons(ETH_P_IPV6);
		local_skb->pkt_type = PACKET_HOST;

		/* Pull off the 1-byte of 6lowpan header. */
		skb_pull(local_skb, 1);
		skb_reset_network_header(local_skb);
		skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

		lowpan_give_skb_to_devices(local_skb);

		/* our aligned copy and the original are both done with */
		kfree_skb(local_skb);
		kfree_skb(skb);
	} else {
		switch (skb->data[0] & 0xe0) {
		case LOWPAN_DISPATCH_IPHC:	/* ipv6 datagram */
		case LOWPAN_DISPATCH_FRAG1:	/* first fragment header */
		case LOWPAN_DISPATCH_FRAGN:	/* next fragments headers */
			/* decompress/reassemble a clone; free the original */
			local_skb = skb_clone(skb, GFP_ATOMIC);
			if (!local_skb)
				goto drop;
			lowpan_process_data(local_skb);

			kfree_skb(skb);
			break;
		default:
			break;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
  1098. static int lowpan_newlink(struct net *src_net, struct net_device *dev,
  1099. struct nlattr *tb[], struct nlattr *data[])
  1100. {
  1101. struct net_device *real_dev;
  1102. struct lowpan_dev_record *entry;
  1103. pr_debug("adding new link\n");
  1104. if (!tb[IFLA_LINK])
  1105. return -EINVAL;
  1106. /* find and hold real wpan device */
  1107. real_dev = dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
  1108. if (!real_dev)
  1109. return -ENODEV;
  1110. lowpan_dev_info(dev)->real_dev = real_dev;
  1111. lowpan_dev_info(dev)->fragment_tag = 0;
  1112. mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
  1113. entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
  1114. if (!entry) {
  1115. dev_put(real_dev);
  1116. lowpan_dev_info(dev)->real_dev = NULL;
  1117. return -ENOMEM;
  1118. }
  1119. entry->ldev = dev;
  1120. mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
  1121. INIT_LIST_HEAD(&entry->list);
  1122. list_add_tail(&entry->list, &lowpan_devices);
  1123. mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
  1124. register_netdevice(dev);
  1125. return 0;
  1126. }
/*
 * rtnl_link_ops.dellink: remove the 6LoWPAN device from the global
 * device list, queue it for unregistration and drop the reference on
 * the real WPAN device taken in lowpan_newlink().
 */
static void lowpan_dellink(struct net_device *dev, struct list_head *head)
{
	struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
	struct net_device *real_dev = lowpan_dev->real_dev;
	struct lowpan_dev_record *entry, *tmp;

	ASSERT_RTNL();

	mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
	list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
		if (entry->ldev == dev) {
			list_del(&entry->list);
			kfree(entry);
		}
	}
	mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);

	mutex_destroy(&lowpan_dev_info(dev)->dev_list_mtx);

	unregister_netdevice_queue(dev, head);

	dev_put(real_dev);
}
/* rtnetlink glue for "ip link add ... type lowpan". */
static struct rtnl_link_ops lowpan_link_ops __read_mostly = {
	.kind		= "lowpan",
	.priv_size	= sizeof(struct lowpan_dev_info),
	.setup		= lowpan_setup,
	.newlink	= lowpan_newlink,
	.dellink	= lowpan_dellink,
	.validate	= lowpan_validate,
};
/* Register the "lowpan" rtnetlink link type. */
static inline int __init lowpan_netlink_init(void)
{
	return rtnl_link_register(&lowpan_link_ops);
}
/* Unregister the "lowpan" rtnetlink link type. */
static inline void lowpan_netlink_fini(void)
{
	rtnl_link_unregister(&lowpan_link_ops);
}
  1161. static int lowpan_device_event(struct notifier_block *unused,
  1162. unsigned long event, void *ptr)
  1163. {
  1164. struct net_device *dev = netdev_notifier_info_to_dev(ptr);
  1165. LIST_HEAD(del_list);
  1166. struct lowpan_dev_record *entry, *tmp;
  1167. if (dev->type != ARPHRD_IEEE802154)
  1168. goto out;
  1169. if (event == NETDEV_UNREGISTER) {
  1170. list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
  1171. if (lowpan_dev_info(entry->ldev)->real_dev == dev)
  1172. lowpan_dellink(entry->ldev, &del_list);
  1173. }
  1174. unregister_netdevice_many(&del_list);
  1175. }
  1176. out:
  1177. return NOTIFY_DONE;
  1178. }
/* Watches for unregistration of the underlying WPAN devices. */
static struct notifier_block lowpan_dev_notifier = {
	.notifier_call = lowpan_device_event,
};
/* RX hook: receive all IEEE 802.15.4 frames via lowpan_rcv(). */
static struct packet_type lowpan_packet_type = {
	.type = __constant_htons(ETH_P_IEEE802154),
	.func = lowpan_rcv,
};
  1186. static int __init lowpan_init_module(void)
  1187. {
  1188. int err = 0;
  1189. err = lowpan_netlink_init();
  1190. if (err < 0)
  1191. goto out;
  1192. dev_add_pack(&lowpan_packet_type);
  1193. err = register_netdevice_notifier(&lowpan_dev_notifier);
  1194. if (err < 0) {
  1195. dev_remove_pack(&lowpan_packet_type);
  1196. lowpan_netlink_fini();
  1197. }
  1198. out:
  1199. return err;
  1200. }
/*
 * Module exit: unhook from rtnetlink, the RX path and the notifier,
 * then discard any reassembly state that is still pending.
 */
static void __exit lowpan_cleanup_module(void)
{
	struct lowpan_fragment *frame, *tframe;

	lowpan_netlink_fini();

	dev_remove_pack(&lowpan_packet_type);

	unregister_netdevice_notifier(&lowpan_dev_notifier);

	/* Now 6lowpan packet_type is removed, so no new fragments are
	 * expected on RX, therefore that's the time to clean incomplete
	 * fragments.
	 */
	spin_lock_bh(&flist_lock);
	list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
		/* NOTE(review): del_timer_sync() while holding flist_lock
		 * can deadlock if the timer callback also takes flist_lock;
		 * confirm what the expiry handler locks. */
		del_timer_sync(&frame->timer);
		list_del(&frame->list);
		dev_kfree_skb(frame->skb);
		kfree(frame);
	}
	spin_unlock_bh(&flist_lock);
}
module_init(lowpan_init_module);
module_exit(lowpan_cleanup_module);

MODULE_LICENSE("GPL");
/* allow auto-loading via "ip link add ... type lowpan" */
MODULE_ALIAS_RTNL_LINK("lowpan");