rx.c 59 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123
  1. /*
  2. * Copyright 2002-2005, Instant802 Networks, Inc.
  3. * Copyright 2005-2006, Devicescape Software, Inc.
  4. * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  5. * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #include <linux/jiffies.h>
  12. #include <linux/kernel.h>
  13. #include <linux/skbuff.h>
  14. #include <linux/netdevice.h>
  15. #include <linux/etherdevice.h>
  16. #include <linux/rcupdate.h>
  17. #include <net/mac80211.h>
  18. #include <net/ieee80211_radiotap.h>
  19. #include "ieee80211_i.h"
  20. #include "ieee80211_led.h"
  21. #include "wep.h"
  22. #include "wpa.h"
  23. #include "tkip.h"
  24. #include "wme.h"
  25. u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
  26. struct tid_ampdu_rx *tid_agg_rx,
  27. struct sk_buff *skb, u16 mpdu_seq_num,
  28. int bar_req);
  29. /*
  30. * monitor mode reception
  31. *
  32. * This function cleans up the SKB, i.e. it removes all the stuff
  33. * only useful for monitoring.
  34. */
  35. static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
  36. struct sk_buff *skb,
  37. int rtap_len)
  38. {
  39. skb_pull(skb, rtap_len);
  40. if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
  41. if (likely(skb->len > FCS_LEN))
  42. skb_trim(skb, skb->len - FCS_LEN);
  43. else {
  44. /* driver bug */
  45. WARN_ON(1);
  46. dev_kfree_skb(skb);
  47. skb = NULL;
  48. }
  49. }
  50. return skb;
  51. }
  52. static inline int should_drop_frame(struct ieee80211_rx_status *status,
  53. struct sk_buff *skb,
  54. int present_fcs_len,
  55. int radiotap_len)
  56. {
  57. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  58. if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
  59. return 1;
  60. if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len))
  61. return 1;
  62. if (((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FTYPE)) ==
  63. cpu_to_le16(IEEE80211_FTYPE_CTL)) &&
  64. ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE)) !=
  65. cpu_to_le16(IEEE80211_STYPE_PSPOLL)) &&
  66. ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE)) !=
  67. cpu_to_le16(IEEE80211_STYPE_BACK_REQ)))
  68. return 1;
  69. return 0;
  70. }
/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 *
 * Returns the skb the caller should keep processing: the original, a
 * stripped copy, or NULL when the frame was dropped/consumed entirely.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rx_status *status,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_sub_if_data *sdata;
	int needed_headroom = 0;
	struct ieee80211_radiotap_header *rthdr;
	__le64 *rttsft = NULL;
	/* fixed radiotap fields we synthesize when the driver did not
	 * already supply a radiotap header */
	struct ieee80211_rtap_fixed_data {
		u8 flags;
		u8 rate;
		__le16 chan_freq;
		__le16 chan_flags;
		u8 antsignal;
		u8 padding_for_rxflags;
		__le16 rx_flags;
	} __attribute__ ((packed)) *rtfixed;
	struct sk_buff *skb, *skb2;
	struct net_device *prev_dev = NULL;
	int present_fcs_len = 0;
	int rtap_len = 0;

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
	 */
	if (status->flag & RX_FLAG_RADIOTAP)
		rtap_len = ieee80211_get_radiotap_len(origskb->data);
	else
		/* room for radiotap header, always present fields and TSFT */
		needed_headroom = sizeof(*rthdr) + sizeof(*rtfixed) + 8;

	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
		present_fcs_len = FCS_LEN;

	/* no monitor interfaces: just strip metadata and hand back */
	if (!local->monitors) {
		if (should_drop_frame(status, origskb, present_fcs_len,
				      rtap_len)) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		return remove_monitor_info(local, origskb, rtap_len);
	}

	if (should_drop_frame(status, origskb, present_fcs_len, rtap_len)) {
		/* frame won't be returned to the caller anyway, so reuse
		 * the original for the monitor copy instead of cloning;
		 * only need to expand headroom if necessary */
		skb = origskb;
		origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);

		origskb = remove_monitor_info(local, origskb, rtap_len);

		/* copy failed: skip monitors, return the stripped original */
		if (!skb)
			return origskb;
	}

	/* if necessary, prepend radiotap information */
	if (!(status->flag & RX_FLAG_RADIOTAP)) {
		rtfixed = (void *) skb_push(skb, sizeof(*rtfixed));
		rtap_len = sizeof(*rthdr) + sizeof(*rtfixed);
		if (status->flag & RX_FLAG_TSFT) {
			rttsft = (void *) skb_push(skb, sizeof(*rttsft));
			rtap_len += 8;
		}
		rthdr = (void *) skb_push(skb, sizeof(*rthdr));
		memset(rthdr, 0, sizeof(*rthdr));
		memset(rtfixed, 0, sizeof(*rtfixed));
		rthdr->it_present =
			cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
				    (1 << IEEE80211_RADIOTAP_RATE) |
				    (1 << IEEE80211_RADIOTAP_CHANNEL) |
				    (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) |
				    (1 << IEEE80211_RADIOTAP_RX_FLAGS));
		rtfixed->flags = 0;
		if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
			rtfixed->flags |= IEEE80211_RADIOTAP_F_FCS;

		if (rttsft) {
			*rttsft = cpu_to_le64(status->mactime);
			rthdr->it_present |=
				cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		}

		/* FIXME: when radiotap gets a 'bad PLCP' flag use it here */
		rtfixed->rx_flags = 0;
		if (status->flag &
		    (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
			rtfixed->rx_flags |=
				cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);

		/* bitrate is in 100 kbit/s, radiotap rate in 500 kbit/s */
		rtfixed->rate = rate->bitrate / 5;

		rtfixed->chan_freq = cpu_to_le16(status->freq);

		if (status->band == IEEE80211_BAND_5GHZ)
			rtfixed->chan_flags =
				cpu_to_le16(IEEE80211_CHAN_OFDM |
					    IEEE80211_CHAN_5GHZ);
		else
			rtfixed->chan_flags =
				cpu_to_le16(IEEE80211_CHAN_DYN |
					    IEEE80211_CHAN_2GHZ);

		rtfixed->antsignal = status->ssi;
		rthdr->it_len = cpu_to_le16(rtap_len);
	}

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	/* clone the skb to every running, non-"cooked" monitor interface;
	 * the delivery to the previous interface is deferred one loop
	 * iteration so the final interface can take the skb itself */
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!netif_running(sdata->dev))
			continue;

		if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR)
			continue;

		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
			continue;

		if (prev_dev) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				skb2->dev = prev_dev;
				netif_rx(skb2);
			}
		}

		prev_dev = sdata->dev;
		sdata->dev->stats.rx_packets++;
		sdata->dev->stats.rx_bytes += skb->len;
	}

	/* last monitor interface gets the skb without cloning */
	if (prev_dev) {
		skb->dev = prev_dev;
		netif_rx(skb);
	} else
		dev_kfree_skb(skb);

	return origskb;
}
  220. static void ieee80211_parse_qos(struct ieee80211_txrx_data *rx)
  221. {
  222. u8 *data = rx->skb->data;
  223. int tid;
  224. /* does the frame have a qos control field? */
  225. if (WLAN_FC_IS_QOS_DATA(rx->fc)) {
  226. u8 *qc = data + ieee80211_get_hdrlen(rx->fc) - QOS_CONTROL_LEN;
  227. /* frame has qos control */
  228. tid = qc[0] & QOS_CONTROL_TID_MASK;
  229. if (qc[0] & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
  230. rx->flags |= IEEE80211_TXRXD_RX_AMSDU;
  231. else
  232. rx->flags &= ~IEEE80211_TXRXD_RX_AMSDU;
  233. } else {
  234. if (unlikely((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)) {
  235. /* Separate TID for management frames */
  236. tid = NUM_RX_DATA_QUEUES - 1;
  237. } else {
  238. /* no qos control present */
  239. tid = 0; /* 802.1d - Best Effort */
  240. }
  241. }
  242. I802_DEBUG_INC(rx->local->wme_rx_queue[tid]);
  243. /* only a debug counter, sta might not be assigned properly yet */
  244. if (rx->sta)
  245. I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]);
  246. rx->u.rx.queue = tid;
  247. /* Set skb->priority to 1d tag if highest order bit of TID is not set.
  248. * For now, set skb->priority to 0 for other cases. */
  249. rx->skb->priority = (tid > 7) ? 0 : tid;
  250. }
/*
 * Debug-only sanity check (CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT):
 * warn once if the driver delivered a data frame whose payload is not
 * positioned so the contained IP header lands on a 4-byte boundary.
 * Compiles to an empty function when the config option is off.
 */
static void ieee80211_verify_ip_alignment(struct ieee80211_txrx_data *rx)
{
#ifdef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
	int hdrlen;

	/* only data-carrying frames have an IP payload to check */
	if (!WLAN_FC_DATA_PRESENT(rx->fc))
		return;

	/*
	 * Drivers are required to align the payload data in a way that
	 * guarantees that the contained IP header is aligned to a four-
	 * byte boundary. In the case of regular frames, this simply means
	 * aligning the payload to a four-byte boundary (because either
	 * the IP header is directly contained, or IV/RFC1042 headers that
	 * have a length divisible by four are in front of it.
	 *
	 * With A-MSDU frames, however, the payload data address must
	 * yield two modulo four because there are 14-byte 802.3 headers
	 * within the A-MSDU frames that push the IP header further back
	 * to a multiple of four again. Thankfully, the specs were sane
	 * enough this time around to require padding each A-MSDU subframe
	 * to a length that is a multiple of four.
	 *
	 * Padding like atheros hardware adds which is inbetween the 802.11
	 * header and the payload is not supported, the driver is required
	 * to move the 802.11 header further back in that case.
	 */
	hdrlen = ieee80211_get_hdrlen(rx->fc);
	if (rx->flags & IEEE80211_TXRXD_RX_AMSDU)
		hdrlen += ETH_HLEN;
	WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3);
#endif
}
  282. static u32 ieee80211_rx_load_stats(struct ieee80211_local *local,
  283. struct sk_buff *skb,
  284. struct ieee80211_rx_status *status,
  285. struct ieee80211_rate *rate)
  286. {
  287. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  288. u32 load = 0, hdrtime;
  289. /* Estimate total channel use caused by this frame */
  290. /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values,
  291. * 1 usec = 1/8 * (1080 / 10) = 13.5 */
  292. if (status->band == IEEE80211_BAND_5GHZ ||
  293. (status->band == IEEE80211_BAND_5GHZ &&
  294. rate->flags & IEEE80211_RATE_ERP_G))
  295. hdrtime = CHAN_UTIL_HDR_SHORT;
  296. else
  297. hdrtime = CHAN_UTIL_HDR_LONG;
  298. load = hdrtime;
  299. if (!is_multicast_ether_addr(hdr->addr1))
  300. load += hdrtime;
  301. /* TODO: optimise again */
  302. load += skb->len * CHAN_UTIL_RATE_LCM / rate->bitrate;
  303. /* Divide channel_use by 8 to avoid wrapping around the counter */
  304. load >>= CHAN_UTIL_SHIFT;
  305. return load;
  306. }
  307. /* rx handlers */
  308. static ieee80211_rx_result
  309. ieee80211_rx_h_if_stats(struct ieee80211_txrx_data *rx)
  310. {
  311. if (rx->sta)
  312. rx->sta->channel_use_raw += rx->u.rx.load;
  313. rx->sdata->channel_use_raw += rx->u.rx.load;
  314. return RX_CONTINUE;
  315. }
  316. static ieee80211_rx_result
  317. ieee80211_rx_h_passive_scan(struct ieee80211_txrx_data *rx)
  318. {
  319. struct ieee80211_local *local = rx->local;
  320. struct sk_buff *skb = rx->skb;
  321. if (unlikely(local->sta_hw_scanning))
  322. return ieee80211_sta_rx_scan(rx->dev, skb, rx->u.rx.status);
  323. if (unlikely(local->sta_sw_scanning)) {
  324. /* drop all the other packets during a software scan anyway */
  325. if (ieee80211_sta_rx_scan(rx->dev, skb, rx->u.rx.status)
  326. != RX_QUEUED)
  327. dev_kfree_skb(skb);
  328. return RX_QUEUED;
  329. }
  330. if (unlikely(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) {
  331. /* scanning finished during invoking of handlers */
  332. I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
  333. return RX_DROP_UNUSABLE;
  334. }
  335. return RX_CONTINUE;
  336. }
/*
 * RX handler: basic validity checks — duplicate-retransmission
 * filtering, minimum length, and frame-class filtering based on the
 * station's association state.
 */
static ieee80211_rx_result
ieee80211_rx_h_check(struct ieee80211_txrx_data *rx)
{
	struct ieee80211_hdr *hdr;
	hdr = (struct ieee80211_hdr *) rx->skb->data;

	/* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
	if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
		if (unlikely(rx->fc & IEEE80211_FCTL_RETRY &&
			     rx->sta->last_seq_ctrl[rx->u.rx.queue] ==
			     hdr->seq_ctrl)) {
			/* only count duplicates addressed to us */
			if (rx->flags & IEEE80211_TXRXD_RXRA_MATCH) {
				rx->local->dot11FrameDuplicateCount++;
				rx->sta->num_duplicates++;
			}
			return RX_DROP_MONITOR;
		} else
			rx->sta->last_seq_ctrl[rx->u.rx.queue] = hdr->seq_ctrl;
	}

	if (unlikely(rx->skb->len < 16)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
		return RX_DROP_MONITOR;
	}

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * 80211.o does filtering only based on association state, i.e., it
	 * drops Class 3 frames from not associated stations. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
	 */
	if (unlikely(((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA ||
		      ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL &&
		       (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) &&
		     rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS &&
		     (!rx->sta || !(rx->sta->flags & WLAN_STA_ASSOC)))) {
		if ((!(rx->fc & IEEE80211_FCTL_FROMDS) &&
		     !(rx->fc & IEEE80211_FCTL_TODS) &&
		     (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
		    || !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) {
			/* Drop IBSS frames and frames for other hosts
			 * silently. */
			return RX_DROP_MONITOR;
		}

		/* NOTE(review): both branches currently drop to monitor;
		 * the split looks like a placeholder for distinct handling
		 * (e.g. deauth notification) — confirm before merging. */
		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}
/*
 * RX handler: select the key for a protected frame and decrypt it.
 * Continues the chain for unprotected frames and frames not addressed
 * to us; drops protected frames for which no key can be found.
 */
static ieee80211_rx_result
ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
	int keyidx;
	int hdrlen;
	ieee80211_rx_result result = RX_DROP_UNUSABLE;
	struct ieee80211_key *stakey = NULL;

	/*
	 * Key selection 101
	 *
	 * There are three types of keys:
	 *  - GTK (group keys)
	 *  - PTK (pairwise keys)
	 *  - STK (station-to-station pairwise keys)
	 *
	 * When selecting a key, we have to distinguish between multicast
	 * (including broadcast) and unicast frames, the latter can only
	 * use PTKs and STKs while the former always use GTKs. Unless, of
	 * course, actual WEP keys ("pre-RSNA") are used, then unicast
	 * frames can also use key indizes like GTKs. Hence, if we don't
	 * have a PTK/STK we check the key index for a WEP key.
	 *
	 * Note that in a regular BSS, multicast frames are sent by the
	 * AP only, associated stations unicast the frame to the AP first
	 * which then multicasts it on their behalf.
	 *
	 * There is also a slight problem in IBSS mode: GTKs are negotiated
	 * with each station, that is something we don't currently handle.
	 * The spec seems to expect that one negotiates the same key with
	 * every station but there's no such requirement; VLANs could be
	 * possible.
	 */

	if (!(rx->fc & IEEE80211_FCTL_PROTECTED))
		return RX_CONTINUE;

	/*
	 * No point in finding a key and decrypting if the frame is neither
	 * addressed to us nor a multicast frame.
	 */
	if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))
		return RX_CONTINUE;

	if (rx->sta)
		stakey = rcu_dereference(rx->sta->key);

	if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
		/* unicast with a pairwise key: use it directly */
		rx->key = stakey;
	} else {
		/*
		 * The device doesn't give us the IV so we won't be
		 * able to look up the key. That's ok though, we
		 * don't need to decrypt the frame, we just won't
		 * be able to keep statistics accurate.
		 * Except for key threshold notifications, should
		 * we somehow allow the driver to tell us which key
		 * the hardware used if this flag is set?
		 */
		if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) &&
		    (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		hdrlen = ieee80211_get_hdrlen(rx->fc);

		/* frame must at least hold the IV after the header */
		if (rx->skb->len < 8 + hdrlen)
			return RX_DROP_UNUSABLE; /* TODO: count this? */

		/*
		 * no need to call ieee80211_wep_get_keyidx,
		 * it verifies a bunch of things we've done already
		 */
		/* key index: top two bits of the 4th IV byte */
		keyidx = rx->skb->data[hdrlen + 3] >> 6;

		rx->key = rcu_dereference(rx->sdata->keys[keyidx]);

		/*
		 * RSNA-protected unicast frames should always be sent with
		 * pairwise or station-to-station keys, but for WEP we allow
		 * using a key index as well.
		 */
		if (rx->key && rx->key->conf.alg != ALG_WEP &&
		    !is_multicast_ether_addr(hdr->addr1))
			rx->key = NULL;
	}

	if (rx->key) {
		rx->key->tx_rx_count++;
		/* TODO: add threshold stuff again */
	} else {
#ifdef CONFIG_MAC80211_DEBUG
		if (net_ratelimit())
			printk(KERN_DEBUG "%s: RX protected frame,"
			       " but have no key\n", rx->dev->name);
#endif /* CONFIG_MAC80211_DEBUG */
		return RX_DROP_MONITOR;
	}

	/* Check for weak IVs if possible */
	if (rx->sta && rx->key->conf.alg == ALG_WEP &&
	    ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
	    (!(rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED) ||
	     !(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) &&
	    ieee80211_wep_is_weak_iv(rx->skb, rx->key))
		rx->sta->wep_weak_iv_count++;

	/* no default case: conf.alg is one of the three supported algs */
	switch (rx->key->conf.alg) {
	case ALG_WEP:
		result = ieee80211_crypto_wep_decrypt(rx);
		break;
	case ALG_TKIP:
		result = ieee80211_crypto_tkip_decrypt(rx);
		break;
	case ALG_CCMP:
		result = ieee80211_crypto_ccmp_decrypt(rx);
		break;
	}

	/* either the frame has been decrypted or will be dropped */
	rx->u.rx.status->flag |= RX_FLAG_DECRYPTED;

	return result;
}
  493. static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta)
  494. {
  495. struct ieee80211_sub_if_data *sdata;
  496. DECLARE_MAC_BUF(mac);
  497. sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
  498. if (sdata->bss)
  499. atomic_inc(&sdata->bss->num_sta_ps);
  500. sta->flags |= WLAN_STA_PS;
  501. sta->pspoll = 0;
  502. #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
  503. printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n",
  504. dev->name, print_mac(mac, sta->addr), sta->aid);
  505. #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
  506. }
/*
 * A station left power-save mode: clear its PS/TIM state and flush
 * every frame buffered for it (tx-filtered frames first, then the
 * PS buffer) back through the transmit path.
 * Returns the number of frames sent.
 */
static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta)
{
	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
	struct sk_buff *skb;
	int sent = 0;
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_tx_packet_data *pkt_data;
	DECLARE_MAC_BUF(mac);

	sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev);
	if (sdata->bss)
		atomic_dec(&sdata->bss->num_sta_ps);

	sta->flags &= ~(WLAN_STA_PS | WLAN_STA_TIM);
	sta->pspoll = 0;

	/* clear the station's TIM bit now that nothing is buffered */
	if (!skb_queue_empty(&sta->ps_tx_buf)) {
		if (local->ops->set_tim)
			local->ops->set_tim(local_to_hw(local), sta->aid, 0);
		if (sdata->bss)
			bss_tim_clear(local, sdata->bss, sta->aid);
	}
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
	printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n",
	       dev->name, print_mac(mac, sta->addr), sta->aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */

	/* Send all buffered frames to the station */
	while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) {
		pkt_data = (struct ieee80211_tx_packet_data *) skb->cb;
		sent++;
		/* mark as requeued so the tx path treats it accordingly */
		pkt_data->flags |= IEEE80211_TXPD_REQUEUE;
		dev_queue_xmit(skb);
	}
	while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) {
		pkt_data = (struct ieee80211_tx_packet_data *) skb->cb;
		/* this frame no longer occupies the global PS buffer */
		local->total_ps_buffered--;
		sent++;
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
		printk(KERN_DEBUG "%s: STA %s aid %d send PS frame "
		       "since STA not sleeping anymore\n", dev->name,
		       print_mac(mac, sta->addr), sta->aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
		pkt_data->flags |= IEEE80211_TXPD_REQUEUE;
		dev_queue_xmit(skb);
	}

	return sent;
}
/*
 * RX handler: per-station bookkeeping — last_rx/signal statistics,
 * power-save mode transitions driven by the PM bit, and silent
 * consumption of nullfunc frames.
 */
static ieee80211_rx_result
ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct net_device *dev = rx->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;

	if (!sta)
		return RX_CONTINUE;

	/* Update last_rx only for IBSS packets which are for the current
	 * BSSID to avoid keeping the current IBSS network alive in cases where
	 * other STAs are using different BSSID. */
	if (rx->sdata->vif.type == IEEE80211_IF_TYPE_IBSS) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						IEEE80211_IF_TYPE_IBSS);
		if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0)
			sta->last_rx = jiffies;
	} else
	if (!is_multicast_ether_addr(hdr->addr1) ||
	    rx->sdata->vif.type == IEEE80211_IF_TYPE_STA) {
		/* Update last_rx only for unicast frames in order to prevent
		 * the Probe Request frames (the only broadcast frames from a
		 * STA in infrastructure mode) from keeping a connection alive.
		 */
		sta->last_rx = jiffies;
	}

	/* statistics and PS handling only for frames addressed to us */
	if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))
		return RX_CONTINUE;

	sta->rx_fragments++;
	sta->rx_bytes += rx->skb->len;
	sta->last_rssi = rx->u.rx.status->ssi;
	sta->last_signal = rx->u.rx.status->signal;
	sta->last_noise = rx->u.rx.status->noise;

	if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) {
		/* Change STA power saving mode only in the end of a frame
		 * exchange sequence */
		if ((sta->flags & WLAN_STA_PS) && !(rx->fc & IEEE80211_FCTL_PM))
			rx->u.rx.sent_ps_buffered += ap_sta_ps_end(dev, sta);
		else if (!(sta->flags & WLAN_STA_PS) &&
			 (rx->fc & IEEE80211_FCTL_PM))
			ap_sta_ps_start(dev, sta);
	}

	/* Drop data::nullfunc frames silently, since they are used only to
	 * control station power saving mode. */
	if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA &&
	    (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_NULLFUNC) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
		/* Update counter and free packet here to avoid counting this
		 * as a dropped packed. */
		sta->rx_packets++;
		dev_kfree_skb(rx->skb);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
} /* ieee80211_rx_h_sta_process */
/*
 * Start a new fragment-reassembly entry for the first fragment of a
 * frame.  Takes the next slot of the per-interface ring buffer
 * (evicting — with a debug message — any stale entry still occupying
 * it), moves *skb into the entry's skb list (setting *skb to NULL to
 * transfer ownership), and initializes the entry's sequence/fragment
 * tracking state.  Returns the entry.
 */
static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
			 unsigned int frag, unsigned int seq, int rx_queue,
			 struct sk_buff **skb)
{
	struct ieee80211_fragment_entry *entry;
	int idx;

	idx = sdata->fragment_next;
	/* ring buffer: advance and wrap the next-slot index */
	entry = &sdata->fragments[sdata->fragment_next++];
	if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
		sdata->fragment_next = 0;

	/* slot still holds an unfinished reassembly: discard it */
	if (!skb_queue_empty(&entry->skb_list)) {
#ifdef CONFIG_MAC80211_DEBUG
		struct ieee80211_hdr *hdr =
			(struct ieee80211_hdr *) entry->skb_list.next->data;
		DECLARE_MAC_BUF(mac);
		DECLARE_MAC_BUF(mac2);
		printk(KERN_DEBUG "%s: RX reassembly removed oldest "
		       "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
		       "addr1=%s addr2=%s\n",
		       sdata->dev->name, idx,
		       jiffies - entry->first_frag_time, entry->seq,
		       entry->last_frag, print_mac(mac, hdr->addr1),
		       print_mac(mac2, hdr->addr2));
#endif /* CONFIG_MAC80211_DEBUG */
		__skb_queue_purge(&entry->skb_list);
	}

	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
	*skb = NULL;
	entry->first_frag_time = jiffies;
	entry->seq = seq;
	entry->rx_queue = rx_queue;
	entry->last_frag = frag;
	entry->ccmp = 0;
	entry->extra_len = 0;

	return entry;
}
/*
 * Find the pending fragment-cache entry that the fragment described by
 * (fc, frag, seq, rx_queue, hdr) continues.  Entries whose first
 * fragment is older than two seconds are purged during the walk.
 * Returns NULL when no matching entry exists.
 */
static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
			  u16 fc, unsigned int frag, unsigned int seq,
			  int rx_queue, struct ieee80211_hdr *hdr)
{
	struct ieee80211_fragment_entry *entry;
	int i, idx;

	/* Walk the ring backwards starting from the most recent slot. */
	idx = sdata->fragment_next;
	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
		struct ieee80211_hdr *f_hdr;
		u16 f_fc;

		idx--;
		if (idx < 0)
			idx = IEEE80211_FRAGMENT_MAX - 1;

		entry = &sdata->fragments[idx];
		/* Entry must be in use, on the same queue, carry the same
		 * sequence number, and this fragment must directly follow
		 * the last fragment stored in it. */
		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
		    entry->rx_queue != rx_queue ||
		    entry->last_frag + 1 != frag)
			continue;

		f_hdr = (struct ieee80211_hdr *) entry->skb_list.next->data;
		f_fc = le16_to_cpu(f_hdr->frame_control);

		/* Frame type and both addresses must match the entry's
		 * first fragment. */
		if ((fc & IEEE80211_FCTL_FTYPE) != (f_fc & IEEE80211_FCTL_FTYPE) ||
		    compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 ||
		    compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0)
			continue;

		/* Expire entries older than two seconds. */
		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
			__skb_queue_purge(&entry->skb_list);
			continue;
		}
		return entry;
	}

	return NULL;
}
/*
 * RX handler: reassemble fragmented MPDUs into a single MSDU.
 *
 * Non-fragmented frames fall through to "out".  The first fragment
 * opens a cache entry (ieee80211_reassemble_add); later fragments are
 * appended to the matching entry, and for CCMP the packet number of
 * each fragment is checked to be sequential.  Once the final fragment
 * arrives, all queued pieces are concatenated into one skb which then
 * continues down the RX path.
 */
static ieee80211_rx_result
ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx)
{
	struct ieee80211_hdr *hdr;
	u16 sc;
	unsigned int frag, seq;
	struct ieee80211_fragment_entry *entry;
	struct sk_buff *skb;
	DECLARE_MAC_BUF(mac);

	hdr = (struct ieee80211_hdr *) rx->skb->data;
	sc = le16_to_cpu(hdr->seq_ctrl);
	frag = sc & IEEE80211_SCTL_FRAG;

	if (likely((!(rx->fc & IEEE80211_FCTL_MOREFRAGS) && frag == 0) ||
		   (rx->skb)->len < 24 ||
		   is_multicast_ether_addr(hdr->addr1))) {
		/* not fragmented */
		goto out;
	}
	I802_DEBUG_INC(rx->local->rx_handlers_fragments);

	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;

	if (frag == 0) {
		/* This is the first fragment of a new frame. */
		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
						 rx->u.rx.queue, &(rx->skb));
		if (rx->key && rx->key->conf.alg == ALG_CCMP &&
		    (rx->fc & IEEE80211_FCTL_PROTECTED)) {
			/* Store CCMP PN so that we can verify that the next
			 * fragment has a sequential PN value. */
			entry->ccmp = 1;
			memcpy(entry->last_pn,
			       rx->key->u.ccmp.rx_pn[rx->u.rx.queue],
			       CCMP_PN_LEN);
		}
		return RX_QUEUED;
	}

	/* This is a fragment for a frame that should already be pending in
	 * fragment cache. Add this fragment to the end of the pending entry.
	 */
	entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq,
					  rx->u.rx.queue, hdr);
	if (!entry) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
		return RX_DROP_MONITOR;
	}

	/* Verify that MPDUs within one MSDU have sequential PN values.
	 * (IEEE 802.11i, 8.3.3.4.5) */
	if (entry->ccmp) {
		int i;
		u8 pn[CCMP_PN_LEN], *rpn;
		if (!rx->key || rx->key->conf.alg != ALG_CCMP)
			return RX_DROP_UNUSABLE;
		/* Expected PN = stored PN + 1 (big-endian increment with
		 * carry propagation). */
		memcpy(pn, entry->last_pn, CCMP_PN_LEN);
		for (i = CCMP_PN_LEN - 1; i >= 0; i--) {
			pn[i]++;
			if (pn[i])
				break;
		}
		rpn = rx->key->u.ccmp.rx_pn[rx->u.rx.queue];
		if (memcmp(pn, rpn, CCMP_PN_LEN) != 0) {
			if (net_ratelimit())
				printk(KERN_DEBUG "%s: defrag: CCMP PN not "
				       "sequential A2=%s"
				       " PN=%02x%02x%02x%02x%02x%02x "
				       "(expected %02x%02x%02x%02x%02x%02x)\n",
				       rx->dev->name, print_mac(mac, hdr->addr2),
				       rpn[0], rpn[1], rpn[2], rpn[3], rpn[4],
				       rpn[5], pn[0], pn[1], pn[2], pn[3],
				       pn[4], pn[5]);
			return RX_DROP_UNUSABLE;
		}
		memcpy(entry->last_pn, pn, CCMP_PN_LEN);
	}

	/* Strip this fragment's 802.11 header and queue the payload. */
	skb_pull(rx->skb, ieee80211_get_hdrlen(rx->fc));
	__skb_queue_tail(&entry->skb_list, rx->skb);
	entry->last_frag = frag;
	entry->extra_len += rx->skb->len;
	if (rx->fc & IEEE80211_FCTL_MOREFRAGS) {
		/* More fragments expected; skb ownership moved to cache. */
		rx->skb = NULL;
		return RX_QUEUED;
	}

	/* Final fragment received: rebuild the whole frame in the first
	 * fragment's skb, growing its tailroom if necessary. */
	rx->skb = __skb_dequeue(&entry->skb_list);
	if (skb_tailroom(rx->skb) < entry->extra_len) {
		I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
		if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
					      GFP_ATOMIC))) {
			I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
			__skb_queue_purge(&entry->skb_list);
			return RX_DROP_UNUSABLE;
		}
	}
	while ((skb = __skb_dequeue(&entry->skb_list))) {
		memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
		dev_kfree_skb(skb);
	}

	/* Complete frame has been reassembled - process it now */
	rx->flags |= IEEE80211_TXRXD_FRAGMENTED;

 out:
	if (rx->sta)
		rx->sta->rx_packets++;
	if (is_multicast_ether_addr(hdr->addr1))
		rx->local->dot11MulticastReceivedFrameCount++;
	else
		ieee80211_led_rx(rx->local);
	return RX_CONTINUE;
}
/*
 * RX handler: answer a PS-Poll control frame from a power-saving
 * station by releasing one buffered frame (hardware-filtered frames
 * first, then the PS buffer).  The MoreData bit on the released frame
 * tells the STA whether more frames remain buffered; when the buffers
 * drain, the station's TIM bit is cleared.  Consumes the PS-Poll skb.
 */
static ieee80211_rx_result
ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
	struct sk_buff *skb;
	int no_pending_pkts;
	DECLARE_MAC_BUF(mac);

	/* Only PS-Poll control frames from a known STA, addressed to us. */
	if (likely(!rx->sta ||
		   (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL ||
		   (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL ||
		   !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)))
		return RX_CONTINUE;

	/* PS buffering is only done on AP / AP-VLAN interfaces. */
	if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) &&
	    (sdata->vif.type != IEEE80211_IF_TYPE_VLAN))
		return RX_DROP_UNUSABLE;

	/* Filtered frames take precedence; only the PS buffer counts
	 * against the global total_ps_buffered. */
	skb = skb_dequeue(&rx->sta->tx_filtered);
	if (!skb) {
		skb = skb_dequeue(&rx->sta->ps_tx_buf);
		if (skb)
			rx->local->total_ps_buffered--;
	}
	no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) &&
		skb_queue_empty(&rx->sta->ps_tx_buf);

	if (skb) {
		struct ieee80211_hdr *hdr =
			(struct ieee80211_hdr *) skb->data;

		/* tell TX path to send one frame even though the STA may
		 * still remain is PS mode after this frame exchange */
		rx->sta->pspoll = 1;

#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
		printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n",
		       print_mac(mac, rx->sta->addr), rx->sta->aid,
		       skb_queue_len(&rx->sta->ps_tx_buf));
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */

		/* Use MoreData flag to indicate whether there are more
		 * buffered frames for this STA */
		if (no_pending_pkts) {
			hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA);
			rx->sta->flags &= ~WLAN_STA_TIM;
		} else
			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);

		dev_queue_xmit(skb);

		/* Buffers drained: clear the TIM bit for this AID, both in
		 * the driver and in the hostapd beacon state. */
		if (no_pending_pkts) {
			if (rx->local->ops->set_tim)
				rx->local->ops->set_tim(local_to_hw(rx->local),
						       rx->sta->aid, 0);
			if (rx->sdata->bss)
				bss_tim_clear(rx->local, rx->sdata->bss, rx->sta->aid);
		}
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
	} else if (!rx->u.rx.sent_ps_buffered) {
		printk(KERN_DEBUG "%s: STA %s sent PS Poll even "
		       "though there is no buffered frames for it\n",
		       rx->dev->name, print_mac(mac, rx->sta->addr));
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
	}

	/* Free PS Poll skb here instead of returning RX_DROP that would
	 * count as an dropped frame. */
	dev_kfree_skb(rx->skb);

	return RX_QUEUED;
}
  841. static ieee80211_rx_result
  842. ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx)
  843. {
  844. u16 fc = rx->fc;
  845. u8 *data = rx->skb->data;
  846. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data;
  847. if (!WLAN_FC_IS_QOS_DATA(fc))
  848. return RX_CONTINUE;
  849. /* remove the qos control field, update frame type and meta-data */
  850. memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2);
  851. hdr = (struct ieee80211_hdr *) skb_pull(rx->skb, 2);
  852. /* change frame type to non QOS */
  853. rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA;
  854. hdr->frame_control = cpu_to_le16(fc);
  855. return RX_CONTINUE;
  856. }
  857. static int
  858. ieee80211_802_1x_port_control(struct ieee80211_txrx_data *rx)
  859. {
  860. if (unlikely(!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED))) {
  861. #ifdef CONFIG_MAC80211_DEBUG
  862. if (net_ratelimit())
  863. printk(KERN_DEBUG "%s: dropped frame "
  864. "(unauthorized port)\n", rx->dev->name);
  865. #endif /* CONFIG_MAC80211_DEBUG */
  866. return -EACCES;
  867. }
  868. return 0;
  869. }
  870. static int
  871. ieee80211_drop_unencrypted(struct ieee80211_txrx_data *rx)
  872. {
  873. /*
  874. * Pass through unencrypted frames if the hardware has
  875. * decrypted them already.
  876. */
  877. if (rx->u.rx.status->flag & RX_FLAG_DECRYPTED)
  878. return 0;
  879. /* Drop unencrypted frames if key is set. */
  880. if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) &&
  881. (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA &&
  882. (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC &&
  883. (rx->key || rx->sdata->drop_unencrypted))) {
  884. if (net_ratelimit())
  885. printk(KERN_DEBUG "%s: RX non-WEP frame, but expected "
  886. "encryption\n", rx->dev->name);
  887. return -EACCES;
  888. }
  889. return 0;
  890. }
/*
 * Convert the IEEE 802.11 data frame in rx->skb into an 802.3 frame in
 * place: derive DA/SA from the address fields according to the
 * ToDS/FromDS bits, validate the combination against the interface
 * mode, and strip RFC1042 / Bridge-Tunnel LLC encapsulation (or build
 * a length-style 802.3 header when neither applies).
 * Returns 0 on success, -1 if the frame is malformed or not valid for
 * this interface type.
 */
static int
ieee80211_data_to_8023(struct ieee80211_txrx_data *rx)
{
	struct net_device *dev = rx->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
	u16 fc, hdrlen, ethertype;
	u8 *payload;
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	struct sk_buff *skb = rx->skb;
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	DECLARE_MAC_BUF(mac);
	DECLARE_MAC_BUF(mac2);
	DECLARE_MAC_BUF(mac3);
	DECLARE_MAC_BUF(mac4);

	fc = rx->fc;

	if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
		return -1;

	hdrlen = ieee80211_get_hdrlen(fc);

	/* convert IEEE 802.11 header + possible LLC headers into Ethernet
	 * header
	 * IEEE 802.11 address fields:
	 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
	 *   0     0   DA    SA    BSSID n/a
	 *   0     1   DA    BSSID SA    n/a
	 *   1     0   BSSID SA    DA    n/a
	 *   1     1   RA    TA    DA    SA
	 */

	switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
	case IEEE80211_FCTL_TODS:
		/* BSSID SA DA */
		memcpy(dst, hdr->addr3, ETH_ALEN);
		memcpy(src, hdr->addr2, ETH_ALEN);

		/* ToDS frames only make sense on AP/VLAN interfaces. */
		if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP &&
			     sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) {
			if (net_ratelimit())
				printk(KERN_DEBUG "%s: dropped ToDS frame "
				       "(BSSID=%s SA=%s DA=%s)\n",
				       dev->name,
				       print_mac(mac, hdr->addr1),
				       print_mac(mac2, hdr->addr2),
				       print_mac(mac3, hdr->addr3));
			return -1;
		}
		break;
	case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
		/* RA TA DA SA */
		memcpy(dst, hdr->addr3, ETH_ALEN);
		memcpy(src, hdr->addr4, ETH_ALEN);

		/* 4-address frames are only accepted on WDS interfaces. */
		if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS)) {
			if (net_ratelimit())
				printk(KERN_DEBUG "%s: dropped FromDS&ToDS "
				       "frame (RA=%s TA=%s DA=%s SA=%s)\n",
				       rx->dev->name,
				       print_mac(mac, hdr->addr1),
				       print_mac(mac2, hdr->addr2),
				       print_mac(mac3, hdr->addr3),
				       print_mac(mac4, hdr->addr4));
			return -1;
		}
		break;
	case IEEE80211_FCTL_FROMDS:
		/* DA BSSID SA */
		memcpy(dst, hdr->addr1, ETH_ALEN);
		memcpy(src, hdr->addr3, ETH_ALEN);

		/* Only STA mode accepts FromDS; also drop multicast frames
		 * that we originally sent ourselves (echoed by the AP). */
		if (sdata->vif.type != IEEE80211_IF_TYPE_STA ||
		    (is_multicast_ether_addr(dst) &&
		     !compare_ether_addr(src, dev->dev_addr)))
			return -1;
		break;
	case 0:
		/* DA SA BSSID */
		memcpy(dst, hdr->addr1, ETH_ALEN);
		memcpy(src, hdr->addr2, ETH_ALEN);

		/* No-DS frames only occur within an IBSS. */
		if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS) {
			if (net_ratelimit()) {
				printk(KERN_DEBUG "%s: dropped IBSS frame "
				       "(DA=%s SA=%s BSSID=%s)\n",
				       dev->name,
				       print_mac(mac, hdr->addr1),
				       print_mac(mac2, hdr->addr2),
				       print_mac(mac3, hdr->addr3));
			}
			return -1;
		}
		break;
	}

	/* Payload must at least hold an LLC/SNAP header + EtherType. */
	if (unlikely(skb->len - hdrlen < 8)) {
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s: RX too short data frame "
			       "payload\n", dev->name);
		}
		return -1;
	}

	payload = skb->data + hdrlen;
	ethertype = (payload[6] << 8) | payload[7];

	if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
		    ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
		   compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
		/* remove RFC1042 or Bridge-Tunnel encapsulation and
		 * replace EtherType */
		skb_pull(skb, hdrlen + 6);
		memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
		memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
	} else {
		/* No known encapsulation: synthesize an 802.3 header with
		 * the payload length in the proto/length field. */
		struct ethhdr *ehdr;
		__be16 len;

		skb_pull(skb, hdrlen);
		len = htons(skb->len);
		ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
		memcpy(ehdr->h_dest, dst, ETH_ALEN);
		memcpy(ehdr->h_source, src, ETH_ALEN);
		ehdr->h_proto = len;
	}
	return 0;
}
  1007. /*
  1008. * requires that rx->skb is a frame with ethernet header
  1009. */
  1010. static bool ieee80211_frame_allowed(struct ieee80211_txrx_data *rx)
  1011. {
  1012. static const u8 pae_group_addr[ETH_ALEN]
  1013. = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
  1014. struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
  1015. /*
  1016. * Allow EAPOL frames to us/the PAE group address regardless
  1017. * of whether the frame was encrypted or not.
  1018. */
  1019. if (ehdr->h_proto == htons(ETH_P_PAE) &&
  1020. (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 ||
  1021. compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
  1022. return true;
  1023. if (ieee80211_802_1x_port_control(rx) ||
  1024. ieee80211_drop_unencrypted(rx))
  1025. return false;
  1026. return true;
  1027. }
/*
 * requires that rx->skb is a frame with ethernet header
 *
 * Deliver the frame to the local network stack and/or back onto the
 * wireless medium.  On AP/VLAN interfaces with bridging enabled:
 * multicast frames are duplicated (up the stack and back out), and
 * unicast frames destined to a station associated on the same
 * interface are transmitted directly instead of going up the stack.
 */
static void
ieee80211_deliver_skb(struct ieee80211_txrx_data *rx)
{
	struct net_device *dev = rx->dev;
	struct ieee80211_local *local = rx->local;
	struct sk_buff *skb, *xmit_skb;
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
	struct sta_info *dsta;

	skb = rx->skb;
	xmit_skb = NULL;

	if (local->bridge_packets && (sdata->vif.type == IEEE80211_IF_TYPE_AP ||
				      sdata->vif.type == IEEE80211_IF_TYPE_VLAN) &&
	    (rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) {
		if (is_multicast_ether_addr(ehdr->h_dest)) {
			/*
			 * send multicast frames both to higher layers in
			 * local net stack and back to the wireless medium
			 */
			xmit_skb = skb_copy(skb, GFP_ATOMIC);
			if (!xmit_skb && net_ratelimit())
				printk(KERN_DEBUG "%s: failed to clone "
				       "multicast frame\n", dev->name);
		} else {
			/* Look up the destination station by h_dest, which
			 * sits at the start of the ethernet header. */
			dsta = sta_info_get(local, skb->data);
			if (dsta && dsta->dev == dev) {
				/*
				 * The destination station is associated to
				 * this AP (in this VLAN), so send the frame
				 * directly to it and do not pass it to local
				 * net stack.
				 */
				xmit_skb = skb;
				skb = NULL;
			}
			if (dsta)
				sta_info_put(dsta);
		}
	}

	if (skb) {
		/* deliver to local stack */
		skb->protocol = eth_type_trans(skb, dev);
		memset(skb->cb, 0, sizeof(skb->cb));
		netif_rx(skb);
	}

	if (xmit_skb) {
		/* send to wireless media */
		xmit_skb->protocol = htons(ETH_P_802_3);
		skb_reset_network_header(xmit_skb);
		skb_reset_mac_header(xmit_skb);
		dev_queue_xmit(xmit_skb);
	}
}
/*
 * RX handler: split an A-MSDU into its subframes and deliver each one
 * separately.  The outer 802.11 frame is first converted to 802.3;
 * each subframe (inner ethernet header + payload, padded to 4 bytes
 * except the last) is then unpacked into its own skb, re-encapsulated
 * and delivered.  The original skb is reused for the last subframe.
 */
static ieee80211_rx_result
ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx)
{
	struct net_device *dev = rx->dev;
	struct ieee80211_local *local = rx->local;
	u16 fc, ethertype;
	u8 *payload;
	struct sk_buff *skb = rx->skb, *frame = NULL;
	const struct ethhdr *eth;
	int remaining, err;
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	DECLARE_MAC_BUF(mac);

	fc = rx->fc;
	if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
		return RX_CONTINUE;

	if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
		return RX_DROP_MONITOR;

	if (!(rx->flags & IEEE80211_TXRXD_RX_AMSDU))
		return RX_CONTINUE;

	/* Convert the outer 802.11 header to 802.3 in place. */
	err = ieee80211_data_to_8023(rx);
	if (unlikely(err))
		return RX_DROP_UNUSABLE;

	skb->dev = dev;

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* skip the wrapping header */
	eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
	if (!eth)
		return RX_DROP_UNUSABLE;

	/* Iterate subframes; loop ends when skb itself was used as the
	 * (last) subframe. */
	while (skb != frame) {
		u8 padding;
		__be16 len = eth->h_proto;
		/* In A-MSDU subframes the proto field holds the payload
		 * length, not an EtherType. */
		unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);

		remaining = skb->len;
		memcpy(dst, eth->h_dest, ETH_ALEN);
		memcpy(src, eth->h_source, ETH_ALEN);

		padding = ((4 - subframe_len) & 0x3);
		/* the last MSDU has no padding */
		if (subframe_len > remaining) {
			printk(KERN_DEBUG "%s: wrong buffer size", dev->name);
			return RX_DROP_UNUSABLE;
		}

		skb_pull(skb, sizeof(struct ethhdr));
		/* if last subframe reuse skb */
		if (remaining <= subframe_len + padding)
			frame = skb;
		else {
			frame = dev_alloc_skb(local->hw.extra_tx_headroom +
					      subframe_len);

			if (frame == NULL)
				return RX_DROP_UNUSABLE;

			skb_reserve(frame, local->hw.extra_tx_headroom +
				    sizeof(struct ethhdr));
			memcpy(skb_put(frame, ntohs(len)), skb->data,
			       ntohs(len));

			/* Advance to the next subframe header. */
			eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
							 padding);
			if (!eth) {
				printk(KERN_DEBUG "%s: wrong buffer size ",
				       dev->name);
				dev_kfree_skb(frame);
				return RX_DROP_UNUSABLE;
			}
		}

		skb_reset_network_header(frame);
		frame->dev = dev;
		frame->priority = skb->priority;
		rx->skb = frame;

		/* Undo RFC1042 / Bridge-Tunnel LLC encapsulation, mirroring
		 * ieee80211_data_to_8023(). */
		payload = frame->data;
		ethertype = (payload[6] << 8) | payload[7];

		if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
			    ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
			   compare_ether_addr(payload,
					      bridge_tunnel_header) == 0)) {
			/* remove RFC1042 or Bridge-Tunnel
			 * encapsulation and replace EtherType */
			skb_pull(frame, 6);
			memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
			memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
		} else {
			memcpy(skb_push(frame, sizeof(__be16)),
			       &len, sizeof(__be16));
			memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
			memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
		}

		if (!ieee80211_frame_allowed(rx)) {
			if (skb == frame) /* last frame */
				return RX_DROP_UNUSABLE;
			dev_kfree_skb(frame);
			continue;
		}

		ieee80211_deliver_skb(rx);
	}

	return RX_QUEUED;
}
  1180. static ieee80211_rx_result
  1181. ieee80211_rx_h_data(struct ieee80211_txrx_data *rx)
  1182. {
  1183. struct net_device *dev = rx->dev;
  1184. u16 fc;
  1185. int err;
  1186. fc = rx->fc;
  1187. if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA))
  1188. return RX_CONTINUE;
  1189. if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
  1190. return RX_DROP_MONITOR;
  1191. err = ieee80211_data_to_8023(rx);
  1192. if (unlikely(err))
  1193. return RX_DROP_UNUSABLE;
  1194. if (!ieee80211_frame_allowed(rx))
  1195. return RX_DROP_MONITOR;
  1196. rx->skb->dev = dev;
  1197. dev->stats.rx_packets++;
  1198. dev->stats.rx_bytes += rx->skb->len;
  1199. ieee80211_deliver_skb(rx);
  1200. return RX_QUEUED;
  1201. }
/*
 * RX handler: process control frames.  Only Block Ack Request (BAR)
 * frames are acted upon: the receive reorder buffer for the addressed
 * TID is released up to the requested starting sequence number and the
 * aggregation session timer is re-armed.  Other control frames pass
 * through unchanged.
 */
static ieee80211_rx_result
ieee80211_rx_h_ctrl(struct ieee80211_txrx_data *rx)
{
	struct ieee80211_local *local = rx->local;
	struct ieee80211_hw *hw = &local->hw;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_bar *bar = (struct ieee80211_bar *) skb->data;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 start_seq_num;
	u16 tid;

	if (likely((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL))
		return RX_CONTINUE;

	if ((rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ) {
		if (!rx->sta)
			return RX_CONTINUE;
		/* TID is carried in the top 4 bits of the BAR control field. */
		tid = le16_to_cpu(bar->control) >> 12;
		tid_agg_rx = &(rx->sta->ampdu_mlme.tid_rx[tid]);
		if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL)
			return RX_CONTINUE;
		/* Sequence number occupies the upper 12 bits. */
		start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;

		/* reset session timer */
		if (tid_agg_rx->timeout) {
			unsigned long expires =
				jiffies + (tid_agg_rx->timeout / 1000) * HZ;
			mod_timer(&tid_agg_rx->session_timer, expires);
		}

		/* manage reordering buffer according to requested */
		/* sequence number */
		rcu_read_lock();
		ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
						 start_seq_num, 1);
		rcu_read_unlock();
		/* The BAR itself is not passed further up the stack. */
		return RX_DROP_UNUSABLE;
	}

	return RX_CONTINUE;
}
  1238. static ieee80211_rx_result
  1239. ieee80211_rx_h_mgmt(struct ieee80211_txrx_data *rx)
  1240. {
  1241. struct ieee80211_sub_if_data *sdata;
  1242. if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))
  1243. return RX_DROP_MONITOR;
  1244. sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
  1245. if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
  1246. sdata->vif.type == IEEE80211_IF_TYPE_IBSS) &&
  1247. !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
  1248. ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->u.rx.status);
  1249. else
  1250. return RX_DROP_MONITOR;
  1251. return RX_QUEUED;
  1252. }
/*
 * Handle a Michael MIC failure reported by hardware TKIP decryption.
 * Spurious reports (unknown station, unprotected frame, non-zero
 * keyidx on an AP using pairwise keys, or a frame type that cannot be
 * encrypted) are ignored to avoid triggering TKIP countermeasures;
 * genuine failures are forwarded to userspace.  Always consumes
 * rx->skb.
 */
static void ieee80211_rx_michael_mic_report(struct net_device *dev,
					    struct ieee80211_hdr *hdr,
					    struct ieee80211_txrx_data *rx)
{
	int keyidx, hdrlen;
	DECLARE_MAC_BUF(mac);
	DECLARE_MAC_BUF(mac2);

	hdrlen = ieee80211_get_hdrlen_from_skb(rx->skb);
	/* keyidx: bits 6-7 of the byte at hdrlen+3 (the IV keyid octet);
	 * -1 if the frame is too short to contain it. */
	if (rx->skb->len >= hdrlen + 4)
		keyidx = rx->skb->data[hdrlen + 3] >> 6;
	else
		keyidx = -1;

	if (net_ratelimit())
		printk(KERN_DEBUG "%s: TKIP hwaccel reported Michael MIC "
		       "failure from %s to %s keyidx=%d\n",
		       dev->name, print_mac(mac, hdr->addr2),
		       print_mac(mac2, hdr->addr1), keyidx);

	if (!rx->sta) {
		/*
		 * Some hardware seem to generate incorrect Michael MIC
		 * reports; ignore them to avoid triggering countermeasures.
		 */
		if (net_ratelimit())
			printk(KERN_DEBUG "%s: ignored spurious Michael MIC "
			       "error for unknown address %s\n",
			       dev->name, print_mac(mac, hdr->addr2));
		goto ignore;
	}

	if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) {
		if (net_ratelimit())
			printk(KERN_DEBUG "%s: ignored spurious Michael MIC "
			       "error for a frame with no PROTECTED flag (src "
			       "%s)\n", dev->name, print_mac(mac, hdr->addr2));
		goto ignore;
	}

	if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) {
		/*
		 * APs with pairwise keys should never receive Michael MIC
		 * errors for non-zero keyidx because these are reserved for
		 * group keys and only the AP is sending real multicast
		 * frames in the BSS.
		 */
		if (net_ratelimit())
			printk(KERN_DEBUG "%s: ignored Michael MIC error for "
			       "a frame with non-zero keyidx (%d)"
			       " (src %s)\n", dev->name, keyidx,
			       print_mac(mac, hdr->addr2));
		goto ignore;
	}

	/* Only data frames and Auth frames can legitimately be encrypted. */
	if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA &&
	    ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT ||
	     (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) {
		if (net_ratelimit())
			printk(KERN_DEBUG "%s: ignored spurious Michael MIC "
			       "error for a frame that cannot be encrypted "
			       "(fc=0x%04x) (src %s)\n",
			       dev->name, rx->fc, print_mac(mac, hdr->addr2));
		goto ignore;
	}

	/* Genuine failure: raise a wireless event for userspace. */
	mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr);
 ignore:
	dev_kfree_skb(rx->skb);
	rx->skb = NULL;
}
/*
 * Deliver an unconsumed or monitor-dropped frame to all "cooked"
 * monitor interfaces, prepending a minimal radiotap header (flags,
 * rate, channel).  The frame is cloned for every monitor after the
 * first; if no cooked monitor exists, the skb is freed.  A flag on rx
 * prevents the same frame from being reported twice.
 */
static void ieee80211_rx_cooked_monitor(struct ieee80211_txrx_data *rx)
{
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_local *local = rx->local;
	/* On-stack layout of the radiotap header we prepend. */
	struct ieee80211_rtap_hdr {
		struct ieee80211_radiotap_header hdr;
		u8 flags;
		u8 rate;
		__le16 chan_freq;
		__le16 chan_flags;
	} __attribute__ ((packed)) *rthdr;
	struct sk_buff *skb = rx->skb, *skb2;
	struct net_device *prev_dev = NULL;
	struct ieee80211_rx_status *status = rx->u.rx.status;

	if (rx->flags & IEEE80211_TXRXD_RX_CMNTR_REPORTED)
		goto out_free_skb;

	/* Ensure enough headroom for the radiotap header. */
	if (skb_headroom(skb) < sizeof(*rthdr) &&
	    pskb_expand_head(skb, sizeof(*rthdr), 0, GFP_ATOMIC))
		goto out_free_skb;

	rthdr = (void *)skb_push(skb, sizeof(*rthdr));
	memset(rthdr, 0, sizeof(*rthdr));
	rthdr->hdr.it_len = cpu_to_le16(sizeof(*rthdr));
	rthdr->hdr.it_present =
		cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
			    (1 << IEEE80211_RADIOTAP_RATE) |
			    (1 << IEEE80211_RADIOTAP_CHANNEL));

	/* NOTE(review): bitrate/5 yields the radiotap 500 kb/s unit —
	 * assumes bitrate is stored in 100 kb/s units; confirm. */
	rthdr->rate = rx->u.rx.rate->bitrate / 5;
	rthdr->chan_freq = cpu_to_le16(status->freq);

	if (status->band == IEEE80211_BAND_5GHZ)
		rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_OFDM |
						IEEE80211_CHAN_5GHZ);
	else
		rthdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_DYN |
						IEEE80211_CHAN_2GHZ);

	skb_set_mac_header(skb, 0);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!netif_running(sdata->dev))
			continue;

		if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR ||
		    !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
			continue;

		/* Clone for every monitor after the first; the original
		 * skb goes to the last matching interface. */
		if (prev_dev) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				skb2->dev = prev_dev;
				netif_rx(skb2);
			}
		}

		prev_dev = sdata->dev;
		sdata->dev->stats.rx_packets++;
		sdata->dev->stats.rx_bytes += skb->len;
	}

	if (prev_dev) {
		skb->dev = prev_dev;
		netif_rx(skb);
		skb = NULL;
	} else
		goto out_free_skb;

	rx->flags |= IEEE80211_TXRXD_RX_CMNTR_REPORTED;
	return;

 out_free_skb:
	dev_kfree_skb(skb);
}
/* Prototype shared by all handlers in the RX pipeline below. */
typedef ieee80211_rx_result (*ieee80211_rx_handler)(struct ieee80211_txrx_data *);

/*
 * Ordered RX pipeline, walked by ieee80211_invoke_rx_handlers(): each
 * handler either lets the frame continue (RX_CONTINUE) or terminates
 * processing (queued or dropped).  NULL-terminated.
 */
static ieee80211_rx_handler ieee80211_rx_handlers[] =
{
	ieee80211_rx_h_if_stats,
	ieee80211_rx_h_passive_scan,
	ieee80211_rx_h_check,
	ieee80211_rx_h_decrypt,
	ieee80211_rx_h_sta_process,
	ieee80211_rx_h_defragment,
	ieee80211_rx_h_ps_poll,
	ieee80211_rx_h_michael_mic_verify,
	/* this must be after decryption - so header is counted in MPDU mic
	 * must be before pae and data, so QOS_DATA format frames
	 * are not passed to user space by these functions
	 */
	ieee80211_rx_h_remove_qos_control,
	ieee80211_rx_h_amsdu,
	ieee80211_rx_h_data,
	ieee80211_rx_h_ctrl,
	ieee80211_rx_h_mgmt,
	NULL
};
/*
 * Run the RX handler pipeline on one skb for the given virtual
 * interface.  Handlers returning RX_CONTINUE pass the frame onwards;
 * the first other result stops the walk.  Frames that fall off the end
 * (RX_CONTINUE) or return RX_DROP_MONITOR go to the cooked monitor
 * path; RX_DROP_UNUSABLE frames are freed; RX_QUEUED means the handler
 * consumed the skb itself.
 */
static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
					 struct ieee80211_txrx_data *rx,
					 struct sk_buff *skb)
{
	ieee80211_rx_handler *handler;
	ieee80211_rx_result res = RX_DROP_MONITOR;

	rx->skb = skb;
	rx->sdata = sdata;
	rx->dev = sdata->dev;

	for (handler = ieee80211_rx_handlers; *handler != NULL; handler++) {
		res = (*handler)(rx);

		switch (res) {
		case RX_CONTINUE:
			continue;
		case RX_DROP_UNUSABLE:
		case RX_DROP_MONITOR:
			I802_DEBUG_INC(sdata->local->rx_handlers_drop);
			if (rx->sta)
				rx->sta->rx_dropped++;
			break;
		case RX_QUEUED:
			I802_DEBUG_INC(sdata->local->rx_handlers_queued);
			break;
		}
		/* Any non-CONTINUE result terminates the pipeline. */
		break;
	}

	switch (res) {
	case RX_CONTINUE:
	case RX_DROP_MONITOR:
		/* Not consumed: offer to cooked-monitor interfaces. */
		ieee80211_rx_cooked_monitor(rx);
		break;
	case RX_DROP_UNUSABLE:
		dev_kfree_skb(rx->skb);
		break;
		/* RX_QUEUED: the handler took ownership of the skb. */
	}
}
  1441. /* main receive path */
/*
 * Decide whether the frame described by (bssid, hdr) should be
 * processed on this virtual interface.  Returns 1 to accept, 0 to
 * skip.  Frames accepted only because of scanning or promiscuous mode
 * get the RXRA_MATCH flag cleared so later handlers treat them as not
 * addressed to us.
 */
static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
				u8 *bssid, struct ieee80211_txrx_data *rx,
				struct ieee80211_hdr *hdr)
{
	int multicast = is_multicast_ether_addr(hdr->addr1);

	switch (sdata->vif.type) {
	case IEEE80211_IF_TYPE_STA:
		if (!bssid)
			return 0;
		if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
			/* Foreign BSS: only interesting while scanning. */
			if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN))
				return 0;
			rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH;
		} else if (!multicast &&
			   compare_ether_addr(sdata->dev->dev_addr,
					      hdr->addr1) != 0) {
			/* Unicast to someone else: promiscuous mode only. */
			if (!(sdata->dev->flags & IFF_PROMISC))
				return 0;
			rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH;
		}
		break;
	case IEEE80211_IF_TYPE_IBSS:
		if (!bssid)
			return 0;
		/* Beacons are always accepted (needed for merging etc.). */
		if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT &&
		    (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
			return 1;
		else if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) {
			if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN))
				return 0;
			rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH;
		} else if (!multicast &&
			   compare_ether_addr(sdata->dev->dev_addr,
					      hdr->addr1) != 0) {
			if (!(sdata->dev->flags & IFF_PROMISC))
				return 0;
			rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH;
		} else if (!rx->sta)
			/* Learn new IBSS peers from their transmissions. */
			rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb,
							 bssid, hdr->addr2);
		break;
	case IEEE80211_IF_TYPE_VLAN:
	case IEEE80211_IF_TYPE_AP:
		if (!bssid) {
			/* Frames without a BSSID must be addressed to us. */
			if (compare_ether_addr(sdata->dev->dev_addr,
					       hdr->addr1))
				return 0;
		} else if (!ieee80211_bssid_match(bssid,
					sdata->dev->dev_addr)) {
			if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN))
				return 0;
			rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH;
		}
		if (sdata->dev == sdata->local->mdev &&
		    !(rx->flags & IEEE80211_TXRXD_RXIN_SCAN))
			/* do not receive anything via
			 * master device when not scanning */
			return 0;
		break;
	case IEEE80211_IF_TYPE_WDS:
		/* Only data frames from the configured WDS peer. */
		if (bssid ||
		    (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)
			return 0;
		if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
			return 0;
		break;
	case IEEE80211_IF_TYPE_MNTR:
		/* take everything */
		break;
	case IEEE80211_IF_TYPE_INVALID:
		/* should never get here */
		WARN_ON(1);
		break;
	}

	return 1;
}
/*
 * This is the actual Rx frames handler. As it belongs to the Rx path
 * it must be called with rcu_read_lock protection.
 *
 * Delivers one MPDU to every virtual interface that wants it; the skb
 * is copied for all matching interfaces but the last, which consumes
 * the original.  If no interface matches, the skb is freed here.
 */
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
					 struct sk_buff *skb,
					 struct ieee80211_rx_status *status,
					 u32 load,
					 struct ieee80211_rate *rate)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_hdr *hdr;
	struct ieee80211_txrx_data rx;
	u16 type;
	int prepares;
	struct ieee80211_sub_if_data *prev = NULL;
	struct sk_buff *skb_new;
	u8 *bssid;

	hdr = (struct ieee80211_hdr *) skb->data;
	memset(&rx, 0, sizeof(rx));
	rx.skb = skb;
	rx.local = local;

	rx.u.rx.status = status;
	rx.u.rx.load = load;
	rx.u.rx.rate = rate;
	rx.fc = le16_to_cpu(hdr->frame_control);
	type = rx.fc & IEEE80211_FCTL_FTYPE;

	/* data and management frames count as received fragments (MIB) */
	if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT)
		local->dot11ReceivedFragmentCount++;

	/* look up the transmitter; holds a reference dropped at "end" */
	rx.sta = sta_info_get(local, hdr->addr2);
	if (rx.sta) {
		rx.dev = rx.sta->dev;
		rx.sdata = IEEE80211_DEV_TO_SUB_IF(rx.dev);
	}

	/* a Michael MIC failure bypasses normal processing entirely */
	if ((status->flag & RX_FLAG_MMIC_ERROR)) {
		ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx);
		goto end;
	}

	if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning))
		rx.flags |= IEEE80211_TXRXD_RXIN_SCAN;

	ieee80211_parse_qos(&rx);
	/* may replace rx.skb, hence the reload of the local below */
	ieee80211_verify_ip_alignment(&rx);
	skb = rx.skb;

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (!netif_running(sdata->dev))
			continue;

		/* monitor interfaces get their frames via rx_monitor */
		if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR)
			continue;

		bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
		/* prepare_for_handlers() may clear this flag again */
		rx.flags |= IEEE80211_TXRXD_RXRA_MATCH;
		prepares = prepare_for_handlers(sdata, bssid, &rx, hdr);

		if (!prepares)
			continue;

		/*
		 * frame is destined for this interface, but if it's not
		 * also for the previous one we handle that after the
		 * loop to avoid copying the SKB once too much
		 */
		if (!prev) {
			prev = sdata;
			continue;
		}

		/*
		 * frame was destined for the previous interface
		 * so invoke RX handlers for it
		 */
		skb_new = skb_copy(skb, GFP_ATOMIC);
		if (!skb_new) {
			if (net_ratelimit())
				printk(KERN_DEBUG "%s: failed to copy "
				       "multicast frame for %s",
				       wiphy_name(local->hw.wiphy),
				       prev->dev->name);
			continue;
		}
		/* handlers may have modified rx.fc; restore it per frame */
		rx.fc = le16_to_cpu(hdr->frame_control);
		ieee80211_invoke_rx_handlers(prev, &rx, skb_new);
		prev = sdata;
	}
	if (prev) {
		rx.fc = le16_to_cpu(hdr->frame_control);
		/* last matching interface consumes the original skb */
		ieee80211_invoke_rx_handlers(prev, &rx, skb);
	} else
		/* no interface wanted the frame */
		dev_kfree_skb(skb);

 end:
	if (rx.sta)
		sta_info_put(rx.sta);
}
  1607. #define SEQ_MODULO 0x1000
  1608. #define SEQ_MASK 0xfff
  1609. static inline int seq_less(u16 sq1, u16 sq2)
  1610. {
  1611. return (((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1));
  1612. }
  1613. static inline u16 seq_inc(u16 sq)
  1614. {
  1615. return ((sq + 1) & SEQ_MASK);
  1616. }
  1617. static inline u16 seq_sub(u16 sq1, u16 sq2)
  1618. {
  1619. return ((sq1 - sq2) & SEQ_MASK);
  1620. }
/*
 * Run one MPDU through the per-TID A-MPDU reordering buffer.
 *
 * As this function belongs to the Rx path it must be called with
 * the proper rcu_read_lock protection for its flow.
 *
 * @mpdu_seq_num: 12-bit sequence number of @skb
 * @bar_req: non-zero when called for a Block Ack Request, which forces
 *           release of all frames up to the requested sequence number
 *
 * Returns 1 when this function consumed the skb (stored it, released
 * it to the stack internally, or dropped it), 0 when the caller should
 * continue processing the frame itself.
 */
u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
				    struct tid_ampdu_rx *tid_agg_rx,
				    struct sk_buff *skb, u16 mpdu_seq_num,
				    int bar_req)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_rx_status status;
	u16 head_seq_num, buf_size;
	int index;
	u32 pkt_load;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rate *rate;

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/* frame with out of date sequence number */
	if (seq_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		return 1;
	}

	/* if frame sequence number exceeds our buffering window size or
	 * block Ack Request arrived - release stored frames */
	if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) {
		/* new head to the ordering buffer */
		if (bar_req)
			head_seq_num = mpdu_seq_num;
		else
			head_seq_num =
				seq_inc(seq_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
			index = seq_sub(tid_agg_rx->head_seq_num,
					tid_agg_rx->ssn)
				% tid_agg_rx->buf_size;

			if (tid_agg_rx->reorder_buf[index]) {
				/* release the reordered frames to stack;
				 * the rx status was stashed in skb->cb by
				 * ieee80211_rx_irqsafe()/the driver */
				memcpy(&status,
					tid_agg_rx->reorder_buf[index]->cb,
					sizeof(status));
				sband = local->hw.wiphy->bands[status.band];
				rate = &sband->bitrates[status.rate_idx];
				pkt_load = ieee80211_rx_load_stats(local,
						tid_agg_rx->reorder_buf[index],
						&status, rate);
				__ieee80211_rx_handle_packet(hw,
					tid_agg_rx->reorder_buf[index],
					&status, pkt_load, rate);
				tid_agg_rx->stored_mpdu_num--;
				tid_agg_rx->reorder_buf[index] = NULL;
			}
			tid_agg_rx->head_seq_num =
				seq_inc(tid_agg_rx->head_seq_num);
		}
		if (bar_req)
			return 1;
	}

	/* now the new frame is always in the range of the reordering */
	/* buffer window */
	index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn)
				% tid_agg_rx->buf_size;
	/* check if we already stored this frame */
	if (tid_agg_rx->reorder_buf[index]) {
		/* duplicate MPDU - drop it */
		dev_kfree_skb(skb);
		return 1;
	}

	/* if arrived mpdu is in the right order and nothing else stored */
	/* release it immediately */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
			tid_agg_rx->stored_mpdu_num == 0) {
		tid_agg_rx->head_seq_num =
			seq_inc(tid_agg_rx->head_seq_num);
		/* caller delivers the frame itself */
		return 0;
	}

	/* put the frame in the reordering buffer */
	tid_agg_rx->reorder_buf[index] = skb;
	tid_agg_rx->stored_mpdu_num++;
	/* release the buffer until next missing frame */
	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
						% tid_agg_rx->buf_size;
	while (tid_agg_rx->reorder_buf[index]) {
		/* release the reordered frame back to stack */
		memcpy(&status, tid_agg_rx->reorder_buf[index]->cb,
			sizeof(status));
		sband = local->hw.wiphy->bands[status.band];
		rate = &sband->bitrates[status.rate_idx];
		pkt_load = ieee80211_rx_load_stats(local,
					tid_agg_rx->reorder_buf[index],
					&status, rate);
		__ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
					     &status, pkt_load, rate);
		tid_agg_rx->stored_mpdu_num--;
		tid_agg_rx->reorder_buf[index] = NULL;
		tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
		index =	seq_sub(tid_agg_rx->head_seq_num,
			tid_agg_rx->ssn) % tid_agg_rx->buf_size;
	}
	return 1;
}
  1722. static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
  1723. struct sk_buff *skb)
  1724. {
  1725. struct ieee80211_hw *hw = &local->hw;
  1726. struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
  1727. struct sta_info *sta;
  1728. struct tid_ampdu_rx *tid_agg_rx;
  1729. u16 fc, sc;
  1730. u16 mpdu_seq_num;
  1731. u8 ret = 0, *qc;
  1732. int tid;
  1733. sta = sta_info_get(local, hdr->addr2);
  1734. if (!sta)
  1735. return ret;
  1736. fc = le16_to_cpu(hdr->frame_control);
  1737. /* filter the QoS data rx stream according to
  1738. * STA/TID and check if this STA/TID is on aggregation */
  1739. if (!WLAN_FC_IS_QOS_DATA(fc))
  1740. goto end_reorder;
  1741. qc = skb->data + ieee80211_get_hdrlen(fc) - QOS_CONTROL_LEN;
  1742. tid = qc[0] & QOS_CONTROL_TID_MASK;
  1743. tid_agg_rx = &(sta->ampdu_mlme.tid_rx[tid]);
  1744. if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL)
  1745. goto end_reorder;
  1746. /* null data frames are excluded */
  1747. if (unlikely(fc & IEEE80211_STYPE_NULLFUNC))
  1748. goto end_reorder;
  1749. /* new un-ordered ampdu frame - process it */
  1750. /* reset session timer */
  1751. if (tid_agg_rx->timeout) {
  1752. unsigned long expires =
  1753. jiffies + (tid_agg_rx->timeout / 1000) * HZ;
  1754. mod_timer(&tid_agg_rx->session_timer, expires);
  1755. }
  1756. /* if this mpdu is fragmented - terminate rx aggregation session */
  1757. sc = le16_to_cpu(hdr->seq_ctrl);
  1758. if (sc & IEEE80211_SCTL_FRAG) {
  1759. ieee80211_sta_stop_rx_ba_session(sta->dev, sta->addr,
  1760. tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
  1761. ret = 1;
  1762. goto end_reorder;
  1763. }
  1764. /* according to mpdu sequence number deal with reordering buffer */
  1765. mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
  1766. ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
  1767. mpdu_seq_num, 0);
  1768. end_reorder:
  1769. if (sta)
  1770. sta_info_put(sta);
  1771. return ret;
  1772. }
  1773. /*
  1774. * This is the receive path handler. It is called by a low level driver when an
  1775. * 802.11 MPDU is received from the hardware.
  1776. */
  1777. void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
  1778. struct ieee80211_rx_status *status)
  1779. {
  1780. struct ieee80211_local *local = hw_to_local(hw);
  1781. u32 pkt_load;
  1782. struct ieee80211_rate *rate = NULL;
  1783. struct ieee80211_supported_band *sband;
  1784. if (status->band < 0 ||
  1785. status->band > IEEE80211_NUM_BANDS) {
  1786. WARN_ON(1);
  1787. return;
  1788. }
  1789. sband = local->hw.wiphy->bands[status->band];
  1790. if (!sband ||
  1791. status->rate_idx < 0 ||
  1792. status->rate_idx >= sband->n_bitrates) {
  1793. WARN_ON(1);
  1794. return;
  1795. }
  1796. rate = &sband->bitrates[status->rate_idx];
  1797. /*
  1798. * key references and virtual interfaces are protected using RCU
  1799. * and this requires that we are in a read-side RCU section during
  1800. * receive processing
  1801. */
  1802. rcu_read_lock();
  1803. /*
  1804. * Frames with failed FCS/PLCP checksum are not returned,
  1805. * all other frames are returned without radiotap header
  1806. * if it was previously present.
  1807. * Also, frames with less than 16 bytes are dropped.
  1808. */
  1809. skb = ieee80211_rx_monitor(local, skb, status, rate);
  1810. if (!skb) {
  1811. rcu_read_unlock();
  1812. return;
  1813. }
  1814. pkt_load = ieee80211_rx_load_stats(local, skb, status, rate);
  1815. local->channel_use_raw += pkt_load;
  1816. if (!ieee80211_rx_reorder_ampdu(local, skb))
  1817. __ieee80211_rx_handle_packet(hw, skb, status, pkt_load, rate);
  1818. rcu_read_unlock();
  1819. }
  1820. EXPORT_SYMBOL(__ieee80211_rx);
  1821. /* This is a version of the rx handler that can be called from hard irq
  1822. * context. Post the skb on the queue and schedule the tasklet */
  1823. void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb,
  1824. struct ieee80211_rx_status *status)
  1825. {
  1826. struct ieee80211_local *local = hw_to_local(hw);
  1827. BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
  1828. skb->dev = local->mdev;
  1829. /* copy status into skb->cb for use by tasklet */
  1830. memcpy(skb->cb, status, sizeof(*status));
  1831. skb->pkt_type = IEEE80211_RX_MSG;
  1832. skb_queue_tail(&local->skb_queue, skb);
  1833. tasklet_schedule(&local->tasklet);
  1834. }
  1835. EXPORT_SYMBOL(ieee80211_rx_irqsafe);