/*
 * drivers/net/wireless/mwl8k.c
 * Driver for Marvell TOPDOG 802.11 Wireless cards
 *
 * Copyright (C) 2008 Marvell Semiconductor Inc.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/workqueue.h>

#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver"
#define MWL8K_NAME KBUILD_MODNAME
#define MWL8K_VERSION "0.9.1"

MODULE_DESCRIPTION(MWL8K_DESC);
MODULE_VERSION(MWL8K_VERSION);
MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com>");
MODULE_LICENSE("GPL");

static DEFINE_PCI_DEVICE_TABLE(mwl8k_table) = {
	{ PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = 8687, },
	{ PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = 8687, },
	{ }
};
MODULE_DEVICE_TABLE(pci, mwl8k_table);

/* Register definitions */
#define MWL8K_HIU_GEN_PTR 0x00000c10
#define MWL8K_MODE_STA 0x0000005a
#define MWL8K_MODE_AP 0x000000a5
#define MWL8K_HIU_INT_CODE 0x00000c14
#define MWL8K_FWSTA_READY 0xf0f1f2f4
#define MWL8K_FWAP_READY 0xf1f2f4a5
#define MWL8K_INT_CODE_CMD_FINISHED 0x00000005
#define MWL8K_HIU_SCRATCH 0x00000c40

/* Host->device communications */
#define MWL8K_HIU_H2A_INTERRUPT_EVENTS 0x00000c18
#define MWL8K_HIU_H2A_INTERRUPT_STATUS 0x00000c1c
#define MWL8K_HIU_H2A_INTERRUPT_MASK 0x00000c20
#define MWL8K_HIU_H2A_INTERRUPT_CLEAR_SEL 0x00000c24
#define MWL8K_HIU_H2A_INTERRUPT_STATUS_MASK 0x00000c28
#define MWL8K_H2A_INT_DUMMY (1 << 20)
#define MWL8K_H2A_INT_RESET (1 << 15)
#define MWL8K_H2A_INT_DOORBELL (1 << 1)
#define MWL8K_H2A_INT_PPA_READY (1 << 0)

/* Device->host communications */
#define MWL8K_HIU_A2H_INTERRUPT_EVENTS 0x00000c2c
#define MWL8K_HIU_A2H_INTERRUPT_STATUS 0x00000c30
#define MWL8K_HIU_A2H_INTERRUPT_MASK 0x00000c34
#define MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL 0x00000c38
#define MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK 0x00000c3c
#define MWL8K_A2H_INT_DUMMY (1 << 20)
#define MWL8K_A2H_INT_CHNL_SWITCHED (1 << 11)
#define MWL8K_A2H_INT_QUEUE_EMPTY (1 << 10)
#define MWL8K_A2H_INT_RADAR_DETECT (1 << 7)
#define MWL8K_A2H_INT_RADIO_ON (1 << 6)
#define MWL8K_A2H_INT_RADIO_OFF (1 << 5)
#define MWL8K_A2H_INT_MAC_EVENT (1 << 3)
#define MWL8K_A2H_INT_OPC_DONE (1 << 2)
#define MWL8K_A2H_INT_RX_READY (1 << 1)
#define MWL8K_A2H_INT_TX_DONE (1 << 0)

#define MWL8K_A2H_EVENTS	(MWL8K_A2H_INT_DUMMY | \
				 MWL8K_A2H_INT_CHNL_SWITCHED | \
				 MWL8K_A2H_INT_QUEUE_EMPTY | \
				 MWL8K_A2H_INT_RADAR_DETECT | \
				 MWL8K_A2H_INT_RADIO_ON | \
				 MWL8K_A2H_INT_RADIO_OFF | \
				 MWL8K_A2H_INT_MAC_EVENT | \
				 MWL8K_A2H_INT_OPC_DONE | \
				 MWL8K_A2H_INT_RX_READY | \
				 MWL8K_A2H_INT_TX_DONE)

/* WME stream classes */
#define WME_AC_BE 0	/* best effort */
#define WME_AC_BK 1	/* background */
#define WME_AC_VI 2	/* video */
#define WME_AC_VO 3	/* voice */

#define MWL8K_RX_QUEUES 1
#define MWL8K_TX_QUEUES 4

struct mwl8k_rx_queue {
	int rx_desc_count;
	/* hw receives here */
	int rx_head;
	/* refill descs here */
	int rx_tail;
	struct mwl8k_rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	struct sk_buff **rx_skb;
};

struct mwl8k_skb {
	/*
	 * The DMA engine requires a modification to the payload.
	 * If the skbuff is shared/cloned, it needs to be unshared.
	 * This method is used to ensure the stack always gets back
	 * the skbuff it sent for transmission.
	 */
	struct sk_buff *clone;
	struct sk_buff *skb;
};

struct mwl8k_tx_queue {
	/* hw transmits here */
	int tx_head;
	/* sw appends here */
	int tx_tail;
	struct ieee80211_tx_queue_stats tx_stats;
	struct mwl8k_tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	struct mwl8k_skb *tx_skb;
};

/* Pointers to the firmware data and meta information about it. */
struct mwl8k_firmware {
	/* Microcode */
	struct firmware *ucode;
	/* Boot helper code */
	struct firmware *helper;
};

struct mwl8k_priv {
	void __iomem *regs;
	struct ieee80211_hw *hw;
	struct pci_dev *pdev;
	u8 name[16];
	/* firmware access lock */
	spinlock_t fw_lock;
	/* firmware files and meta data */
	struct mwl8k_firmware fw;
	u32 part_num;
	/* lock held over TX and TX reap */
	spinlock_t tx_lock;
	struct ieee80211_vif *vif;
	struct ieee80211_channel *current_channel;
	/* power management status cookie from firmware */
	u32 *cookie;
	dma_addr_t cookie_dma;
	u16 num_mcaddrs;
	u8 hw_rev;
	__le32 fw_rev;
	/*
	 * Running count of TX packets in flight, to avoid
	 * iterating over the transmit rings each time.
	 */
	int pending_tx_pkts;
	struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
	struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES];
	/* PHY parameters */
	struct ieee80211_supported_band band;
	struct ieee80211_channel channels[14];
	struct ieee80211_rate rates[12];
	bool radio_on;
	bool radio_short_preamble;
	bool wmm_enabled;
	/* Set if PHY config is in progress */
	bool inconfig;
	/* XXX need to convert this to handle multiple interfaces */
	bool capture_beacon;
	u8 capture_bssid[ETH_ALEN];
	struct sk_buff *beacon_skb;
	/*
	 * This FJ worker has to be global as it is scheduled from the
	 * RX handler. At this point we don't know which interface it
	 * belongs to until the list of bssids waiting to complete join
	 * is checked.
	 */
	struct work_struct finalize_join_worker;
	/* Tasklet to reclaim TX descriptors and buffers after tx */
	struct tasklet_struct tx_reclaim_task;
	/* Work thread to serialize configuration requests */
	struct workqueue_struct *config_wq;
	struct completion *hostcmd_wait;
	struct completion *tx_wait;
};

/* Per interface specific private data */
struct mwl8k_vif {
	/* backpointer to parent config block */
	struct mwl8k_priv *priv;
	/* BSS config of AP or IBSS from mac80211 */
	struct ieee80211_bss_conf bss_info;
	/* BSSID of AP or IBSS */
	u8 bssid[ETH_ALEN];
	u8 mac_addr[ETH_ALEN];
	/*
	 * Subset of supported legacy rates.
	 * Intersection of AP and STA supported rates.
	 */
	struct ieee80211_rate legacy_rates[12];
	/* number of supported legacy rates */
	u8 legacy_nrates;
	/* Index into station database. Returned by update_sta_db call */
	u8 peer_id;
	/* Non AMPDU sequence number assigned by driver */
	u16 seqno;
};

#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))

static const struct ieee80211_channel mwl8k_channels[] = {
	{ .center_freq = 2412, .hw_value = 1, },
	{ .center_freq = 2417, .hw_value = 2, },
	{ .center_freq = 2422, .hw_value = 3, },
	{ .center_freq = 2427, .hw_value = 4, },
	{ .center_freq = 2432, .hw_value = 5, },
	{ .center_freq = 2437, .hw_value = 6, },
	{ .center_freq = 2442, .hw_value = 7, },
	{ .center_freq = 2447, .hw_value = 8, },
	{ .center_freq = 2452, .hw_value = 9, },
	{ .center_freq = 2457, .hw_value = 10, },
	{ .center_freq = 2462, .hw_value = 11, },
};
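
/*
 * Bitrates below are in 100 kb/s units (the mac80211 convention); each
 * hw_value is the same rate expressed in 500 kb/s units, e.g. 10 -> 2
 * for 1 Mb/s.
 */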
static const struct ieee80211_rate mwl8k_rates[] = {
	{ .bitrate = 10, .hw_value = 2, },
	{ .bitrate = 20, .hw_value = 4, },
	{ .bitrate = 55, .hw_value = 11, },
	{ .bitrate = 60, .hw_value = 12, },
	{ .bitrate = 90, .hw_value = 18, },
	{ .bitrate = 110, .hw_value = 22, },
	{ .bitrate = 120, .hw_value = 24, },
	{ .bitrate = 180, .hw_value = 36, },
	{ .bitrate = 240, .hw_value = 48, },
	{ .bitrate = 360, .hw_value = 72, },
	{ .bitrate = 480, .hw_value = 96, },
	{ .bitrate = 540, .hw_value = 108, },
};

/* Set or get info from Firmware */
#define MWL8K_CMD_SET 0x0001
#define MWL8K_CMD_GET 0x0000

/* Firmware command codes */
#define MWL8K_CMD_CODE_DNLD 0x0001
#define MWL8K_CMD_GET_HW_SPEC 0x0003
#define MWL8K_CMD_MAC_MULTICAST_ADR 0x0010
#define MWL8K_CMD_GET_STAT 0x0014
#define MWL8K_CMD_RADIO_CONTROL 0x001c
#define MWL8K_CMD_RF_TX_POWER 0x001e
#define MWL8K_CMD_SET_PRE_SCAN 0x0107
#define MWL8K_CMD_SET_POST_SCAN 0x0108
#define MWL8K_CMD_SET_RF_CHANNEL 0x010a
#define MWL8K_CMD_SET_AID 0x010d
#define MWL8K_CMD_SET_RATE 0x0110
#define MWL8K_CMD_SET_FINALIZE_JOIN 0x0111
#define MWL8K_CMD_RTS_THRESHOLD 0x0113
#define MWL8K_CMD_SET_SLOT 0x0114
#define MWL8K_CMD_SET_EDCA_PARAMS 0x0115
#define MWL8K_CMD_SET_WMM_MODE 0x0123
#define MWL8K_CMD_MIMO_CONFIG 0x0125
#define MWL8K_CMD_USE_FIXED_RATE 0x0126
#define MWL8K_CMD_ENABLE_SNIFFER 0x0150
#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
#define MWL8K_CMD_UPDATE_STADB 0x1123

static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
{
#define MWL8K_CMDNAME(x)	case MWL8K_CMD_##x: do {\
					snprintf(buf, bufsize, "%s", #x);\
					return buf;\
				} while (0)

	switch (cmd & ~0x8000) {
		MWL8K_CMDNAME(CODE_DNLD);
		MWL8K_CMDNAME(GET_HW_SPEC);
		MWL8K_CMDNAME(MAC_MULTICAST_ADR);
		MWL8K_CMDNAME(GET_STAT);
		MWL8K_CMDNAME(RADIO_CONTROL);
		MWL8K_CMDNAME(RF_TX_POWER);
		MWL8K_CMDNAME(SET_PRE_SCAN);
		MWL8K_CMDNAME(SET_POST_SCAN);
		MWL8K_CMDNAME(SET_RF_CHANNEL);
		MWL8K_CMDNAME(SET_AID);
		MWL8K_CMDNAME(SET_RATE);
		MWL8K_CMDNAME(SET_FINALIZE_JOIN);
		MWL8K_CMDNAME(RTS_THRESHOLD);
		MWL8K_CMDNAME(SET_SLOT);
		MWL8K_CMDNAME(SET_EDCA_PARAMS);
		MWL8K_CMDNAME(SET_WMM_MODE);
		MWL8K_CMDNAME(MIMO_CONFIG);
		MWL8K_CMDNAME(USE_FIXED_RATE);
		MWL8K_CMDNAME(ENABLE_SNIFFER);
		MWL8K_CMDNAME(SET_RATEADAPT_MODE);
		MWL8K_CMDNAME(UPDATE_STADB);
	default:
		snprintf(buf, bufsize, "0x%x", cmd);
	}
#undef MWL8K_CMDNAME

	return buf;
}

/* Hardware and firmware reset */
static void mwl8k_hw_reset(struct mwl8k_priv *priv)
{
	iowrite32(MWL8K_H2A_INT_RESET,
		priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
	iowrite32(MWL8K_H2A_INT_RESET,
		priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
	msleep(20);
}

/* Release fw image */
static void mwl8k_release_fw(struct firmware **fw)
{
	if (*fw == NULL)
		return;
	release_firmware(*fw);
	*fw = NULL;
}

static void mwl8k_release_firmware(struct mwl8k_priv *priv)
{
	mwl8k_release_fw(&priv->fw.ucode);
	mwl8k_release_fw(&priv->fw.helper);
}

/* Request fw image */
static int mwl8k_request_fw(struct mwl8k_priv *priv,
			    const char *fname, struct firmware **fw)
{
	/* release current image */
	if (*fw != NULL)
		mwl8k_release_fw(fw);

	return request_firmware((const struct firmware **)fw,
				fname, &priv->pdev->dev);
}

static int mwl8k_request_firmware(struct mwl8k_priv *priv, u32 part_num)
{
	u8 filename[64];
	int rc;

	priv->part_num = part_num;

	snprintf(filename, sizeof(filename),
		 "mwl8k/helper_%u.fw", priv->part_num);
	rc = mwl8k_request_fw(priv, filename, &priv->fw.helper);
	if (rc) {
		printk(KERN_ERR
			"%s Error requesting helper firmware file %s\n",
			pci_name(priv->pdev), filename);
		return rc;
	}

	snprintf(filename, sizeof(filename),
		 "mwl8k/fmimage_%u.fw", priv->part_num);
	rc = mwl8k_request_fw(priv, filename, &priv->fw.ucode);
	if (rc) {
		printk(KERN_ERR "%s Error requesting firmware file %s\n",
			pci_name(priv->pdev), filename);
		mwl8k_release_fw(&priv->fw.helper);
		return rc;
	}

	return 0;
}

struct mwl8k_cmd_pkt {
	__le16 code;
	__le16 length;
	__le16 seq_num;
	__le16 result;
	char payload[0];
} __attribute__((packed));

/*
 * Firmware loading.
 */
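
/*
 * Hand one block of image data to the device: map the buffer for DMA,
 * point the general-purpose pointer register at it, ring the host-to-ARM
 * doorbell and poll MWL8K_HIU_INT_CODE until the firmware reports
 * MWL8K_INT_CODE_CMD_FINISHED (or the poll budget runs out).  The
 * 'opcode done' bit in the A2H interrupt status register is acked
 * afterwards, presumably so it does not fire once interrupts are
 * enabled later.
 */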
static int
mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length)
{
	void __iomem *regs = priv->regs;
	dma_addr_t dma_addr;
	int rc;
	int loops;

	dma_addr = pci_map_single(priv->pdev, data, length, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, dma_addr))
		return -ENOMEM;

	iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR);
	iowrite32(0, regs + MWL8K_HIU_INT_CODE);
	iowrite32(MWL8K_H2A_INT_DOORBELL,
		regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
	iowrite32(MWL8K_H2A_INT_DUMMY,
		regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);

	rc = -ETIMEDOUT;
	loops = 1000;
	do {
		u32 int_code;

		int_code = ioread32(regs + MWL8K_HIU_INT_CODE);
		if (int_code == MWL8K_INT_CODE_CMD_FINISHED) {
			iowrite32(0, regs + MWL8K_HIU_INT_CODE);
			rc = 0;
			break;
		}

		udelay(1);
	} while (--loops);

	pci_unmap_single(priv->pdev, dma_addr, length, PCI_DMA_TODEVICE);

	/*
	 * Clear 'command done' interrupt bit.
	 */
	loops = 1000;
	do {
		u32 status;

		status = ioread32(priv->regs +
				MWL8K_HIU_A2H_INTERRUPT_STATUS);
		if (status & MWL8K_A2H_INT_OPC_DONE) {
			iowrite32(~MWL8K_A2H_INT_OPC_DONE,
				priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
			ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
			break;
		}

		udelay(1);
	} while (--loops);

	return rc;
}

static int mwl8k_load_fw_image(struct mwl8k_priv *priv,
				const u8 *data, size_t length)
{
	struct mwl8k_cmd_pkt *cmd;
	int done;
	int rc = 0;

	cmd = kmalloc(sizeof(*cmd) + 256, GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD);
	cmd->seq_num = 0;
	cmd->result = 0;

	done = 0;
	while (length) {
		int block_size = length > 256 ? 256 : length;

		memcpy(cmd->payload, data + done, block_size);
		cmd->length = cpu_to_le16(block_size);

		rc = mwl8k_send_fw_load_cmd(priv, cmd,
						sizeof(*cmd) + block_size);
		if (rc)
			break;

		done += block_size;
		length -= block_size;
	}

	if (!rc) {
		cmd->length = 0;
		rc = mwl8k_send_fw_load_cmd(priv, cmd, sizeof(*cmd));
	}

	kfree(cmd);

	return rc;
}
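
/*
 * Feed the main firmware image to a device that is running the boot
 * helper.  The helper advertises the size of the next chunk it wants in
 * the scratch register; a set low bit means the previous chunk has not
 * been consumed yet, so the loop keeps the offset where it is and burns
 * one retry.  Each chunk is copied into a bounce buffer and pushed with
 * mwl8k_send_fw_load_cmd() until the whole image has been transferred.
 */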
static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
				const u8 *data, size_t length)
{
	unsigned char *buffer;
	int may_continue, rc = 0;
	u32 done, prev_block_size;

	buffer = kmalloc(1024, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	done = 0;
	prev_block_size = 0;
	may_continue = 1000;
	while (may_continue > 0) {
		u32 block_size;

		block_size = ioread32(priv->regs + MWL8K_HIU_SCRATCH);
		if (block_size & 1) {
			block_size &= ~1;
			may_continue--;
		} else {
			done += prev_block_size;
			length -= prev_block_size;
		}

		if (block_size > 1024 || block_size > length) {
			rc = -EOVERFLOW;
			break;
		}

		if (length == 0) {
			rc = 0;
			break;
		}

		if (block_size == 0) {
			rc = -EPROTO;
			may_continue--;
			udelay(1);
			continue;
		}

		prev_block_size = block_size;
		memcpy(buffer, data + done, block_size);

		rc = mwl8k_send_fw_load_cmd(priv, buffer, block_size);
		if (rc)
			break;
	}

	if (!rc && length != 0)
		rc = -EREMOTEIO;

	kfree(buffer);

	return rc;
}
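
/*
 * Top-level firmware load.  Images whose first 32-bit word is
 * 0x00000001 need the boot helper: the helper is downloaded first with
 * the plain CODE_DNLD command and the main image is then fed to it
 * chunk by chunk; other images are downloaded directly.  Finally the
 * host writes MWL8K_MODE_STA to the general-purpose pointer register
 * and polls the interrupt-code register for the MWL8K_FWSTA_READY
 * signature.
 */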
static int mwl8k_load_firmware(struct mwl8k_priv *priv)
{
	int loops, rc;

	const u8 *ucode = priv->fw.ucode->data;
	size_t ucode_len = priv->fw.ucode->size;
	const u8 *helper = priv->fw.helper->data;
	size_t helper_len = priv->fw.helper->size;

	if (!memcmp(ucode, "\x01\x00\x00\x00", 4)) {
		rc = mwl8k_load_fw_image(priv, helper, helper_len);
		if (rc) {
			printk(KERN_ERR "%s: unable to load firmware "
				"helper image\n", pci_name(priv->pdev));
			return rc;
		}
		msleep(1);

		rc = mwl8k_feed_fw_image(priv, ucode, ucode_len);
	} else {
		rc = mwl8k_load_fw_image(priv, ucode, ucode_len);
	}

	if (rc) {
		printk(KERN_ERR "%s: unable to load firmware data\n",
			pci_name(priv->pdev));
		return rc;
	}

	iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
	msleep(1);

	loops = 200000;
	do {
		if (ioread32(priv->regs + MWL8K_HIU_INT_CODE)
						== MWL8K_FWSTA_READY)
			break;
		udelay(1);
	} while (--loops);

	return loops ? 0 : -ETIMEDOUT;
}

/*
 * Defines shared between transmission and reception.
 */

/* HT control fields for firmware */
struct ewc_ht_info {
	__le16 control1;
	__le16 control2;
	__le16 control3;
} __attribute__((packed));

/* Firmware Station database operations */
#define MWL8K_STA_DB_ADD_ENTRY 0
#define MWL8K_STA_DB_MODIFY_ENTRY 1
#define MWL8K_STA_DB_DEL_ENTRY 2
#define MWL8K_STA_DB_FLUSH 3

/* Peer Entry flags - used to define the type of the peer node */
#define MWL8K_PEER_TYPE_ACCESSPOINT 2

#define MWL8K_IEEE_LEGACY_DATA_RATES 12
#define MWL8K_MCS_BITMAP_SIZE 16

struct peer_capability_info {
	/* Peer type - AP vs. STA. */
	__u8 peer_type;
	/* Basic 802.11 capabilities from assoc resp. */
	__le16 basic_caps;
	/* Set if peer supports 802.11n high throughput (HT). */
	__u8 ht_support;
	/* Valid if HT is supported. */
	__le16 ht_caps;
	__u8 extended_ht_caps;
	struct ewc_ht_info ewc_info;
	/* Legacy rate table. Intersection of our rates and peer rates. */
	__u8 legacy_rates[MWL8K_IEEE_LEGACY_DATA_RATES];
	/* HT rate table. Intersection of our rates and peer rates. */
	__u8 ht_rates[MWL8K_MCS_BITMAP_SIZE];
	__u8 pad[16];
	/* If set, interoperability mode, no proprietary extensions. */
	__u8 interop;
	__u8 pad2;
	__u8 station_id;
	__le16 amsdu_enabled;
} __attribute__((packed));

/* Inline functions to manipulate QoS field in data descriptor. */
static inline u16 mwl8k_qos_setbit_eosp(u16 qos)
{
	u16 val_mask = 1 << 4;

	/* End of Service Period Bit 4 */
	return qos | val_mask;
}

static inline u16 mwl8k_qos_setbit_ack(u16 qos, u8 ack_policy)
{
	u16 val_mask = 0x3;
	u8 shift = 5;
	u16 qos_mask = ~(val_mask << shift);

	/* Ack Policy Bit 5-6 */
	return (qos & qos_mask) | ((ack_policy & val_mask) << shift);
}

static inline u16 mwl8k_qos_setbit_amsdu(u16 qos)
{
	u16 val_mask = 1 << 7;

	/* AMSDU present Bit 7 */
	return qos | val_mask;
}

static inline u16 mwl8k_qos_setbit_qlen(u16 qos, u8 len)
{
	u16 val_mask = 0xff;
	u8 shift = 8;
	u16 qos_mask = ~(val_mask << shift);

	/* Queue Length Bits 8-15 */
	return (qos & qos_mask) | ((len & val_mask) << shift);
}

/* DMA header used by firmware and hardware. */
struct mwl8k_dma_data {
	__le16 fwlen;
	struct ieee80211_hdr wh;
} __attribute__((packed));

/* Routines to add/remove DMA header from skb. */
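
/*
 * On the way back up the stack, drop the 2-byte firmware length field
 * and close the gap left by the fixed-size 4-address header: the actual
 * (possibly shorter) 802.11 header is moved up against the payload and
 * the skb data pointer is advanced past the unused space.
 */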
static inline int mwl8k_remove_dma_header(struct sk_buff *skb)
{
	struct mwl8k_dma_data *tr = (struct mwl8k_dma_data *)(skb->data);
	void *dst, *src = &tr->wh;
	__le16 fc = tr->wh.frame_control;
	int hdrlen = ieee80211_hdrlen(fc);
	u16 space = sizeof(struct mwl8k_dma_data) - hdrlen;

	dst = (void *)tr + space;
	if (dst != src) {
		memmove(dst, src, hdrlen);
		skb_pull(skb, space);
	}

	return 0;
}

static inline struct sk_buff *mwl8k_add_dma_header(struct sk_buff *skb)
{
	struct ieee80211_hdr *wh;
	u32 hdrlen, pktlen;
	struct mwl8k_dma_data *tr;

	wh = (struct ieee80211_hdr *)skb->data;
	hdrlen = ieee80211_hdrlen(wh->frame_control);
	pktlen = skb->len;

	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 */
	if (hdrlen != sizeof(struct mwl8k_dma_data))
		skb_push(skb, sizeof(struct mwl8k_dma_data) - hdrlen);

	tr = (struct mwl8k_dma_data *)skb->data;
	if (wh != &tr->wh)
		memmove(&tr->wh, wh, hdrlen);

	/* Clear addr4 */
	memset(tr->wh.addr4, 0, ETH_ALEN);

	/*
	 * Firmware length is the length of the fully formed "802.11
	 * payload". That is, everything except for the 802.11 header.
	 * This includes all crypto material including the MIC.
	 */
	tr->fwlen = cpu_to_le16(pktlen - hdrlen);

	return skb;
}

/*
 * Packet reception.
 */
#define MWL8K_RX_CTRL_OWNED_BY_HOST 0x02

struct mwl8k_rx_desc {
	__le16 pkt_len;
	__u8 link_quality;
	__u8 noise_level;
	__le32 pkt_phys_addr;
	__le32 next_rx_desc_phys_addr;
	__le16 qos_control;
	__le16 rate_info;
	__le32 pad0[4];
	__u8 rssi;
	__u8 channel;
	__le16 pad1;
	__u8 rx_ctrl;
	__u8 rx_status;
	__u8 pad2[2];
} __attribute__((packed));

#define MWL8K_RX_DESCS 256
#define MWL8K_RX_MAXSZ 3800
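
/*
 * Set up one receive ring: allocate the descriptor array from coherent
 * DMA memory plus a parallel table of skb pointers, and chain the
 * descriptors into a circular list via next_rx_desc_phys_addr.  All
 * descriptors start out owned by the host; rxq_refill() attaches
 * buffers and hands slots over to the hardware.
 */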
static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_rx_queue *rxq = priv->rxq + index;
	int size;
	int i;

	rxq->rx_desc_count = 0;
	rxq->rx_head = 0;
	rxq->rx_tail = 0;

	size = MWL8K_RX_DESCS * sizeof(struct mwl8k_rx_desc);

	rxq->rx_desc_area =
		pci_alloc_consistent(priv->pdev, size, &rxq->rx_desc_dma);
	if (rxq->rx_desc_area == NULL) {
		printk(KERN_ERR "%s: failed to alloc RX descriptors\n",
		       priv->name);
		return -ENOMEM;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_skb = kmalloc(MWL8K_RX_DESCS *
				sizeof(*rxq->rx_skb), GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		printk(KERN_ERR "%s: failed to alloc RX skbuff list\n",
		       priv->name);
		pci_free_consistent(priv->pdev, size,
				    rxq->rx_desc_area, rxq->rx_desc_dma);
		return -ENOMEM;
	}
	memset(rxq->rx_skb, 0, MWL8K_RX_DESCS * sizeof(*rxq->rx_skb));

	for (i = 0; i < MWL8K_RX_DESCS; i++) {
		struct mwl8k_rx_desc *rx_desc;
		int nexti;

		rx_desc = rxq->rx_desc_area + i;
		nexti = (i + 1) % MWL8K_RX_DESCS;

		rx_desc->next_rx_desc_phys_addr =
			cpu_to_le32(rxq->rx_desc_dma
						+ nexti * sizeof(*rx_desc));
		rx_desc->rx_ctrl = MWL8K_RX_CTRL_OWNED_BY_HOST;
	}

	return 0;
}
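
/*
 * Refill the ring with receive buffers, up to 'limit' at a time:
 * allocate an skb of MWL8K_RX_MAXSZ bytes, map it for DMA, record its
 * bus address and length in the descriptor at rx_tail, and only then
 * (after a write barrier) clear rx_ctrl so the hardware may use the
 * slot.  Returns the number of descriptors actually refilled.
 */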
static int rxq_refill(struct ieee80211_hw *hw, int index, int limit)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_rx_queue *rxq = priv->rxq + index;
	int refilled;

	refilled = 0;
	while (rxq->rx_desc_count < MWL8K_RX_DESCS && limit--) {
		struct sk_buff *skb;
		int rx;

		skb = dev_alloc_skb(MWL8K_RX_MAXSZ);
		if (skb == NULL)
			break;

		rxq->rx_desc_count++;

		rx = rxq->rx_tail;
		rxq->rx_tail = (rx + 1) % MWL8K_RX_DESCS;

		rxq->rx_desc_area[rx].pkt_phys_addr =
			cpu_to_le32(pci_map_single(priv->pdev, skb->data,
					MWL8K_RX_MAXSZ, DMA_FROM_DEVICE));

		rxq->rx_desc_area[rx].pkt_len = cpu_to_le16(MWL8K_RX_MAXSZ);
		rxq->rx_skb[rx] = skb;
		wmb();
		rxq->rx_desc_area[rx].rx_ctrl = 0;

		refilled++;
	}

	return refilled;
}

/* Must be called only when the card's reception is completely halted */
static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_rx_queue *rxq = priv->rxq + index;
	int i;

	for (i = 0; i < MWL8K_RX_DESCS; i++) {
		if (rxq->rx_skb[i] != NULL) {
			unsigned long addr;

			addr = le32_to_cpu(rxq->rx_desc_area[i].pkt_phys_addr);
			pci_unmap_single(priv->pdev, addr, MWL8K_RX_MAXSZ,
					 PCI_DMA_FROMDEVICE);
			kfree_skb(rxq->rx_skb[i]);
			rxq->rx_skb[i] = NULL;
		}
	}

	kfree(rxq->rx_skb);
	rxq->rx_skb = NULL;

	pci_free_consistent(priv->pdev,
			    MWL8K_RX_DESCS * sizeof(struct mwl8k_rx_desc),
			    rxq->rx_desc_area, rxq->rx_desc_dma);
	rxq->rx_desc_area = NULL;
}

/*
 * Scan a list of BSSIDs to process for finalize join.
 * Allows for extension to process multiple BSSIDs.
 */
static inline int
mwl8k_capture_bssid(struct mwl8k_priv *priv, struct ieee80211_hdr *wh)
{
	return priv->capture_beacon &&
		ieee80211_is_beacon(wh->frame_control) &&
		!compare_ether_addr(wh->addr3, priv->capture_bssid);
}

static inline void mwl8k_save_beacon(struct mwl8k_priv *priv,
					struct sk_buff *skb)
{
	priv->capture_beacon = false;
	memset(priv->capture_bssid, 0, ETH_ALEN);

	/*
	 * Use GFP_ATOMIC as rxq_process is called from
	 * the primary interrupt handler, memory allocation call
	 * must not sleep.
	 */
	priv->beacon_skb = skb_copy(skb, GFP_ATOMIC);
	if (priv->beacon_skb != NULL)
		queue_work(priv->config_wq,
			   &priv->finalize_join_worker);
}
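
/*
 * Pass received frames up to mac80211.  For every descriptor the host
 * owns at rx_head: detach and unmap the buffer, strip the DMA header,
 * optionally snapshot a beacon for a pending finalize-join, fill in an
 * ieee80211_rx_status from the descriptor fields and hand the skb over
 * with ieee80211_rx_irqsafe().  Processing stops after 'limit' frames;
 * the return value is the number of frames consumed.
 */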
static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_rx_queue *rxq = priv->rxq + index;
	int processed;

	processed = 0;
	while (rxq->rx_desc_count && limit--) {
		struct mwl8k_rx_desc *rx_desc;
		struct sk_buff *skb;
		struct ieee80211_rx_status status;
		unsigned long addr;
		struct ieee80211_hdr *wh;

		rx_desc = rxq->rx_desc_area + rxq->rx_head;
		if (!(rx_desc->rx_ctrl & MWL8K_RX_CTRL_OWNED_BY_HOST))
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_head];
		if (skb == NULL)
			break;
		rxq->rx_skb[rxq->rx_head] = NULL;

		rxq->rx_head = (rxq->rx_head + 1) % MWL8K_RX_DESCS;
		rxq->rx_desc_count--;

		addr = le32_to_cpu(rx_desc->pkt_phys_addr);
		pci_unmap_single(priv->pdev, addr,
					MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);

		skb_put(skb, le16_to_cpu(rx_desc->pkt_len));
		if (mwl8k_remove_dma_header(skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		wh = (struct ieee80211_hdr *)skb->data;

		/*
		 * Check for pending join operation. save a copy of
		 * the beacon and schedule a tasklet to send finalize
		 * join command to the firmware.
		 */
		if (mwl8k_capture_bssid(priv, wh))
			mwl8k_save_beacon(priv, skb);

		memset(&status, 0, sizeof(status));
		status.mactime = 0;
		status.signal = -rx_desc->rssi;
		status.noise = -rx_desc->noise_level;
		status.qual = rx_desc->link_quality;
		status.antenna = 1;
		status.rate_idx = 1;
		status.flag = 0;
		status.band = IEEE80211_BAND_2GHZ;
		status.freq = ieee80211_channel_to_frequency(rx_desc->channel);
		memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
		ieee80211_rx_irqsafe(hw, skb);

		processed++;
	}

	return processed;
}

/*
 * Packet transmission.
 */

/* Transmit queue assignment. */
enum {
	MWL8K_WME_AC_BK = 0,	/* background access */
	MWL8K_WME_AC_BE = 1,	/* best effort access */
	MWL8K_WME_AC_VI = 2,	/* video access */
	MWL8K_WME_AC_VO = 3,	/* voice access */
};

/* Transmit packet ACK policy */
#define MWL8K_TXD_ACK_POLICY_NORMAL 0
#define MWL8K_TXD_ACK_POLICY_BLOCKACK 3

#define GET_TXQ(_ac) (\
	((_ac) == WME_AC_VO) ? MWL8K_WME_AC_VO : \
	((_ac) == WME_AC_VI) ? MWL8K_WME_AC_VI : \
	((_ac) == WME_AC_BK) ? MWL8K_WME_AC_BK : \
	MWL8K_WME_AC_BE)

#define MWL8K_TXD_STATUS_OK 0x00000001
#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002
#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004
#define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008
#define MWL8K_TXD_STATUS_FW_OWNED 0x80000000

struct mwl8k_tx_desc {
	__le32 status;
	__u8 data_rate;
	__u8 tx_priority;
	__le16 qos_control;
	__le32 pkt_phys_addr;
	__le16 pkt_len;
	__u8 dest_MAC_addr[ETH_ALEN];
	__le32 next_tx_desc_phys_addr;
	__le32 reserved;
	__le16 rate_info;
	__u8 peer_id;
	__u8 tx_frag_cnt;
} __attribute__((packed));

#define MWL8K_TX_DESCS 128
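
/*
 * Set up one transmit ring, mirroring mwl8k_rxq_init(): a coherent DMA
 * descriptor array chained into a circle through next_tx_desc_phys_addr,
 * plus a parallel mwl8k_skb table that keeps both the DMA-mangled skb
 * and (when unsharing was needed) the original skb to hand back to the
 * stack.
 */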
static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_tx_queue *txq = priv->txq + index;
	int size;
	int i;

	memset(&txq->tx_stats, 0, sizeof(struct ieee80211_tx_queue_stats));
	txq->tx_stats.limit = MWL8K_TX_DESCS;
	txq->tx_head = 0;
	txq->tx_tail = 0;

	size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc);

	txq->tx_desc_area =
		pci_alloc_consistent(priv->pdev, size, &txq->tx_desc_dma);
	if (txq->tx_desc_area == NULL) {
		printk(KERN_ERR "%s: failed to alloc TX descriptors\n",
		       priv->name);
		return -ENOMEM;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->tx_skb),
			      GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		printk(KERN_ERR "%s: failed to alloc TX skbuff list\n",
		       priv->name);
		pci_free_consistent(priv->pdev, size,
				    txq->tx_desc_area, txq->tx_desc_dma);
		return -ENOMEM;
	}
	memset(txq->tx_skb, 0, MWL8K_TX_DESCS * sizeof(*txq->tx_skb));

	for (i = 0; i < MWL8K_TX_DESCS; i++) {
		struct mwl8k_tx_desc *tx_desc;
		int nexti;

		tx_desc = txq->tx_desc_area + i;
		nexti = (i + 1) % MWL8K_TX_DESCS;

		tx_desc->status = 0;
		tx_desc->next_tx_desc_phys_addr =
			cpu_to_le32(txq->tx_desc_dma +
						nexti * sizeof(*tx_desc));
	}

	return 0;
}

static inline void mwl8k_tx_start(struct mwl8k_priv *priv)
{
	iowrite32(MWL8K_H2A_INT_PPA_READY,
		priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
	iowrite32(MWL8K_H2A_INT_DUMMY,
		priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
	ioread32(priv->regs + MWL8K_HIU_INT_CODE);
}

static inline int mwl8k_txq_busy(struct mwl8k_priv *priv)
{
	return priv->pending_tx_pkts;
}

struct mwl8k_txq_info {
	u32 fw_owned;
	u32 drv_owned;
	u32 unused;
	u32 len;
	u32 head;
	u32 tail;
};
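
/*
 * Debug helper: under tx_lock, walk every descriptor of each transmit
 * ring and count how many are firmware-owned, driver-owned and unused,
 * together with the queue length and head/tail indices.  Used below to
 * dump ring state when waiting for the TX queues to drain times out.
 */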
static int mwl8k_scan_tx_ring(struct mwl8k_priv *priv,
				struct mwl8k_txq_info txinfo[],
				u32 num_queues)
{
	int count, desc, status;
	struct mwl8k_tx_queue *txq;
	struct mwl8k_tx_desc *tx_desc;
	int ndescs = 0;

	memset(txinfo, 0, num_queues * sizeof(struct mwl8k_txq_info));
	spin_lock_bh(&priv->tx_lock);
	for (count = 0; count < num_queues; count++) {
		txq = priv->txq + count;
		txinfo[count].len = txq->tx_stats.len;
		txinfo[count].head = txq->tx_head;
		txinfo[count].tail = txq->tx_tail;
		for (desc = 0; desc < MWL8K_TX_DESCS; desc++) {
			tx_desc = txq->tx_desc_area + desc;
			status = le32_to_cpu(tx_desc->status);

			if (status & MWL8K_TXD_STATUS_FW_OWNED)
				txinfo[count].fw_owned++;
			else
				txinfo[count].drv_owned++;

			if (tx_desc->pkt_len == 0)
				txinfo[count].unused++;
		}
	}
	spin_unlock_bh(&priv->tx_lock);

	return ndescs;
}

static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw, u32 delay_ms)
{
	struct mwl8k_priv *priv = hw->priv;
	DECLARE_COMPLETION_ONSTACK(cmd_wait);
	u32 count;
	unsigned long timeout;

	might_sleep();

	if (priv->tx_wait != NULL)
		printk(KERN_ERR "WARNING Previous TXWaitEmpty instance\n");

	spin_lock_bh(&priv->tx_lock);
	count = mwl8k_txq_busy(priv);
	if (count) {
		priv->tx_wait = &cmd_wait;
		if (priv->radio_on)
			mwl8k_tx_start(priv);
	}
	spin_unlock_bh(&priv->tx_lock);

	if (count) {
		struct mwl8k_txq_info txinfo[4];
		int index;
		int newcount;

		timeout = wait_for_completion_timeout(&cmd_wait,
					msecs_to_jiffies(delay_ms));
		if (timeout)
			return 0;

		spin_lock_bh(&priv->tx_lock);
		priv->tx_wait = NULL;
		newcount = mwl8k_txq_busy(priv);
		spin_unlock_bh(&priv->tx_lock);

		printk(KERN_ERR "%s(%u) TIMEDOUT:%ums Pend:%u-->%u\n",
		       __func__, __LINE__, delay_ms, count, newcount);

		mwl8k_scan_tx_ring(priv, txinfo, 4);
		for (index = 0; index < 4; index++)
			printk(KERN_ERR
				"TXQ:%u L:%u H:%u T:%u FW:%u DRV:%u U:%u\n",
				index,
				txinfo[index].len,
				txinfo[index].head,
				txinfo[index].tail,
				txinfo[index].fw_owned,
				txinfo[index].drv_owned,
				txinfo[index].unused);

		return -ETIMEDOUT;
	}

	return 0;
}

#define MWL8K_TXD_SUCCESS(status) \
	((status) & (MWL8K_TXD_STATUS_OK | \
		     MWL8K_TXD_STATUS_OK_RETRY | \
		     MWL8K_TXD_STATUS_OK_MORE_RETRY))
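
/*
 * Reap completed transmit descriptors from tx_head onwards (all of
 * them, including firmware-owned ones, when 'force' is set): unmap the
 * buffer, strip the DMA header, restore the original skb if the frame
 * had to be cloned, report ACK status to mac80211 and mark the
 * descriptor unused.  The queue is woken again once frames complete
 * while the radio is on and no reconfiguration is in progress.
 */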
static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_tx_queue *txq = priv->txq + index;
	int wake = 0;

	while (txq->tx_stats.len > 0) {
		int tx;
		int rc;
		struct mwl8k_tx_desc *tx_desc;
		unsigned long addr;
		int size;
		struct sk_buff *skb;
		struct ieee80211_tx_info *info;
		u32 status;

		rc = 0;
		tx = txq->tx_head;
		tx_desc = txq->tx_desc_area + tx;

		status = le32_to_cpu(tx_desc->status);

		if (status & MWL8K_TXD_STATUS_FW_OWNED) {
			if (!force)
				break;
			tx_desc->status &=
				~cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED);
		}

		txq->tx_head = (tx + 1) % MWL8K_TX_DESCS;
		BUG_ON(txq->tx_stats.len == 0);
		txq->tx_stats.len--;
		priv->pending_tx_pkts--;

		addr = le32_to_cpu(tx_desc->pkt_phys_addr);
		size = le16_to_cpu(tx_desc->pkt_len);
		skb = txq->tx_skb[tx].skb;
		txq->tx_skb[tx].skb = NULL;

		BUG_ON(skb == NULL);
		pci_unmap_single(priv->pdev, addr, size, PCI_DMA_TODEVICE);

		rc = mwl8k_remove_dma_header(skb);

		/* Mark descriptor as unused */
		tx_desc->pkt_phys_addr = 0;
		tx_desc->pkt_len = 0;

		if (txq->tx_skb[tx].clone) {
			/* Replace with original skb
			 * before returning to stack
			 * as buffer has been cloned
			 */
			dev_kfree_skb(skb);
			skb = txq->tx_skb[tx].clone;
			txq->tx_skb[tx].clone = NULL;
		}

		if (rc) {
			/* Something has gone wrong here.
			 * Failed to remove DMA header.
			 * Print error message and drop packet.
			 */
			printk(KERN_ERR "%s: Error removing DMA header from "
					"tx skb 0x%p.\n", priv->name, skb);

			dev_kfree_skb(skb);
			continue;
		}

		info = IEEE80211_SKB_CB(skb);
		ieee80211_tx_info_clear_status(info);

		if (MWL8K_TXD_SUCCESS(status))
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_status_irqsafe(hw, skb);

		wake = !priv->inconfig && priv->radio_on;
	}

	if (wake)
		ieee80211_wake_queue(hw, index);
}

/* must be called only when the card's transmit is completely halted */
static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_tx_queue *txq = priv->txq + index;

	mwl8k_txq_reclaim(hw, index, 1);

	kfree(txq->tx_skb);
	txq->tx_skb = NULL;

	pci_free_consistent(priv->pdev,
			    MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc),
			    txq->tx_desc_area, txq->tx_desc_dma);
	txq->tx_desc_area = NULL;
}
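
/*
 * Queue one frame on TX ring 'index': prepend the DMA header (possibly
 * unsharing the skb, in which case the original is kept in the
 * mwl8k_skb slot), assign a driver sequence number if mac80211 asked
 * for one, build the QoS and ACK-policy bits for the descriptor, map
 * the buffer, hand the descriptor to the firmware and ring the
 * transmit doorbell.  The queue is stopped when the ring wraps onto
 * itself.
 */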
static int
mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
{
	struct mwl8k_priv *priv = hw->priv;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_hdr *wh;
	struct mwl8k_tx_queue *txq;
	struct mwl8k_tx_desc *tx;
	struct mwl8k_dma_data *tr;
	struct mwl8k_vif *mwl8k_vif;
	struct sk_buff *org_skb = skb;
	dma_addr_t dma;
	u16 qos = 0;
	bool qosframe = false, ampduframe = false;
	bool mcframe = false, eapolframe = false;
	bool amsduframe = false;
	__le16 fc;

	txq = priv->txq + index;
	tx = txq->tx_desc_area + txq->tx_tail;

	BUG_ON(txq->tx_skb[txq->tx_tail].skb != NULL);

	/*
	 * Append HW DMA header to start of packet. Drop packet if
	 * there is not enough space or a failure to unshare/unclone
	 * the skb.
	 */
	skb = mwl8k_add_dma_header(skb);

	if (skb == NULL) {
		printk(KERN_DEBUG "%s: failed to prepend HW DMA "
			"header, dropping TX frame.\n", priv->name);
		dev_kfree_skb(org_skb);
		return NETDEV_TX_OK;
	}

	tx_info = IEEE80211_SKB_CB(skb);
	mwl8k_vif = MWL8K_VIF(tx_info->control.vif);

	tr = (struct mwl8k_dma_data *)skb->data;
	wh = &tr->wh;
	fc = wh->frame_control;
	qosframe = ieee80211_is_data_qos(fc);
	mcframe = is_multicast_ether_addr(wh->addr1);
	ampduframe = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
	if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		u16 seqno = mwl8k_vif->seqno;

		wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		wh->seq_ctrl |= cpu_to_le16(seqno << 4);

		/* Advance the driver-assigned sequence number. */
		mwl8k_vif->seqno = (seqno + 1) % 4096;
	}
	if (qosframe)
		qos = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(wh)));

	dma = pci_map_single(priv->pdev, skb->data,
				skb->len, PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(priv->pdev, dma)) {
		printk(KERN_DEBUG "%s: failed to dma map skb, "
			"dropping TX frame.\n", priv->name);

		if (org_skb != NULL)
			dev_kfree_skb(org_skb);
		if (skb != NULL)
			dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Set desc header, cpu bit order. */
	tx->status = 0;
	tx->data_rate = 0;
	tx->tx_priority = index;
	tx->qos_control = 0;
	tx->rate_info = 0;
	tx->peer_id = mwl8k_vif->peer_id;

	amsduframe = !!(qos & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT);

	/* Setup firmware control bit fields for each frame type. */
	if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) {
		tx->data_rate = 0;
		qos = mwl8k_qos_setbit_eosp(qos);
		/* Set Queue size to unspecified */
		qos = mwl8k_qos_setbit_qlen(qos, 0xff);
	} else if (ieee80211_is_data(fc)) {
		tx->data_rate = 1;
		if (mcframe)
			tx->status |= MWL8K_TXD_STATUS_MULTICAST_TX;

		/*
		 * Tell firmware to not send EAPOL pkts in an
		 * aggregate. Verify against mac80211 tx path. If
		 * stack turns off AMPDU for an EAPOL frame this
		 * check will be removed.
		 */
		if (eapolframe) {
			qos = mwl8k_qos_setbit_ack(qos,
				MWL8K_TXD_ACK_POLICY_NORMAL);
		} else {
			/* Send pkt in an aggregate if AMPDU frame. */
			if (ampduframe)
				qos = mwl8k_qos_setbit_ack(qos,
					MWL8K_TXD_ACK_POLICY_BLOCKACK);
			else
				qos = mwl8k_qos_setbit_ack(qos,
					MWL8K_TXD_ACK_POLICY_NORMAL);

			if (amsduframe)
				qos = mwl8k_qos_setbit_amsdu(qos);
		}
	}

	/* Convert to little endian */
	tx->qos_control = cpu_to_le16(qos);
	tx->status = cpu_to_le32(tx->status);
	tx->pkt_phys_addr = cpu_to_le32(dma);
	tx->pkt_len = cpu_to_le16(skb->len);

	txq->tx_skb[txq->tx_tail].skb = skb;
	txq->tx_skb[txq->tx_tail].clone =
		skb == org_skb ? NULL : org_skb;

	spin_lock_bh(&priv->tx_lock);

	tx->status = cpu_to_le32(MWL8K_TXD_STATUS_OK |
					MWL8K_TXD_STATUS_FW_OWNED);
	wmb();
	txq->tx_stats.len++;
	priv->pending_tx_pkts++;
	txq->tx_stats.count++;
	txq->tx_tail++;
	if (txq->tx_tail == MWL8K_TX_DESCS)
		txq->tx_tail = 0;
	if (txq->tx_head == txq->tx_tail)
		ieee80211_stop_queue(hw, index);

	if (priv->inconfig) {
		/*
		 * Silently queue packet when we are in the middle of
		 * a config cycle. Notify firmware only if we are
		 * waiting for TXQs to empty. If a packet is sent
		 * before .config() is complete, perhaps it is better
		 * to drop the packet, as the channel is being changed
		 * and the packet will end up on the wrong channel.
		 */
		printk(KERN_ERR "%s(): WARNING TX activity while "
			"in config\n", __func__);

		if (priv->tx_wait != NULL)
			mwl8k_tx_start(priv);
	} else
		mwl8k_tx_start(priv);

	spin_unlock_bh(&priv->tx_lock);

	return NETDEV_TX_OK;
}

/*
 * Command processing.
 */

/* Timeout firmware commands after 2000ms */
#define MWL8K_CMD_TIMEOUT_MS 2000
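
/*
 * Post a command to the firmware and wait for its completion: the
 * command buffer is mapped bidirectionally, its bus address written to
 * the general-purpose pointer register, and the doorbell rung under
 * fw_lock.  The 'hostcmd_wait' completion is expected to be signalled
 * from the interrupt path (not part of this excerpt); on timeout the
 * wait pointer is torn down and -ETIMEDOUT returned, otherwise the
 * result code the firmware wrote back into the buffer decides between
 * success and -EINVAL.
 */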
  1223. static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
  1224. {
  1225. DECLARE_COMPLETION_ONSTACK(cmd_wait);
  1226. struct mwl8k_priv *priv = hw->priv;
  1227. void __iomem *regs = priv->regs;
  1228. dma_addr_t dma_addr;
  1229. unsigned int dma_size;
  1230. int rc;
  1231. unsigned long timeout = 0;
  1232. u8 buf[32];
  1233. cmd->result = 0xFFFF;
  1234. dma_size = le16_to_cpu(cmd->length);
  1235. dma_addr = pci_map_single(priv->pdev, cmd, dma_size,
  1236. PCI_DMA_BIDIRECTIONAL);
  1237. if (pci_dma_mapping_error(priv->pdev, dma_addr))
  1238. return -ENOMEM;
  1239. if (priv->hostcmd_wait != NULL)
  1240. printk(KERN_ERR "WARNING host command in progress\n");
  1241. spin_lock_irq(&priv->fw_lock);
  1242. priv->hostcmd_wait = &cmd_wait;
  1243. iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR);
  1244. iowrite32(MWL8K_H2A_INT_DOORBELL,
  1245. regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
  1246. iowrite32(MWL8K_H2A_INT_DUMMY,
  1247. regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
  1248. spin_unlock_irq(&priv->fw_lock);
  1249. timeout = wait_for_completion_timeout(&cmd_wait,
  1250. msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS));
  1251. pci_unmap_single(priv->pdev, dma_addr, dma_size,
  1252. PCI_DMA_BIDIRECTIONAL);
  1253. if (!timeout) {
  1254. spin_lock_irq(&priv->fw_lock);
  1255. priv->hostcmd_wait = NULL;
  1256. spin_unlock_irq(&priv->fw_lock);
  1257. printk(KERN_ERR "%s: Command %s timeout after %u ms\n",
  1258. priv->name,
  1259. mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
  1260. MWL8K_CMD_TIMEOUT_MS);
  1261. rc = -ETIMEDOUT;
  1262. } else {
  1263. rc = cmd->result ? -EINVAL : 0;
  1264. if (rc)
  1265. printk(KERN_ERR "%s: Command %s error 0x%x\n",
  1266. priv->name,
  1267. mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
  1268. cmd->result);
  1269. }
  1270. return rc;
  1271. }
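/*
 * The command wrappers below all follow the same pattern around
 * mwl8k_post_cmd(): allocate the command, fill in the little-endian
 * header, post it, free it.  A minimal sketch (mirroring
 * mwl8k_cmd_set_pre_scan() further down):
 *
 *	struct mwl8k_cmd_set_pre_scan *cmd;
 *	int rc;
 *
 *	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 *	if (cmd == NULL)
 *		return -ENOMEM;
 *	cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_PRE_SCAN);
 *	cmd->header.length = cpu_to_le16(sizeof(*cmd));
 *	rc = mwl8k_post_cmd(hw, &cmd->header);
 *	kfree(cmd);
 *	return rc;
 */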
  1272. /*
  1273. * GET_HW_SPEC.
  1274. */
  1275. struct mwl8k_cmd_get_hw_spec {
  1276. struct mwl8k_cmd_pkt header;
  1277. __u8 hw_rev;
  1278. __u8 host_interface;
  1279. __le16 num_mcaddrs;
  1280. __u8 perm_addr[ETH_ALEN];
  1281. __le16 region_code;
  1282. __le32 fw_rev;
  1283. __le32 ps_cookie;
  1284. __le32 caps;
  1285. __u8 mcs_bitmap[16];
  1286. __le32 rx_queue_ptr;
  1287. __le32 num_tx_queues;
  1288. __le32 tx_queue_ptrs[MWL8K_TX_QUEUES];
  1289. __le32 caps2;
  1290. __le32 num_tx_desc_per_queue;
  1291. __le32 total_rx_desc;
  1292. } __attribute__((packed));
  1293. static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
  1294. {
  1295. struct mwl8k_priv *priv = hw->priv;
  1296. struct mwl8k_cmd_get_hw_spec *cmd;
  1297. int rc;
  1298. int i;
  1299. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1300. if (cmd == NULL)
  1301. return -ENOMEM;
  1302. cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_HW_SPEC);
  1303. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1304. memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
  1305. cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
  1306. cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rx_desc_dma);
  1307. cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
  1308. for (i = 0; i < MWL8K_TX_QUEUES; i++)
  1309. cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].tx_desc_dma);
  1310. cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
  1311. cmd->total_rx_desc = cpu_to_le32(MWL8K_RX_DESCS);
  1312. rc = mwl8k_post_cmd(hw, &cmd->header);
  1313. if (!rc) {
  1314. SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr);
  1315. priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
  1316. priv->fw_rev = le32_to_cpu(cmd->fw_rev);
  1317. priv->hw_rev = cmd->hw_rev;
  1318. }
  1319. kfree(cmd);
  1320. return rc;
  1321. }
  1322. /*
  1323. * CMD_MAC_MULTICAST_ADR.
  1324. */
  1325. struct mwl8k_cmd_mac_multicast_adr {
  1326. struct mwl8k_cmd_pkt header;
  1327. __le16 action;
  1328. __le16 numaddr;
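	/* Variable-length list of 'numaddr' MAC addresses follows. */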
  1329. __u8 addr[0][ETH_ALEN];
  1330. };
  1331. #define MWL8K_ENABLE_RX_MULTICAST 0x000F
  1332. static struct mwl8k_cmd_pkt *
  1333. __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw,
  1334. int mc_count, struct dev_addr_list *mclist)
  1335. {
  1336. struct mwl8k_priv *priv = hw->priv;
  1337. struct mwl8k_cmd_mac_multicast_adr *cmd;
  1338. int size;
  1339. int i;
  1340. if (mc_count > priv->num_mcaddrs)
  1341. mc_count = priv->num_mcaddrs;
  1342. size = sizeof(*cmd) + mc_count * ETH_ALEN;
  1343. cmd = kzalloc(size, GFP_ATOMIC);
  1344. if (cmd == NULL)
  1345. return NULL;
  1346. cmd->header.code = cpu_to_le16(MWL8K_CMD_MAC_MULTICAST_ADR);
  1347. cmd->header.length = cpu_to_le16(size);
  1348. cmd->action = cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
  1349. cmd->numaddr = cpu_to_le16(mc_count);
  1350. for (i = 0; i < mc_count && mclist; i++) {
  1351. if (mclist->da_addrlen != ETH_ALEN) {
  1352. kfree(cmd);
  1353. return NULL;
  1354. }
  1355. memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN);
  1356. mclist = mclist->next;
  1357. }
  1358. return &cmd->header;
  1359. }
  1360. /*
  1361. * CMD_802_11_GET_STAT.
  1362. */
  1363. struct mwl8k_cmd_802_11_get_stat {
  1364. struct mwl8k_cmd_pkt header;
  1365. __le16 action;
  1366. __le32 stats[64];
  1367. } __attribute__((packed));
  1368. #define MWL8K_STAT_ACK_FAILURE 9
  1369. #define MWL8K_STAT_RTS_FAILURE 12
  1370. #define MWL8K_STAT_FCS_ERROR 24
  1371. #define MWL8K_STAT_RTS_SUCCESS 11
  1372. static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
  1373. struct ieee80211_low_level_stats *stats)
  1374. {
  1375. struct mwl8k_cmd_802_11_get_stat *cmd;
  1376. int rc;
  1377. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1378. if (cmd == NULL)
  1379. return -ENOMEM;
  1380. cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_STAT);
  1381. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1382. cmd->action = cpu_to_le16(MWL8K_CMD_GET);
  1383. rc = mwl8k_post_cmd(hw, &cmd->header);
  1384. if (!rc) {
  1385. stats->dot11ACKFailureCount =
  1386. le32_to_cpu(cmd->stats[MWL8K_STAT_ACK_FAILURE]);
  1387. stats->dot11RTSFailureCount =
  1388. le32_to_cpu(cmd->stats[MWL8K_STAT_RTS_FAILURE]);
  1389. stats->dot11FCSErrorCount =
  1390. le32_to_cpu(cmd->stats[MWL8K_STAT_FCS_ERROR]);
  1391. stats->dot11RTSSuccessCount =
  1392. le32_to_cpu(cmd->stats[MWL8K_STAT_RTS_SUCCESS]);
  1393. }
  1394. kfree(cmd);
  1395. return rc;
  1396. }
  1397. /*
  1398. * CMD_802_11_RADIO_CONTROL.
  1399. */
  1400. struct mwl8k_cmd_802_11_radio_control {
  1401. struct mwl8k_cmd_pkt header;
  1402. __le16 action;
  1403. __le16 control;
  1404. __le16 radio_on;
  1405. } __attribute__((packed));
  1406. static int
  1407. mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
  1408. {
  1409. struct mwl8k_priv *priv = hw->priv;
  1410. struct mwl8k_cmd_802_11_radio_control *cmd;
  1411. int rc;
  1412. if (enable == priv->radio_on && !force)
  1413. return 0;
  1414. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1415. if (cmd == NULL)
  1416. return -ENOMEM;
  1417. cmd->header.code = cpu_to_le16(MWL8K_CMD_RADIO_CONTROL);
  1418. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1419. cmd->action = cpu_to_le16(MWL8K_CMD_SET);
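	/* Assumed encoding: control 1 selects long preamble, 3 short preamble. */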
  1420. cmd->control = cpu_to_le16(priv->radio_short_preamble ? 3 : 1);
  1421. cmd->radio_on = cpu_to_le16(enable ? 0x0001 : 0x0000);
  1422. rc = mwl8k_post_cmd(hw, &cmd->header);
  1423. kfree(cmd);
  1424. if (!rc)
  1425. priv->radio_on = enable;
  1426. return rc;
  1427. }
  1428. static int mwl8k_cmd_802_11_radio_disable(struct ieee80211_hw *hw)
  1429. {
  1430. return mwl8k_cmd_802_11_radio_control(hw, 0, 0);
  1431. }
  1432. static int mwl8k_cmd_802_11_radio_enable(struct ieee80211_hw *hw)
  1433. {
  1434. return mwl8k_cmd_802_11_radio_control(hw, 1, 0);
  1435. }
  1436. static int
  1437. mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
  1438. {
  1439. struct mwl8k_priv *priv;
  1440. if (hw == NULL || hw->priv == NULL)
  1441. return -EINVAL;
  1442. priv = hw->priv;
  1443. priv->radio_short_preamble = short_preamble;
  1444. return mwl8k_cmd_802_11_radio_control(hw, 1, 1);
  1445. }
  1446. /*
  1447. * CMD_802_11_RF_TX_POWER.
  1448. */
  1449. #define MWL8K_TX_POWER_LEVEL_TOTAL 8
  1450. struct mwl8k_cmd_802_11_rf_tx_power {
  1451. struct mwl8k_cmd_pkt header;
  1452. __le16 action;
  1453. __le16 support_level;
  1454. __le16 current_level;
  1455. __le16 reserved;
  1456. __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
  1457. } __attribute__((packed));
  1458. static int mwl8k_cmd_802_11_rf_tx_power(struct ieee80211_hw *hw, int dBm)
  1459. {
  1460. struct mwl8k_cmd_802_11_rf_tx_power *cmd;
  1461. int rc;
  1462. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1463. if (cmd == NULL)
  1464. return -ENOMEM;
  1465. cmd->header.code = cpu_to_le16(MWL8K_CMD_RF_TX_POWER);
  1466. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1467. cmd->action = cpu_to_le16(MWL8K_CMD_SET);
  1468. cmd->support_level = cpu_to_le16(dBm);
  1469. rc = mwl8k_post_cmd(hw, &cmd->header);
  1470. kfree(cmd);
  1471. return rc;
  1472. }
  1473. /*
  1474. * CMD_SET_PRE_SCAN.
  1475. */
  1476. struct mwl8k_cmd_set_pre_scan {
  1477. struct mwl8k_cmd_pkt header;
  1478. } __attribute__((packed));
  1479. static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
  1480. {
  1481. struct mwl8k_cmd_set_pre_scan *cmd;
  1482. int rc;
  1483. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1484. if (cmd == NULL)
  1485. return -ENOMEM;
  1486. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_PRE_SCAN);
  1487. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1488. rc = mwl8k_post_cmd(hw, &cmd->header);
  1489. kfree(cmd);
  1490. return rc;
  1491. }
  1492. /*
  1493. * CMD_SET_POST_SCAN.
  1494. */
  1495. struct mwl8k_cmd_set_post_scan {
  1496. struct mwl8k_cmd_pkt header;
  1497. __le32 isibss;
  1498. __u8 bssid[ETH_ALEN];
  1499. } __attribute__((packed));
  1500. static int
  1501. mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, __u8 *mac)
  1502. {
  1503. struct mwl8k_cmd_set_post_scan *cmd;
  1504. int rc;
  1505. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1506. if (cmd == NULL)
  1507. return -ENOMEM;
  1508. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_POST_SCAN);
  1509. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1510. cmd->isibss = 0;
  1511. memcpy(cmd->bssid, mac, ETH_ALEN);
  1512. rc = mwl8k_post_cmd(hw, &cmd->header);
  1513. kfree(cmd);
  1514. return rc;
  1515. }
  1516. /*
  1517. * CMD_SET_RF_CHANNEL.
  1518. */
  1519. struct mwl8k_cmd_set_rf_channel {
  1520. struct mwl8k_cmd_pkt header;
  1521. __le16 action;
  1522. __u8 current_channel;
  1523. __le32 channel_flags;
  1524. } __attribute__((packed));
  1525. static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
  1526. struct ieee80211_channel *channel)
  1527. {
  1528. struct mwl8k_cmd_set_rf_channel *cmd;
  1529. int rc;
  1530. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1531. if (cmd == NULL)
  1532. return -ENOMEM;
  1533. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RF_CHANNEL);
  1534. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1535. cmd->action = cpu_to_le16(MWL8K_CMD_SET);
  1536. cmd->current_channel = channel->hw_value;
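	/*
	 * Assumed channel_flags encoding (based on later revisions of
	 * this driver): bit 0 selects the 2.4 GHz band and bit 7 a
	 * 20 MHz channel width, hence 0x81 for 2.4 GHz channels.
	 */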
  1537. if (channel->band == IEEE80211_BAND_2GHZ)
  1538. cmd->channel_flags = cpu_to_le32(0x00000081);
  1539. else
  1540. cmd->channel_flags = cpu_to_le32(0x00000000);
  1541. rc = mwl8k_post_cmd(hw, &cmd->header);
  1542. kfree(cmd);
  1543. return rc;
  1544. }
  1545. /*
  1546. * CMD_SET_SLOT.
  1547. */
  1548. struct mwl8k_cmd_set_slot {
  1549. struct mwl8k_cmd_pkt header;
  1550. __le16 action;
  1551. __u8 short_slot;
  1552. } __attribute__((packed));
  1553. static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
  1554. {
  1555. struct mwl8k_cmd_set_slot *cmd;
  1556. int rc;
  1557. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1558. if (cmd == NULL)
  1559. return -ENOMEM;
  1560. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
  1561. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1562. cmd->action = cpu_to_le16(MWL8K_CMD_SET);
  1563. cmd->short_slot = short_slot_time;
  1564. rc = mwl8k_post_cmd(hw, &cmd->header);
  1565. kfree(cmd);
  1566. return rc;
  1567. }
  1568. /*
  1569. * CMD_MIMO_CONFIG.
  1570. */
  1571. struct mwl8k_cmd_mimo_config {
  1572. struct mwl8k_cmd_pkt header;
  1573. __le32 action;
  1574. __u8 rx_antenna_map;
  1575. __u8 tx_antenna_map;
  1576. } __attribute__((packed));
  1577. static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
  1578. {
  1579. struct mwl8k_cmd_mimo_config *cmd;
  1580. int rc;
  1581. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1582. if (cmd == NULL)
  1583. return -ENOMEM;
  1584. cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
  1585. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1586. cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
  1587. cmd->rx_antenna_map = rx;
  1588. cmd->tx_antenna_map = tx;
  1589. rc = mwl8k_post_cmd(hw, &cmd->header);
  1590. kfree(cmd);
  1591. return rc;
  1592. }
  1593. /*
  1594. * CMD_ENABLE_SNIFFER.
  1595. */
  1596. struct mwl8k_cmd_enable_sniffer {
  1597. struct mwl8k_cmd_pkt header;
  1598. __le32 action;
  1599. } __attribute__((packed));
  1600. static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
  1601. {
  1602. struct mwl8k_cmd_enable_sniffer *cmd;
  1603. int rc;
  1604. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1605. if (cmd == NULL)
  1606. return -ENOMEM;
  1607. cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
  1608. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1609. cmd->action = cpu_to_le32(!!enable);
  1610. rc = mwl8k_post_cmd(hw, &cmd->header);
  1611. kfree(cmd);
  1612. return rc;
  1613. }
  1614. /*
  1615. * CMD_SET_RATEADAPT_MODE.
  1616. */
  1617. struct mwl8k_cmd_set_rate_adapt_mode {
  1618. struct mwl8k_cmd_pkt header;
  1619. __le16 action;
  1620. __le16 mode;
  1621. } __attribute__((packed));
  1622. static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode)
  1623. {
  1624. struct mwl8k_cmd_set_rate_adapt_mode *cmd;
  1625. int rc;
  1626. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1627. if (cmd == NULL)
  1628. return -ENOMEM;
  1629. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
  1630. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1631. cmd->action = cpu_to_le16(MWL8K_CMD_SET);
  1632. cmd->mode = cpu_to_le16(mode);
  1633. rc = mwl8k_post_cmd(hw, &cmd->header);
  1634. kfree(cmd);
  1635. return rc;
  1636. }
  1637. /*
  1638. * CMD_SET_WMM_MODE.
  1639. */
  1640. struct mwl8k_cmd_set_wmm {
  1641. struct mwl8k_cmd_pkt header;
  1642. __le16 action;
  1643. } __attribute__((packed));
  1644. static int mwl8k_set_wmm(struct ieee80211_hw *hw, bool enable)
  1645. {
  1646. struct mwl8k_priv *priv = hw->priv;
  1647. struct mwl8k_cmd_set_wmm *cmd;
  1648. int rc;
  1649. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1650. if (cmd == NULL)
  1651. return -ENOMEM;
  1652. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
  1653. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1654. cmd->action = cpu_to_le16(!!enable);
  1655. rc = mwl8k_post_cmd(hw, &cmd->header);
  1656. kfree(cmd);
  1657. if (!rc)
  1658. priv->wmm_enabled = enable;
  1659. return rc;
  1660. }
  1661. /*
  1662. * CMD_SET_RTS_THRESHOLD.
  1663. */
  1664. struct mwl8k_cmd_rts_threshold {
  1665. struct mwl8k_cmd_pkt header;
  1666. __le16 action;
  1667. __le16 threshold;
  1668. } __attribute__((packed));
  1669. static int mwl8k_rts_threshold(struct ieee80211_hw *hw,
  1670. u16 action, u16 *threshold)
  1671. {
  1672. struct mwl8k_cmd_rts_threshold *cmd;
  1673. int rc;
  1674. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1675. if (cmd == NULL)
  1676. return -ENOMEM;
  1677. cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
  1678. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1679. cmd->action = cpu_to_le16(action);
  1680. cmd->threshold = cpu_to_le16(*threshold);
  1681. rc = mwl8k_post_cmd(hw, &cmd->header);
  1682. kfree(cmd);
  1683. return rc;
  1684. }
  1685. /*
  1686. * CMD_SET_EDCA_PARAMS.
  1687. */
  1688. struct mwl8k_cmd_set_edca_params {
  1689. struct mwl8k_cmd_pkt header;
  1690. /* See MWL8K_SET_EDCA_XXX below */
  1691. __le16 action;
  1692. /* TX opportunity in units of 32 us */
  1693. __le16 txop;
  1694. /* Log exponent of max contention period: 0...15*/
  1695. __u8 log_cw_max;
  1696. /* Log exponent of min contention period: 0...15 */
  1697. __u8 log_cw_min;
  1698. /* Adaptive interframe spacing in units of 32us */
  1699. __u8 aifs;
  1700. /* TX queue to configure */
  1701. __u8 txq;
  1702. } __attribute__((packed));
  1703. #define MWL8K_SET_EDCA_CW 0x01
  1704. #define MWL8K_SET_EDCA_TXOP 0x02
  1705. #define MWL8K_SET_EDCA_AIFS 0x04
  1706. #define MWL8K_SET_EDCA_ALL (MWL8K_SET_EDCA_CW | \
  1707. MWL8K_SET_EDCA_TXOP | \
  1708. MWL8K_SET_EDCA_AIFS)
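/*
 * Worked example for the log_cw fields above (assuming mac80211 hands
 * down contention windows of the usual 2^n - 1 form): cw_min = 15
 * gives ilog2(15 + 1) = 4 and cw_max = 1023 gives ilog2(1024) = 10,
 * which is what mwl8k_set_edca_params() below programs into the
 * firmware.
 */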
  1709. static int
  1710. mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
  1711. __u16 cw_min, __u16 cw_max,
  1712. __u8 aifs, __u16 txop)
  1713. {
  1714. struct mwl8k_cmd_set_edca_params *cmd;
  1715. int rc;
  1716. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1717. if (cmd == NULL)
  1718. return -ENOMEM;
  1719. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
  1720. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1721. cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
  1722. cmd->txop = cpu_to_le16(txop);
  1723. cmd->log_cw_max = (u8)ilog2(cw_max + 1);
  1724. cmd->log_cw_min = (u8)ilog2(cw_min + 1);
  1725. cmd->aifs = aifs;
  1726. cmd->txq = qnum;
  1727. rc = mwl8k_post_cmd(hw, &cmd->header);
  1728. kfree(cmd);
  1729. return rc;
  1730. }
  1731. /*
  1732. * CMD_FINALIZE_JOIN.
  1733. */
/* Finalize-join beacon buffer size is compiled into the firmware. */
  1735. #define MWL8K_FJ_BEACON_MAXLEN 128
  1736. struct mwl8k_cmd_finalize_join {
  1737. struct mwl8k_cmd_pkt header;
  1738. __le32 sleep_interval; /* Number of beacon periods to sleep */
  1739. __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
  1740. } __attribute__((packed));
  1741. static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame,
  1742. __u16 framelen, __u16 dtim)
  1743. {
  1744. struct mwl8k_cmd_finalize_join *cmd;
  1745. struct ieee80211_mgmt *payload = frame;
  1746. u16 hdrlen;
  1747. u32 payload_len;
  1748. int rc;
  1749. if (frame == NULL)
  1750. return -EINVAL;
  1751. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1752. if (cmd == NULL)
  1753. return -ENOMEM;
  1754. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
  1755. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1756. cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
  1757. hdrlen = ieee80211_hdrlen(payload->frame_control);
  1758. payload_len = framelen > hdrlen ? framelen - hdrlen : 0;
	/* XXX TBD Might just have to abort and return an error */
	if (payload_len > MWL8K_FJ_BEACON_MAXLEN) {
		printk(KERN_ERR "%s(): WARNING: Incomplete beacon "
			"sent to firmware. Sz=%u MAX=%u\n", __func__,
			payload_len, MWL8K_FJ_BEACON_MAXLEN);
		payload_len = MWL8K_FJ_BEACON_MAXLEN;
	}
  1766. if (payload && payload_len)
  1767. memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
  1768. rc = mwl8k_post_cmd(hw, &cmd->header);
  1769. kfree(cmd);
  1770. return rc;
  1771. }
  1772. /*
  1773. * CMD_UPDATE_STADB.
  1774. */
  1775. struct mwl8k_cmd_update_sta_db {
  1776. struct mwl8k_cmd_pkt header;
  1777. /* See STADB_ACTION_TYPE */
  1778. __le32 action;
  1779. /* Peer MAC address */
  1780. __u8 peer_addr[ETH_ALEN];
  1781. __le32 reserved;
  1782. /* Peer info - valid during add/update. */
  1783. struct peer_capability_info peer_info;
  1784. } __attribute__((packed));
  1785. static int mwl8k_cmd_update_sta_db(struct ieee80211_hw *hw,
  1786. struct ieee80211_vif *vif, __u32 action)
  1787. {
  1788. struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
  1789. struct ieee80211_bss_conf *info = &mv_vif->bss_info;
  1790. struct mwl8k_cmd_update_sta_db *cmd;
  1791. struct peer_capability_info *peer_info;
  1792. struct ieee80211_rate *bitrates = mv_vif->legacy_rates;
  1793. int rc;
  1794. __u8 count, *rates;
  1795. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1796. if (cmd == NULL)
  1797. return -ENOMEM;
  1798. cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
  1799. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1800. cmd->action = cpu_to_le32(action);
  1801. peer_info = &cmd->peer_info;
  1802. memcpy(cmd->peer_addr, mv_vif->bssid, ETH_ALEN);
  1803. switch (action) {
  1804. case MWL8K_STA_DB_ADD_ENTRY:
  1805. case MWL8K_STA_DB_MODIFY_ENTRY:
  1806. /* Build peer_info block */
  1807. peer_info->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
  1808. peer_info->basic_caps = cpu_to_le16(info->assoc_capability);
  1809. peer_info->interop = 1;
  1810. peer_info->amsdu_enabled = 0;
  1811. rates = peer_info->legacy_rates;
  1812. for (count = 0; count < mv_vif->legacy_nrates; count++)
  1813. rates[count] = bitrates[count].hw_value;
  1814. rc = mwl8k_post_cmd(hw, &cmd->header);
  1815. if (rc == 0)
  1816. mv_vif->peer_id = peer_info->station_id;
  1817. break;
  1818. case MWL8K_STA_DB_DEL_ENTRY:
  1819. case MWL8K_STA_DB_FLUSH:
  1820. default:
  1821. rc = mwl8k_post_cmd(hw, &cmd->header);
  1822. if (rc == 0)
  1823. mv_vif->peer_id = 0;
  1824. break;
  1825. }
  1826. kfree(cmd);
  1827. return rc;
  1828. }
  1829. /*
  1830. * CMD_SET_AID.
  1831. */
  1832. #define MWL8K_RATE_INDEX_MAX_ARRAY 14
  1833. #define MWL8K_FRAME_PROT_DISABLED 0x00
  1834. #define MWL8K_FRAME_PROT_11G 0x07
  1835. #define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
  1836. #define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
  1837. struct mwl8k_cmd_update_set_aid {
  1838. struct mwl8k_cmd_pkt header;
  1839. __le16 aid;
  1840. /* AP's MAC address (BSSID) */
  1841. __u8 bssid[ETH_ALEN];
  1842. __le16 protection_mode;
  1843. __u8 supp_rates[MWL8K_RATE_INDEX_MAX_ARRAY];
  1844. } __attribute__((packed));
  1845. static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
  1846. struct ieee80211_vif *vif)
  1847. {
  1848. struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
  1849. struct ieee80211_bss_conf *info = &mv_vif->bss_info;
  1850. struct mwl8k_cmd_update_set_aid *cmd;
  1851. struct ieee80211_rate *bitrates = mv_vif->legacy_rates;
  1852. int count;
  1853. u16 prot_mode;
  1854. int rc;
  1855. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1856. if (cmd == NULL)
  1857. return -ENOMEM;
  1858. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
  1859. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1860. cmd->aid = cpu_to_le16(info->aid);
  1861. memcpy(cmd->bssid, mv_vif->bssid, ETH_ALEN);
  1862. if (info->use_cts_prot) {
  1863. prot_mode = MWL8K_FRAME_PROT_11G;
  1864. } else {
  1865. switch (info->ht_operation_mode &
  1866. IEEE80211_HT_OP_MODE_PROTECTION) {
  1867. case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
  1868. prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
  1869. break;
  1870. case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
  1871. prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
  1872. break;
  1873. default:
  1874. prot_mode = MWL8K_FRAME_PROT_DISABLED;
  1875. break;
  1876. }
  1877. }
  1878. cmd->protection_mode = cpu_to_le16(prot_mode);
  1879. for (count = 0; count < mv_vif->legacy_nrates; count++)
  1880. cmd->supp_rates[count] = bitrates[count].hw_value;
  1881. rc = mwl8k_post_cmd(hw, &cmd->header);
  1882. kfree(cmd);
  1883. return rc;
  1884. }
  1885. /*
  1886. * CMD_SET_RATE.
  1887. */
  1888. struct mwl8k_cmd_update_rateset {
  1889. struct mwl8k_cmd_pkt header;
  1890. __u8 legacy_rates[MWL8K_RATE_INDEX_MAX_ARRAY];
  1891. /* Bitmap for supported MCS codes. */
  1892. __u8 mcs_set[MWL8K_IEEE_LEGACY_DATA_RATES];
  1893. __u8 reserved[MWL8K_IEEE_LEGACY_DATA_RATES];
  1894. } __attribute__((packed));
  1895. static int mwl8k_update_rateset(struct ieee80211_hw *hw,
  1896. struct ieee80211_vif *vif)
  1897. {
  1898. struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
  1899. struct mwl8k_cmd_update_rateset *cmd;
  1900. struct ieee80211_rate *bitrates = mv_vif->legacy_rates;
  1901. int count;
  1902. int rc;
  1903. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1904. if (cmd == NULL)
  1905. return -ENOMEM;
  1906. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
  1907. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1908. for (count = 0; count < mv_vif->legacy_nrates; count++)
  1909. cmd->legacy_rates[count] = bitrates[count].hw_value;
  1910. rc = mwl8k_post_cmd(hw, &cmd->header);
  1911. kfree(cmd);
  1912. return rc;
  1913. }
  1914. /*
  1915. * CMD_USE_FIXED_RATE.
  1916. */
  1917. #define MWL8K_RATE_TABLE_SIZE 8
  1918. #define MWL8K_UCAST_RATE 0
  1919. #define MWL8K_USE_AUTO_RATE 0x0002
  1920. struct mwl8k_rate_entry {
  1921. /* Set to 1 if HT rate, 0 if legacy. */
  1922. __le32 is_ht_rate;
  1923. /* Set to 1 to use retry_count field. */
  1924. __le32 enable_retry;
  1925. /* Specified legacy rate or MCS. */
  1926. __le32 rate;
  1927. /* Number of allowed retries. */
  1928. __le32 retry_count;
  1929. } __attribute__((packed));
  1930. struct mwl8k_rate_table {
  1931. /* 1 to allow specified rate and below */
  1932. __le32 allow_rate_drop;
  1933. __le32 num_rates;
  1934. struct mwl8k_rate_entry rate_entry[MWL8K_RATE_TABLE_SIZE];
  1935. } __attribute__((packed));
  1936. struct mwl8k_cmd_use_fixed_rate {
  1937. struct mwl8k_cmd_pkt header;
  1938. __le32 action;
  1939. struct mwl8k_rate_table rate_table;
  1940. /* Unicast, Broadcast or Multicast */
  1941. __le32 rate_type;
  1942. __le32 reserved1;
  1943. __le32 reserved2;
  1944. } __attribute__((packed));
  1945. static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw,
  1946. u32 action, u32 rate_type, struct mwl8k_rate_table *rate_table)
  1947. {
  1948. struct mwl8k_cmd_use_fixed_rate *cmd;
  1949. int count;
  1950. int rc;
  1951. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1952. if (cmd == NULL)
  1953. return -ENOMEM;
  1954. cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
  1955. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1956. cmd->action = cpu_to_le32(action);
  1957. cmd->rate_type = cpu_to_le32(rate_type);
  1958. if (rate_table != NULL) {
		/*
		 * Copy each field individually so that the
		 * CPU-to-little-endian conversion can be applied.
		 */
  1962. cmd->rate_table.allow_rate_drop =
  1963. cpu_to_le32(rate_table->allow_rate_drop);
  1964. cmd->rate_table.num_rates =
  1965. cpu_to_le32(rate_table->num_rates);
  1966. for (count = 0; count < rate_table->num_rates; count++) {
  1967. struct mwl8k_rate_entry *dst =
  1968. &cmd->rate_table.rate_entry[count];
  1969. struct mwl8k_rate_entry *src =
  1970. &rate_table->rate_entry[count];
  1971. dst->is_ht_rate = cpu_to_le32(src->is_ht_rate);
  1972. dst->enable_retry = cpu_to_le32(src->enable_retry);
  1973. dst->rate = cpu_to_le32(src->rate);
  1974. dst->retry_count = cpu_to_le32(src->retry_count);
  1975. }
  1976. }
  1977. rc = mwl8k_post_cmd(hw, &cmd->header);
  1978. kfree(cmd);
  1979. return rc;
  1980. }
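/*
 * Illustrative sketch only: this driver (see mwl8k_bss_info_changed_wt()
 * below) only ever calls this with MWL8K_USE_AUTO_RATE and a NULL
 * table.  Pinning a single legacy rate would look roughly like the
 * following, where "rate" is a struct ieee80211_rate pointer and
 * MWL8K_FIXED_RATE_ACTION is a hypothetical placeholder for the fixed
 * rate action code, which is not defined in this file:
 *
 *	struct mwl8k_rate_table rt;
 *
 *	memset(&rt, 0, sizeof(rt));
 *	rt.allow_rate_drop = 1;
 *	rt.num_rates = 1;
 *	rt.rate_entry[0].is_ht_rate = 0;
 *	rt.rate_entry[0].rate = rate->hw_value;
 *	rt.rate_entry[0].enable_retry = 1;
 *	rt.rate_entry[0].retry_count = 4;
 *	rc = mwl8k_cmd_use_fixed_rate(hw, MWL8K_FIXED_RATE_ACTION,
 *				      MWL8K_UCAST_RATE, &rt);
 */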
  1981. /*
  1982. * Interrupt handling.
  1983. */
  1984. static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
  1985. {
  1986. struct ieee80211_hw *hw = dev_id;
  1987. struct mwl8k_priv *priv = hw->priv;
  1988. u32 status;
  1989. status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
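	/*
	 * Acknowledge the causes we just sampled by writing back their
	 * complement; interrupt bits raised after the read are left
	 * pending (assumed semantics of the A2H status register).
	 */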
  1990. iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
  1991. if (!status)
  1992. return IRQ_NONE;
  1993. if (status & MWL8K_A2H_INT_TX_DONE)
  1994. tasklet_schedule(&priv->tx_reclaim_task);
  1995. if (status & MWL8K_A2H_INT_RX_READY) {
  1996. while (rxq_process(hw, 0, 1))
  1997. rxq_refill(hw, 0, 1);
  1998. }
  1999. if (status & MWL8K_A2H_INT_OPC_DONE) {
  2000. if (priv->hostcmd_wait != NULL) {
  2001. complete(priv->hostcmd_wait);
  2002. priv->hostcmd_wait = NULL;
  2003. }
  2004. }
  2005. if (status & MWL8K_A2H_INT_QUEUE_EMPTY) {
  2006. if (!priv->inconfig &&
  2007. priv->radio_on &&
  2008. mwl8k_txq_busy(priv))
  2009. mwl8k_tx_start(priv);
  2010. }
  2011. return IRQ_HANDLED;
  2012. }
  2013. /*
  2014. * Core driver operations.
  2015. */
  2016. static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
  2017. {
  2018. struct mwl8k_priv *priv = hw->priv;
  2019. int index = skb_get_queue_mapping(skb);
  2020. int rc;
  2021. if (priv->current_channel == NULL) {
  2022. printk(KERN_DEBUG "%s: dropped TX frame since radio "
  2023. "disabled\n", priv->name);
  2024. dev_kfree_skb(skb);
  2025. return NETDEV_TX_OK;
  2026. }
  2027. rc = mwl8k_txq_xmit(hw, index, skb);
  2028. return rc;
  2029. }
  2030. struct mwl8k_work_struct {
  2031. /* Initialized by mwl8k_queue_work(). */
  2032. struct work_struct wt;
  2033. /* Required field passed in to mwl8k_queue_work(). */
  2034. struct ieee80211_hw *hw;
  2035. /* Required field passed in to mwl8k_queue_work(). */
  2036. int (*wfunc)(struct work_struct *w);
  2037. /* Initialized by mwl8k_queue_work(). */
  2038. struct completion *cmd_wait;
  2039. /* Result code. */
  2040. int rc;
  2041. /*
  2042. * Optional field. Refer to explanation of MWL8K_WQ_XXX_XXX
  2043. * flags for explanation. Defaults to MWL8K_WQ_DEFAULT_OPTIONS.
  2044. */
  2045. u32 options;
  2046. /* Optional field. Defaults to MWL8K_CONFIG_TIMEOUT_MS. */
  2047. unsigned long timeout_ms;
  2048. /* Optional field. Defaults to MWL8K_WQ_TXWAIT_ATTEMPTS. */
  2049. u32 txwait_attempts;
  2050. /* Optional field. Defaults to MWL8K_TXWAIT_MS. */
  2051. u32 tx_timeout_ms;
  2052. u32 step;
  2053. };
  2054. /* Flags controlling behavior of config queue requests */
  2055. /* Caller spins while waiting for completion. */
  2056. #define MWL8K_WQ_SPIN 0x00000001
  2057. /* Wait for TX queues to empty before proceeding with configuration. */
  2058. #define MWL8K_WQ_TX_WAIT_EMPTY 0x00000002
  2059. /* Queue request and return immediately. */
  2060. #define MWL8K_WQ_POST_REQUEST 0x00000004
  2061. /*
  2062. * Caller sleeps and waits for task complete notification.
  2063. * Do not use in atomic context.
  2064. */
  2065. #define MWL8K_WQ_SLEEP 0x00000008
  2066. /* Free work struct when task is done. */
  2067. #define MWL8K_WQ_FREE_WORKSTRUCT 0x00000010
/*
 * Config request is queued and returns to the caller immediately.  Use
 * this in atomic context.  The work struct is freed automatically (in
 * mwl8k_config_thread()) when this flag is set.
 */
  2073. #define MWL8K_WQ_QUEUE_ONLY (MWL8K_WQ_POST_REQUEST | \
  2074. MWL8K_WQ_FREE_WORKSTRUCT)
  2075. /* Default work queue behavior is to sleep and wait for tx completion. */
  2076. #define MWL8K_WQ_DEFAULT_OPTIONS (MWL8K_WQ_SLEEP | MWL8K_WQ_TX_WAIT_EMPTY)
  2077. /*
  2078. * Default config request timeout. Add adjustments to make sure the
  2079. * config thread waits long enough for both tx wait and cmd wait before
  2080. * timing out.
  2081. */
/* Time to wait for all TXQs to drain; the TX doorbell is rung on each attempt. */
  2083. #define MWL8K_TXWAIT_TIMEOUT_MS 1000
  2084. /* Default number of TX wait attempts. */
  2085. #define MWL8K_WQ_TXWAIT_ATTEMPTS 4
  2086. /* Total time to wait for TXQ to drain. */
  2087. #define MWL8K_TXWAIT_MS (MWL8K_TXWAIT_TIMEOUT_MS * \
  2088. MWL8K_WQ_TXWAIT_ATTEMPTS)
  2089. /* Scheduling slop. */
  2090. #define MWL8K_OS_SCHEDULE_OVERHEAD_MS 200
  2091. #define MWL8K_CONFIG_TIMEOUT_MS (MWL8K_CMD_TIMEOUT_MS + \
  2092. MWL8K_TXWAIT_MS + \
  2093. MWL8K_OS_SCHEDULE_OVERHEAD_MS)
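/*
 * Illustrative sketch (w and wfunc are hypothetical): a caller that
 * must not sleep would queue a config request and return immediately,
 * letting the driver free the work struct when the request completes:
 *
 *	struct mwl8k_work_struct *w;
 *
 *	w = kzalloc(sizeof(*w), GFP_ATOMIC);
 *	if (w != NULL) {
 *		w->options = MWL8K_WQ_QUEUE_ONLY;
 *		mwl8k_queue_work(hw, w, priv->config_wq, wfunc);
 *	}
 *
 * Process-context callers such as mwl8k_config() below rely on the
 * default sleep-and-wait behaviour instead.
 */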
  2094. static void mwl8k_config_thread(struct work_struct *wt)
  2095. {
  2096. struct mwl8k_work_struct *worker = (struct mwl8k_work_struct *)wt;
  2097. struct ieee80211_hw *hw = worker->hw;
  2098. struct mwl8k_priv *priv = hw->priv;
  2099. int rc = 0;
  2100. spin_lock_irq(&priv->tx_lock);
  2101. priv->inconfig = true;
  2102. spin_unlock_irq(&priv->tx_lock);
  2103. ieee80211_stop_queues(hw);
  2104. /*
  2105. * Wait for host queues to drain before doing PHY
  2106. * reconfiguration. This avoids interrupting any in-flight
  2107. * DMA transfers to the hardware.
  2108. */
  2109. if (worker->options & MWL8K_WQ_TX_WAIT_EMPTY) {
  2110. u32 timeout;
  2111. u32 time_remaining;
  2112. u32 iter;
  2113. u32 tx_wait_attempts = worker->txwait_attempts;
  2114. time_remaining = worker->tx_timeout_ms;
  2115. if (!tx_wait_attempts)
  2116. tx_wait_attempts = 1;
  2117. timeout = worker->tx_timeout_ms/tx_wait_attempts;
  2118. if (!timeout)
  2119. timeout = 1;
  2120. iter = tx_wait_attempts;
  2121. do {
  2122. int wait_time;
  2123. if (time_remaining > timeout) {
  2124. time_remaining -= timeout;
  2125. wait_time = timeout;
  2126. } else
  2127. wait_time = time_remaining;
  2128. if (!wait_time)
  2129. wait_time = 1;
  2130. rc = mwl8k_tx_wait_empty(hw, wait_time);
  2131. if (rc)
  2132. printk(KERN_ERR "%s() txwait timeout=%ums "
  2133. "Retry:%u/%u\n", __func__, timeout,
  2134. tx_wait_attempts - iter + 1,
  2135. tx_wait_attempts);
  2136. } while (rc && --iter);
  2137. rc = iter ? 0 : -ETIMEDOUT;
  2138. }
  2139. if (!rc)
  2140. rc = worker->wfunc(wt);
  2141. spin_lock_irq(&priv->tx_lock);
  2142. priv->inconfig = false;
  2143. if (priv->pending_tx_pkts && priv->radio_on)
  2144. mwl8k_tx_start(priv);
  2145. spin_unlock_irq(&priv->tx_lock);
  2146. ieee80211_wake_queues(hw);
  2147. worker->rc = rc;
  2148. if (worker->options & MWL8K_WQ_SLEEP)
  2149. complete(worker->cmd_wait);
  2150. if (worker->options & MWL8K_WQ_FREE_WORKSTRUCT)
  2151. kfree(wt);
  2152. }
  2153. static int mwl8k_queue_work(struct ieee80211_hw *hw,
  2154. struct mwl8k_work_struct *worker,
  2155. struct workqueue_struct *wqueue,
  2156. int (*wfunc)(struct work_struct *w))
  2157. {
  2158. unsigned long timeout = 0;
  2159. int rc = 0;
  2160. DECLARE_COMPLETION_ONSTACK(cmd_wait);
  2161. if (!worker->timeout_ms)
  2162. worker->timeout_ms = MWL8K_CONFIG_TIMEOUT_MS;
  2163. if (!worker->options)
  2164. worker->options = MWL8K_WQ_DEFAULT_OPTIONS;
  2165. if (!worker->txwait_attempts)
  2166. worker->txwait_attempts = MWL8K_WQ_TXWAIT_ATTEMPTS;
  2167. if (!worker->tx_timeout_ms)
  2168. worker->tx_timeout_ms = MWL8K_TXWAIT_MS;
  2169. worker->hw = hw;
  2170. worker->cmd_wait = &cmd_wait;
  2171. worker->rc = 1;
  2172. worker->wfunc = wfunc;
  2173. INIT_WORK(&worker->wt, mwl8k_config_thread);
  2174. queue_work(wqueue, &worker->wt);
  2175. if (worker->options & MWL8K_WQ_POST_REQUEST) {
  2176. rc = 0;
  2177. } else {
  2178. if (worker->options & MWL8K_WQ_SPIN) {
  2179. timeout = worker->timeout_ms;
  2180. while (timeout && (worker->rc > 0)) {
  2181. mdelay(1);
  2182. timeout--;
  2183. }
  2184. } else if (worker->options & MWL8K_WQ_SLEEP)
  2185. timeout = wait_for_completion_timeout(&cmd_wait,
  2186. msecs_to_jiffies(worker->timeout_ms));
  2187. if (timeout)
  2188. rc = worker->rc;
  2189. else {
  2190. cancel_work_sync(&worker->wt);
  2191. rc = -ETIMEDOUT;
  2192. }
  2193. }
  2194. return rc;
  2195. }
  2196. struct mwl8k_start_worker {
  2197. struct mwl8k_work_struct header;
  2198. };
  2199. static int mwl8k_start_wt(struct work_struct *wt)
  2200. {
  2201. struct mwl8k_start_worker *worker = (struct mwl8k_start_worker *)wt;
  2202. struct ieee80211_hw *hw = worker->header.hw;
  2203. struct mwl8k_priv *priv = hw->priv;
  2204. int rc = 0;
  2205. if (priv->vif != NULL) {
  2206. rc = -EIO;
  2207. goto mwl8k_start_exit;
  2208. }
  2209. /* Turn on radio */
  2210. if (mwl8k_cmd_802_11_radio_enable(hw)) {
  2211. rc = -EIO;
  2212. goto mwl8k_start_exit;
  2213. }
  2214. /* Purge TX/RX HW queues */
  2215. if (mwl8k_cmd_set_pre_scan(hw)) {
  2216. rc = -EIO;
  2217. goto mwl8k_start_exit;
  2218. }
  2219. if (mwl8k_cmd_set_post_scan(hw, "\x00\x00\x00\x00\x00\x00")) {
  2220. rc = -EIO;
  2221. goto mwl8k_start_exit;
  2222. }
  2223. /* Enable firmware rate adaptation */
  2224. if (mwl8k_cmd_setrateadaptmode(hw, 0)) {
  2225. rc = -EIO;
  2226. goto mwl8k_start_exit;
  2227. }
	/* Disable WMM; it gets enabled when the stack sends WMM parameters. */
  2229. if (mwl8k_set_wmm(hw, 0)) {
  2230. rc = -EIO;
  2231. goto mwl8k_start_exit;
  2232. }
  2233. /* Disable sniffer mode */
  2234. if (mwl8k_enable_sniffer(hw, 0))
  2235. rc = -EIO;
  2236. mwl8k_start_exit:
  2237. return rc;
  2238. }
  2239. static int mwl8k_start(struct ieee80211_hw *hw)
  2240. {
  2241. struct mwl8k_start_worker *worker;
  2242. struct mwl8k_priv *priv = hw->priv;
  2243. int rc;
  2244. /* Enable tx reclaim tasklet */
  2245. tasklet_enable(&priv->tx_reclaim_task);
  2246. rc = request_irq(priv->pdev->irq, &mwl8k_interrupt,
  2247. IRQF_SHARED, MWL8K_NAME, hw);
  2248. if (rc) {
  2249. printk(KERN_ERR "%s: failed to register IRQ handler\n",
  2250. priv->name);
  2251. rc = -EIO;
  2252. goto mwl8k_start_disable_tasklet;
  2253. }
  2254. /* Enable interrupts */
  2255. iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
  2256. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  2257. if (worker == NULL) {
  2258. rc = -ENOMEM;
  2259. goto mwl8k_start_disable_irq;
  2260. }
  2261. rc = mwl8k_queue_work(hw, &worker->header,
  2262. priv->config_wq, mwl8k_start_wt);
  2263. kfree(worker);
  2264. if (!rc)
  2265. return rc;
  2266. if (rc == -ETIMEDOUT)
  2267. printk(KERN_ERR "%s() timed out\n", __func__);
  2268. rc = -EIO;
  2269. mwl8k_start_disable_irq:
  2270. spin_lock_irq(&priv->tx_lock);
  2271. iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
  2272. spin_unlock_irq(&priv->tx_lock);
  2273. free_irq(priv->pdev->irq, hw);
  2274. mwl8k_start_disable_tasklet:
  2275. tasklet_disable(&priv->tx_reclaim_task);
  2276. return rc;
  2277. }
  2278. struct mwl8k_stop_worker {
  2279. struct mwl8k_work_struct header;
  2280. };
  2281. static int mwl8k_stop_wt(struct work_struct *wt)
  2282. {
  2283. struct mwl8k_stop_worker *worker = (struct mwl8k_stop_worker *)wt;
  2284. struct ieee80211_hw *hw = worker->header.hw;
  2285. return mwl8k_cmd_802_11_radio_disable(hw);
  2286. }
  2287. static void mwl8k_stop(struct ieee80211_hw *hw)
  2288. {
  2289. int rc;
  2290. struct mwl8k_stop_worker *worker;
  2291. struct mwl8k_priv *priv = hw->priv;
  2292. int i;
  2293. if (priv->vif != NULL)
  2294. return;
  2295. ieee80211_stop_queues(hw);
  2296. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  2297. if (worker == NULL)
  2298. return;
  2299. rc = mwl8k_queue_work(hw, &worker->header,
  2300. priv->config_wq, mwl8k_stop_wt);
  2301. kfree(worker);
  2302. if (rc == -ETIMEDOUT)
  2303. printk(KERN_ERR "%s() timed out\n", __func__);
  2304. /* Disable interrupts */
  2305. spin_lock_irq(&priv->tx_lock);
  2306. iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
  2307. spin_unlock_irq(&priv->tx_lock);
  2308. free_irq(priv->pdev->irq, hw);
  2309. /* Stop finalize join worker */
  2310. cancel_work_sync(&priv->finalize_join_worker);
  2311. if (priv->beacon_skb != NULL)
  2312. dev_kfree_skb(priv->beacon_skb);
  2313. /* Stop tx reclaim tasklet */
  2314. tasklet_disable(&priv->tx_reclaim_task);
  2315. /* Stop config thread */
  2316. flush_workqueue(priv->config_wq);
  2317. /* Return all skbs to mac80211 */
  2318. for (i = 0; i < MWL8K_TX_QUEUES; i++)
  2319. mwl8k_txq_reclaim(hw, i, 1);
  2320. }
  2321. static int mwl8k_add_interface(struct ieee80211_hw *hw,
  2322. struct ieee80211_if_init_conf *conf)
  2323. {
  2324. struct mwl8k_priv *priv = hw->priv;
  2325. struct mwl8k_vif *mwl8k_vif;
  2326. /*
  2327. * We only support one active interface at a time.
  2328. */
  2329. if (priv->vif != NULL)
  2330. return -EBUSY;
	/*
	 * We only support managed (station) and monitor interfaces
	 * for now.
	 */
  2334. if (conf->type != NL80211_IFTYPE_STATION &&
  2335. conf->type != NL80211_IFTYPE_MONITOR)
  2336. return -EINVAL;
  2337. /* Clean out driver private area */
  2338. mwl8k_vif = MWL8K_VIF(conf->vif);
  2339. memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
  2340. /* Save the mac address */
  2341. memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN);
  2342. /* Back pointer to parent config block */
  2343. mwl8k_vif->priv = priv;
  2344. /* Setup initial PHY parameters */
  2345. memcpy(mwl8k_vif->legacy_rates,
  2346. priv->rates, sizeof(mwl8k_vif->legacy_rates));
  2347. mwl8k_vif->legacy_nrates = ARRAY_SIZE(priv->rates);
  2348. /* Set Initial sequence number to zero */
  2349. mwl8k_vif->seqno = 0;
  2350. priv->vif = conf->vif;
  2351. priv->current_channel = NULL;
  2352. return 0;
  2353. }
  2354. static void mwl8k_remove_interface(struct ieee80211_hw *hw,
  2355. struct ieee80211_if_init_conf *conf)
  2356. {
  2357. struct mwl8k_priv *priv = hw->priv;
  2358. if (priv->vif == NULL)
  2359. return;
  2360. priv->vif = NULL;
  2361. }
  2362. struct mwl8k_config_worker {
  2363. struct mwl8k_work_struct header;
  2364. u32 changed;
  2365. };
  2366. static int mwl8k_config_wt(struct work_struct *wt)
  2367. {
  2368. struct mwl8k_config_worker *worker =
  2369. (struct mwl8k_config_worker *)wt;
  2370. struct ieee80211_hw *hw = worker->header.hw;
  2371. struct ieee80211_conf *conf = &hw->conf;
  2372. struct mwl8k_priv *priv = hw->priv;
  2373. int rc = 0;
  2374. if (mwl8k_cmd_802_11_radio_enable(hw)) {
  2375. rc = -EINVAL;
  2376. goto mwl8k_config_exit;
  2377. }
  2378. priv->current_channel = conf->channel;
  2379. if (mwl8k_cmd_set_rf_channel(hw, conf->channel)) {
  2380. rc = -EINVAL;
  2381. goto mwl8k_config_exit;
  2382. }
  2383. if (conf->power_level > 18)
  2384. conf->power_level = 18;
  2385. if (mwl8k_cmd_802_11_rf_tx_power(hw, conf->power_level)) {
  2386. rc = -EINVAL;
  2387. goto mwl8k_config_exit;
  2388. }
  2389. if (mwl8k_cmd_mimo_config(hw, 0x7, 0x7))
  2390. rc = -EINVAL;
  2391. mwl8k_config_exit:
  2392. return rc;
  2393. }
  2394. static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
  2395. {
  2396. int rc = 0;
  2397. struct mwl8k_config_worker *worker;
  2398. struct mwl8k_priv *priv = hw->priv;
  2399. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  2400. if (worker == NULL)
  2401. return -ENOMEM;
  2402. worker->changed = changed;
  2403. rc = mwl8k_queue_work(hw, &worker->header,
  2404. priv->config_wq, mwl8k_config_wt);
  2405. if (rc == -ETIMEDOUT) {
  2406. printk(KERN_ERR "%s() timed out.\n", __func__);
  2407. rc = -EINVAL;
  2408. }
  2409. kfree(worker);
	/*
	 * mac80211 will crash on any error return other than -EINVAL;
	 * the wireless-extensions code that calls into mac80211 appears
	 * to be the actual culprit.
	 */
  2415. return rc ? -EINVAL : 0;
  2416. }
  2417. struct mwl8k_bss_info_changed_worker {
  2418. struct mwl8k_work_struct header;
  2419. struct ieee80211_vif *vif;
  2420. struct ieee80211_bss_conf *info;
  2421. u32 changed;
  2422. };
  2423. static int mwl8k_bss_info_changed_wt(struct work_struct *wt)
  2424. {
  2425. struct mwl8k_bss_info_changed_worker *worker =
  2426. (struct mwl8k_bss_info_changed_worker *)wt;
  2427. struct ieee80211_hw *hw = worker->header.hw;
  2428. struct ieee80211_vif *vif = worker->vif;
  2429. struct ieee80211_bss_conf *info = worker->info;
  2430. u32 changed;
  2431. int rc;
  2432. struct mwl8k_priv *priv = hw->priv;
  2433. struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
  2434. changed = worker->changed;
  2435. priv->capture_beacon = false;
  2436. if (info->assoc) {
  2437. memcpy(&mwl8k_vif->bss_info, info,
  2438. sizeof(struct ieee80211_bss_conf));
  2439. /* Install rates */
  2440. if (mwl8k_update_rateset(hw, vif))
  2441. goto mwl8k_bss_info_changed_exit;
  2442. /* Turn on rate adaptation */
  2443. if (mwl8k_cmd_use_fixed_rate(hw, MWL8K_USE_AUTO_RATE,
  2444. MWL8K_UCAST_RATE, NULL))
  2445. goto mwl8k_bss_info_changed_exit;
  2446. /* Set radio preamble */
  2447. if (mwl8k_set_radio_preamble(hw, info->use_short_preamble))
  2448. goto mwl8k_bss_info_changed_exit;
  2449. /* Set slot time */
  2450. if (mwl8k_cmd_set_slot(hw, info->use_short_slot))
  2451. goto mwl8k_bss_info_changed_exit;
  2452. /* Update peer rate info */
  2453. if (mwl8k_cmd_update_sta_db(hw, vif,
  2454. MWL8K_STA_DB_MODIFY_ENTRY))
  2455. goto mwl8k_bss_info_changed_exit;
  2456. /* Set AID */
  2457. if (mwl8k_cmd_set_aid(hw, vif))
  2458. goto mwl8k_bss_info_changed_exit;
  2459. /*
  2460. * Finalize the join. Tell rx handler to process
  2461. * next beacon from our BSSID.
  2462. */
  2463. memcpy(priv->capture_bssid, mwl8k_vif->bssid, ETH_ALEN);
  2464. priv->capture_beacon = true;
  2465. } else {
  2466. mwl8k_cmd_update_sta_db(hw, vif, MWL8K_STA_DB_DEL_ENTRY);
  2467. memset(&mwl8k_vif->bss_info, 0,
  2468. sizeof(struct ieee80211_bss_conf));
  2469. memset(mwl8k_vif->bssid, 0, ETH_ALEN);
  2470. }
  2471. mwl8k_bss_info_changed_exit:
  2472. rc = 0;
  2473. return rc;
  2474. }
  2475. static void mwl8k_bss_info_changed(struct ieee80211_hw *hw,
  2476. struct ieee80211_vif *vif,
  2477. struct ieee80211_bss_conf *info,
  2478. u32 changed)
  2479. {
  2480. struct mwl8k_bss_info_changed_worker *worker;
  2481. struct mwl8k_priv *priv = hw->priv;
  2482. struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
  2483. int rc;
  2484. if (changed & BSS_CHANGED_BSSID)
  2485. memcpy(mv_vif->bssid, info->bssid, ETH_ALEN);
  2486. if ((changed & BSS_CHANGED_ASSOC) == 0)
  2487. return;
  2488. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  2489. if (worker == NULL)
  2490. return;
  2491. worker->vif = vif;
  2492. worker->info = info;
  2493. worker->changed = changed;
  2494. rc = mwl8k_queue_work(hw, &worker->header,
  2495. priv->config_wq,
  2496. mwl8k_bss_info_changed_wt);
  2497. kfree(worker);
  2498. if (rc == -ETIMEDOUT)
  2499. printk(KERN_ERR "%s() timed out\n", __func__);
  2500. }
  2501. static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
  2502. int mc_count, struct dev_addr_list *mclist)
  2503. {
  2504. struct mwl8k_cmd_pkt *cmd;
  2505. cmd = __mwl8k_cmd_mac_multicast_adr(hw, mc_count, mclist);
  2506. return (unsigned long)cmd;
  2507. }
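/*
 * The command packet built above travels back to this driver as the
 * opaque u64 "multicast" cookie that mac80211 passes to
 * mwl8k_configure_filter(), which posts and then frees it from the
 * config work thread.
 */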
  2508. struct mwl8k_configure_filter_worker {
  2509. struct mwl8k_work_struct header;
  2510. unsigned int changed_flags;
  2511. unsigned int total_flags;
  2512. struct mwl8k_cmd_pkt *multicast_adr_cmd;
  2513. };
  2514. #define MWL8K_SUPPORTED_IF_FLAGS FIF_BCN_PRBRESP_PROMISC
  2515. static int mwl8k_configure_filter_wt(struct work_struct *wt)
  2516. {
  2517. struct mwl8k_configure_filter_worker *worker =
  2518. (struct mwl8k_configure_filter_worker *)wt;
  2519. struct ieee80211_hw *hw = worker->header.hw;
  2520. struct mwl8k_priv *priv = hw->priv;
  2521. int rc = 0;
  2522. if (worker->changed_flags & FIF_BCN_PRBRESP_PROMISC) {
  2523. if (worker->total_flags & FIF_BCN_PRBRESP_PROMISC)
  2524. rc = mwl8k_cmd_set_pre_scan(hw);
  2525. else {
  2526. u8 *bssid;
  2527. bssid = "\x00\x00\x00\x00\x00\x00";
  2528. if (priv->vif != NULL)
  2529. bssid = MWL8K_VIF(priv->vif)->bssid;
  2530. rc = mwl8k_cmd_set_post_scan(hw, bssid);
  2531. }
  2532. }
  2533. if (!rc && worker->multicast_adr_cmd != NULL)
  2534. rc = mwl8k_post_cmd(hw, worker->multicast_adr_cmd);
  2535. kfree(worker->multicast_adr_cmd);
  2536. return rc;
  2537. }
  2538. static void mwl8k_configure_filter(struct ieee80211_hw *hw,
  2539. unsigned int changed_flags,
  2540. unsigned int *total_flags,
  2541. u64 multicast)
  2542. {
  2543. struct mwl8k_priv *priv = hw->priv;
  2544. struct mwl8k_configure_filter_worker *worker;
  2545. /* Clear unsupported feature flags */
  2546. *total_flags &= MWL8K_SUPPORTED_IF_FLAGS;
  2547. if (!(changed_flags & MWL8K_SUPPORTED_IF_FLAGS))
  2548. return;
  2549. worker = kzalloc(sizeof(*worker), GFP_ATOMIC);
  2550. if (worker == NULL)
  2551. return;
  2552. worker->changed_flags = changed_flags;
  2553. worker->total_flags = *total_flags;
  2554. worker->multicast_adr_cmd = (void *)(unsigned long)multicast;
  2555. mwl8k_queue_work(hw, &worker->header, priv->config_wq,
  2556. mwl8k_configure_filter_wt);
  2557. }
  2558. struct mwl8k_set_rts_threshold_worker {
  2559. struct mwl8k_work_struct header;
  2560. u32 value;
  2561. };
  2562. static int mwl8k_set_rts_threshold_wt(struct work_struct *wt)
  2563. {
  2564. struct mwl8k_set_rts_threshold_worker *worker =
  2565. (struct mwl8k_set_rts_threshold_worker *)wt;
  2566. struct ieee80211_hw *hw = worker->header.hw;
  2567. u16 threshold = (u16)(worker->value);
  2568. int rc;
  2569. rc = mwl8k_rts_threshold(hw, MWL8K_CMD_SET, &threshold);
  2570. return rc;
  2571. }
  2572. static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
  2573. {
  2574. int rc;
  2575. struct mwl8k_set_rts_threshold_worker *worker;
  2576. struct mwl8k_priv *priv = hw->priv;
  2577. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  2578. if (worker == NULL)
  2579. return -ENOMEM;
  2580. worker->value = value;
  2581. rc = mwl8k_queue_work(hw, &worker->header,
  2582. priv->config_wq,
  2583. mwl8k_set_rts_threshold_wt);
  2584. kfree(worker);
  2585. if (rc == -ETIMEDOUT) {
  2586. printk(KERN_ERR "%s() timed out\n", __func__);
  2587. rc = -EINVAL;
  2588. }
  2589. return rc;
  2590. }
  2591. struct mwl8k_conf_tx_worker {
  2592. struct mwl8k_work_struct header;
  2593. u16 queue;
  2594. const struct ieee80211_tx_queue_params *params;
  2595. };
  2596. static int mwl8k_conf_tx_wt(struct work_struct *wt)
  2597. {
  2598. struct mwl8k_conf_tx_worker *worker =
  2599. (struct mwl8k_conf_tx_worker *)wt;
  2600. struct ieee80211_hw *hw = worker->header.hw;
  2601. u16 queue = worker->queue;
  2602. const struct ieee80211_tx_queue_params *params = worker->params;
  2603. struct mwl8k_priv *priv = hw->priv;
  2604. int rc = 0;
  2605. if (!priv->wmm_enabled) {
  2606. if (mwl8k_set_wmm(hw, 1)) {
  2607. rc = -EINVAL;
  2608. goto mwl8k_conf_tx_exit;
  2609. }
  2610. }
  2611. if (mwl8k_set_edca_params(hw, GET_TXQ(queue), params->cw_min,
  2612. params->cw_max, params->aifs, params->txop))
  2613. rc = -EINVAL;
  2614. mwl8k_conf_tx_exit:
  2615. return rc;
  2616. }
  2617. static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
  2618. const struct ieee80211_tx_queue_params *params)
  2619. {
  2620. int rc;
  2621. struct mwl8k_conf_tx_worker *worker;
  2622. struct mwl8k_priv *priv = hw->priv;
  2623. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  2624. if (worker == NULL)
  2625. return -ENOMEM;
  2626. worker->queue = queue;
  2627. worker->params = params;
  2628. rc = mwl8k_queue_work(hw, &worker->header,
  2629. priv->config_wq, mwl8k_conf_tx_wt);
  2630. kfree(worker);
  2631. if (rc == -ETIMEDOUT) {
  2632. printk(KERN_ERR "%s() timed out\n", __func__);
  2633. rc = -EINVAL;
  2634. }
  2635. return rc;
  2636. }
  2637. static int mwl8k_get_tx_stats(struct ieee80211_hw *hw,
  2638. struct ieee80211_tx_queue_stats *stats)
  2639. {
  2640. struct mwl8k_priv *priv = hw->priv;
  2641. struct mwl8k_tx_queue *txq;
  2642. int index;
  2643. spin_lock_bh(&priv->tx_lock);
  2644. for (index = 0; index < MWL8K_TX_QUEUES; index++) {
  2645. txq = priv->txq + index;
  2646. memcpy(&stats[index], &txq->tx_stats,
  2647. sizeof(struct ieee80211_tx_queue_stats));
  2648. }
  2649. spin_unlock_bh(&priv->tx_lock);
  2650. return 0;
  2651. }
  2652. struct mwl8k_get_stats_worker {
  2653. struct mwl8k_work_struct header;
  2654. struct ieee80211_low_level_stats *stats;
  2655. };
  2656. static int mwl8k_get_stats_wt(struct work_struct *wt)
  2657. {
  2658. struct mwl8k_get_stats_worker *worker =
  2659. (struct mwl8k_get_stats_worker *)wt;
  2660. return mwl8k_cmd_802_11_get_stat(worker->header.hw, worker->stats);
  2661. }
  2662. static int mwl8k_get_stats(struct ieee80211_hw *hw,
  2663. struct ieee80211_low_level_stats *stats)
  2664. {
  2665. int rc;
  2666. struct mwl8k_get_stats_worker *worker;
  2667. struct mwl8k_priv *priv = hw->priv;
  2668. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  2669. if (worker == NULL)
  2670. return -ENOMEM;
  2671. worker->stats = stats;
  2672. rc = mwl8k_queue_work(hw, &worker->header,
  2673. priv->config_wq, mwl8k_get_stats_wt);
  2674. kfree(worker);
  2675. if (rc == -ETIMEDOUT) {
  2676. printk(KERN_ERR "%s() timed out\n", __func__);
  2677. rc = -EINVAL;
  2678. }
  2679. return rc;
  2680. }
  2681. static const struct ieee80211_ops mwl8k_ops = {
  2682. .tx = mwl8k_tx,
  2683. .start = mwl8k_start,
  2684. .stop = mwl8k_stop,
  2685. .add_interface = mwl8k_add_interface,
  2686. .remove_interface = mwl8k_remove_interface,
  2687. .config = mwl8k_config,
  2688. .bss_info_changed = mwl8k_bss_info_changed,
  2689. .prepare_multicast = mwl8k_prepare_multicast,
  2690. .configure_filter = mwl8k_configure_filter,
  2691. .set_rts_threshold = mwl8k_set_rts_threshold,
  2692. .conf_tx = mwl8k_conf_tx,
  2693. .get_tx_stats = mwl8k_get_tx_stats,
  2694. .get_stats = mwl8k_get_stats,
  2695. };
  2696. static void mwl8k_tx_reclaim_handler(unsigned long data)
  2697. {
  2698. int i;
  2699. struct ieee80211_hw *hw = (struct ieee80211_hw *) data;
  2700. struct mwl8k_priv *priv = hw->priv;
  2701. spin_lock_bh(&priv->tx_lock);
  2702. for (i = 0; i < MWL8K_TX_QUEUES; i++)
  2703. mwl8k_txq_reclaim(hw, i, 0);
  2704. if (priv->tx_wait != NULL && mwl8k_txq_busy(priv) == 0) {
  2705. complete(priv->tx_wait);
  2706. priv->tx_wait = NULL;
  2707. }
  2708. spin_unlock_bh(&priv->tx_lock);
  2709. }
  2710. static void mwl8k_finalize_join_worker(struct work_struct *work)
  2711. {
  2712. struct mwl8k_priv *priv =
  2713. container_of(work, struct mwl8k_priv, finalize_join_worker);
  2714. struct sk_buff *skb = priv->beacon_skb;
  2715. u8 dtim = MWL8K_VIF(priv->vif)->bss_info.dtim_period;
  2716. mwl8k_finalize_join(priv->hw, skb->data, skb->len, dtim);
  2717. dev_kfree_skb(skb);
  2718. priv->beacon_skb = NULL;
  2719. }
  2720. static int __devinit mwl8k_probe(struct pci_dev *pdev,
  2721. const struct pci_device_id *id)
  2722. {
  2723. struct ieee80211_hw *hw;
  2724. struct mwl8k_priv *priv;
  2725. int rc;
  2726. int i;
  2727. u8 *fw;
  2728. rc = pci_enable_device(pdev);
  2729. if (rc) {
  2730. printk(KERN_ERR "%s: Cannot enable new PCI device\n",
  2731. MWL8K_NAME);
  2732. return rc;
  2733. }
  2734. rc = pci_request_regions(pdev, MWL8K_NAME);
  2735. if (rc) {
  2736. printk(KERN_ERR "%s: Cannot obtain PCI resources\n",
  2737. MWL8K_NAME);
  2738. return rc;
  2739. }
  2740. pci_set_master(pdev);
  2741. hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
  2742. if (hw == NULL) {
  2743. printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
  2744. rc = -ENOMEM;
  2745. goto err_free_reg;
  2746. }
  2747. priv = hw->priv;
  2748. priv->hw = hw;
  2749. priv->pdev = pdev;
  2750. priv->hostcmd_wait = NULL;
  2751. priv->tx_wait = NULL;
  2752. priv->inconfig = false;
  2753. priv->wmm_enabled = false;
  2754. priv->pending_tx_pkts = 0;
  2755. strncpy(priv->name, MWL8K_NAME, sizeof(priv->name));
  2756. spin_lock_init(&priv->fw_lock);
  2757. SET_IEEE80211_DEV(hw, &pdev->dev);
  2758. pci_set_drvdata(pdev, hw);
  2759. priv->regs = pci_iomap(pdev, 1, 0x10000);
  2760. if (priv->regs == NULL) {
  2761. printk(KERN_ERR "%s: Cannot map device memory\n", priv->name);
  2762. goto err_iounmap;
  2763. }
  2764. memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels));
  2765. priv->band.band = IEEE80211_BAND_2GHZ;
  2766. priv->band.channels = priv->channels;
  2767. priv->band.n_channels = ARRAY_SIZE(mwl8k_channels);
  2768. priv->band.bitrates = priv->rates;
  2769. priv->band.n_bitrates = ARRAY_SIZE(mwl8k_rates);
  2770. hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
  2771. BUILD_BUG_ON(sizeof(priv->rates) != sizeof(mwl8k_rates));
  2772. memcpy(priv->rates, mwl8k_rates, sizeof(mwl8k_rates));
  2773. /*
  2774. * Extra headroom is the size of the required DMA header
  2775. * minus the size of the smallest 802.11 frame (CTS frame).
  2776. */
  2777. hw->extra_tx_headroom =
  2778. sizeof(struct mwl8k_dma_data) - sizeof(struct ieee80211_cts);
  2779. hw->channel_change_time = 10;
  2780. hw->queues = MWL8K_TX_QUEUES;
  2781. hw->wiphy->interface_modes =
  2782. BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_MONITOR);
  2783. /* Set rssi and noise values to dBm */
  2784. hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
  2785. hw->vif_data_size = sizeof(struct mwl8k_vif);
  2786. priv->vif = NULL;
  2787. /* Set default radio state and preamble */
  2788. priv->radio_on = 0;
  2789. priv->radio_short_preamble = 0;
  2790. /* Finalize join worker */
  2791. INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
  2792. /* TX reclaim tasklet */
  2793. tasklet_init(&priv->tx_reclaim_task,
  2794. mwl8k_tx_reclaim_handler, (unsigned long)hw);
  2795. tasklet_disable(&priv->tx_reclaim_task);
  2796. /* Config workthread */
  2797. priv->config_wq = create_singlethread_workqueue("mwl8k_config");
  2798. if (priv->config_wq == NULL)
  2799. goto err_iounmap;
  2800. /* Power management cookie */
  2801. priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
  2802. if (priv->cookie == NULL)
  2803. goto err_iounmap;
	rc = mwl8k_rxq_init(hw, 0);
	if (rc)
		goto err_iounmap;
	rxq_refill(hw, 0, INT_MAX);

	spin_lock_init(&priv->tx_lock);

	for (i = 0; i < MWL8K_TX_QUEUES; i++) {
		rc = mwl8k_txq_init(hw, i);
		if (rc)
			goto err_free_queues;
	}
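
	/*
	 * Clear any pending adapter-to-host interrupt status and mask all
	 * interrupt sources before installing the interrupt handler.
	 */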
	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
	iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);

	rc = request_irq(priv->pdev->irq, &mwl8k_interrupt,
			 IRQF_SHARED, MWL8K_NAME, hw);
	if (rc) {
		printk(KERN_ERR "%s: failed to register IRQ handler\n",
		       priv->name);
		goto err_free_queues;
	}

	/* Reset firmware and hardware */
	mwl8k_hw_reset(priv);

	/* Ask userland hotplug daemon for the device firmware */
	rc = mwl8k_request_firmware(priv, (u32)id->driver_data);
	if (rc) {
		printk(KERN_ERR "%s: Firmware files not found\n", priv->name);
		goto err_free_irq;
	}

	/* Load firmware into hardware */
	rc = mwl8k_load_firmware(priv);
	if (rc) {
		printk(KERN_ERR "%s: Cannot start firmware\n", priv->name);
		goto err_stop_firmware;
	}

	/* Reclaim memory once firmware is successfully loaded */
	mwl8k_release_firmware(priv);

	/*
	 * Temporarily enable interrupts.  Initial firmware host
	 * commands use interrupts and avoid polling.  Disable
	 * interrupts when done.
	 */
	iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);

	/* Get config data, mac addrs etc */
	rc = mwl8k_cmd_get_hw_spec(hw);
	if (rc) {
		printk(KERN_ERR "%s: Cannot initialise firmware\n", priv->name);
		goto err_stop_firmware;
	}

	/* Turn radio off */
	rc = mwl8k_cmd_802_11_radio_disable(hw);
	if (rc) {
		printk(KERN_ERR "%s: Cannot disable radio\n", priv->name);
		goto err_stop_firmware;
	}

	/* Disable interrupts */
	spin_lock_irq(&priv->tx_lock);
	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
	spin_unlock_irq(&priv->tx_lock);
	free_irq(priv->pdev->irq, hw);

	rc = ieee80211_register_hw(hw);
	if (rc) {
		printk(KERN_ERR "%s: Cannot register device\n", priv->name);
		goto err_stop_firmware;
	}

	fw = (u8 *)&priv->fw_rev;
	printk(KERN_INFO "%s: 88W%u %s\n", priv->name, priv->part_num,
	       MWL8K_DESC);
	printk(KERN_INFO "%s: Driver Ver:%s Firmware Ver:%u.%u.%u.%u\n",
	       priv->name, MWL8K_VERSION, fw[3], fw[2], fw[1], fw[0]);
	printk(KERN_INFO "%s: MAC Address: %pM\n", priv->name,
	       hw->wiphy->perm_addr);

	return 0;

err_stop_firmware:
	mwl8k_hw_reset(priv);
	mwl8k_release_firmware(priv);

err_free_irq:
	spin_lock_irq(&priv->tx_lock);
	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
	spin_unlock_irq(&priv->tx_lock);
	free_irq(priv->pdev->irq, hw);

err_free_queues:
	for (i = 0; i < MWL8K_TX_QUEUES; i++)
		mwl8k_txq_deinit(hw, i);
	mwl8k_rxq_deinit(hw, 0);

err_iounmap:
	if (priv->cookie != NULL)
		pci_free_consistent(priv->pdev, 4,
				    priv->cookie, priv->cookie_dma);

	if (priv->regs != NULL)
		pci_iounmap(pdev, priv->regs);

	if (priv->config_wq != NULL)
		destroy_workqueue(priv->config_wq);

	pci_set_drvdata(pdev, NULL);
	ieee80211_free_hw(hw);

err_free_reg:
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	return rc;
}
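
/* PCI shutdown hook; currently only logs entry, for debugging. */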
static void __devexit mwl8k_shutdown(struct pci_dev *pdev)
{
	printk(KERN_ERR "===>%s(%u)\n", __func__, __LINE__);
}
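
/* Tear down the device in the reverse order of mwl8k_probe(). */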
static void __devexit mwl8k_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct mwl8k_priv *priv;
	int i;

	if (hw == NULL)
		return;
	priv = hw->priv;

	ieee80211_stop_queues(hw);

	ieee80211_unregister_hw(hw);

	/* Remove tx reclaim tasklet */
	tasklet_kill(&priv->tx_reclaim_task);

	/* Stop config thread */
	destroy_workqueue(priv->config_wq);

	/* Stop hardware */
	mwl8k_hw_reset(priv);

	/* Return all skbs to mac80211 */
	for (i = 0; i < MWL8K_TX_QUEUES; i++)
		mwl8k_txq_reclaim(hw, i, 1);

	for (i = 0; i < MWL8K_TX_QUEUES; i++)
		mwl8k_txq_deinit(hw, i);

	mwl8k_rxq_deinit(hw, 0);

	pci_free_consistent(priv->pdev, 4,
			    priv->cookie, priv->cookie_dma);

	pci_iounmap(pdev, priv->regs);
	pci_set_drvdata(pdev, NULL);
	ieee80211_free_hw(hw);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver mwl8k_driver = {
	.name		= MWL8K_NAME,
	.id_table	= mwl8k_table,
	.probe		= mwl8k_probe,
	.remove		= __devexit_p(mwl8k_remove),
	.shutdown	= __devexit_p(mwl8k_shutdown),
};
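
/* Module entry points just register and unregister the PCI driver. */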
static int __init mwl8k_init(void)
{
	return pci_register_driver(&mwl8k_driver);
}

static void __exit mwl8k_exit(void)
{
	pci_unregister_driver(&mwl8k_driver);
}

module_init(mwl8k_init);
module_exit(mwl8k_exit);