
/*
 * drivers/net/wireless/mwl8k.c
 * Driver for Marvell TOPDOG 802.11 Wireless cards
 *
 * Copyright (C) 2008 Marvell Semiconductor Inc.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/workqueue.h>

#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver"
#define MWL8K_NAME KBUILD_MODNAME
#define MWL8K_VERSION "0.9.1"

MODULE_DESCRIPTION(MWL8K_DESC);
MODULE_VERSION(MWL8K_VERSION);
MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com>");
MODULE_LICENSE("GPL");
static DEFINE_PCI_DEVICE_TABLE(mwl8k_table) = {
	{ PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = 8687, },
	{ PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = 8687, },
	{ }
};
MODULE_DEVICE_TABLE(pci, mwl8k_table);
/* Register definitions */
#define MWL8K_HIU_GEN_PTR 0x00000c10
#define MWL8K_MODE_STA 0x0000005a
#define MWL8K_MODE_AP 0x000000a5
#define MWL8K_HIU_INT_CODE 0x00000c14
#define MWL8K_FWSTA_READY 0xf0f1f2f4
#define MWL8K_FWAP_READY 0xf1f2f4a5
#define MWL8K_INT_CODE_CMD_FINISHED 0x00000005
#define MWL8K_HIU_SCRATCH 0x00000c40

/* Host->device communications */
#define MWL8K_HIU_H2A_INTERRUPT_EVENTS 0x00000c18
#define MWL8K_HIU_H2A_INTERRUPT_STATUS 0x00000c1c
#define MWL8K_HIU_H2A_INTERRUPT_MASK 0x00000c20
#define MWL8K_HIU_H2A_INTERRUPT_CLEAR_SEL 0x00000c24
#define MWL8K_HIU_H2A_INTERRUPT_STATUS_MASK 0x00000c28
#define MWL8K_H2A_INT_DUMMY (1 << 20)
#define MWL8K_H2A_INT_RESET (1 << 15)
#define MWL8K_H2A_INT_DOORBELL (1 << 1)
#define MWL8K_H2A_INT_PPA_READY (1 << 0)

/* Device->host communications */
#define MWL8K_HIU_A2H_INTERRUPT_EVENTS 0x00000c2c
#define MWL8K_HIU_A2H_INTERRUPT_STATUS 0x00000c30
#define MWL8K_HIU_A2H_INTERRUPT_MASK 0x00000c34
#define MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL 0x00000c38
#define MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK 0x00000c3c
#define MWL8K_A2H_INT_DUMMY (1 << 20)
#define MWL8K_A2H_INT_CHNL_SWITCHED (1 << 11)
#define MWL8K_A2H_INT_QUEUE_EMPTY (1 << 10)
#define MWL8K_A2H_INT_RADAR_DETECT (1 << 7)
#define MWL8K_A2H_INT_RADIO_ON (1 << 6)
#define MWL8K_A2H_INT_RADIO_OFF (1 << 5)
#define MWL8K_A2H_INT_MAC_EVENT (1 << 3)
#define MWL8K_A2H_INT_OPC_DONE (1 << 2)
#define MWL8K_A2H_INT_RX_READY (1 << 1)
#define MWL8K_A2H_INT_TX_DONE (1 << 0)

#define MWL8K_A2H_EVENTS	(MWL8K_A2H_INT_DUMMY | \
				 MWL8K_A2H_INT_CHNL_SWITCHED | \
				 MWL8K_A2H_INT_QUEUE_EMPTY | \
				 MWL8K_A2H_INT_RADAR_DETECT | \
				 MWL8K_A2H_INT_RADIO_ON | \
				 MWL8K_A2H_INT_RADIO_OFF | \
				 MWL8K_A2H_INT_MAC_EVENT | \
				 MWL8K_A2H_INT_OPC_DONE | \
				 MWL8K_A2H_INT_RX_READY | \
				 MWL8K_A2H_INT_TX_DONE)

/* WME stream classes */
#define WME_AC_BE 0 /* best effort */
#define WME_AC_BK 1 /* background */
#define WME_AC_VI 2 /* video */
#define WME_AC_VO 3 /* voice */

#define MWL8K_RX_QUEUES 1
#define MWL8K_TX_QUEUES 4
struct mwl8k_rx_queue {
	int rx_desc_count;

	/* hw receives here */
	int rx_head;

	/* refill descs here */
	int rx_tail;

	struct mwl8k_rx_desc *rx_desc_area;
	dma_addr_t rx_desc_dma;
	struct sk_buff **rx_skb;
};

struct mwl8k_skb {
	/*
	 * The DMA engine requires a modification to the payload.
	 * If the skbuff is shared/cloned, it needs to be unshared.
	 * This method is used to ensure the stack always gets back
	 * the skbuff it sent for transmission.
	 */
	struct sk_buff *clone;
	struct sk_buff *skb;
};

struct mwl8k_tx_queue {
	/* hw transmits here */
	int tx_head;

	/* sw appends here */
	int tx_tail;

	struct ieee80211_tx_queue_stats tx_stats;
	struct mwl8k_tx_desc *tx_desc_area;
	dma_addr_t tx_desc_dma;
	struct mwl8k_skb *tx_skb;
};
/* Pointers to the firmware data and meta information about it. */
struct mwl8k_firmware {
	/* Microcode */
	struct firmware *ucode;

	/* Boot helper code */
	struct firmware *helper;
};

struct mwl8k_priv {
	void __iomem *regs;
	struct ieee80211_hw *hw;

	struct pci_dev *pdev;
	u8 name[16];

	/* firmware access lock */
	spinlock_t fw_lock;

	/* firmware files and meta data */
	struct mwl8k_firmware fw;
	u32 part_num;

	/* lock held over TX and TX reap */
	spinlock_t tx_lock;

	struct ieee80211_vif *vif;

	struct ieee80211_channel *current_channel;

	/* power management status cookie from firmware */
	u32 *cookie;
	dma_addr_t cookie_dma;

	u16 num_mcaddrs;
	u8 hw_rev;
	__le32 fw_rev;

	/*
	 * Running count of TX packets in flight, to avoid
	 * iterating over the transmit rings each time.
	 */
	int pending_tx_pkts;

	struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
	struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES];

	/* PHY parameters */
	struct ieee80211_supported_band band;
	struct ieee80211_channel channels[14];
	struct ieee80211_rate rates[12];

	bool radio_on;
	bool radio_short_preamble;
	bool wmm_enabled;

	/* Set if PHY config is in progress */
	bool inconfig;

	/* XXX need to convert this to handle multiple interfaces */
	bool capture_beacon;
	u8 capture_bssid[ETH_ALEN];
	struct sk_buff *beacon_skb;

	/*
	 * This finalize-join worker has to be global as it is
	 * scheduled from the RX handler. At that point we don't know
	 * which interface it belongs to until the list of bssids
	 * waiting to complete join is checked.
	 */
	struct work_struct finalize_join_worker;

	/* Tasklet to reclaim TX descriptors and buffers after tx */
	struct tasklet_struct tx_reclaim_task;

	/* Work thread to serialize configuration requests */
	struct workqueue_struct *config_wq;

	struct completion *hostcmd_wait;
	struct completion *tx_wait;
};
/* Per interface specific private data */
struct mwl8k_vif {
	/* backpointer to parent config block */
	struct mwl8k_priv *priv;

	/* BSS config of AP or IBSS from mac80211 */
	struct ieee80211_bss_conf bss_info;

	/* BSSID of AP or IBSS */
	u8 bssid[ETH_ALEN];
	u8 mac_addr[ETH_ALEN];

	/*
	 * Subset of supported legacy rates.
	 * Intersection of AP and STA supported rates.
	 */
	struct ieee80211_rate legacy_rates[12];

	/* number of supported legacy rates */
	u8 legacy_nrates;

	/* Index into station database. Returned by update_sta_db call */
	u8 peer_id;

	/* Non AMPDU sequence number assigned by driver */
	u16 seqno;
};

#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))

static const struct ieee80211_channel mwl8k_channels[] = {
	{ .center_freq = 2412, .hw_value = 1, },
	{ .center_freq = 2417, .hw_value = 2, },
	{ .center_freq = 2422, .hw_value = 3, },
	{ .center_freq = 2427, .hw_value = 4, },
	{ .center_freq = 2432, .hw_value = 5, },
	{ .center_freq = 2437, .hw_value = 6, },
	{ .center_freq = 2442, .hw_value = 7, },
	{ .center_freq = 2447, .hw_value = 8, },
	{ .center_freq = 2452, .hw_value = 9, },
	{ .center_freq = 2457, .hw_value = 10, },
	{ .center_freq = 2462, .hw_value = 11, },
};
static const struct ieee80211_rate mwl8k_rates[] = {
	{ .bitrate = 10, .hw_value = 2, },
	{ .bitrate = 20, .hw_value = 4, },
	{ .bitrate = 55, .hw_value = 11, },
	{ .bitrate = 60, .hw_value = 12, },
	{ .bitrate = 90, .hw_value = 18, },
	{ .bitrate = 110, .hw_value = 22, },
	{ .bitrate = 120, .hw_value = 24, },
	{ .bitrate = 180, .hw_value = 36, },
	{ .bitrate = 240, .hw_value = 48, },
	{ .bitrate = 360, .hw_value = 72, },
	{ .bitrate = 480, .hw_value = 96, },
	{ .bitrate = 540, .hw_value = 108, },
};
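
/*
 * Note (editorial, derived from the table above): mac80211 expresses
 * ieee80211_rate.bitrate in units of 100 kbps, so the entries above
 * cover 1 Mbps through 54 Mbps.  The .hw_value used with the firmware
 * appears to be the same rate in 500 kbps units, i.e. .bitrate / 5
 * (e.g. 540 -> 108 for 54 Mbps).
 */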
/* Set or get info from Firmware */
#define MWL8K_CMD_SET 0x0001
#define MWL8K_CMD_GET 0x0000

/* Firmware command codes */
#define MWL8K_CMD_CODE_DNLD 0x0001
#define MWL8K_CMD_GET_HW_SPEC 0x0003
#define MWL8K_CMD_MAC_MULTICAST_ADR 0x0010
#define MWL8K_CMD_GET_STAT 0x0014
#define MWL8K_CMD_RADIO_CONTROL 0x001c
#define MWL8K_CMD_RF_TX_POWER 0x001e
#define MWL8K_CMD_SET_PRE_SCAN 0x0107
#define MWL8K_CMD_SET_POST_SCAN 0x0108
#define MWL8K_CMD_SET_RF_CHANNEL 0x010a
#define MWL8K_CMD_SET_AID 0x010d
#define MWL8K_CMD_SET_RATE 0x0110
#define MWL8K_CMD_SET_FINALIZE_JOIN 0x0111
#define MWL8K_CMD_RTS_THRESHOLD 0x0113
#define MWL8K_CMD_SET_SLOT 0x0114
#define MWL8K_CMD_SET_EDCA_PARAMS 0x0115
#define MWL8K_CMD_SET_WMM_MODE 0x0123
#define MWL8K_CMD_MIMO_CONFIG 0x0125
#define MWL8K_CMD_USE_FIXED_RATE 0x0126
#define MWL8K_CMD_ENABLE_SNIFFER 0x0150
#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
#define MWL8K_CMD_UPDATE_STADB 0x1123

static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
{
#define MWL8K_CMDNAME(x)	case MWL8K_CMD_##x: do {\
					snprintf(buf, bufsize, "%s", #x);\
					return buf;\
				} while (0)

	switch (cmd & ~0x8000) {
		MWL8K_CMDNAME(CODE_DNLD);
		MWL8K_CMDNAME(GET_HW_SPEC);
		MWL8K_CMDNAME(MAC_MULTICAST_ADR);
		MWL8K_CMDNAME(GET_STAT);
		MWL8K_CMDNAME(RADIO_CONTROL);
		MWL8K_CMDNAME(RF_TX_POWER);
		MWL8K_CMDNAME(SET_PRE_SCAN);
		MWL8K_CMDNAME(SET_POST_SCAN);
		MWL8K_CMDNAME(SET_RF_CHANNEL);
		MWL8K_CMDNAME(SET_AID);
		MWL8K_CMDNAME(SET_RATE);
		MWL8K_CMDNAME(SET_FINALIZE_JOIN);
		MWL8K_CMDNAME(RTS_THRESHOLD);
		MWL8K_CMDNAME(SET_SLOT);
		MWL8K_CMDNAME(SET_EDCA_PARAMS);
		MWL8K_CMDNAME(SET_WMM_MODE);
		MWL8K_CMDNAME(MIMO_CONFIG);
		MWL8K_CMDNAME(USE_FIXED_RATE);
		MWL8K_CMDNAME(ENABLE_SNIFFER);
		MWL8K_CMDNAME(SET_RATEADAPT_MODE);
		MWL8K_CMDNAME(UPDATE_STADB);
	default:
		snprintf(buf, bufsize, "0x%x", cmd);
	}
#undef MWL8K_CMDNAME

	return buf;
}
/* Hardware and firmware reset */
static void mwl8k_hw_reset(struct mwl8k_priv *priv)
{
	iowrite32(MWL8K_H2A_INT_RESET,
		priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
	iowrite32(MWL8K_H2A_INT_RESET,
		priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
	msleep(20);
}

/* Release fw image */
static void mwl8k_release_fw(struct firmware **fw)
{
	if (*fw == NULL)
		return;
	release_firmware(*fw);
	*fw = NULL;
}

static void mwl8k_release_firmware(struct mwl8k_priv *priv)
{
	mwl8k_release_fw(&priv->fw.ucode);
	mwl8k_release_fw(&priv->fw.helper);
}

/* Request fw image */
static int mwl8k_request_fw(struct mwl8k_priv *priv,
				const char *fname, struct firmware **fw)
{
	/* release current image */
	if (*fw != NULL)
		mwl8k_release_fw(fw);

	return request_firmware((const struct firmware **)fw,
					fname, &priv->pdev->dev);
}
static int mwl8k_request_firmware(struct mwl8k_priv *priv, u32 part_num)
{
	u8 filename[64];
	int rc;

	priv->part_num = part_num;

	snprintf(filename, sizeof(filename),
		 "mwl8k/helper_%u.fw", priv->part_num);
	rc = mwl8k_request_fw(priv, filename, &priv->fw.helper);
	if (rc) {
		printk(KERN_ERR
			"%s Error requesting helper firmware file %s\n",
			pci_name(priv->pdev), filename);
		return rc;
	}

	snprintf(filename, sizeof(filename),
		 "mwl8k/fmimage_%u.fw", priv->part_num);
	rc = mwl8k_request_fw(priv, filename, &priv->fw.ucode);
	if (rc) {
		printk(KERN_ERR "%s Error requesting firmware file %s\n",
			pci_name(priv->pdev), filename);
		mwl8k_release_fw(&priv->fw.helper);
		return rc;
	}

	return 0;
}
struct mwl8k_cmd_pkt {
	__le16 code;
	__le16 length;
	__le16 seq_num;
	__le16 result;
	char payload[0];
} __attribute__((packed));
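
/*
 * Note (editorial): every host command shares this packed header, with
 * command-specific fields following as the payload.  The switch in
 * mwl8k_cmd_name() masks off the top bit (0x8000) of the command code,
 * which the firmware presumably sets when echoing back a completed
 * command.
 */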
/*
 * Firmware loading.
 */
static int
mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length)
{
	void __iomem *regs = priv->regs;
	dma_addr_t dma_addr;
	int rc;
	int loops;

	dma_addr = pci_map_single(priv->pdev, data, length, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, dma_addr))
		return -ENOMEM;

	iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR);
	iowrite32(0, regs + MWL8K_HIU_INT_CODE);
	iowrite32(MWL8K_H2A_INT_DOORBELL,
		regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
	iowrite32(MWL8K_H2A_INT_DUMMY,
		regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);

	rc = -ETIMEDOUT;
	loops = 1000;
	do {
		u32 int_code;

		int_code = ioread32(regs + MWL8K_HIU_INT_CODE);
		if (int_code == MWL8K_INT_CODE_CMD_FINISHED) {
			iowrite32(0, regs + MWL8K_HIU_INT_CODE);
			rc = 0;
			break;
		}

		udelay(1);
	} while (--loops);

	pci_unmap_single(priv->pdev, dma_addr, length, PCI_DMA_TODEVICE);

	/*
	 * Clear 'command done' interrupt bit.
	 */
	loops = 1000;
	do {
		u32 status;

		status = ioread32(priv->regs +
				MWL8K_HIU_A2H_INTERRUPT_STATUS);
		if (status & MWL8K_A2H_INT_OPC_DONE) {
			iowrite32(~MWL8K_A2H_INT_OPC_DONE,
				priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
			ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
			break;
		}

		udelay(1);
	} while (--loops);

	return rc;
}

static int mwl8k_load_fw_image(struct mwl8k_priv *priv,
				const u8 *data, size_t length)
{
	struct mwl8k_cmd_pkt *cmd;
	int done;
	int rc = 0;

	cmd = kmalloc(sizeof(*cmd) + 256, GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD);
	cmd->seq_num = 0;
	cmd->result = 0;

	done = 0;
	while (length) {
		int block_size = length > 256 ? 256 : length;

		memcpy(cmd->payload, data + done, block_size);
		cmd->length = cpu_to_le16(block_size);

		rc = mwl8k_send_fw_load_cmd(priv, cmd,
						sizeof(*cmd) + block_size);
		if (rc)
			break;

		done += block_size;
		length -= block_size;
	}

	if (!rc) {
		cmd->length = 0;
		rc = mwl8k_send_fw_load_cmd(priv, cmd, sizeof(*cmd));
	}

	kfree(cmd);

	return rc;
}
static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
				const u8 *data, size_t length)
{
	unsigned char *buffer;
	int may_continue, rc = 0;
	u32 done, prev_block_size;

	buffer = kmalloc(1024, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	done = 0;
	prev_block_size = 0;
	may_continue = 1000;
	while (may_continue > 0) {
		u32 block_size;

		block_size = ioread32(priv->regs + MWL8K_HIU_SCRATCH);
		if (block_size & 1) {
			block_size &= ~1;
			may_continue--;
		} else {
			done += prev_block_size;
			length -= prev_block_size;
		}

		if (block_size > 1024 || block_size > length) {
			rc = -EOVERFLOW;
			break;
		}

		if (length == 0) {
			rc = 0;
			break;
		}

		if (block_size == 0) {
			rc = -EPROTO;
			may_continue--;
			udelay(1);
			continue;
		}

		prev_block_size = block_size;
		memcpy(buffer, data + done, block_size);

		rc = mwl8k_send_fw_load_cmd(priv, buffer, block_size);
		if (rc)
			break;
	}

	if (!rc && length != 0)
		rc = -EREMOTEIO;

	kfree(buffer);

	return rc;
}
static int mwl8k_load_firmware(struct mwl8k_priv *priv)
{
	int loops, rc;

	const u8 *ucode = priv->fw.ucode->data;
	size_t ucode_len = priv->fw.ucode->size;
	const u8 *helper = priv->fw.helper->data;
	size_t helper_len = priv->fw.helper->size;

	if (!memcmp(ucode, "\x01\x00\x00\x00", 4)) {
		rc = mwl8k_load_fw_image(priv, helper, helper_len);
		if (rc) {
			printk(KERN_ERR "%s: unable to load firmware "
				"helper image\n", pci_name(priv->pdev));
			return rc;
		}
		msleep(1);

		rc = mwl8k_feed_fw_image(priv, ucode, ucode_len);
	} else {
		rc = mwl8k_load_fw_image(priv, ucode, ucode_len);
	}

	if (rc) {
		printk(KERN_ERR "%s: unable to load firmware data\n",
			pci_name(priv->pdev));
		return rc;
	}

	iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
	msleep(1);

	loops = 200000;
	do {
		if (ioread32(priv->regs + MWL8K_HIU_INT_CODE)
						== MWL8K_FWSTA_READY)
			break;
		udelay(1);
	} while (--loops);

	return loops ? 0 : -ETIMEDOUT;
}
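
/*
 * Summary (editorial, based on the routines above): images whose first
 * word is 0x00000001 take the two-stage path -- the boot helper is
 * downloaded with CODE_DNLD commands, then the main image is streamed
 * in chunks whose sizes the firmware advertises through the
 * MWL8K_HIU_SCRATCH register.  Other images are downloaded directly
 * with CODE_DNLD.  Afterwards the driver writes MWL8K_MODE_STA to the
 * general-purpose pointer register and polls MWL8K_HIU_INT_CODE for
 * MWL8K_FWSTA_READY.
 */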
/*
 * Defines shared between transmission and reception.
 */

/* HT control fields for firmware */
struct ewc_ht_info {
	__le16 control1;
	__le16 control2;
	__le16 control3;
} __attribute__((packed));

/* Firmware Station database operations */
#define MWL8K_STA_DB_ADD_ENTRY 0
#define MWL8K_STA_DB_MODIFY_ENTRY 1
#define MWL8K_STA_DB_DEL_ENTRY 2
#define MWL8K_STA_DB_FLUSH 3

/* Peer Entry flags - used to define the type of the peer node */
#define MWL8K_PEER_TYPE_ACCESSPOINT 2

#define MWL8K_IEEE_LEGACY_DATA_RATES 12
#define MWL8K_MCS_BITMAP_SIZE 16

struct peer_capability_info {
	/* Peer type - AP vs. STA. */
	__u8 peer_type;

	/* Basic 802.11 capabilities from assoc resp. */
	__le16 basic_caps;

	/* Set if peer supports 802.11n high throughput (HT). */
	__u8 ht_support;

	/* Valid if HT is supported. */
	__le16 ht_caps;
	__u8 extended_ht_caps;
	struct ewc_ht_info ewc_info;

	/* Legacy rate table. Intersection of our rates and peer rates. */
	__u8 legacy_rates[MWL8K_IEEE_LEGACY_DATA_RATES];

	/* HT rate table. Intersection of our rates and peer rates. */
	__u8 ht_rates[MWL8K_MCS_BITMAP_SIZE];
	__u8 pad[16];

	/* If set, interoperability mode, no proprietary extensions. */
	__u8 interop;
	__u8 pad2;
	__u8 station_id;
	__le16 amsdu_enabled;
} __attribute__((packed));
/* Inline functions to manipulate QoS field in data descriptor. */
static inline u16 mwl8k_qos_setbit_eosp(u16 qos)
{
	u16 val_mask = 1 << 4;

	/* End of Service Period Bit 4 */
	return qos | val_mask;
}

static inline u16 mwl8k_qos_setbit_ack(u16 qos, u8 ack_policy)
{
	u16 val_mask = 0x3;
	u8 shift = 5;
	u16 qos_mask = ~(val_mask << shift);

	/* Ack Policy Bit 5-6 */
	return (qos & qos_mask) | ((ack_policy & val_mask) << shift);
}

static inline u16 mwl8k_qos_setbit_amsdu(u16 qos)
{
	u16 val_mask = 1 << 7;

	/* AMSDU present Bit 7 */
	return qos | val_mask;
}

static inline u16 mwl8k_qos_setbit_qlen(u16 qos, u8 len)
{
	u16 val_mask = 0xff;
	u8 shift = 8;
	u16 qos_mask = ~(val_mask << shift);

	/* Queue Length Bits 8-15 */
	return (qos & qos_mask) | ((len & val_mask) << shift);
}
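
/*
 * Illustrative example (editorial, not called anywhere in this
 * driver): composing the QoS word for a block-ack A-MSDU data frame
 * with an unspecified queue length would look like
 *
 *	u16 qos = 0;
 *	qos = mwl8k_qos_setbit_ack(qos, MWL8K_TXD_ACK_POLICY_BLOCKACK);
 *	qos = mwl8k_qos_setbit_amsdu(qos);
 *	qos = mwl8k_qos_setbit_qlen(qos, 0xff);
 *
 * Field layout used by these helpers: bit 4 = EOSP, bits 5-6 = ack
 * policy, bit 7 = A-MSDU present, bits 8-15 = queue length.
 */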
/* DMA header used by firmware and hardware. */
struct mwl8k_dma_data {
	__le16 fwlen;
	struct ieee80211_hdr wh;
} __attribute__((packed));

/* Routines to add/remove DMA header from skb. */
static inline int mwl8k_remove_dma_header(struct sk_buff *skb)
{
	struct mwl8k_dma_data *tr = (struct mwl8k_dma_data *)(skb->data);
	void *dst, *src = &tr->wh;
	__le16 fc = tr->wh.frame_control;
	int hdrlen = ieee80211_hdrlen(fc);
	u16 space = sizeof(struct mwl8k_dma_data) - hdrlen;

	dst = (void *)tr + space;
	if (dst != src) {
		memmove(dst, src, hdrlen);
		skb_pull(skb, space);
	}

	return 0;
}

static inline struct sk_buff *mwl8k_add_dma_header(struct sk_buff *skb)
{
	struct ieee80211_hdr *wh;
	u32 hdrlen, pktlen;
	struct mwl8k_dma_data *tr;

	wh = (struct ieee80211_hdr *)skb->data;
	hdrlen = ieee80211_hdrlen(wh->frame_control);
	pktlen = skb->len;

	/*
	 * Copy up/down the 802.11 header; the firmware requires
	 * we present a 2-byte payload length followed by a
	 * 4-address header (w/o QoS), followed (optionally) by
	 * any WEP/ExtIV header (but only filled in for CCMP).
	 */
	if (hdrlen != sizeof(struct mwl8k_dma_data))
		skb_push(skb, sizeof(struct mwl8k_dma_data) - hdrlen);

	tr = (struct mwl8k_dma_data *)skb->data;
	if (wh != &tr->wh)
		memmove(&tr->wh, wh, hdrlen);

	/* Clear addr4 */
	memset(tr->wh.addr4, 0, ETH_ALEN);

	/*
	 * Firmware length is the length of the fully formed "802.11
	 * payload". That is, everything except for the 802.11 header.
	 * This includes all crypto material including the MIC.
	 */
	tr->fwlen = cpu_to_le16(pktlen - hdrlen);

	return skb;
}
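
/*
 * Sketch (editorial, following the layout described above): after
 * mwl8k_add_dma_header() the buffer handed to the hardware looks like
 *
 *	[ __le16 fwlen ][ 4-address 802.11 header, no QoS ][ payload... ]
 *
 * where fwlen counts everything after the 802.11 header, including any
 * crypto material and MIC.  mwl8k_remove_dma_header() performs the
 * inverse transformation on received and reclaimed frames.
 */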
/*
 * Packet reception.
 */
#define MWL8K_RX_CTRL_OWNED_BY_HOST 0x02

struct mwl8k_rx_desc {
	__le16 pkt_len;
	__u8 link_quality;
	__u8 noise_level;
	__le32 pkt_phys_addr;
	__le32 next_rx_desc_phys_addr;
	__le16 qos_control;
	__le16 rate_info;
	__le32 pad0[4];
	__u8 rssi;
	__u8 channel;
	__le16 pad1;
	__u8 rx_ctrl;
	__u8 rx_status;
	__u8 pad2[2];
} __attribute__((packed));

#define MWL8K_RX_DESCS 256
#define MWL8K_RX_MAXSZ 3800

static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_rx_queue *rxq = priv->rxq + index;
	int size;
	int i;

	rxq->rx_desc_count = 0;
	rxq->rx_head = 0;
	rxq->rx_tail = 0;

	size = MWL8K_RX_DESCS * sizeof(struct mwl8k_rx_desc);

	rxq->rx_desc_area =
		pci_alloc_consistent(priv->pdev, size, &rxq->rx_desc_dma);
	if (rxq->rx_desc_area == NULL) {
		printk(KERN_ERR "%s: failed to alloc RX descriptors\n",
		       priv->name);
		return -ENOMEM;
	}
	memset(rxq->rx_desc_area, 0, size);

	rxq->rx_skb = kmalloc(MWL8K_RX_DESCS *
				sizeof(*rxq->rx_skb), GFP_KERNEL);
	if (rxq->rx_skb == NULL) {
		printk(KERN_ERR "%s: failed to alloc RX skbuff list\n",
		       priv->name);
		pci_free_consistent(priv->pdev, size,
				    rxq->rx_desc_area, rxq->rx_desc_dma);
		return -ENOMEM;
	}
	memset(rxq->rx_skb, 0, MWL8K_RX_DESCS * sizeof(*rxq->rx_skb));

	for (i = 0; i < MWL8K_RX_DESCS; i++) {
		struct mwl8k_rx_desc *rx_desc;
		int nexti;

		rx_desc = rxq->rx_desc_area + i;
		nexti = (i + 1) % MWL8K_RX_DESCS;

		rx_desc->next_rx_desc_phys_addr =
			cpu_to_le32(rxq->rx_desc_dma
						+ nexti * sizeof(*rx_desc));
		rx_desc->rx_ctrl = MWL8K_RX_CTRL_OWNED_BY_HOST;
	}

	return 0;
}
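
/*
 * Note (editorial): the descriptors above form a circular ring linked
 * through next_rx_desc_phys_addr.  rx_head is where the hardware
 * delivers the next frame and rx_tail is where rxq_refill() below
 * attaches fresh skbuffs; ownership of each slot is handed back to the
 * host via the MWL8K_RX_CTRL_OWNED_BY_HOST bit in rx_ctrl.
 */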
static int rxq_refill(struct ieee80211_hw *hw, int index, int limit)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_rx_queue *rxq = priv->rxq + index;
	int refilled;

	refilled = 0;
	while (rxq->rx_desc_count < MWL8K_RX_DESCS && limit--) {
		struct sk_buff *skb;
		int rx;

		skb = dev_alloc_skb(MWL8K_RX_MAXSZ);
		if (skb == NULL)
			break;

		rxq->rx_desc_count++;

		rx = rxq->rx_tail;
		rxq->rx_tail = (rx + 1) % MWL8K_RX_DESCS;

		rxq->rx_desc_area[rx].pkt_phys_addr =
			cpu_to_le32(pci_map_single(priv->pdev, skb->data,
					MWL8K_RX_MAXSZ, DMA_FROM_DEVICE));

		rxq->rx_desc_area[rx].pkt_len = cpu_to_le16(MWL8K_RX_MAXSZ);
		rxq->rx_skb[rx] = skb;
		wmb();
		rxq->rx_desc_area[rx].rx_ctrl = 0;

		refilled++;
	}

	return refilled;
}

/* Must be called only when the card's reception is completely halted */
static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_rx_queue *rxq = priv->rxq + index;
	int i;

	for (i = 0; i < MWL8K_RX_DESCS; i++) {
		if (rxq->rx_skb[i] != NULL) {
			unsigned long addr;

			addr = le32_to_cpu(rxq->rx_desc_area[i].pkt_phys_addr);
			pci_unmap_single(priv->pdev, addr, MWL8K_RX_MAXSZ,
					 PCI_DMA_FROMDEVICE);
			kfree_skb(rxq->rx_skb[i]);
			rxq->rx_skb[i] = NULL;
		}
	}

	kfree(rxq->rx_skb);
	rxq->rx_skb = NULL;

	pci_free_consistent(priv->pdev,
			    MWL8K_RX_DESCS * sizeof(struct mwl8k_rx_desc),
			    rxq->rx_desc_area, rxq->rx_desc_dma);
	rxq->rx_desc_area = NULL;
}
/*
 * Scan a list of BSSIDs to process for finalize join.
 * Allows for extension to process multiple BSSIDs.
 */
static inline int
mwl8k_capture_bssid(struct mwl8k_priv *priv, struct ieee80211_hdr *wh)
{
	return priv->capture_beacon &&
		ieee80211_is_beacon(wh->frame_control) &&
		!compare_ether_addr(wh->addr3, priv->capture_bssid);
}

static inline void mwl8k_save_beacon(struct mwl8k_priv *priv,
						struct sk_buff *skb)
{
	priv->capture_beacon = false;
	memset(priv->capture_bssid, 0, ETH_ALEN);

	/*
	 * Use GFP_ATOMIC as rxq_process is called from the primary
	 * interrupt handler; the memory allocation must not sleep.
	 */
	priv->beacon_skb = skb_copy(skb, GFP_ATOMIC);
	if (priv->beacon_skb != NULL)
		queue_work(priv->config_wq,
				&priv->finalize_join_worker);
}
static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_rx_queue *rxq = priv->rxq + index;
	int processed;

	processed = 0;
	while (rxq->rx_desc_count && limit--) {
		struct mwl8k_rx_desc *rx_desc;
		struct sk_buff *skb;
		struct ieee80211_rx_status status;
		unsigned long addr;
		struct ieee80211_hdr *wh;

		rx_desc = rxq->rx_desc_area + rxq->rx_head;
		if (!(rx_desc->rx_ctrl & MWL8K_RX_CTRL_OWNED_BY_HOST))
			break;
		rmb();

		skb = rxq->rx_skb[rxq->rx_head];
		if (skb == NULL)
			break;
		rxq->rx_skb[rxq->rx_head] = NULL;

		rxq->rx_head = (rxq->rx_head + 1) % MWL8K_RX_DESCS;
		rxq->rx_desc_count--;

		addr = le32_to_cpu(rx_desc->pkt_phys_addr);
		pci_unmap_single(priv->pdev, addr,
					MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);

		skb_put(skb, le16_to_cpu(rx_desc->pkt_len));
		if (mwl8k_remove_dma_header(skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		wh = (struct ieee80211_hdr *)skb->data;

		/*
		 * Check for a pending join operation: save a copy of
		 * the beacon and schedule the finalize-join work item
		 * that sends the finalize join command to the firmware.
		 */
		if (mwl8k_capture_bssid(priv, wh))
			mwl8k_save_beacon(priv, skb);

		memset(&status, 0, sizeof(status));
		status.mactime = 0;
		status.signal = -rx_desc->rssi;
		status.noise = -rx_desc->noise_level;
		status.qual = rx_desc->link_quality;
		status.antenna = 1;
		status.rate_idx = 1;
		status.flag = 0;
		status.band = IEEE80211_BAND_2GHZ;
		status.freq = ieee80211_channel_to_frequency(rx_desc->channel);
		memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
		ieee80211_rx_irqsafe(hw, skb);

		processed++;
	}

	return processed;
}
/*
 * Packet transmission.
 */

/* Transmit queue assignment. */
enum {
	MWL8K_WME_AC_BK = 0,		/* background access */
	MWL8K_WME_AC_BE = 1,		/* best effort access */
	MWL8K_WME_AC_VI = 2,		/* video access */
	MWL8K_WME_AC_VO = 3,		/* voice access */
};

/* Transmit packet ACK policy */
#define MWL8K_TXD_ACK_POLICY_NORMAL 0
#define MWL8K_TXD_ACK_POLICY_BLOCKACK 3

#define GET_TXQ(_ac) (\
	((_ac) == WME_AC_VO) ? MWL8K_WME_AC_VO : \
	((_ac) == WME_AC_VI) ? MWL8K_WME_AC_VI : \
	((_ac) == WME_AC_BK) ? MWL8K_WME_AC_BK : \
	MWL8K_WME_AC_BE)
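
/*
 * Note (editorial): the WME_AC_* values defined earlier number the
 * classes BE=0, BK=1, VI=2, VO=3, while the hardware queues use BK=0,
 * BE=1, VI=2, VO=3, so GET_TXQ() effectively swaps BE and BK, e.g.
 * GET_TXQ(WME_AC_BE) == MWL8K_WME_AC_BE == 1 and
 * GET_TXQ(WME_AC_BK) == MWL8K_WME_AC_BK == 0.
 */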
#define MWL8K_TXD_STATUS_OK 0x00000001
#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002
#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004
#define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008
#define MWL8K_TXD_STATUS_FW_OWNED 0x80000000

struct mwl8k_tx_desc {
	__le32 status;
	__u8 data_rate;
	__u8 tx_priority;
	__le16 qos_control;
	__le32 pkt_phys_addr;
	__le16 pkt_len;
	__u8 dest_MAC_addr[ETH_ALEN];
	__le32 next_tx_desc_phys_addr;
	__le32 reserved;
	__le16 rate_info;
	__u8 peer_id;
	__u8 tx_frag_cnt;
} __attribute__((packed));

#define MWL8K_TX_DESCS 128
static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_tx_queue *txq = priv->txq + index;
	int size;
	int i;

	memset(&txq->tx_stats, 0, sizeof(struct ieee80211_tx_queue_stats));
	txq->tx_stats.limit = MWL8K_TX_DESCS;
	txq->tx_head = 0;
	txq->tx_tail = 0;

	size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc);

	txq->tx_desc_area =
		pci_alloc_consistent(priv->pdev, size, &txq->tx_desc_dma);
	if (txq->tx_desc_area == NULL) {
		printk(KERN_ERR "%s: failed to alloc TX descriptors\n",
		       priv->name);
		return -ENOMEM;
	}
	memset(txq->tx_desc_area, 0, size);

	txq->tx_skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->tx_skb),
								GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		printk(KERN_ERR "%s: failed to alloc TX skbuff list\n",
		       priv->name);
		pci_free_consistent(priv->pdev, size,
				    txq->tx_desc_area, txq->tx_desc_dma);
		return -ENOMEM;
	}
	memset(txq->tx_skb, 0, MWL8K_TX_DESCS * sizeof(*txq->tx_skb));

	for (i = 0; i < MWL8K_TX_DESCS; i++) {
		struct mwl8k_tx_desc *tx_desc;
		int nexti;

		tx_desc = txq->tx_desc_area + i;
		nexti = (i + 1) % MWL8K_TX_DESCS;

		tx_desc->status = 0;
		tx_desc->next_tx_desc_phys_addr =
			cpu_to_le32(txq->tx_desc_dma +
						nexti * sizeof(*tx_desc));
	}

	return 0;
}
static inline void mwl8k_tx_start(struct mwl8k_priv *priv)
{
	iowrite32(MWL8K_H2A_INT_PPA_READY,
		priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
	iowrite32(MWL8K_H2A_INT_DUMMY,
		priv->regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
	ioread32(priv->regs + MWL8K_HIU_INT_CODE);
}

static inline int mwl8k_txq_busy(struct mwl8k_priv *priv)
{
	return priv->pending_tx_pkts;
}

struct mwl8k_txq_info {
	u32 fw_owned;
	u32 drv_owned;
	u32 unused;
	u32 len;
	u32 head;
	u32 tail;
};
static int mwl8k_scan_tx_ring(struct mwl8k_priv *priv,
				struct mwl8k_txq_info txinfo[],
				u32 num_queues)
{
	int count, desc, status;
	struct mwl8k_tx_queue *txq;
	struct mwl8k_tx_desc *tx_desc;
	int ndescs = 0;

	memset(txinfo, 0, num_queues * sizeof(struct mwl8k_txq_info));
	spin_lock_bh(&priv->tx_lock);
	for (count = 0; count < num_queues; count++) {
		txq = priv->txq + count;
		txinfo[count].len = txq->tx_stats.len;
		txinfo[count].head = txq->tx_head;
		txinfo[count].tail = txq->tx_tail;
		for (desc = 0; desc < MWL8K_TX_DESCS; desc++) {
			tx_desc = txq->tx_desc_area + desc;
			status = le32_to_cpu(tx_desc->status);

			if (status & MWL8K_TXD_STATUS_FW_OWNED)
				txinfo[count].fw_owned++;
			else
				txinfo[count].drv_owned++;

			if (tx_desc->pkt_len == 0)
				txinfo[count].unused++;
		}
	}
	spin_unlock_bh(&priv->tx_lock);

	return ndescs;
}
static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw, u32 delay_ms)
{
	struct mwl8k_priv *priv = hw->priv;
	DECLARE_COMPLETION_ONSTACK(cmd_wait);
	u32 count;
	unsigned long timeout;

	might_sleep();

	if (priv->tx_wait != NULL)
		printk(KERN_ERR "WARNING Previous TXWaitEmpty instance\n");

	spin_lock_bh(&priv->tx_lock);
	count = mwl8k_txq_busy(priv);
	if (count) {
		priv->tx_wait = &cmd_wait;
		if (priv->radio_on)
			mwl8k_tx_start(priv);
	}
	spin_unlock_bh(&priv->tx_lock);

	if (count) {
		struct mwl8k_txq_info txinfo[4];
		int index;
		int newcount;

		timeout = wait_for_completion_timeout(&cmd_wait,
					msecs_to_jiffies(delay_ms));
		if (timeout)
			return 0;

		spin_lock_bh(&priv->tx_lock);
		priv->tx_wait = NULL;
		newcount = mwl8k_txq_busy(priv);
		spin_unlock_bh(&priv->tx_lock);

		printk(KERN_ERR "%s(%u) TIMEDOUT:%ums Pend:%u-->%u\n",
		       __func__, __LINE__, delay_ms, count, newcount);

		mwl8k_scan_tx_ring(priv, txinfo, 4);
		for (index = 0; index < 4; index++)
			printk(KERN_ERR
				"TXQ:%u L:%u H:%u T:%u FW:%u DRV:%u U:%u\n",
				index,
				txinfo[index].len,
				txinfo[index].head,
				txinfo[index].tail,
				txinfo[index].fw_owned,
				txinfo[index].drv_owned,
				txinfo[index].unused);

		return -ETIMEDOUT;
	}

	return 0;
}
#define MWL8K_TXD_SUCCESS(status) \
	((status) & (MWL8K_TXD_STATUS_OK | \
		     MWL8K_TXD_STATUS_OK_RETRY | \
		     MWL8K_TXD_STATUS_OK_MORE_RETRY))

static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_tx_queue *txq = priv->txq + index;
	int wake = 0;

	while (txq->tx_stats.len > 0) {
		int tx;
		int rc;
		struct mwl8k_tx_desc *tx_desc;
		unsigned long addr;
		int size;
		struct sk_buff *skb;
		struct ieee80211_tx_info *info;
		u32 status;

		rc = 0;
		tx = txq->tx_head;
		tx_desc = txq->tx_desc_area + tx;

		status = le32_to_cpu(tx_desc->status);
		if (status & MWL8K_TXD_STATUS_FW_OWNED) {
			if (!force)
				break;
			tx_desc->status &=
				~cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED);
		}

		txq->tx_head = (tx + 1) % MWL8K_TX_DESCS;
		BUG_ON(txq->tx_stats.len == 0);
		txq->tx_stats.len--;
		priv->pending_tx_pkts--;

		addr = le32_to_cpu(tx_desc->pkt_phys_addr);
		size = le16_to_cpu(tx_desc->pkt_len);
		skb = txq->tx_skb[tx].skb;
		txq->tx_skb[tx].skb = NULL;

		BUG_ON(skb == NULL);
		pci_unmap_single(priv->pdev, addr, size, PCI_DMA_TODEVICE);

		rc = mwl8k_remove_dma_header(skb);

		/* Mark descriptor as unused */
		tx_desc->pkt_phys_addr = 0;
		tx_desc->pkt_len = 0;

		if (txq->tx_skb[tx].clone) {
			/*
			 * Replace with original skb
			 * before returning to stack
			 * as buffer has been cloned
			 */
			dev_kfree_skb(skb);
			skb = txq->tx_skb[tx].clone;
			txq->tx_skb[tx].clone = NULL;
		}

		if (rc) {
			/*
			 * Something has gone wrong here.
			 * Failed to remove DMA header.
			 * Print error message and drop packet.
			 */
			printk(KERN_ERR "%s: Error removing DMA header from "
					"tx skb 0x%p.\n", priv->name, skb);

			dev_kfree_skb(skb);
			continue;
		}

		info = IEEE80211_SKB_CB(skb);
		ieee80211_tx_info_clear_status(info);
		if (MWL8K_TXD_SUCCESS(status))
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_status_irqsafe(hw, skb);

		wake = !priv->inconfig && priv->radio_on;
	}

	if (wake)
		ieee80211_wake_queue(hw, index);
}
/* Must be called only when the card's transmit is completely halted */
static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
{
	struct mwl8k_priv *priv = hw->priv;
	struct mwl8k_tx_queue *txq = priv->txq + index;

	mwl8k_txq_reclaim(hw, index, 1);

	kfree(txq->tx_skb);
	txq->tx_skb = NULL;

	pci_free_consistent(priv->pdev,
			    MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc),
			    txq->tx_desc_area, txq->tx_desc_dma);
	txq->tx_desc_area = NULL;
}
static int
mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
{
	struct mwl8k_priv *priv = hw->priv;
	struct ieee80211_tx_info *tx_info;
	struct ieee80211_hdr *wh;
	struct mwl8k_tx_queue *txq;
	struct mwl8k_tx_desc *tx;
	struct mwl8k_dma_data *tr;
	struct mwl8k_vif *mwl8k_vif;
	struct sk_buff *org_skb = skb;
	dma_addr_t dma;
	u16 qos = 0;
	bool qosframe = false, ampduframe = false;
	bool mcframe = false, eapolframe = false;
	bool amsduframe = false;
	__le16 fc;

	txq = priv->txq + index;
	tx = txq->tx_desc_area + txq->tx_tail;

	BUG_ON(txq->tx_skb[txq->tx_tail].skb != NULL);

	/*
	 * Append HW DMA header to start of packet. Drop packet if
	 * there is not enough space or a failure to unshare/unclone
	 * the skb.
	 */
	skb = mwl8k_add_dma_header(skb);
	if (skb == NULL) {
		printk(KERN_DEBUG "%s: failed to prepend HW DMA "
			"header, dropping TX frame.\n", priv->name);
		dev_kfree_skb(org_skb);
		return NETDEV_TX_OK;
	}

	tx_info = IEEE80211_SKB_CB(skb);
	mwl8k_vif = MWL8K_VIF(tx_info->control.vif);

	tr = (struct mwl8k_dma_data *)skb->data;
	wh = &tr->wh;
	fc = wh->frame_control;
	qosframe = ieee80211_is_data_qos(fc);
	mcframe = is_multicast_ether_addr(wh->addr1);
	ampduframe = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
	if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		u16 seqno = mwl8k_vif->seqno;

		wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		wh->seq_ctrl |= cpu_to_le16(seqno << 4);
		mwl8k_vif->seqno = (seqno + 1) % 4096;
	}
	if (qosframe)
		qos = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(wh)));

	dma = pci_map_single(priv->pdev, skb->data,
				skb->len, PCI_DMA_TODEVICE);

	if (pci_dma_mapping_error(priv->pdev, dma)) {
		printk(KERN_DEBUG "%s: failed to dma map skb, "
			"dropping TX frame.\n", priv->name);

		if (org_skb != NULL)
			dev_kfree_skb(org_skb);
		if (skb != NULL)
			dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Set desc header, cpu bit order. */
	tx->status = 0;
	tx->data_rate = 0;
	tx->tx_priority = index;
	tx->qos_control = 0;
	tx->rate_info = 0;
	tx->peer_id = mwl8k_vif->peer_id;

	amsduframe = !!(qos & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT);

	/* Setup firmware control bit fields for each frame type. */
	if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)) {
		tx->data_rate = 0;
		qos = mwl8k_qos_setbit_eosp(qos);
		/* Set Queue size to unspecified */
		qos = mwl8k_qos_setbit_qlen(qos, 0xff);
	} else if (ieee80211_is_data(fc)) {
		tx->data_rate = 1;
		if (mcframe)
			tx->status |= MWL8K_TXD_STATUS_MULTICAST_TX;

		/*
		 * Tell firmware to not send EAPOL pkts in an
		 * aggregate. Verify against mac80211 tx path. If
		 * stack turns off AMPDU for an EAPOL frame this
		 * check will be removed.
		 */
		if (eapolframe) {
			qos = mwl8k_qos_setbit_ack(qos,
				MWL8K_TXD_ACK_POLICY_NORMAL);
		} else {
			/* Send pkt in an aggregate if AMPDU frame. */
			if (ampduframe)
				qos = mwl8k_qos_setbit_ack(qos,
					MWL8K_TXD_ACK_POLICY_BLOCKACK);
			else
				qos = mwl8k_qos_setbit_ack(qos,
					MWL8K_TXD_ACK_POLICY_NORMAL);

			if (amsduframe)
				qos = mwl8k_qos_setbit_amsdu(qos);
		}
	}

	/* Convert to little endian */
	tx->qos_control = cpu_to_le16(qos);
	tx->status = cpu_to_le32(tx->status);
	tx->pkt_phys_addr = cpu_to_le32(dma);
	tx->pkt_len = cpu_to_le16(skb->len);

	txq->tx_skb[txq->tx_tail].skb = skb;
	txq->tx_skb[txq->tx_tail].clone =
		skb == org_skb ? NULL : org_skb;

	spin_lock_bh(&priv->tx_lock);

	tx->status = cpu_to_le32(MWL8K_TXD_STATUS_OK |
					MWL8K_TXD_STATUS_FW_OWNED);
	wmb();
	txq->tx_stats.len++;
	priv->pending_tx_pkts++;
	txq->tx_stats.count++;
	txq->tx_tail++;
	if (txq->tx_tail == MWL8K_TX_DESCS)
		txq->tx_tail = 0;
	if (txq->tx_head == txq->tx_tail)
		ieee80211_stop_queue(hw, index);

	if (priv->inconfig) {
		/*
		 * Silently queue packet when we are in the middle of
		 * a config cycle. Notify firmware only if we are
		 * waiting for TXQs to empty. If a packet is sent
		 * before .config() is complete, perhaps it is better
		 * to drop the packet, as the channel is being changed
		 * and the packet will end up on the wrong channel.
		 */
		printk(KERN_ERR "%s(): WARNING TX activity while "
			"in config\n", __func__);

		if (priv->tx_wait != NULL)
			mwl8k_tx_start(priv);
	} else
		mwl8k_tx_start(priv);

	spin_unlock_bh(&priv->tx_lock);

	return NETDEV_TX_OK;
}
/*
 * Command processing.
 */

/* Timeout firmware commands after 2000ms */
#define MWL8K_CMD_TIMEOUT_MS 2000
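
/*
 * Note (editorial): mwl8k_post_cmd() below implements the host-command
 * handshake: the command packet is DMA-mapped bidirectionally, its bus
 * address is written to MWL8K_HIU_GEN_PTR, the doorbell interrupt is
 * rung, and the caller sleeps on priv->hostcmd_wait until the command
 * is completed (presumably from the command-done interrupt path
 * elsewhere in the driver) or the timeout above expires.  The firmware
 * reports its status back through cmd->result.
 */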
static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
{
	DECLARE_COMPLETION_ONSTACK(cmd_wait);
	struct mwl8k_priv *priv = hw->priv;
	void __iomem *regs = priv->regs;
	dma_addr_t dma_addr;
	unsigned int dma_size;
	int rc;
	unsigned long timeout = 0;
	u8 buf[32];

	cmd->result = 0xFFFF;
	dma_size = le16_to_cpu(cmd->length);
	dma_addr = pci_map_single(priv->pdev, cmd, dma_size,
				  PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(priv->pdev, dma_addr))
		return -ENOMEM;

	if (priv->hostcmd_wait != NULL)
		printk(KERN_ERR "WARNING host command in progress\n");

	spin_lock_irq(&priv->fw_lock);
	priv->hostcmd_wait = &cmd_wait;
	iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR);
	iowrite32(MWL8K_H2A_INT_DOORBELL,
		regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
	iowrite32(MWL8K_H2A_INT_DUMMY,
		regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
	spin_unlock_irq(&priv->fw_lock);

	timeout = wait_for_completion_timeout(&cmd_wait,
				msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS));

	pci_unmap_single(priv->pdev, dma_addr, dma_size,
					PCI_DMA_BIDIRECTIONAL);

	if (!timeout) {
		spin_lock_irq(&priv->fw_lock);
		priv->hostcmd_wait = NULL;
		spin_unlock_irq(&priv->fw_lock);
		printk(KERN_ERR "%s: Command %s timeout after %u ms\n",
		       priv->name,
		       mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
		       MWL8K_CMD_TIMEOUT_MS);
		rc = -ETIMEDOUT;
	} else {
		rc = cmd->result ? -EINVAL : 0;
		if (rc)
			printk(KERN_ERR "%s: Command %s error 0x%x\n",
			       priv->name,
			       mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
			       cmd->result);
	}

	return rc;
}
  1272. /*
  1273. * GET_HW_SPEC.
  1274. */
  1275. struct mwl8k_cmd_get_hw_spec {
  1276. struct mwl8k_cmd_pkt header;
  1277. __u8 hw_rev;
  1278. __u8 host_interface;
  1279. __le16 num_mcaddrs;
  1280. __u8 perm_addr[ETH_ALEN];
  1281. __le16 region_code;
  1282. __le32 fw_rev;
  1283. __le32 ps_cookie;
  1284. __le32 caps;
  1285. __u8 mcs_bitmap[16];
  1286. __le32 rx_queue_ptr;
  1287. __le32 num_tx_queues;
  1288. __le32 tx_queue_ptrs[MWL8K_TX_QUEUES];
  1289. __le32 caps2;
  1290. __le32 num_tx_desc_per_queue;
  1291. __le32 total_rx_desc;
  1292. } __attribute__((packed));
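/*
 * GET_HW_SPEC is a two-way exchange: the host passes its RX/TX ring
 * base addresses and descriptor counts down to the firmware, and the
 * firmware fills in the permanent MAC address, firmware revision and
 * related capabilities on completion.
 */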
  1293. static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
  1294. {
  1295. struct mwl8k_priv *priv = hw->priv;
  1296. struct mwl8k_cmd_get_hw_spec *cmd;
  1297. int rc;
  1298. int i;
  1299. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1300. if (cmd == NULL)
  1301. return -ENOMEM;
  1302. cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_HW_SPEC);
  1303. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1304. memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
  1305. cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
  1306. cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rx_desc_dma);
  1307. cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
  1308. for (i = 0; i < MWL8K_TX_QUEUES; i++)
  1309. cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].tx_desc_dma);
  1310. cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
  1311. cmd->total_rx_desc = cpu_to_le32(MWL8K_RX_DESCS);
  1312. rc = mwl8k_post_cmd(hw, &cmd->header);
  1313. if (!rc) {
  1314. SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr);
  1315. priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
  1316. priv->fw_rev = le32_to_cpu(cmd->fw_rev);
  1317. priv->hw_rev = cmd->hw_rev;
  1318. }
  1319. kfree(cmd);
  1320. return rc;
  1321. }
  1322. /*
  1323. * CMD_MAC_MULTICAST_ADR.
  1324. */
  1325. struct mwl8k_cmd_mac_multicast_adr {
  1326. struct mwl8k_cmd_pkt header;
  1327. __le16 action;
  1328. __le16 numaddr;
  1329. __u8 addr[0][ETH_ALEN];
  1330. };
  1331. #define MWL8K_ENABLE_RX_MULTICAST 0x000F
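/*
 * The command is allocated with a variable-length tail: one ETH_ALEN
 * entry per multicast address taken from the device address list.
 */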
  1332. static int mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw,
  1333. int mc_count,
  1334. struct dev_addr_list *mclist)
  1335. {
  1336. struct mwl8k_cmd_mac_multicast_adr *cmd;
  1337. int index = 0;
  1338. int rc;
  1339. int size = sizeof(*cmd) + mc_count * ETH_ALEN;
  1340. cmd = kzalloc(size, GFP_KERNEL);
  1341. if (cmd == NULL)
  1342. return -ENOMEM;
  1343. cmd->header.code = cpu_to_le16(MWL8K_CMD_MAC_MULTICAST_ADR);
  1344. cmd->header.length = cpu_to_le16(size);
  1345. cmd->action = cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
  1346. cmd->numaddr = cpu_to_le16(mc_count);
  1347. while (index < mc_count && mclist) {
  1348. if (mclist->da_addrlen != ETH_ALEN) {
  1349. rc = -EINVAL;
  1350. goto mwl8k_cmd_mac_multicast_adr_exit;
  1351. }
  1352. memcpy(cmd->addr[index++], mclist->da_addr, ETH_ALEN);
  1353. mclist = mclist->next;
  1354. }
  1355. rc = mwl8k_post_cmd(hw, &cmd->header);
  1356. mwl8k_cmd_mac_multicast_adr_exit:
  1357. kfree(cmd);
  1358. return rc;
  1359. }
  1360. /*
  1361. * CMD_802_11_GET_STAT.
  1362. */
  1363. struct mwl8k_cmd_802_11_get_stat {
  1364. struct mwl8k_cmd_pkt header;
  1365. __le16 action;
  1366. __le32 stats[64];
  1367. } __attribute__((packed));
  1368. #define MWL8K_STAT_ACK_FAILURE 9
  1369. #define MWL8K_STAT_RTS_FAILURE 12
  1370. #define MWL8K_STAT_FCS_ERROR 24
  1371. #define MWL8K_STAT_RTS_SUCCESS 11
  1372. static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
  1373. struct ieee80211_low_level_stats *stats)
  1374. {
  1375. struct mwl8k_cmd_802_11_get_stat *cmd;
  1376. int rc;
  1377. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1378. if (cmd == NULL)
  1379. return -ENOMEM;
  1380. cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_STAT);
  1381. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1382. cmd->action = cpu_to_le16(MWL8K_CMD_GET);
  1383. rc = mwl8k_post_cmd(hw, &cmd->header);
  1384. if (!rc) {
  1385. stats->dot11ACKFailureCount =
  1386. le32_to_cpu(cmd->stats[MWL8K_STAT_ACK_FAILURE]);
  1387. stats->dot11RTSFailureCount =
  1388. le32_to_cpu(cmd->stats[MWL8K_STAT_RTS_FAILURE]);
  1389. stats->dot11FCSErrorCount =
  1390. le32_to_cpu(cmd->stats[MWL8K_STAT_FCS_ERROR]);
  1391. stats->dot11RTSSuccessCount =
  1392. le32_to_cpu(cmd->stats[MWL8K_STAT_RTS_SUCCESS]);
  1393. }
  1394. kfree(cmd);
  1395. return rc;
  1396. }
  1397. /*
  1398. * CMD_802_11_RADIO_CONTROL.
  1399. */
  1400. struct mwl8k_cmd_802_11_radio_control {
  1401. struct mwl8k_cmd_pkt header;
  1402. __le16 action;
  1403. __le16 control;
  1404. __le16 radio_on;
  1405. } __attribute__((packed));
  1406. static int
  1407. mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
  1408. {
  1409. struct mwl8k_priv *priv = hw->priv;
  1410. struct mwl8k_cmd_802_11_radio_control *cmd;
  1411. int rc;
  1412. if (enable == priv->radio_on && !force)
  1413. return 0;
  1414. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1415. if (cmd == NULL)
  1416. return -ENOMEM;
  1417. cmd->header.code = cpu_to_le16(MWL8K_CMD_RADIO_CONTROL);
  1418. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1419. cmd->action = cpu_to_le16(MWL8K_CMD_SET);
  1420. cmd->control = cpu_to_le16(priv->radio_short_preamble ? 3 : 1);
  1421. cmd->radio_on = cpu_to_le16(enable ? 0x0001 : 0x0000);
  1422. rc = mwl8k_post_cmd(hw, &cmd->header);
  1423. kfree(cmd);
  1424. if (!rc)
  1425. priv->radio_on = enable;
  1426. return rc;
  1427. }
  1428. static int mwl8k_cmd_802_11_radio_disable(struct ieee80211_hw *hw)
  1429. {
  1430. return mwl8k_cmd_802_11_radio_control(hw, 0, 0);
  1431. }
  1432. static int mwl8k_cmd_802_11_radio_enable(struct ieee80211_hw *hw)
  1433. {
  1434. return mwl8k_cmd_802_11_radio_control(hw, 1, 0);
  1435. }
  1436. static int
  1437. mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
  1438. {
  1439. struct mwl8k_priv *priv;
  1440. if (hw == NULL || hw->priv == NULL)
  1441. return -EINVAL;
  1442. priv = hw->priv;
  1443. priv->radio_short_preamble = short_preamble;
  1444. return mwl8k_cmd_802_11_radio_control(hw, 1, 1);
  1445. }
  1446. /*
  1447. * CMD_802_11_RF_TX_POWER.
  1448. */
  1449. #define MWL8K_TX_POWER_LEVEL_TOTAL 8
  1450. struct mwl8k_cmd_802_11_rf_tx_power {
  1451. struct mwl8k_cmd_pkt header;
  1452. __le16 action;
  1453. __le16 support_level;
  1454. __le16 current_level;
  1455. __le16 reserved;
  1456. __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
  1457. } __attribute__((packed));
  1458. static int mwl8k_cmd_802_11_rf_tx_power(struct ieee80211_hw *hw, int dBm)
  1459. {
  1460. struct mwl8k_cmd_802_11_rf_tx_power *cmd;
  1461. int rc;
  1462. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1463. if (cmd == NULL)
  1464. return -ENOMEM;
  1465. cmd->header.code = cpu_to_le16(MWL8K_CMD_RF_TX_POWER);
  1466. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1467. cmd->action = cpu_to_le16(MWL8K_CMD_SET);
  1468. cmd->support_level = cpu_to_le16(dBm);
  1469. rc = mwl8k_post_cmd(hw, &cmd->header);
  1470. kfree(cmd);
  1471. return rc;
  1472. }
  1473. /*
  1474. * CMD_SET_PRE_SCAN.
  1475. */
  1476. struct mwl8k_cmd_set_pre_scan {
  1477. struct mwl8k_cmd_pkt header;
  1478. } __attribute__((packed));
  1479. static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
  1480. {
  1481. struct mwl8k_cmd_set_pre_scan *cmd;
  1482. int rc;
  1483. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1484. if (cmd == NULL)
  1485. return -ENOMEM;
  1486. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_PRE_SCAN);
  1487. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1488. rc = mwl8k_post_cmd(hw, &cmd->header);
  1489. kfree(cmd);
  1490. return rc;
  1491. }
  1492. /*
  1493. * CMD_SET_POST_SCAN.
  1494. */
  1495. struct mwl8k_cmd_set_post_scan {
  1496. struct mwl8k_cmd_pkt header;
  1497. __le32 isibss;
  1498. __u8 bssid[ETH_ALEN];
  1499. } __attribute__((packed));
  1500. static int
  1501. mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, __u8 *mac)
  1502. {
  1503. struct mwl8k_cmd_set_post_scan *cmd;
  1504. int rc;
  1505. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1506. if (cmd == NULL)
  1507. return -ENOMEM;
  1508. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_POST_SCAN);
  1509. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1510. cmd->isibss = 0;
  1511. memcpy(cmd->bssid, mac, ETH_ALEN);
  1512. rc = mwl8k_post_cmd(hw, &cmd->header);
  1513. kfree(cmd);
  1514. return rc;
  1515. }
  1516. /*
  1517. * CMD_SET_RF_CHANNEL.
  1518. */
  1519. struct mwl8k_cmd_set_rf_channel {
  1520. struct mwl8k_cmd_pkt header;
  1521. __le16 action;
  1522. __u8 current_channel;
  1523. __le32 channel_flags;
  1524. } __attribute__((packed));
  1525. static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
  1526. struct ieee80211_channel *channel)
  1527. {
  1528. struct mwl8k_cmd_set_rf_channel *cmd;
  1529. int rc;
  1530. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1531. if (cmd == NULL)
  1532. return -ENOMEM;
  1533. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RF_CHANNEL);
  1534. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1535. cmd->action = cpu_to_le16(MWL8K_CMD_SET);
  1536. cmd->current_channel = channel->hw_value;
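/*
 * The channel_flags encoding is firmware-defined and not documented
 * here; 0x00000081 is simply the value known to work for 2.4 GHz
 * operation.
 */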
  1537. if (channel->band == IEEE80211_BAND_2GHZ)
  1538. cmd->channel_flags = cpu_to_le32(0x00000081);
  1539. else
  1540. cmd->channel_flags = cpu_to_le32(0x00000000);
  1541. rc = mwl8k_post_cmd(hw, &cmd->header);
  1542. kfree(cmd);
  1543. return rc;
  1544. }
  1545. /*
  1546. * CMD_SET_SLOT.
  1547. */
  1548. struct mwl8k_cmd_set_slot {
  1549. struct mwl8k_cmd_pkt header;
  1550. __le16 action;
  1551. __u8 short_slot;
  1552. } __attribute__((packed));
  1553. static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
  1554. {
  1555. struct mwl8k_cmd_set_slot *cmd;
  1556. int rc;
  1557. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1558. if (cmd == NULL)
  1559. return -ENOMEM;
  1560. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
  1561. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1562. cmd->action = cpu_to_le16(MWL8K_CMD_SET);
  1563. cmd->short_slot = short_slot_time;
  1564. rc = mwl8k_post_cmd(hw, &cmd->header);
  1565. kfree(cmd);
  1566. return rc;
  1567. }
  1568. /*
  1569. * CMD_MIMO_CONFIG.
  1570. */
  1571. struct mwl8k_cmd_mimo_config {
  1572. struct mwl8k_cmd_pkt header;
  1573. __le32 action;
  1574. __u8 rx_antenna_map;
  1575. __u8 tx_antenna_map;
  1576. } __attribute__((packed));
  1577. static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
  1578. {
  1579. struct mwl8k_cmd_mimo_config *cmd;
  1580. int rc;
  1581. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1582. if (cmd == NULL)
  1583. return -ENOMEM;
  1584. cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
  1585. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1586. cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
  1587. cmd->rx_antenna_map = rx;
  1588. cmd->tx_antenna_map = tx;
  1589. rc = mwl8k_post_cmd(hw, &cmd->header);
  1590. kfree(cmd);
  1591. return rc;
  1592. }
  1593. /*
  1594. * CMD_ENABLE_SNIFFER.
  1595. */
  1596. struct mwl8k_cmd_enable_sniffer {
  1597. struct mwl8k_cmd_pkt header;
  1598. __le32 action;
  1599. } __attribute__((packed));
  1600. static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
  1601. {
  1602. struct mwl8k_cmd_enable_sniffer *cmd;
  1603. int rc;
  1604. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1605. if (cmd == NULL)
  1606. return -ENOMEM;
  1607. cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
  1608. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1609. cmd->action = cpu_to_le32(!!enable);
  1610. rc = mwl8k_post_cmd(hw, &cmd->header);
  1611. kfree(cmd);
  1612. return rc;
  1613. }
  1614. /*
  1615. * CMD_SET_RATEADAPT_MODE.
  1616. */
  1617. struct mwl8k_cmd_set_rate_adapt_mode {
  1618. struct mwl8k_cmd_pkt header;
  1619. __le16 action;
  1620. __le16 mode;
  1621. } __attribute__((packed));
  1622. static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode)
  1623. {
  1624. struct mwl8k_cmd_set_rate_adapt_mode *cmd;
  1625. int rc;
  1626. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1627. if (cmd == NULL)
  1628. return -ENOMEM;
  1629. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
  1630. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1631. cmd->action = cpu_to_le16(MWL8K_CMD_SET);
  1632. cmd->mode = cpu_to_le16(mode);
  1633. rc = mwl8k_post_cmd(hw, &cmd->header);
  1634. kfree(cmd);
  1635. return rc;
  1636. }
  1637. /*
  1638. * CMD_SET_WMM_MODE.
  1639. */
  1640. struct mwl8k_cmd_set_wmm {
  1641. struct mwl8k_cmd_pkt header;
  1642. __le16 action;
  1643. } __attribute__((packed));
  1644. static int mwl8k_set_wmm(struct ieee80211_hw *hw, bool enable)
  1645. {
  1646. struct mwl8k_priv *priv = hw->priv;
  1647. struct mwl8k_cmd_set_wmm *cmd;
  1648. int rc;
  1649. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1650. if (cmd == NULL)
  1651. return -ENOMEM;
  1652. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
  1653. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1654. cmd->action = cpu_to_le16(!!enable);
  1655. rc = mwl8k_post_cmd(hw, &cmd->header);
  1656. kfree(cmd);
  1657. if (!rc)
  1658. priv->wmm_enabled = enable;
  1659. return rc;
  1660. }
  1661. /*
  1662. * CMD_SET_RTS_THRESHOLD.
  1663. */
  1664. struct mwl8k_cmd_rts_threshold {
  1665. struct mwl8k_cmd_pkt header;
  1666. __le16 action;
  1667. __le16 threshold;
  1668. } __attribute__((packed));
  1669. static int mwl8k_rts_threshold(struct ieee80211_hw *hw,
  1670. u16 action, u16 *threshold)
  1671. {
  1672. struct mwl8k_cmd_rts_threshold *cmd;
  1673. int rc;
  1674. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1675. if (cmd == NULL)
  1676. return -ENOMEM;
  1677. cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
  1678. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1679. cmd->action = cpu_to_le16(action);
  1680. cmd->threshold = cpu_to_le16(*threshold);
  1681. rc = mwl8k_post_cmd(hw, &cmd->header);
  1682. kfree(cmd);
  1683. return rc;
  1684. }
  1685. /*
  1686. * CMD_SET_EDCA_PARAMS.
  1687. */
  1688. struct mwl8k_cmd_set_edca_params {
  1689. struct mwl8k_cmd_pkt header;
  1690. /* See MWL8K_SET_EDCA_XXX below */
  1691. __le16 action;
  1692. /* TX opportunity in units of 32 us */
  1693. __le16 txop;
1694. /* Log exponent of max contention period: 0...15 */
  1695. __u8 log_cw_max;
  1696. /* Log exponent of min contention period: 0...15 */
  1697. __u8 log_cw_min;
  1698. /* Adaptive interframe spacing in units of 32us */
  1699. __u8 aifs;
  1700. /* TX queue to configure */
  1701. __u8 txq;
  1702. } __attribute__((packed));
  1703. #define MWL8K_SET_EDCA_CW 0x01
  1704. #define MWL8K_SET_EDCA_TXOP 0x02
  1705. #define MWL8K_SET_EDCA_AIFS 0x04
  1706. #define MWL8K_SET_EDCA_ALL (MWL8K_SET_EDCA_CW | \
  1707. MWL8K_SET_EDCA_TXOP | \
  1708. MWL8K_SET_EDCA_AIFS)
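/*
 * The firmware takes contention windows as log2 exponents. The cw
 * values handed down by mac80211 are of the form 2^n - 1 (as in the
 * EDCA parameter set), so ilog2(cw + 1) recovers the exponent;
 * e.g. cw_min = 15 yields log_cw_min = 4.
 */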
  1709. static int
  1710. mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
  1711. __u16 cw_min, __u16 cw_max,
  1712. __u8 aifs, __u16 txop)
  1713. {
  1714. struct mwl8k_cmd_set_edca_params *cmd;
  1715. int rc;
  1716. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1717. if (cmd == NULL)
  1718. return -ENOMEM;
  1719. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
  1720. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1721. cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
  1722. cmd->txop = cpu_to_le16(txop);
  1723. cmd->log_cw_max = (u8)ilog2(cw_max + 1);
  1724. cmd->log_cw_min = (u8)ilog2(cw_min + 1);
  1725. cmd->aifs = aifs;
  1726. cmd->txq = qnum;
  1727. rc = mwl8k_post_cmd(hw, &cmd->header);
  1728. kfree(cmd);
  1729. return rc;
  1730. }
  1731. /*
  1732. * CMD_FINALIZE_JOIN.
  1733. */
  1734. /* FJ beacon buffer size is compiled into the firmware. */
  1735. #define MWL8K_FJ_BEACON_MAXLEN 128
  1736. struct mwl8k_cmd_finalize_join {
  1737. struct mwl8k_cmd_pkt header;
  1738. __le32 sleep_interval; /* Number of beacon periods to sleep */
  1739. __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
  1740. } __attribute__((packed));
  1741. static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame,
  1742. __u16 framelen, __u16 dtim)
  1743. {
  1744. struct mwl8k_cmd_finalize_join *cmd;
  1745. struct ieee80211_mgmt *payload = frame;
  1746. u16 hdrlen;
  1747. u32 payload_len;
  1748. int rc;
  1749. if (frame == NULL)
  1750. return -EINVAL;
  1751. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1752. if (cmd == NULL)
  1753. return -ENOMEM;
  1754. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
  1755. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1756. cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
  1757. hdrlen = ieee80211_hdrlen(payload->frame_control);
  1758. payload_len = framelen > hdrlen ? framelen - hdrlen : 0;
  1759. /* XXX TBD Might just have to abort and return an error */
  1760. if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
  1761. printk(KERN_ERR "%s(): WARNING: Incomplete beacon "
  1762. "sent to firmware. Sz=%u MAX=%u\n", __func__,
  1763. payload_len, MWL8K_FJ_BEACON_MAXLEN);
  1764. if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
  1765. payload_len = MWL8K_FJ_BEACON_MAXLEN;
  1766. if (payload && payload_len)
  1767. memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
  1768. rc = mwl8k_post_cmd(hw, &cmd->header);
  1769. kfree(cmd);
  1770. return rc;
  1771. }
  1772. /*
  1773. * CMD_UPDATE_STADB.
  1774. */
  1775. struct mwl8k_cmd_update_sta_db {
  1776. struct mwl8k_cmd_pkt header;
  1777. /* See STADB_ACTION_TYPE */
  1778. __le32 action;
  1779. /* Peer MAC address */
  1780. __u8 peer_addr[ETH_ALEN];
  1781. __le32 reserved;
  1782. /* Peer info - valid during add/update. */
  1783. struct peer_capability_info peer_info;
  1784. } __attribute__((packed));
  1785. static int mwl8k_cmd_update_sta_db(struct ieee80211_hw *hw,
  1786. struct ieee80211_vif *vif, __u32 action)
  1787. {
  1788. struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
  1789. struct ieee80211_bss_conf *info = &mv_vif->bss_info;
  1790. struct mwl8k_cmd_update_sta_db *cmd;
  1791. struct peer_capability_info *peer_info;
  1792. struct ieee80211_rate *bitrates = mv_vif->legacy_rates;
  1793. int rc;
  1794. __u8 count, *rates;
  1795. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1796. if (cmd == NULL)
  1797. return -ENOMEM;
  1798. cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
  1799. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1800. cmd->action = cpu_to_le32(action);
  1801. peer_info = &cmd->peer_info;
  1802. memcpy(cmd->peer_addr, mv_vif->bssid, ETH_ALEN);
  1803. switch (action) {
  1804. case MWL8K_STA_DB_ADD_ENTRY:
  1805. case MWL8K_STA_DB_MODIFY_ENTRY:
  1806. /* Build peer_info block */
  1807. peer_info->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
  1808. peer_info->basic_caps = cpu_to_le16(info->assoc_capability);
  1809. peer_info->interop = 1;
  1810. peer_info->amsdu_enabled = 0;
  1811. rates = peer_info->legacy_rates;
  1812. for (count = 0; count < mv_vif->legacy_nrates; count++)
  1813. rates[count] = bitrates[count].hw_value;
  1814. rc = mwl8k_post_cmd(hw, &cmd->header);
  1815. if (rc == 0)
  1816. mv_vif->peer_id = peer_info->station_id;
  1817. break;
  1818. case MWL8K_STA_DB_DEL_ENTRY:
  1819. case MWL8K_STA_DB_FLUSH:
  1820. default:
  1821. rc = mwl8k_post_cmd(hw, &cmd->header);
  1822. if (rc == 0)
  1823. mv_vif->peer_id = 0;
  1824. break;
  1825. }
  1826. kfree(cmd);
  1827. return rc;
  1828. }
  1829. /*
  1830. * CMD_SET_AID.
  1831. */
  1832. #define MWL8K_RATE_INDEX_MAX_ARRAY 14
  1833. #define MWL8K_FRAME_PROT_DISABLED 0x00
  1834. #define MWL8K_FRAME_PROT_11G 0x07
  1835. #define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
  1836. #define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
  1837. struct mwl8k_cmd_update_set_aid {
  1838. struct mwl8k_cmd_pkt header;
  1839. __le16 aid;
  1840. /* AP's MAC address (BSSID) */
  1841. __u8 bssid[ETH_ALEN];
  1842. __le16 protection_mode;
  1843. __u8 supp_rates[MWL8K_RATE_INDEX_MAX_ARRAY];
  1844. } __attribute__((packed));
  1845. static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
  1846. struct ieee80211_vif *vif)
  1847. {
  1848. struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
  1849. struct ieee80211_bss_conf *info = &mv_vif->bss_info;
  1850. struct mwl8k_cmd_update_set_aid *cmd;
  1851. struct ieee80211_rate *bitrates = mv_vif->legacy_rates;
  1852. int count;
  1853. u16 prot_mode;
  1854. int rc;
  1855. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1856. if (cmd == NULL)
  1857. return -ENOMEM;
  1858. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
  1859. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1860. cmd->aid = cpu_to_le16(info->aid);
  1861. memcpy(cmd->bssid, mv_vif->bssid, ETH_ALEN);
  1862. if (info->use_cts_prot) {
  1863. prot_mode = MWL8K_FRAME_PROT_11G;
  1864. } else {
  1865. switch (info->ht_operation_mode &
  1866. IEEE80211_HT_OP_MODE_PROTECTION) {
  1867. case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
  1868. prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
  1869. break;
  1870. case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
  1871. prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
  1872. break;
  1873. default:
  1874. prot_mode = MWL8K_FRAME_PROT_DISABLED;
  1875. break;
  1876. }
  1877. }
  1878. cmd->protection_mode = cpu_to_le16(prot_mode);
  1879. for (count = 0; count < mv_vif->legacy_nrates; count++)
  1880. cmd->supp_rates[count] = bitrates[count].hw_value;
  1881. rc = mwl8k_post_cmd(hw, &cmd->header);
  1882. kfree(cmd);
  1883. return rc;
  1884. }
  1885. /*
  1886. * CMD_SET_RATE.
  1887. */
  1888. struct mwl8k_cmd_update_rateset {
  1889. struct mwl8k_cmd_pkt header;
  1890. __u8 legacy_rates[MWL8K_RATE_INDEX_MAX_ARRAY];
  1891. /* Bitmap for supported MCS codes. */
  1892. __u8 mcs_set[MWL8K_IEEE_LEGACY_DATA_RATES];
  1893. __u8 reserved[MWL8K_IEEE_LEGACY_DATA_RATES];
  1894. } __attribute__((packed));
  1895. static int mwl8k_update_rateset(struct ieee80211_hw *hw,
  1896. struct ieee80211_vif *vif)
  1897. {
  1898. struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
  1899. struct mwl8k_cmd_update_rateset *cmd;
  1900. struct ieee80211_rate *bitrates = mv_vif->legacy_rates;
  1901. int count;
  1902. int rc;
  1903. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1904. if (cmd == NULL)
  1905. return -ENOMEM;
  1906. cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
  1907. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1908. for (count = 0; count < mv_vif->legacy_nrates; count++)
  1909. cmd->legacy_rates[count] = bitrates[count].hw_value;
  1910. rc = mwl8k_post_cmd(hw, &cmd->header);
  1911. kfree(cmd);
  1912. return rc;
  1913. }
  1914. /*
  1915. * CMD_USE_FIXED_RATE.
  1916. */
  1917. #define MWL8K_RATE_TABLE_SIZE 8
  1918. #define MWL8K_UCAST_RATE 0
  1919. #define MWL8K_USE_AUTO_RATE 0x0002
  1920. struct mwl8k_rate_entry {
  1921. /* Set to 1 if HT rate, 0 if legacy. */
  1922. __le32 is_ht_rate;
  1923. /* Set to 1 to use retry_count field. */
  1924. __le32 enable_retry;
  1925. /* Specified legacy rate or MCS. */
  1926. __le32 rate;
  1927. /* Number of allowed retries. */
  1928. __le32 retry_count;
  1929. } __attribute__((packed));
  1930. struct mwl8k_rate_table {
  1931. /* 1 to allow specified rate and below */
  1932. __le32 allow_rate_drop;
  1933. __le32 num_rates;
  1934. struct mwl8k_rate_entry rate_entry[MWL8K_RATE_TABLE_SIZE];
  1935. } __attribute__((packed));
  1936. struct mwl8k_cmd_use_fixed_rate {
  1937. struct mwl8k_cmd_pkt header;
  1938. __le32 action;
  1939. struct mwl8k_rate_table rate_table;
  1940. /* Unicast, Broadcast or Multicast */
  1941. __le32 rate_type;
  1942. __le32 reserved1;
  1943. __le32 reserved2;
  1944. } __attribute__((packed));
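/*
 * Passing MWL8K_USE_AUTO_RATE with a NULL rate table (as done from
 * mwl8k_bss_info_changed_wt()) turns firmware rate adaptation back on;
 * a populated table presumably fixes transmissions to the listed
 * rates, with allow_rate_drop permitting fallback to lower entries.
 */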
  1945. static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw,
  1946. u32 action, u32 rate_type, struct mwl8k_rate_table *rate_table)
  1947. {
  1948. struct mwl8k_cmd_use_fixed_rate *cmd;
  1949. int count;
  1950. int rc;
  1951. cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
  1952. if (cmd == NULL)
  1953. return -ENOMEM;
  1954. cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
  1955. cmd->header.length = cpu_to_le16(sizeof(*cmd));
  1956. cmd->action = cpu_to_le32(action);
  1957. cmd->rate_type = cpu_to_le32(rate_type);
  1958. if (rate_table != NULL) {
1959. /* Copy each field over manually so that the
1960. * byte-order conversion can be applied.
1961. */
  1962. cmd->rate_table.allow_rate_drop =
  1963. cpu_to_le32(rate_table->allow_rate_drop);
  1964. cmd->rate_table.num_rates =
  1965. cpu_to_le32(rate_table->num_rates);
  1966. for (count = 0; count < rate_table->num_rates; count++) {
  1967. struct mwl8k_rate_entry *dst =
  1968. &cmd->rate_table.rate_entry[count];
  1969. struct mwl8k_rate_entry *src =
  1970. &rate_table->rate_entry[count];
  1971. dst->is_ht_rate = cpu_to_le32(src->is_ht_rate);
  1972. dst->enable_retry = cpu_to_le32(src->enable_retry);
  1973. dst->rate = cpu_to_le32(src->rate);
  1974. dst->retry_count = cpu_to_le32(src->retry_count);
  1975. }
  1976. }
  1977. rc = mwl8k_post_cmd(hw, &cmd->header);
  1978. kfree(cmd);
  1979. return rc;
  1980. }
  1981. /*
  1982. * Interrupt handling.
  1983. */
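/*
 * The A2H status register is read once and acknowledged by writing
 * back its complement; TX completions are deferred to the reclaim
 * tasklet while RX and command completions are handled inline.
 */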
  1984. static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
  1985. {
  1986. struct ieee80211_hw *hw = dev_id;
  1987. struct mwl8k_priv *priv = hw->priv;
  1988. u32 status;
  1989. status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
  1990. iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
  1991. if (!status)
  1992. return IRQ_NONE;
  1993. if (status & MWL8K_A2H_INT_TX_DONE)
  1994. tasklet_schedule(&priv->tx_reclaim_task);
  1995. if (status & MWL8K_A2H_INT_RX_READY) {
  1996. while (rxq_process(hw, 0, 1))
  1997. rxq_refill(hw, 0, 1);
  1998. }
  1999. if (status & MWL8K_A2H_INT_OPC_DONE) {
  2000. if (priv->hostcmd_wait != NULL) {
  2001. complete(priv->hostcmd_wait);
  2002. priv->hostcmd_wait = NULL;
  2003. }
  2004. }
  2005. if (status & MWL8K_A2H_INT_QUEUE_EMPTY) {
  2006. if (!priv->inconfig &&
  2007. priv->radio_on &&
  2008. mwl8k_txq_busy(priv))
  2009. mwl8k_tx_start(priv);
  2010. }
  2011. return IRQ_HANDLED;
  2012. }
  2013. /*
  2014. * Core driver operations.
  2015. */
  2016. static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
  2017. {
  2018. struct mwl8k_priv *priv = hw->priv;
  2019. int index = skb_get_queue_mapping(skb);
  2020. int rc;
  2021. if (priv->current_channel == NULL) {
  2022. printk(KERN_DEBUG "%s: dropped TX frame since radio "
  2023. "disabled\n", priv->name);
  2024. dev_kfree_skb(skb);
  2025. return NETDEV_TX_OK;
  2026. }
  2027. rc = mwl8k_txq_xmit(hw, index, skb);
  2028. return rc;
  2029. }
  2030. struct mwl8k_work_struct {
  2031. /* Initialized by mwl8k_queue_work(). */
  2032. struct work_struct wt;
  2033. /* Required field passed in to mwl8k_queue_work(). */
  2034. struct ieee80211_hw *hw;
  2035. /* Required field passed in to mwl8k_queue_work(). */
  2036. int (*wfunc)(struct work_struct *w);
  2037. /* Initialized by mwl8k_queue_work(). */
  2038. struct completion *cmd_wait;
  2039. /* Result code. */
  2040. int rc;
2041. /*
2042. * Optional field. See the MWL8K_WQ_* flags below.
2043. * Defaults to MWL8K_WQ_DEFAULT_OPTIONS.
2044. */
  2045. u32 options;
  2046. /* Optional field. Defaults to MWL8K_CONFIG_TIMEOUT_MS. */
  2047. unsigned long timeout_ms;
  2048. /* Optional field. Defaults to MWL8K_WQ_TXWAIT_ATTEMPTS. */
  2049. u32 txwait_attempts;
  2050. /* Optional field. Defaults to MWL8K_TXWAIT_MS. */
  2051. u32 tx_timeout_ms;
  2052. u32 step;
  2053. };
  2054. /* Flags controlling behavior of config queue requests */
  2055. /* Caller spins while waiting for completion. */
  2056. #define MWL8K_WQ_SPIN 0x00000001
  2057. /* Wait for TX queues to empty before proceeding with configuration. */
  2058. #define MWL8K_WQ_TX_WAIT_EMPTY 0x00000002
  2059. /* Queue request and return immediately. */
  2060. #define MWL8K_WQ_POST_REQUEST 0x00000004
  2061. /*
  2062. * Caller sleeps and waits for task complete notification.
  2063. * Do not use in atomic context.
  2064. */
  2065. #define MWL8K_WQ_SLEEP 0x00000008
  2066. /* Free work struct when task is done. */
  2067. #define MWL8K_WQ_FREE_WORKSTRUCT 0x00000010
2068. /*
2069. * Config request is queued and control returns to the caller
2070. * immediately. Use this in atomic context. The work struct is
2071. * freed by the config thread when this flag is set.
2072. */
  2073. #define MWL8K_WQ_QUEUE_ONLY (MWL8K_WQ_POST_REQUEST | \
  2074. MWL8K_WQ_FREE_WORKSTRUCT)
  2075. /* Default work queue behavior is to sleep and wait for tx completion. */
  2076. #define MWL8K_WQ_DEFAULT_OPTIONS (MWL8K_WQ_SLEEP | MWL8K_WQ_TX_WAIT_EMPTY)
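/*
 * Typical synchronous callers leave 'options' at zero so that
 * mwl8k_queue_work() applies MWL8K_WQ_DEFAULT_OPTIONS (sleep until the
 * worker completes, after waiting for the TX queues to drain); atomic
 * callers such as mwl8k_configure_filter() use MWL8K_WQ_QUEUE_ONLY
 * instead.
 */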
2077. /*
2078. * Default config request timeout. Includes enough slack for both
2079. * the TX-drain wait and the command wait to complete before
2080. * timing out.
2081. */
2082. /* Time to wait for all TXQs to drain. The TX doorbell is rung on each attempt. */
  2083. #define MWL8K_TXWAIT_TIMEOUT_MS 1000
  2084. /* Default number of TX wait attempts. */
  2085. #define MWL8K_WQ_TXWAIT_ATTEMPTS 4
  2086. /* Total time to wait for TXQ to drain. */
  2087. #define MWL8K_TXWAIT_MS (MWL8K_TXWAIT_TIMEOUT_MS * \
  2088. MWL8K_WQ_TXWAIT_ATTEMPTS)
  2089. /* Scheduling slop. */
  2090. #define MWL8K_OS_SCHEDULE_OVERHEAD_MS 200
  2091. #define MWL8K_CONFIG_TIMEOUT_MS (MWL8K_CMD_TIMEOUT_MS + \
  2092. MWL8K_TXWAIT_MS + \
  2093. MWL8K_OS_SCHEDULE_OVERHEAD_MS)
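/*
 * With the current settings this works out to 2000 + 4 * 1000 + 200 =
 * 6200 ms per config request.
 */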
  2094. static void mwl8k_config_thread(struct work_struct *wt)
  2095. {
  2096. struct mwl8k_work_struct *worker = (struct mwl8k_work_struct *)wt;
  2097. struct ieee80211_hw *hw = worker->hw;
  2098. struct mwl8k_priv *priv = hw->priv;
  2099. int rc = 0;
  2100. spin_lock_irq(&priv->tx_lock);
  2101. priv->inconfig = true;
  2102. spin_unlock_irq(&priv->tx_lock);
  2103. ieee80211_stop_queues(hw);
  2104. /*
  2105. * Wait for host queues to drain before doing PHY
  2106. * reconfiguration. This avoids interrupting any in-flight
  2107. * DMA transfers to the hardware.
  2108. */
  2109. if (worker->options & MWL8K_WQ_TX_WAIT_EMPTY) {
  2110. u32 timeout;
  2111. u32 time_remaining;
  2112. u32 iter;
  2113. u32 tx_wait_attempts = worker->txwait_attempts;
  2114. time_remaining = worker->tx_timeout_ms;
  2115. if (!tx_wait_attempts)
  2116. tx_wait_attempts = 1;
  2117. timeout = worker->tx_timeout_ms/tx_wait_attempts;
  2118. if (!timeout)
  2119. timeout = 1;
  2120. iter = tx_wait_attempts;
  2121. do {
  2122. int wait_time;
  2123. if (time_remaining > timeout) {
  2124. time_remaining -= timeout;
  2125. wait_time = timeout;
  2126. } else
  2127. wait_time = time_remaining;
  2128. if (!wait_time)
  2129. wait_time = 1;
  2130. rc = mwl8k_tx_wait_empty(hw, wait_time);
  2131. if (rc)
  2132. printk(KERN_ERR "%s() txwait timeout=%ums "
  2133. "Retry:%u/%u\n", __func__, timeout,
  2134. tx_wait_attempts - iter + 1,
  2135. tx_wait_attempts);
  2136. } while (rc && --iter);
  2137. rc = iter ? 0 : -ETIMEDOUT;
  2138. }
  2139. if (!rc)
  2140. rc = worker->wfunc(wt);
  2141. spin_lock_irq(&priv->tx_lock);
  2142. priv->inconfig = false;
  2143. if (priv->pending_tx_pkts && priv->radio_on)
  2144. mwl8k_tx_start(priv);
  2145. spin_unlock_irq(&priv->tx_lock);
  2146. ieee80211_wake_queues(hw);
  2147. worker->rc = rc;
  2148. if (worker->options & MWL8K_WQ_SLEEP)
  2149. complete(worker->cmd_wait);
  2150. if (worker->options & MWL8K_WQ_FREE_WORKSTRUCT)
  2151. kfree(wt);
  2152. }
  2153. static int mwl8k_queue_work(struct ieee80211_hw *hw,
  2154. struct mwl8k_work_struct *worker,
  2155. struct workqueue_struct *wqueue,
  2156. int (*wfunc)(struct work_struct *w))
  2157. {
  2158. unsigned long timeout = 0;
  2159. int rc = 0;
  2160. DECLARE_COMPLETION_ONSTACK(cmd_wait);
  2161. if (!worker->timeout_ms)
  2162. worker->timeout_ms = MWL8K_CONFIG_TIMEOUT_MS;
  2163. if (!worker->options)
  2164. worker->options = MWL8K_WQ_DEFAULT_OPTIONS;
  2165. if (!worker->txwait_attempts)
  2166. worker->txwait_attempts = MWL8K_WQ_TXWAIT_ATTEMPTS;
  2167. if (!worker->tx_timeout_ms)
  2168. worker->tx_timeout_ms = MWL8K_TXWAIT_MS;
  2169. worker->hw = hw;
  2170. worker->cmd_wait = &cmd_wait;
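/*
 * rc starts out positive ("still running") so that a spinning caller
 * can distinguish an in-flight request from a completed one, whose
 * result is zero or negative.
 */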
  2171. worker->rc = 1;
  2172. worker->wfunc = wfunc;
  2173. INIT_WORK(&worker->wt, mwl8k_config_thread);
  2174. queue_work(wqueue, &worker->wt);
  2175. if (worker->options & MWL8K_WQ_POST_REQUEST) {
  2176. rc = 0;
  2177. } else {
  2178. if (worker->options & MWL8K_WQ_SPIN) {
  2179. timeout = worker->timeout_ms;
  2180. while (timeout && (worker->rc > 0)) {
  2181. mdelay(1);
  2182. timeout--;
  2183. }
  2184. } else if (worker->options & MWL8K_WQ_SLEEP)
  2185. timeout = wait_for_completion_timeout(&cmd_wait,
  2186. msecs_to_jiffies(worker->timeout_ms));
  2187. if (timeout)
  2188. rc = worker->rc;
  2189. else {
  2190. cancel_work_sync(&worker->wt);
  2191. rc = -ETIMEDOUT;
  2192. }
  2193. }
  2194. return rc;
  2195. }
  2196. struct mwl8k_start_worker {
  2197. struct mwl8k_work_struct header;
  2198. };
  2199. static int mwl8k_start_wt(struct work_struct *wt)
  2200. {
  2201. struct mwl8k_start_worker *worker = (struct mwl8k_start_worker *)wt;
  2202. struct ieee80211_hw *hw = worker->header.hw;
  2203. struct mwl8k_priv *priv = hw->priv;
  2204. int rc = 0;
  2205. if (priv->vif != NULL) {
  2206. rc = -EIO;
  2207. goto mwl8k_start_exit;
  2208. }
  2209. /* Turn on radio */
  2210. if (mwl8k_cmd_802_11_radio_enable(hw)) {
  2211. rc = -EIO;
  2212. goto mwl8k_start_exit;
  2213. }
  2214. /* Purge TX/RX HW queues */
  2215. if (mwl8k_cmd_set_pre_scan(hw)) {
  2216. rc = -EIO;
  2217. goto mwl8k_start_exit;
  2218. }
  2219. if (mwl8k_cmd_set_post_scan(hw, "\x00\x00\x00\x00\x00\x00")) {
  2220. rc = -EIO;
  2221. goto mwl8k_start_exit;
  2222. }
  2223. /* Enable firmware rate adaptation */
  2224. if (mwl8k_cmd_setrateadaptmode(hw, 0)) {
  2225. rc = -EIO;
  2226. goto mwl8k_start_exit;
  2227. }
2228. /* Disable WMM. WMM gets enabled when the stack sends WMM params. */
  2229. if (mwl8k_set_wmm(hw, 0)) {
  2230. rc = -EIO;
  2231. goto mwl8k_start_exit;
  2232. }
  2233. /* Disable sniffer mode */
  2234. if (mwl8k_enable_sniffer(hw, 0))
  2235. rc = -EIO;
  2236. mwl8k_start_exit:
  2237. return rc;
  2238. }
  2239. static int mwl8k_start(struct ieee80211_hw *hw)
  2240. {
  2241. struct mwl8k_start_worker *worker;
  2242. struct mwl8k_priv *priv = hw->priv;
  2243. int rc;
  2244. /* Enable tx reclaim tasklet */
  2245. tasklet_enable(&priv->tx_reclaim_task);
  2246. rc = request_irq(priv->pdev->irq, &mwl8k_interrupt,
  2247. IRQF_SHARED, MWL8K_NAME, hw);
  2248. if (rc) {
  2249. printk(KERN_ERR "%s: failed to register IRQ handler\n",
  2250. priv->name);
  2251. rc = -EIO;
  2252. goto mwl8k_start_disable_tasklet;
  2253. }
  2254. /* Enable interrupts */
  2255. iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
  2256. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  2257. if (worker == NULL) {
  2258. rc = -ENOMEM;
  2259. goto mwl8k_start_disable_irq;
  2260. }
  2261. rc = mwl8k_queue_work(hw, &worker->header,
  2262. priv->config_wq, mwl8k_start_wt);
  2263. kfree(worker);
  2264. if (!rc)
  2265. return rc;
  2266. if (rc == -ETIMEDOUT)
  2267. printk(KERN_ERR "%s() timed out\n", __func__);
  2268. rc = -EIO;
  2269. mwl8k_start_disable_irq:
  2270. spin_lock_irq(&priv->tx_lock);
  2271. iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
  2272. spin_unlock_irq(&priv->tx_lock);
  2273. free_irq(priv->pdev->irq, hw);
  2274. mwl8k_start_disable_tasklet:
  2275. tasklet_disable(&priv->tx_reclaim_task);
  2276. return rc;
  2277. }
  2278. struct mwl8k_stop_worker {
  2279. struct mwl8k_work_struct header;
  2280. };
  2281. static int mwl8k_stop_wt(struct work_struct *wt)
  2282. {
  2283. struct mwl8k_stop_worker *worker = (struct mwl8k_stop_worker *)wt;
  2284. struct ieee80211_hw *hw = worker->header.hw;
  2285. return mwl8k_cmd_802_11_radio_disable(hw);
  2286. }
  2287. static void mwl8k_stop(struct ieee80211_hw *hw)
  2288. {
  2289. int rc;
  2290. struct mwl8k_stop_worker *worker;
  2291. struct mwl8k_priv *priv = hw->priv;
  2292. int i;
  2293. if (priv->vif != NULL)
  2294. return;
  2295. ieee80211_stop_queues(hw);
  2296. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  2297. if (worker == NULL)
  2298. return;
  2299. rc = mwl8k_queue_work(hw, &worker->header,
  2300. priv->config_wq, mwl8k_stop_wt);
  2301. kfree(worker);
  2302. if (rc == -ETIMEDOUT)
  2303. printk(KERN_ERR "%s() timed out\n", __func__);
  2304. /* Disable interrupts */
  2305. spin_lock_irq(&priv->tx_lock);
  2306. iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
  2307. spin_unlock_irq(&priv->tx_lock);
  2308. free_irq(priv->pdev->irq, hw);
  2309. /* Stop finalize join worker */
  2310. cancel_work_sync(&priv->finalize_join_worker);
  2311. if (priv->beacon_skb != NULL)
  2312. dev_kfree_skb(priv->beacon_skb);
  2313. /* Stop tx reclaim tasklet */
  2314. tasklet_disable(&priv->tx_reclaim_task);
  2315. /* Stop config thread */
  2316. flush_workqueue(priv->config_wq);
  2317. /* Return all skbs to mac80211 */
  2318. for (i = 0; i < MWL8K_TX_QUEUES; i++)
  2319. mwl8k_txq_reclaim(hw, i, 1);
  2320. }
  2321. static int mwl8k_add_interface(struct ieee80211_hw *hw,
  2322. struct ieee80211_if_init_conf *conf)
  2323. {
  2324. struct mwl8k_priv *priv = hw->priv;
  2325. struct mwl8k_vif *mwl8k_vif;
  2326. /*
  2327. * We only support one active interface at a time.
  2328. */
  2329. if (priv->vif != NULL)
  2330. return -EBUSY;
2331. /*
2332. * We only support managed (station) and monitor interfaces for now.
2333. */
  2334. if (conf->type != NL80211_IFTYPE_STATION &&
  2335. conf->type != NL80211_IFTYPE_MONITOR)
  2336. return -EINVAL;
  2337. /* Clean out driver private area */
  2338. mwl8k_vif = MWL8K_VIF(conf->vif);
  2339. memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
  2340. /* Save the mac address */
  2341. memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN);
  2342. /* Back pointer to parent config block */
  2343. mwl8k_vif->priv = priv;
  2344. /* Setup initial PHY parameters */
  2345. memcpy(mwl8k_vif->legacy_rates,
  2346. priv->rates, sizeof(mwl8k_vif->legacy_rates));
  2347. mwl8k_vif->legacy_nrates = ARRAY_SIZE(priv->rates);
  2348. /* Set Initial sequence number to zero */
  2349. mwl8k_vif->seqno = 0;
  2350. priv->vif = conf->vif;
  2351. priv->current_channel = NULL;
  2352. return 0;
  2353. }
  2354. static void mwl8k_remove_interface(struct ieee80211_hw *hw,
  2355. struct ieee80211_if_init_conf *conf)
  2356. {
  2357. struct mwl8k_priv *priv = hw->priv;
  2358. if (priv->vif == NULL)
  2359. return;
  2360. priv->vif = NULL;
  2361. }
  2362. struct mwl8k_config_worker {
  2363. struct mwl8k_work_struct header;
  2364. u32 changed;
  2365. };
  2366. static int mwl8k_config_wt(struct work_struct *wt)
  2367. {
  2368. struct mwl8k_config_worker *worker =
  2369. (struct mwl8k_config_worker *)wt;
  2370. struct ieee80211_hw *hw = worker->header.hw;
  2371. struct ieee80211_conf *conf = &hw->conf;
  2372. struct mwl8k_priv *priv = hw->priv;
  2373. int rc = 0;
  2374. if (mwl8k_cmd_802_11_radio_enable(hw)) {
  2375. rc = -EINVAL;
  2376. goto mwl8k_config_exit;
  2377. }
  2378. priv->current_channel = conf->channel;
  2379. if (mwl8k_cmd_set_rf_channel(hw, conf->channel)) {
  2380. rc = -EINVAL;
  2381. goto mwl8k_config_exit;
  2382. }
  2383. if (conf->power_level > 18)
  2384. conf->power_level = 18;
  2385. if (mwl8k_cmd_802_11_rf_tx_power(hw, conf->power_level)) {
  2386. rc = -EINVAL;
  2387. goto mwl8k_config_exit;
  2388. }
  2389. if (mwl8k_cmd_mimo_config(hw, 0x7, 0x7))
  2390. rc = -EINVAL;
  2391. mwl8k_config_exit:
  2392. return rc;
  2393. }
  2394. static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
  2395. {
  2396. int rc = 0;
  2397. struct mwl8k_config_worker *worker;
  2398. struct mwl8k_priv *priv = hw->priv;
  2399. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  2400. if (worker == NULL)
  2401. return -ENOMEM;
  2402. worker->changed = changed;
  2403. rc = mwl8k_queue_work(hw, &worker->header,
  2404. priv->config_wq, mwl8k_config_wt);
  2405. if (rc == -ETIMEDOUT) {
  2406. printk(KERN_ERR "%s() timed out.\n", __func__);
  2407. rc = -EINVAL;
  2408. }
  2409. kfree(worker);
2410. /*
2411. * mac80211 will crash on any error code other than -EINVAL.
2412. * The wireless extensions layer, which calls into mac80211,
2413. * looks like the actual culprit...
2414. */
  2415. return rc ? -EINVAL : 0;
  2416. }
  2417. struct mwl8k_bss_info_changed_worker {
  2418. struct mwl8k_work_struct header;
  2419. struct ieee80211_vif *vif;
  2420. struct ieee80211_bss_conf *info;
  2421. u32 changed;
  2422. };
  2423. static int mwl8k_bss_info_changed_wt(struct work_struct *wt)
  2424. {
  2425. struct mwl8k_bss_info_changed_worker *worker =
  2426. (struct mwl8k_bss_info_changed_worker *)wt;
  2427. struct ieee80211_hw *hw = worker->header.hw;
  2428. struct ieee80211_vif *vif = worker->vif;
  2429. struct ieee80211_bss_conf *info = worker->info;
  2430. u32 changed;
  2431. int rc;
  2432. struct mwl8k_priv *priv = hw->priv;
  2433. struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
  2434. changed = worker->changed;
  2435. priv->capture_beacon = false;
  2436. if (info->assoc) {
  2437. memcpy(&mwl8k_vif->bss_info, info,
  2438. sizeof(struct ieee80211_bss_conf));
  2439. /* Install rates */
  2440. if (mwl8k_update_rateset(hw, vif))
  2441. goto mwl8k_bss_info_changed_exit;
  2442. /* Turn on rate adaptation */
  2443. if (mwl8k_cmd_use_fixed_rate(hw, MWL8K_USE_AUTO_RATE,
  2444. MWL8K_UCAST_RATE, NULL))
  2445. goto mwl8k_bss_info_changed_exit;
  2446. /* Set radio preamble */
  2447. if (mwl8k_set_radio_preamble(hw, info->use_short_preamble))
  2448. goto mwl8k_bss_info_changed_exit;
  2449. /* Set slot time */
  2450. if (mwl8k_cmd_set_slot(hw, info->use_short_slot))
  2451. goto mwl8k_bss_info_changed_exit;
  2452. /* Update peer rate info */
  2453. if (mwl8k_cmd_update_sta_db(hw, vif,
  2454. MWL8K_STA_DB_MODIFY_ENTRY))
  2455. goto mwl8k_bss_info_changed_exit;
  2456. /* Set AID */
  2457. if (mwl8k_cmd_set_aid(hw, vif))
  2458. goto mwl8k_bss_info_changed_exit;
  2459. /*
  2460. * Finalize the join. Tell rx handler to process
  2461. * next beacon from our BSSID.
  2462. */
  2463. memcpy(priv->capture_bssid, mwl8k_vif->bssid, ETH_ALEN);
  2464. priv->capture_beacon = true;
  2465. } else {
  2466. mwl8k_cmd_update_sta_db(hw, vif, MWL8K_STA_DB_DEL_ENTRY);
  2467. memset(&mwl8k_vif->bss_info, 0,
  2468. sizeof(struct ieee80211_bss_conf));
  2469. memset(mwl8k_vif->bssid, 0, ETH_ALEN);
  2470. }
  2471. mwl8k_bss_info_changed_exit:
  2472. rc = 0;
  2473. return rc;
  2474. }
  2475. static void mwl8k_bss_info_changed(struct ieee80211_hw *hw,
  2476. struct ieee80211_vif *vif,
  2477. struct ieee80211_bss_conf *info,
  2478. u32 changed)
  2479. {
  2480. struct mwl8k_bss_info_changed_worker *worker;
  2481. struct mwl8k_priv *priv = hw->priv;
  2482. struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
  2483. int rc;
  2484. if (changed & BSS_CHANGED_BSSID)
  2485. memcpy(mv_vif->bssid, info->bssid, ETH_ALEN);
  2486. if ((changed & BSS_CHANGED_ASSOC) == 0)
  2487. return;
  2488. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  2489. if (worker == NULL)
  2490. return;
  2491. worker->vif = vif;
  2492. worker->info = info;
  2493. worker->changed = changed;
  2494. rc = mwl8k_queue_work(hw, &worker->header,
  2495. priv->config_wq,
  2496. mwl8k_bss_info_changed_wt);
  2497. kfree(worker);
  2498. if (rc == -ETIMEDOUT)
  2499. printk(KERN_ERR "%s() timed out\n", __func__);
  2500. }
  2501. struct mwl8k_configure_filter_worker {
  2502. struct mwl8k_work_struct header;
  2503. unsigned int changed_flags;
  2504. unsigned int *total_flags;
  2505. int mc_count;
  2506. struct dev_addr_list *mclist;
  2507. };
  2508. #define MWL8K_SUPPORTED_IF_FLAGS FIF_BCN_PRBRESP_PROMISC
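/*
 * Only FIF_BCN_PRBRESP_PROMISC is honoured here: enabling it sends the
 * pre-scan command, disabling it sends the post-scan command with the
 * current BSSID (or all zeroes if no interface is up). The multicast
 * list prepared in mwl8k_prepare_multicast() is programmed in the same
 * worker.
 */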
  2509. static int mwl8k_configure_filter_wt(struct work_struct *wt)
  2510. {
  2511. struct mwl8k_configure_filter_worker *worker =
  2512. (struct mwl8k_configure_filter_worker *)wt;
  2513. struct ieee80211_hw *hw = worker->header.hw;
  2514. unsigned int changed_flags = worker->changed_flags;
  2515. unsigned int *total_flags = worker->total_flags;
  2516. int mc_count = worker->mc_count;
  2517. struct dev_addr_list *mclist = worker->mclist;
  2518. struct mwl8k_priv *priv = hw->priv;
  2519. int rc = 0;
  2520. if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
  2521. if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
  2522. rc = mwl8k_cmd_set_pre_scan(hw);
  2523. else {
  2524. u8 *bssid;
  2525. bssid = "\x00\x00\x00\x00\x00\x00";
  2526. if (priv->vif != NULL)
  2527. bssid = MWL8K_VIF(priv->vif)->bssid;
  2528. rc = mwl8k_cmd_set_post_scan(hw, bssid);
  2529. }
  2530. }
  2531. if (rc)
  2532. goto mwl8k_configure_filter_exit;
  2533. if (mc_count) {
  2534. if (mc_count > priv->num_mcaddrs)
  2535. mc_count = priv->num_mcaddrs;
  2536. rc = mwl8k_cmd_mac_multicast_adr(hw, mc_count, mclist);
  2537. if (rc)
  2538. printk(KERN_ERR
  2539. "%s()Error setting multicast addresses\n",
  2540. __func__);
  2541. }
  2542. mwl8k_configure_filter_exit:
  2543. return rc;
  2544. }
  2545. static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
  2546. int mc_count, struct dev_addr_list *mclist)
  2547. {
  2548. struct mwl8k_configure_filter_worker *worker;
  2549. worker = kzalloc(sizeof(*worker), GFP_ATOMIC);
  2550. if (!worker)
  2551. return 0;
  2552. /*
  2553. * XXX: This is _HORRIBLY_ broken!!
  2554. *
  2555. * No locking, the mclist pointer might be invalid as soon as this
  2556. * function returns, something in the list might be invalidated
  2557. * once we get to the worker, etc...
  2558. */
  2559. worker->mc_count = mc_count;
  2560. worker->mclist = mclist;
  2561. return (u64)worker;
  2562. }
  2563. static void mwl8k_configure_filter(struct ieee80211_hw *hw,
  2564. unsigned int changed_flags,
  2565. unsigned int *total_flags,
  2566. u64 multicast)
  2567. {
  2568. struct mwl8k_configure_filter_worker *worker = (void *)multicast;
  2569. struct mwl8k_priv *priv = hw->priv;
  2570. /* Clear unsupported feature flags */
  2571. *total_flags &= MWL8K_SUPPORTED_IF_FLAGS;
  2572. if (!(changed_flags & MWL8K_SUPPORTED_IF_FLAGS))
  2573. return;
  2574. if (worker == NULL)
  2575. return;
  2576. worker->header.options = MWL8K_WQ_QUEUE_ONLY | MWL8K_WQ_TX_WAIT_EMPTY;
  2577. worker->changed_flags = changed_flags;
  2578. worker->total_flags = total_flags;
  2579. mwl8k_queue_work(hw, &worker->header, priv->config_wq,
  2580. mwl8k_configure_filter_wt);
  2581. }
  2582. struct mwl8k_set_rts_threshold_worker {
  2583. struct mwl8k_work_struct header;
  2584. u32 value;
  2585. };
  2586. static int mwl8k_set_rts_threshold_wt(struct work_struct *wt)
  2587. {
  2588. struct mwl8k_set_rts_threshold_worker *worker =
  2589. (struct mwl8k_set_rts_threshold_worker *)wt;
  2590. struct ieee80211_hw *hw = worker->header.hw;
  2591. u16 threshold = (u16)(worker->value);
  2592. int rc;
  2593. rc = mwl8k_rts_threshold(hw, MWL8K_CMD_SET, &threshold);
  2594. return rc;
  2595. }
  2596. static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
  2597. {
  2598. int rc;
  2599. struct mwl8k_set_rts_threshold_worker *worker;
  2600. struct mwl8k_priv *priv = hw->priv;
  2601. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  2602. if (worker == NULL)
  2603. return -ENOMEM;
  2604. worker->value = value;
  2605. rc = mwl8k_queue_work(hw, &worker->header,
  2606. priv->config_wq,
  2607. mwl8k_set_rts_threshold_wt);
  2608. kfree(worker);
  2609. if (rc == -ETIMEDOUT) {
  2610. printk(KERN_ERR "%s() timed out\n", __func__);
  2611. rc = -EINVAL;
  2612. }
  2613. return rc;
  2614. }
  2615. struct mwl8k_conf_tx_worker {
  2616. struct mwl8k_work_struct header;
  2617. u16 queue;
  2618. const struct ieee80211_tx_queue_params *params;
  2619. };
  2620. static int mwl8k_conf_tx_wt(struct work_struct *wt)
  2621. {
  2622. struct mwl8k_conf_tx_worker *worker =
  2623. (struct mwl8k_conf_tx_worker *)wt;
  2624. struct ieee80211_hw *hw = worker->header.hw;
  2625. u16 queue = worker->queue;
  2626. const struct ieee80211_tx_queue_params *params = worker->params;
  2627. struct mwl8k_priv *priv = hw->priv;
  2628. int rc = 0;
  2629. if (!priv->wmm_enabled) {
  2630. if (mwl8k_set_wmm(hw, 1)) {
  2631. rc = -EINVAL;
  2632. goto mwl8k_conf_tx_exit;
  2633. }
  2634. }
  2635. if (mwl8k_set_edca_params(hw, GET_TXQ(queue), params->cw_min,
  2636. params->cw_max, params->aifs, params->txop))
  2637. rc = -EINVAL;
  2638. mwl8k_conf_tx_exit:
  2639. return rc;
  2640. }
  2641. static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
  2642. const struct ieee80211_tx_queue_params *params)
  2643. {
  2644. int rc;
  2645. struct mwl8k_conf_tx_worker *worker;
  2646. struct mwl8k_priv *priv = hw->priv;
  2647. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  2648. if (worker == NULL)
  2649. return -ENOMEM;
  2650. worker->queue = queue;
  2651. worker->params = params;
  2652. rc = mwl8k_queue_work(hw, &worker->header,
  2653. priv->config_wq, mwl8k_conf_tx_wt);
  2654. kfree(worker);
  2655. if (rc == -ETIMEDOUT) {
  2656. printk(KERN_ERR "%s() timed out\n", __func__);
  2657. rc = -EINVAL;
  2658. }
  2659. return rc;
  2660. }
  2661. static int mwl8k_get_tx_stats(struct ieee80211_hw *hw,
  2662. struct ieee80211_tx_queue_stats *stats)
  2663. {
  2664. struct mwl8k_priv *priv = hw->priv;
  2665. struct mwl8k_tx_queue *txq;
  2666. int index;
  2667. spin_lock_bh(&priv->tx_lock);
  2668. for (index = 0; index < MWL8K_TX_QUEUES; index++) {
  2669. txq = priv->txq + index;
  2670. memcpy(&stats[index], &txq->tx_stats,
  2671. sizeof(struct ieee80211_tx_queue_stats));
  2672. }
  2673. spin_unlock_bh(&priv->tx_lock);
  2674. return 0;
  2675. }
  2676. struct mwl8k_get_stats_worker {
  2677. struct mwl8k_work_struct header;
  2678. struct ieee80211_low_level_stats *stats;
  2679. };
  2680. static int mwl8k_get_stats_wt(struct work_struct *wt)
  2681. {
  2682. struct mwl8k_get_stats_worker *worker =
  2683. (struct mwl8k_get_stats_worker *)wt;
  2684. return mwl8k_cmd_802_11_get_stat(worker->header.hw, worker->stats);
  2685. }
  2686. static int mwl8k_get_stats(struct ieee80211_hw *hw,
  2687. struct ieee80211_low_level_stats *stats)
  2688. {
  2689. int rc;
  2690. struct mwl8k_get_stats_worker *worker;
  2691. struct mwl8k_priv *priv = hw->priv;
  2692. worker = kzalloc(sizeof(*worker), GFP_KERNEL);
  2693. if (worker == NULL)
  2694. return -ENOMEM;
  2695. worker->stats = stats;
  2696. rc = mwl8k_queue_work(hw, &worker->header,
  2697. priv->config_wq, mwl8k_get_stats_wt);
  2698. kfree(worker);
  2699. if (rc == -ETIMEDOUT) {
  2700. printk(KERN_ERR "%s() timed out\n", __func__);
  2701. rc = -EINVAL;
  2702. }
  2703. return rc;
  2704. }
  2705. static const struct ieee80211_ops mwl8k_ops = {
  2706. .tx = mwl8k_tx,
  2707. .start = mwl8k_start,
  2708. .stop = mwl8k_stop,
  2709. .add_interface = mwl8k_add_interface,
  2710. .remove_interface = mwl8k_remove_interface,
  2711. .config = mwl8k_config,
  2712. .bss_info_changed = mwl8k_bss_info_changed,
  2713. .prepare_multicast = mwl8k_prepare_multicast,
  2714. .configure_filter = mwl8k_configure_filter,
  2715. .set_rts_threshold = mwl8k_set_rts_threshold,
  2716. .conf_tx = mwl8k_conf_tx,
  2717. .get_tx_stats = mwl8k_get_tx_stats,
  2718. .get_stats = mwl8k_get_stats,
  2719. };
  2720. static void mwl8k_tx_reclaim_handler(unsigned long data)
  2721. {
  2722. int i;
  2723. struct ieee80211_hw *hw = (struct ieee80211_hw *) data;
  2724. struct mwl8k_priv *priv = hw->priv;
  2725. spin_lock_bh(&priv->tx_lock);
  2726. for (i = 0; i < MWL8K_TX_QUEUES; i++)
  2727. mwl8k_txq_reclaim(hw, i, 0);
  2728. if (priv->tx_wait != NULL && mwl8k_txq_busy(priv) == 0) {
  2729. complete(priv->tx_wait);
  2730. priv->tx_wait = NULL;
  2731. }
  2732. spin_unlock_bh(&priv->tx_lock);
  2733. }
  2734. static void mwl8k_finalize_join_worker(struct work_struct *work)
  2735. {
  2736. struct mwl8k_priv *priv =
  2737. container_of(work, struct mwl8k_priv, finalize_join_worker);
  2738. struct sk_buff *skb = priv->beacon_skb;
  2739. u8 dtim = MWL8K_VIF(priv->vif)->bss_info.dtim_period;
  2740. mwl8k_finalize_join(priv->hw, skb->data, skb->len, dtim);
  2741. dev_kfree_skb(skb);
  2742. priv->beacon_skb = NULL;
  2743. }
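/*
 * The finalize-join worker runs once the RX path has captured a beacon
 * from the associated BSSID (see mwl8k_bss_info_changed_wt); the
 * beacon body and DTIM period are then passed down to the firmware.
 */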
  2744. static int __devinit mwl8k_probe(struct pci_dev *pdev,
  2745. const struct pci_device_id *id)
  2746. {
  2747. struct ieee80211_hw *hw;
  2748. struct mwl8k_priv *priv;
  2749. int rc;
  2750. int i;
  2751. u8 *fw;
  2752. rc = pci_enable_device(pdev);
  2753. if (rc) {
  2754. printk(KERN_ERR "%s: Cannot enable new PCI device\n",
  2755. MWL8K_NAME);
  2756. return rc;
  2757. }
  2758. rc = pci_request_regions(pdev, MWL8K_NAME);
  2759. if (rc) {
  2760. printk(KERN_ERR "%s: Cannot obtain PCI resources\n",
  2761. MWL8K_NAME);
  2762. return rc;
  2763. }
  2764. pci_set_master(pdev);
  2765. hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
  2766. if (hw == NULL) {
  2767. printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
  2768. rc = -ENOMEM;
  2769. goto err_free_reg;
  2770. }
  2771. priv = hw->priv;
  2772. priv->hw = hw;
  2773. priv->pdev = pdev;
  2774. priv->hostcmd_wait = NULL;
  2775. priv->tx_wait = NULL;
  2776. priv->inconfig = false;
  2777. priv->wmm_enabled = false;
  2778. priv->pending_tx_pkts = 0;
  2779. strncpy(priv->name, MWL8K_NAME, sizeof(priv->name));
  2780. spin_lock_init(&priv->fw_lock);
  2781. SET_IEEE80211_DEV(hw, &pdev->dev);
  2782. pci_set_drvdata(pdev, hw);
  2783. priv->regs = pci_iomap(pdev, 1, 0x10000);
  2784. if (priv->regs == NULL) {
  2785. printk(KERN_ERR "%s: Cannot map device memory\n", priv->name);
  2786. goto err_iounmap;
  2787. }

	memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels));
	priv->band.band = IEEE80211_BAND_2GHZ;
	priv->band.channels = priv->channels;
	priv->band.n_channels = ARRAY_SIZE(mwl8k_channels);
	priv->band.bitrates = priv->rates;
	priv->band.n_bitrates = ARRAY_SIZE(mwl8k_rates);
	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;

	BUILD_BUG_ON(sizeof(priv->rates) != sizeof(mwl8k_rates));
	memcpy(priv->rates, mwl8k_rates, sizeof(mwl8k_rates));

	/*
	 * Extra headroom is the size of the required DMA header
	 * minus the size of the smallest 802.11 frame (CTS frame).
	 */
	hw->extra_tx_headroom =
		sizeof(struct mwl8k_dma_data) - sizeof(struct ieee80211_cts);

	hw->channel_change_time = 10;

	hw->queues = MWL8K_TX_QUEUES;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_MONITOR);

	/* Set rssi and noise values to dBm */
	hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
	hw->vif_data_size = sizeof(struct mwl8k_vif);
	priv->vif = NULL;

	/* Set default radio state and preamble */
	priv->radio_on = 0;
	priv->radio_short_preamble = 0;

	/* Finalize join worker */
	INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);

	/* TX reclaim tasklet */
	tasklet_init(&priv->tx_reclaim_task,
			mwl8k_tx_reclaim_handler, (unsigned long)hw);
	tasklet_disable(&priv->tx_reclaim_task);
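
	/*
	 * Note: the reclaim tasklet is created disabled here; it is
	 * expected to be enabled when the interface is brought up, so
	 * that it only runs while the device is active.
	 */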

	/* Config workthread */
	priv->config_wq = create_singlethread_workqueue("mwl8k_config");
	if (priv->config_wq == NULL) {
		rc = -ENOMEM;
		goto err_iounmap;
	}

	/* Power management cookie */
	priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
	if (priv->cookie == NULL) {
		rc = -ENOMEM;
		goto err_iounmap;
	}

	rc = mwl8k_rxq_init(hw, 0);
	if (rc)
		goto err_iounmap;
	rxq_refill(hw, 0, INT_MAX);

	spin_lock_init(&priv->tx_lock);

	for (i = 0; i < MWL8K_TX_QUEUES; i++) {
		rc = mwl8k_txq_init(hw, i);
		if (rc)
			goto err_free_queues;
	}

	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
	iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);

	rc = request_irq(priv->pdev->irq, &mwl8k_interrupt,
			 IRQF_SHARED, MWL8K_NAME, hw);
	if (rc) {
		printk(KERN_ERR "%s: failed to register IRQ handler\n",
		       priv->name);
		goto err_free_queues;
	}

	/* Reset firmware and hardware */
	mwl8k_hw_reset(priv);

	/* Ask userland hotplug daemon for the device firmware */
	rc = mwl8k_request_firmware(priv, (u32)id->driver_data);
	if (rc) {
		printk(KERN_ERR "%s: Firmware files not found\n", priv->name);
		goto err_free_irq;
	}

	/* Load firmware into hardware */
	rc = mwl8k_load_firmware(priv);
	if (rc) {
		printk(KERN_ERR "%s: Cannot start firmware\n", priv->name);
		goto err_stop_firmware;
	}

	/* Reclaim memory once firmware is successfully loaded */
	mwl8k_release_firmware(priv);

	/*
	 * Temporarily enable interrupts. Initial firmware host
	 * commands use interrupts and avoid polling. Disable
	 * interrupts when done.
	 */
	iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);

	/* Get config data, mac addrs etc */
	rc = mwl8k_cmd_get_hw_spec(hw);
	if (rc) {
		printk(KERN_ERR "%s: Cannot initialise firmware\n", priv->name);
		goto err_stop_firmware;
	}

	/* Turn radio off */
	rc = mwl8k_cmd_802_11_radio_disable(hw);
	if (rc) {
		printk(KERN_ERR "%s: Cannot disable the radio\n", priv->name);
		goto err_stop_firmware;
	}

	/* Disable interrupts */
	spin_lock_irq(&priv->tx_lock);
	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
	spin_unlock_irq(&priv->tx_lock);
	free_irq(priv->pdev->irq, hw);

	rc = ieee80211_register_hw(hw);
	if (rc) {
		printk(KERN_ERR "%s: Cannot register device\n", priv->name);
		goto err_stop_firmware;
	}

	fw = (u8 *)&priv->fw_rev;
	printk(KERN_INFO "%s: 88W%u %s\n", priv->name, priv->part_num,
	       MWL8K_DESC);
	printk(KERN_INFO "%s: Driver Ver:%s Firmware Ver:%u.%u.%u.%u\n",
	       priv->name, MWL8K_VERSION, fw[3], fw[2], fw[1], fw[0]);
	printk(KERN_INFO "%s: MAC Address: %pM\n", priv->name,
	       hw->wiphy->perm_addr);

	return 0;
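
	/*
	 * Error unwinding: each label below releases what was acquired
	 * up to the point of failure, in roughly the reverse order of
	 * acquisition.
	 */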
err_stop_firmware:
	mwl8k_hw_reset(priv);
	mwl8k_release_firmware(priv);

err_free_irq:
	spin_lock_irq(&priv->tx_lock);
	iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
	spin_unlock_irq(&priv->tx_lock);
	free_irq(priv->pdev->irq, hw);

err_free_queues:
	for (i = 0; i < MWL8K_TX_QUEUES; i++)
		mwl8k_txq_deinit(hw, i);
	mwl8k_rxq_deinit(hw, 0);

err_iounmap:
	if (priv->cookie != NULL)
		pci_free_consistent(priv->pdev, 4,
				priv->cookie, priv->cookie_dma);

	if (priv->regs != NULL)
		pci_iounmap(pdev, priv->regs);

	if (priv->config_wq != NULL)
		destroy_workqueue(priv->config_wq);

	pci_set_drvdata(pdev, NULL);
	ieee80211_free_hw(hw);

err_free_reg:
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	return rc;
}
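
/*
 * PCI shutdown hook: currently only logs that it was called; no
 * device-specific shutdown work is done here.
 */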
static void __devexit mwl8k_shutdown(struct pci_dev *pdev)
{
	printk(KERN_ERR "===>%s(%u)\n", __func__, __LINE__);
}
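
/*
 * PCI remove: unregister from mac80211, stop the reclaim tasklet and
 * config workqueue, reset the hardware, return all pending TX skbs to
 * mac80211, and release the rings, DMA cookie, MMIO mapping and PCI
 * resources.
 */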
static void __devexit mwl8k_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct mwl8k_priv *priv;
	int i;

	if (hw == NULL)
		return;
	priv = hw->priv;

	ieee80211_stop_queues(hw);

	ieee80211_unregister_hw(hw);

	/* Remove tx reclaim tasklet */
	tasklet_kill(&priv->tx_reclaim_task);

	/* Stop config thread */
	destroy_workqueue(priv->config_wq);

	/* Stop hardware */
	mwl8k_hw_reset(priv);

	/* Return all skbs to mac80211 */
	for (i = 0; i < MWL8K_TX_QUEUES; i++)
		mwl8k_txq_reclaim(hw, i, 1);

	for (i = 0; i < MWL8K_TX_QUEUES; i++)
		mwl8k_txq_deinit(hw, i);

	mwl8k_rxq_deinit(hw, 0);

	pci_free_consistent(priv->pdev, 4,
				priv->cookie, priv->cookie_dma);

	pci_iounmap(pdev, priv->regs);
	pci_set_drvdata(pdev, NULL);
	ieee80211_free_hw(hw);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
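
/*
 * PCI glue: matches the device IDs in mwl8k_table and wires up the
 * probe/remove/shutdown callbacks defined above.
 */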
static struct pci_driver mwl8k_driver = {
	.name		= MWL8K_NAME,
	.id_table	= mwl8k_table,
	.probe		= mwl8k_probe,
	.remove		= __devexit_p(mwl8k_remove),
	.shutdown	= __devexit_p(mwl8k_shutdown),
};
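
/* Module entry points: register/unregister the PCI driver. */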
static int __init mwl8k_init(void)
{
	return pci_register_driver(&mwl8k_driver);
}

static void __exit mwl8k_exit(void)
{
	pci_unregister_driver(&mwl8k_driver);
}

module_init(mwl8k_init);
module_exit(mwl8k_exit);