iwl-3945.c

/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>

#include "iwl-fh.h"
#include "iwl-3945-fh.h"
#include "iwl-commands.h"
#include "iwl-sta.h"
#include "iwl-3945.h"
#include "iwl-eeprom.h"
#include "iwl-core.h"
#include "iwl-helpers.h"
#include "iwl-led.h"
#include "iwl-3945-led.h"
#include "iwl-3945-debugfs.h"
#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,   \
				    IWL_RATE_##r##M_IEEE,   \
				    IWL_RATE_##ip##M_INDEX, \
				    IWL_RATE_##in##M_INDEX, \
				    IWL_RATE_##rp##M_INDEX, \
				    IWL_RATE_##rn##M_INDEX, \
				    IWL_RATE_##pp##M_INDEX, \
				    IWL_RATE_##np##M_INDEX, \
				    IWL_RATE_##r##M_INDEX_TABLE, \
				    IWL_RATE_##ip##M_INDEX_TABLE }
/*
 * Parameter order:
 *   rate, prev rate, next rate, prev tgg rate, next tgg rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 *
 */
const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
	IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2),    /*  1mbps */
	IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5),          /*  2mbps */
	IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),        /* 5.5mbps */
	IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18),      /* 11mbps */
	IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
	IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11),       /*  9mbps */
	IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
	IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
	IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
	IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
	IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
	IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
};
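
/*
 * For reference, expanding the 11 Mbps entry above by hand,
 * IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), gives:
 *
 *	[IWL_RATE_11M_INDEX] = { IWL_RATE_11M_PLCP,
 *				 IWL_RATE_11M_IEEE,
 *				 IWL_RATE_9M_INDEX,
 *				 IWL_RATE_12M_INDEX,
 *				 IWL_RATE_5M_INDEX,
 *				 IWL_RATE_12M_INDEX,
 *				 IWL_RATE_5M_INDEX,
 *				 IWL_RATE_18M_INDEX,
 *				 IWL_RATE_11M_INDEX_TABLE,
 *				 IWL_RATE_9M_INDEX_TABLE },
 *
 * i.e. the 9 Mbps and 12 Mbps indices become the previous/next fallback
 * entries for 11 Mbps.
 */
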
/* 1 = enable the iwl3945_disable_events() function */
#define IWL_EVT_DISABLE (0)
#define IWL_EVT_DISABLE_SIZE (1532/32)

/**
 * iwl3945_disable_events - Disable selected events in uCode event log
 *
 * Disable an event by writing "1"s into "disable"
 * bitmap in SRAM.  Bit position corresponds to Event # (id/type).
 * Default values of 0 enable uCode events to be logged.
 * Use for only special debugging.  This function is just a placeholder as-is,
 * you'll need to provide the special bits! ...
 * ... and set IWL_EVT_DISABLE to 1. */
void iwl3945_disable_events(struct iwl_priv *priv)
{
	int i;
	u32 base;		/* SRAM address of event log header */
	u32 disable_ptr;	/* SRAM address of event-disable bitmap array */
	u32 array_size;		/* # of u32 entries in array */
	u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
		0x00000000,	/*   31 -    0  Event id numbers */
		0x00000000,	/*   63 -   32 */
		0x00000000,	/*   95 -   64 */
		0x00000000,	/*  127 -   96 */
		0x00000000,	/*  159 -  128 */
		0x00000000,	/*  191 -  160 */
		0x00000000,	/*  223 -  192 */
		0x00000000,	/*  255 -  224 */
		0x00000000,	/*  287 -  256 */
		0x00000000,	/*  319 -  288 */
		0x00000000,	/*  351 -  320 */
		0x00000000,	/*  383 -  352 */
		0x00000000,	/*  415 -  384 */
		0x00000000,	/*  447 -  416 */
		0x00000000,	/*  479 -  448 */
		0x00000000,	/*  511 -  480 */
		0x00000000,	/*  543 -  512 */
		0x00000000,	/*  575 -  544 */
		0x00000000,	/*  607 -  576 */
		0x00000000,	/*  639 -  608 */
		0x00000000,	/*  671 -  640 */
		0x00000000,	/*  703 -  672 */
		0x00000000,	/*  735 -  704 */
		0x00000000,	/*  767 -  736 */
		0x00000000,	/*  799 -  768 */
		0x00000000,	/*  831 -  800 */
		0x00000000,	/*  863 -  832 */
		0x00000000,	/*  895 -  864 */
		0x00000000,	/*  927 -  896 */
		0x00000000,	/*  959 -  928 */
		0x00000000,	/*  991 -  960 */
		0x00000000,	/* 1023 -  992 */
		0x00000000,	/* 1055 - 1024 */
		0x00000000,	/* 1087 - 1056 */
		0x00000000,	/* 1119 - 1088 */
		0x00000000,	/* 1151 - 1120 */
		0x00000000,	/* 1183 - 1152 */
		0x00000000,	/* 1215 - 1184 */
		0x00000000,	/* 1247 - 1216 */
		0x00000000,	/* 1279 - 1248 */
		0x00000000,	/* 1311 - 1280 */
		0x00000000,	/* 1343 - 1312 */
		0x00000000,	/* 1375 - 1344 */
		0x00000000,	/* 1407 - 1376 */
		0x00000000,	/* 1439 - 1408 */
		0x00000000,	/* 1471 - 1440 */
		0x00000000,	/* 1503 - 1472 */
	};

	base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	if (!iwl3945_hw_valid_rtc_data_addr(base)) {
		IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
		return;
	}

	disable_ptr = iwl_read_targ_mem(priv, base + (4 * sizeof(u32)));
	array_size = iwl_read_targ_mem(priv, base + (5 * sizeof(u32)));

	if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
		IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
			       disable_ptr);
		for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
			iwl_write_targ_mem(priv,
					   disable_ptr + (i * sizeof(u32)),
					   evt_disable[i]);
	} else {
		IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
		IWL_DEBUG_INFO(priv, "  by writing \"1\"s into disable bitmap\n");
		IWL_DEBUG_INFO(priv, "  in SRAM at 0x%x, size %d u32s\n",
			       disable_ptr, array_size);
	}
}
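
/*
 * Example (a sketch, not part of the driver as shipped): to suppress logging
 * of, say, event id 73, set bit 73 of the bitmap before it is written to
 * SRAM, i.e. bit (73 % 32) of word (73 / 32):
 *
 *	evt_disable[73 / 32] |= (1 << (73 % 32));	-- word 2, bit 9
 *
 * and change IWL_EVT_DISABLE to 1 so the array is actually written out.
 */
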
static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
{
	int idx;

	for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
		if (iwl3945_rates[idx].plcp == plcp)
			return idx;
	return -1;
}
#ifdef CONFIG_IWLWIFI_DEBUG
#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x

static const char *iwl3945_get_tx_fail_reason(u32 status)
{
	switch (status & TX_STATUS_MSK) {
	case TX_3945_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_ENTRY(SHORT_LIMIT);
	TX_STATUS_ENTRY(LONG_LIMIT);
	TX_STATUS_ENTRY(FIFO_UNDERRUN);
	TX_STATUS_ENTRY(MGMNT_ABORT);
	TX_STATUS_ENTRY(NEXT_FRAG);
	TX_STATUS_ENTRY(LIFE_EXPIRE);
	TX_STATUS_ENTRY(DEST_PS);
	TX_STATUS_ENTRY(ABORTED);
	TX_STATUS_ENTRY(BT_RETRY);
	TX_STATUS_ENTRY(STA_INVALID);
	TX_STATUS_ENTRY(FRAG_DROPPED);
	TX_STATUS_ENTRY(TID_DISABLE);
	TX_STATUS_ENTRY(FRAME_FLUSHED);
	TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
	TX_STATUS_ENTRY(TX_LOCKED);
	TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";
}
#else
static inline const char *iwl3945_get_tx_fail_reason(u32 status)
{
	return "";
}
#endif
/*
 * get ieee prev rate from rate scale table.
 * for A and B mode we need to override the prev
 * value
 */
int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
{
	int next_rate = iwl3945_get_prev_ieee_rate(rate);

	switch (priv->band) {
	case IEEE80211_BAND_5GHZ:
		if (rate == IWL_RATE_12M_INDEX)
			next_rate = IWL_RATE_9M_INDEX;
		else if (rate == IWL_RATE_6M_INDEX)
			next_rate = IWL_RATE_6M_INDEX;
		break;
	case IEEE80211_BAND_2GHZ:
		if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
		    iwl_is_associated(priv)) {
			if (rate == IWL_RATE_11M_INDEX)
				next_rate = IWL_RATE_5M_INDEX;
		}
		break;
	default:
		break;
	}

	return next_rate;
}
/**
 * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed.  As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
				     int txq_id, int index)
{
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;

	BUG_ON(txq_id == IWL_CMD_QUEUE_NUM);

	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
		tx_info->skb[0] = NULL;
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}

	if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
			(txq_id != IWL_CMD_QUEUE_NUM) &&
			priv->mac80211_registered)
		iwl_wake_queue(priv, txq_id);
}
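
/*
 * Worked example of the reclaim loop above: with n_bd = 256, read_ptr = 5 and
 * a reported index of 8, the loop starts at iwl_queue_inc_wrap(8, 256) == 9
 * and frees entries 5, 6, 7 and 8, leaving read_ptr == 9.
 */
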
/**
 * iwl3945_rx_reply_tx - Handle Tx response
 */
static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct ieee80211_tx_info *info;
	struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le32_to_cpu(tx_resp->status);
	int rate_idx;
	int fail;

	if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
			  "is out of range [0-%d] %d %d\n", txq_id,
			  index, txq->q.n_bd, txq->q.write_ptr,
			  txq->q.read_ptr);
		return;
	}

	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
	ieee80211_tx_info_clear_status(info);

	/* Fill the MRR chain with some info about on-chip retransmissions */
	rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx -= IWL_FIRST_OFDM_RATE;

	fail = tx_resp->failure_frame;

	info->status.rates[0].idx = rate_idx;
	info->status.rates[0].count = fail + 1; /* add final attempt */

	/* tx_status->rts_retry_count = tx_resp->failure_rts; */
	info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
				IEEE80211_TX_STAT_ACK : 0;

	IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
			txq_id, iwl3945_get_tx_fail_reason(status), status,
			tx_resp->rate, tx_resp->failure_frame);

	IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
	iwl3945_tx_queue_reclaim(priv, txq_id, index);

	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
		IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
}
/*****************************************************************************
 *
 * Intel PRO/Wireless 3945ABG/BG Network Connection
 *
 * RX handler implementations
 *
 *****************************************************************************/
#ifdef CONFIG_IWLWIFI_DEBUG
/*
 * Based on the assumption that all statistics counters are DWORDs.
 * FIXME: This function is for debugging only; it does not handle
 * counter roll-over.
 */
static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
					    __le32 *stats)
{
	int i;
	__le32 *prev_stats;
	u32 *accum_stats;
	u32 *delta, *max_delta;

	prev_stats = (__le32 *)&priv->_3945.statistics;
	accum_stats = (u32 *)&priv->_3945.accum_statistics;
	delta = (u32 *)&priv->_3945.delta_statistics;
	max_delta = (u32 *)&priv->_3945.max_delta;

	for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
	     i += sizeof(__le32), stats++, prev_stats++, delta++,
	     max_delta++, accum_stats++) {
		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
			*delta = (le32_to_cpu(*stats) -
				  le32_to_cpu(*prev_stats));
			*accum_stats += *delta;
			if (*delta > *max_delta)
				*max_delta = *delta;
		}
	}

	/* reset accumulative statistics for "no-counter" type statistics */
	priv->_3945.accum_statistics.general.temperature =
		priv->_3945.statistics.general.temperature;
	priv->_3945.accum_statistics.general.ttl_timestamp =
		priv->_3945.statistics.general.ttl_timestamp;
}
#endif
/**
 * iwl3945_good_plcp_health - checks for plcp error.
 *
 * When the plcp error exceeds the threshold, reset the radio
 * to improve the throughput.
 */
static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
				     struct iwl_rx_packet *pkt)
{
	bool rc = true;
	struct iwl3945_notif_statistics current_stat;
	int combined_plcp_delta;
	unsigned int plcp_msec;
	unsigned long plcp_received_jiffies;

	memcpy(&current_stat, pkt->u.raw, sizeof(struct
			iwl3945_notif_statistics));
	/*
	 * check for plcp_err and trigger radio reset if it exceeds
	 * the plcp error threshold plcp_delta.
	 */
	plcp_received_jiffies = jiffies;
	plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
					(long) priv->plcp_jiffies);
	priv->plcp_jiffies = plcp_received_jiffies;
	/*
	 * check to make sure plcp_msec is not 0 to prevent division
	 * by zero.
	 */
	if (plcp_msec) {
		combined_plcp_delta =
			(le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
			le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));

		if ((combined_plcp_delta > 0) &&
			((combined_plcp_delta * 100) / plcp_msec) >
			priv->cfg->plcp_delta_threshold) {
			/*
			 * if plcp_err exceeded the threshold, the following
			 * data is printed in csv format:
			 *    Text: plcp_err exceeded %d,
			 *    Received ofdm.plcp_err,
			 *    Current ofdm.plcp_err,
			 *    combined_plcp_delta,
			 *    plcp_msec
			 */
			IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
				"%u, %d, %u mSecs\n",
				priv->cfg->plcp_delta_threshold,
				le32_to_cpu(current_stat.rx.ofdm.plcp_err),
				combined_plcp_delta, plcp_msec);
			/*
			 * Reset the RF radio due to the high plcp
			 * error rate
			 */
			rc = false;
		}
	}
	return rc;
}
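
/*
 * Worked example of the check above (the threshold is configuration
 * dependent; 50 is used here only for illustration): 30 new OFDM PLCP errors
 * accumulated over plcp_msec = 50 ms gives (30 * 100) / 50 == 60 errors per
 * 100 ms, which is greater than 50, so rc becomes false and the caller
 * resets the radio.
 */
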
void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);

	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
		     (int)sizeof(struct iwl3945_notif_statistics),
		     le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
#ifdef CONFIG_IWLWIFI_DEBUG
	iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
#endif
	iwl_recover_from_statistics(priv, pkt);

	memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
}

void iwl3945_reply_statistics(struct iwl_priv *priv,
			      struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	__le32 *flag = (__le32 *)&pkt->u.raw;

	if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
#ifdef CONFIG_IWLWIFI_DEBUG
		memset(&priv->_3945.accum_statistics, 0,
			sizeof(struct iwl3945_notif_statistics));
		memset(&priv->_3945.delta_statistics, 0,
			sizeof(struct iwl3945_notif_statistics));
		memset(&priv->_3945.max_delta, 0,
			sizeof(struct iwl3945_notif_statistics));
#endif
		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
	}
	iwl3945_hw_rx_statistics(priv, rxb);
}
/******************************************************************************
 *
 * Misc. internal state and helper functions
 *
 ******************************************************************************/
#ifdef CONFIG_IWLWIFI_DEBUG
/**
 * iwl3945_report_frame - dump frame to syslog during debug sessions
 *
 * You may hack this function to show different aspects of received frames,
 * including selective frame dumps.
 * group100 parameter selects whether to show 1 out of 100 good frames.
 */
static void _iwl3945_dbg_report_frame(struct iwl_priv *priv,
				      struct iwl_rx_packet *pkt,
				      struct ieee80211_hdr *header, int group100)
{
	u32 to_us;
	u32 print_summary = 0;
	u32 print_dump = 0;	/* set to 1 to dump all frames' contents */
	u32 hundred = 0;
	u32 dataframe = 0;
	__le16 fc;
	u16 seq_ctl;
	u16 channel;
	u16 phy_flags;
	u16 length;
	u16 status;
	u16 bcn_tmr;
	u32 tsf_low;
	u64 tsf;
	u8 rssi;
	u8 agc;
	u16 sig_avg;
	u16 noise_diff;
	struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
	u8 *data = IWL_RX_DATA(pkt);

	/* MAC header */
	fc = header->frame_control;
	seq_ctl = le16_to_cpu(header->seq_ctrl);

	/* metadata */
	channel = le16_to_cpu(rx_hdr->channel);
	phy_flags = le16_to_cpu(rx_hdr->phy_flags);
	length = le16_to_cpu(rx_hdr->len);

	/* end-of-frame status and timestamp */
	status = le32_to_cpu(rx_end->status);
	bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
	tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
	tsf = le64_to_cpu(rx_end->timestamp);

	/* signal statistics */
	rssi = rx_stats->rssi;
	agc = rx_stats->agc;
	sig_avg = le16_to_cpu(rx_stats->sig_avg);
	noise_diff = le16_to_cpu(rx_stats->noise_diff);

	to_us = !compare_ether_addr(header->addr1, priv->mac_addr);

	/* if data frame is to us and all is good,
	 *   (optionally) print summary for only 1 out of every 100 */
	if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
	    cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
		dataframe = 1;
		if (!group100)
			print_summary = 1;	/* print each frame */
		else if (priv->framecnt_to_us < 100) {
			priv->framecnt_to_us++;
			print_summary = 0;
		} else {
			priv->framecnt_to_us = 0;
			print_summary = 1;
			hundred = 1;
		}
	} else {
		/* print summary for all other frames */
		print_summary = 1;
	}

	if (print_summary) {
		char *title;
		int rate;

		if (hundred)
			title = "100Frames";
		else if (ieee80211_has_retry(fc))
			title = "Retry";
		else if (ieee80211_is_assoc_resp(fc))
			title = "AscRsp";
		else if (ieee80211_is_reassoc_resp(fc))
			title = "RasRsp";
		else if (ieee80211_is_probe_resp(fc)) {
			title = "PrbRsp";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_beacon(fc)) {
			title = "Beacon";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_atim(fc))
			title = "ATIM";
		else if (ieee80211_is_auth(fc))
			title = "Auth";
		else if (ieee80211_is_deauth(fc))
			title = "DeAuth";
		else if (ieee80211_is_disassoc(fc))
			title = "DisAssoc";
		else
			title = "Frame";

		rate = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
		if (rate == -1)
			rate = 0;
		else
			rate = iwl3945_rates[rate].ieee / 2;

		/* print frame summary.
		 * MAC addresses show just the last byte (for brevity),
		 *    but you can hack it to show more, if you'd like to. */
		if (dataframe)
			IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
				     "len=%u, rssi=%d, chnl=%d, rate=%d,\n",
				     title, le16_to_cpu(fc), header->addr1[5],
				     length, rssi, channel, rate);
		else {
			/* src/dst addresses assume managed mode */
			IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, "
				     "src=0x%02x, rssi=%u, tim=%lu usec, "
				     "phy=0x%02x, chnl=%d\n",
				     title, le16_to_cpu(fc), header->addr1[5],
				     header->addr3[5], rssi,
				     tsf_low - priv->scan_start_tsf,
				     phy_flags, channel);
		}
	}
	if (print_dump)
		iwl_print_hex_dump(priv, IWL_DL_RX, data, length);
}

static void iwl3945_dbg_report_frame(struct iwl_priv *priv,
				     struct iwl_rx_packet *pkt,
				     struct ieee80211_hdr *header, int group100)
{
	if (iwl_get_debug_level(priv) & IWL_DL_RX)
		_iwl3945_dbg_report_frame(priv, pkt, header, group100);
}

#else
static inline void iwl3945_dbg_report_frame(struct iwl_priv *priv,
					    struct iwl_rx_packet *pkt,
					    struct ieee80211_hdr *header, int group100)
{
}
#endif
/* This is necessary only for a number of statistics, see the caller. */
static int iwl3945_is_network_packet(struct iwl_priv *priv,
				     struct ieee80211_hdr *header)
{
	/* Filter incoming packets to determine if they are targeted toward
	 * this network, discarding packets coming from ourselves */
	switch (priv->iw_mode) {
	case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source    | BSSID */
		/* packets to our IBSS update information */
		return !compare_ether_addr(header->addr3, priv->bssid);
	case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
		/* packets to our IBSS update information */
		return !compare_ether_addr(header->addr2, priv->bssid);
	default:
		return 1;
	}
}
static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
				   struct iwl_rx_mem_buffer *rxb,
				   struct ieee80211_rx_status *stats)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
	u16 len = le16_to_cpu(rx_hdr->len);
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We received data from the HW, so stop the watchdog */
	if (unlikely(len + IWL39_RX_FRAME_SIZE >
		     PAGE_SIZE << priv->hw_params.rx_page_order)) {
		IWL_DEBUG_DROP(priv, "Corruption detected!\n");
		return;
	}

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
			"Dropping packet while interface is not open.\n");
		return;
	}

	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	if (!iwl3945_mod_params.sw_crypto)
		iwl_set_decrypted_flag(priv,
				       (struct ieee80211_hdr *)rxb_addr(rxb),
				       le32_to_cpu(rx_end->status), stats);

	skb_add_rx_frag(skb, 0, rxb->page,
			(void *)rx_hdr->payload - (void *)pkt, len);

	iwl_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}
#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)

static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct ieee80211_hdr *header;
	struct ieee80211_rx_status rx_status;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
	struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
	struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
	u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
	u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
	u8 network_packet;

	rx_status.flag = 0;
	rx_status.mactime = le64_to_cpu(rx_end->timestamp);
	rx_status.freq =
		ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel));
	rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
				IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;

	rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
	if (rx_status.band == IEEE80211_BAND_5GHZ)
		rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;

	rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
					RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;

	/* set the preamble flag if appropriate */
	if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
		rx_status.flag |= RX_FLAG_SHORTPRE;

	if ((unlikely(rx_stats->phy_count > 20))) {
		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
				rx_stats->phy_count);
		return;
	}

	if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
	    || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
		return;
	}

	/* Convert 3945's rssi indicator to dBm */
	rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;

	IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
			rx_status.signal, rx_stats_sig_avg,
			rx_stats_noise_diff);

	header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);

	network_packet = iwl3945_is_network_packet(priv, header);

	IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
			      network_packet ? '*' : ' ',
			      le16_to_cpu(rx_hdr->channel),
			      rx_status.signal, rx_status.signal,
			      rx_status.rate_idx);

	/* Set "1" to report good data frames in groups of 100 */
	iwl3945_dbg_report_frame(priv, pkt, header, 1);
	iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header);

	if (network_packet) {
		priv->_3945.last_beacon_time =
			le32_to_cpu(rx_end->beacon_timestamp);
		priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
		priv->_3945.last_rx_rssi = rx_status.signal;
	}

	iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
}
int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				     struct iwl_tx_queue *txq,
				     dma_addr_t addr, u16 len, u8 reset, u8 pad)
{
	int count;
	struct iwl_queue *q;
	struct iwl3945_tfd *tfd, *tfd_tmp;

	q = &txq->q;
	tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));

	if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			  NUM_TFD_CHUNKS);
		return -EINVAL;
	}

	tfd->tbs[count].addr = cpu_to_le32(addr);
	tfd->tbs[count].len = cpu_to_le32(len);

	count++;

	tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
					 TFD_CTL_PAD_SET(pad));

	return 0;
}
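
/*
 * Illustrative call sequence (a sketch; the address/length names are
 * hypothetical): the TX path attaches the command/header chunk first with
 * reset = 1, which zeroes the TFD, then appends the payload chunk with
 * reset = 0 so the chunk count read back from control_flags just increments:
 *
 *	iwl3945_hw_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1, 0);
 *	iwl3945_hw_txq_attach_buf_to_tfd(priv, txq, phys_addr, secondlen, 0, 0);
 */
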
/**
 * iwl3945_hw_txq_free_tfd - Free one TFD, the one at index [txq->q.read_ptr]
 *
 * Does NOT advance any indexes
 */
void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
	int index = txq->q.read_ptr;
	struct iwl3945_tfd *tfd = &tfd_tmp[index];
	struct pci_dev *dev = priv->pci_dev;
	int i;
	int counter;

	/* sanity check */
	counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
	if (counter > NUM_TFD_CHUNKS) {
		IWL_ERR(priv, "Too many chunks: %i\n", counter);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (counter)
		pci_unmap_single(dev,
				 pci_unmap_addr(&txq->meta[index], mapping),
				 pci_unmap_len(&txq->meta[index], len),
				 PCI_DMA_TODEVICE);

	/* unmap chunks if any */
	for (i = 1; i < counter; i++) {
		pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
			 le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
		if (txq->txb[txq->q.read_ptr].skb[0]) {
			struct sk_buff *skb = txq->txb[txq->q.read_ptr].skb[0];
			if (txq->txb[txq->q.read_ptr].skb[0]) {
				/* Can be called from interrupt context */
				dev_kfree_skb_any(skb);
				txq->txb[txq->q.read_ptr].skb[0] = NULL;
			}
		}
	}
	return;
}
/**
 * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
 *
 */
void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
				  struct iwl_device_cmd *cmd,
				  struct ieee80211_tx_info *info,
				  struct ieee80211_hdr *hdr,
				  int sta_id, int tx_id)
{
	u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
	u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945);
	u16 rate_mask;
	int rate;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	__le32 tx_flags;
	__le16 fc = hdr->frame_control;
	struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;

	rate = iwl3945_rates[rate_index].plcp;
	tx_flags = tx_cmd->tx_flags;

	/* We need to figure out how to get the sta->supp_rates while
	 * in this running context */
	rate_mask = IWL_RATES_MASK;

	/* Set retry limit on DATA packets and Probe Responses */
	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;
	tx_cmd->data_retry_limit = data_retry_limit;

	if (tx_id >= IWL_CMD_QUEUE_NUM)
		rts_retry_limit = 3;
	else
		rts_retry_limit = 7;

	if (data_retry_limit < rts_retry_limit)
		rts_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	if (ieee80211_is_mgmt(fc)) {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			if (tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}
	}

	tx_cmd->rate = rate;
	tx_cmd->tx_flags = tx_flags;

	/* OFDM */
	tx_cmd->supp_rates[0] =
	   ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;

	/* CCK */
	tx_cmd->supp_rates[1] = (rate_mask & 0xF);

	IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
		       "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
		       tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
		       tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
}
static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
			   u16 tx_rate, u8 flags)
{
	unsigned long flags_spin;
	struct iwl_station_entry *station;

	if (sta_id == IWL_INVALID_STATION)
		return IWL_INVALID_STATION;

	spin_lock_irqsave(&priv->sta_lock, flags_spin);

	station = &priv->stations[sta_id];
	station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
	station->sta.rate_n_flags = cpu_to_le16(tx_rate);
	station->sta.mode = STA_CONTROL_MODIFY_MSK;

	spin_unlock_irqrestore(&priv->sta_lock, flags_spin);

	iwl_send_add_sta(priv, &station->sta, flags);
	IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
			sta_id, tx_rate);
	return sta_id;
}
static int iwl3945_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
{
	if (src == IWL_PWR_SRC_VAUX) {
		if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					~APMG_PS_CTRL_MSK_PWR_SRC);

			iwl_poll_bit(priv, CSR_GPIO_IN,
				     CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
				     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
		}
	} else {
		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				~APMG_PS_CTRL_MSK_PWR_SRC);

		iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
			     CSR_GPIO_IN_BIT_AUX_POWER, 5000);	/* uS */
	}

	return 0;
}
static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	iwl_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->dma_addr);
	iwl_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
	iwl_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
	iwl_write_direct32(priv, FH39_RCSR_CONFIG(0),
		FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
		FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
		FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
		FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
		(RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
		FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
		(1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
		FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);

	/* fake read to flush all prev I/O */
	iwl_read_direct32(priv, FH39_RSSR_CTRL);

	return 0;
}
static int iwl3945_tx_reset(struct iwl_priv *priv)
{
	/* bypass mode */
	iwl_write_prph(priv, ALM_SCD_MODE_REG, 0x2);

	/* RA 0 is active */
	iwl_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);

	/* all 6 FIFOs are active */
	iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);

	iwl_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
	iwl_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
	iwl_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
	iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);

	iwl_write_direct32(priv, FH39_TSSR_CBB_BASE,
			   priv->_3945.shared_phys);

	iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
		FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);

	return 0;
}
/**
 * iwl3945_txq_ctx_reset - Reset TX queue context
 *
 * Destroys all DMA structures and initializes them again
 */
static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
{
	int rc;
	int txq_id, slots_num;

	iwl3945_hw_txq_ctx_free(priv);

	/* allocate tx queue structure */
	rc = iwl_alloc_txq_mem(priv);
	if (rc)
		return rc;

	/* Tx CMD queue */
	rc = iwl3945_tx_reset(priv);
	if (rc)
		goto error;

	/* Tx queue(s) */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
				TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		rc = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
				       txq_id);
		if (rc) {
			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return rc;

error:
	iwl3945_hw_txq_ctx_free(priv);
	return rc;
}
/*
 * Start up 3945's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl3945_apm_init(struct iwl_priv *priv)
{
	int ret = iwl_apm_init(priv);

	/* Clear APMG (NIC's internal power management) interrupts */
	iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
	iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);

	/* Reset radio chip */
	iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
	udelay(5);
	iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);

	return ret;
}
static void iwl3945_nic_config(struct iwl_priv *priv)
{
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	unsigned long flags;
	u8 rev_id = 0;

	spin_lock_irqsave(&priv->lock, flags);

	/* Determine HW type */
	pci_read_config_byte(priv->pci_dev, PCI_REVISION_ID, &rev_id);

	IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);

	if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
		IWL_DEBUG_INFO(priv, "RTP type\n");
	else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
		IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
	} else {
		IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
	}

	if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
		IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
	} else
		IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");

	if ((eeprom->board_revision & 0xF0) == 0xD0) {
		IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
			       eeprom->board_revision);
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	} else {
		IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
			       eeprom->board_revision);
		iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
			      CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
	}

	if (eeprom->almgor_m_version <= 1) {
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
		IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
			       eeprom->almgor_m_version);
	} else {
		IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
			       eeprom->almgor_m_version);
		iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
			    CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");

	if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
		IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
}
int iwl3945_hw_nic_init(struct iwl_priv *priv)
{
	int rc;
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;

	spin_lock_irqsave(&priv->lock, flags);
	priv->cfg->ops->lib->apm_ops.init(priv);
	spin_unlock_irqrestore(&priv->lock, flags);

	rc = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
	if (rc)
		return rc;

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		rc = iwl_rx_queue_alloc(priv);
		if (rc) {
			IWL_ERR(priv, "Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwl3945_rx_queue_reset(priv, rxq);

	iwl3945_rx_replenish(priv);

	iwl3945_rx_init(priv, rxq);

	/* Look at using this instead:
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);
	*/

	iwl_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);

	rc = iwl3945_txq_ctx_reset(priv);
	if (rc)
		return rc;

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
/**
 * iwl3945_hw_txq_ctx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
{
	int txq_id;

	/* Tx queues */
	if (priv->txq)
		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
		     txq_id++)
			if (txq_id == IWL_CMD_QUEUE_NUM)
				iwl_cmd_queue_free(priv);
			else
				iwl_tx_queue_free(priv, txq_id);

	/* free tx queue structure */
	iwl_free_txq_mem(priv);
}
void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
{
	int txq_id;

	/* stop SCD */
	iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
	iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0);

	/* reset TFD queues */
	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
		iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
		iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
				FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
				1000);
	}

	iwl3945_hw_txq_ctx_free(priv);
}
/**
 * iwl3945_hw_reg_adjust_power_by_temp
 * return index delta into power gain settings table
 */
static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
{
	return (new_reading - old_reading) * (-11) / 100;
}
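
/*
 * Worked example: a reading 100 units above the old one returns
 * (100) * (-11) / 100 == -11 index steps, and one 100 units colder returns
 * +11; differences smaller than 10 units truncate to 0.
 */
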
/**
 * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
 */
static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
{
	return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
}

int iwl3945_hw_get_temperature(struct iwl_priv *priv)
{
	return iwl_read32(priv, CSR_UCODE_DRV_GP2);
}
/**
 * iwl3945_hw_reg_txpower_get_temperature
 * get the current temperature by reading from NIC
 */
static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
{
	struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
	int temperature;

	temperature = iwl3945_hw_get_temperature(priv);

	/* driver's okay range is -260 to +25.
	 *   human readable okay range is 0 to +285 */
	IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);

	/* handle insane temp reading */
	if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
		IWL_ERR(priv, "Error bad temperature value %d\n", temperature);

		/* if really really hot(?),
		 *   substitute the 3rd band/group's temp measured at factory */
		if (priv->last_temperature > 100)
			temperature = eeprom->groups[2].temperature;
		else /* else use most recent "sane" value from driver */
			temperature = priv->last_temperature;
	}

	return temperature;	/* raw, not "human readable" */
}
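
/*
 * Note on the conversion used in the debug print above: the "human readable"
 * value is just the raw reading plus IWL_TEMP_CONVERT, and the quoted ranges
 * (-260..+25 raw versus 0..+285 readable) imply an offset of 260, e.g. a raw
 * reading of -200 is reported as 60.
 */
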
/* Adjust Txpower only if temperature variance is greater than threshold.
 *
 * Both are lower than older versions' 9 degrees */
#define IWL_TEMPERATURE_LIMIT_TIMER   6

/**
 * is_temp_calib_needed - determines if new calibration is needed
 *
 * records new temperature in tx_mgr->temperature.
 * replaces tx_mgr->last_temperature *only* if calib needed
 *    (assumes caller will actually do the calibration!). */
static int is_temp_calib_needed(struct iwl_priv *priv)
{
	int temp_diff;

	priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
	temp_diff = priv->temperature - priv->last_temperature;

	/* get absolute value */
	if (temp_diff < 0) {
		IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
		temp_diff = -temp_diff;
	} else if (temp_diff == 0)
		IWL_DEBUG_POWER(priv, "Same temp,\n");
	else
		IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);

	/* if we don't need calibration, *don't* update last_temperature */
	if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
		IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
		return 0;
	}

	IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");

	/* assume that caller will actually do calib ...
	 *   update the "last temperature" value */
	priv->last_temperature = priv->temperature;
	return 1;
}
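
/*
 * Worked example: with last_temperature == 100, a new reading of 104 gives
 * |temp_diff| == 4 < IWL_TEMPERATURE_LIMIT_TIMER, so no calibration is
 * requested and last_temperature is left alone; a reading of 93 gives
 * |temp_diff| == 7, so the function returns 1 and records 93 as the new
 * reference temperature.
 */
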
#define IWL_MAX_GAIN_ENTRIES 78
#define IWL_CCK_FROM_OFDM_POWER_DIFF  -5
#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)

/* radio and DSP power table, each step is 1/2 dB.
 * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
	{
	 {251, 127},		/* 2.4 GHz, highest power */
	 {251, 127},
	 {251, 127},
	 {251, 127},
	 {251, 125},
	 {251, 110},
	 {251, 105},
	 {251, 98},
	 {187, 125},
	 {187, 115},
	 {187, 108},
	 {187, 99},
	 {243, 119},
	 {243, 111},
	 {243, 105},
	 {243, 97},
	 {243, 92},
	 {211, 106},
	 {211, 100},
	 {179, 120},
	 {179, 113},
	 {179, 107},
	 {147, 125},
	 {147, 119},
	 {147, 112},
	 {147, 106},
	 {147, 101},
	 {147, 97},
	 {147, 91},
	 {115, 107},
	 {235, 121},
	 {235, 115},
	 {235, 109},
	 {203, 127},
	 {203, 121},
	 {203, 115},
	 {203, 108},
	 {203, 102},
	 {203, 96},
	 {203, 92},
	 {171, 110},
	 {171, 104},
	 {171, 98},
	 {139, 116},
	 {227, 125},
	 {227, 119},
	 {227, 113},
	 {227, 107},
	 {227, 101},
	 {227, 96},
	 {195, 113},
	 {195, 106},
	 {195, 102},
	 {195, 95},
	 {163, 113},
	 {163, 106},
	 {163, 102},
	 {163, 95},
	 {131, 113},
	 {131, 106},
	 {131, 102},
	 {131, 95},
	 {99, 113},
	 {99, 106},
	 {99, 102},
	 {99, 95},
	 {67, 113},
	 {67, 106},
	 {67, 102},
	 {67, 95},
	 {35, 113},
	 {35, 106},
	 {35, 102},
	 {35, 95},
	 {3, 113},
	 {3, 106},
	 {3, 102},
	 {3, 95} },		/* 2.4 GHz, lowest power */
	{
	 {251, 127},		/* 5.x GHz, highest power */
	 {251, 120},
	 {251, 114},
	 {219, 119},
	 {219, 101},
	 {187, 113},
	 {187, 102},
	 {155, 114},
	 {155, 103},
	 {123, 117},
	 {123, 107},
	 {123, 99},
	 {123, 92},
	 {91, 108},
	 {59, 125},
	 {59, 118},
	 {59, 109},
	 {59, 102},
	 {59, 96},
	 {59, 90},
	 {27, 104},
	 {27, 98},
	 {27, 92},
	 {115, 118},
	 {115, 111},
	 {115, 104},
	 {83, 126},
	 {83, 121},
	 {83, 113},
	 {83, 105},
	 {83, 99},
	 {51, 118},
	 {51, 111},
	 {51, 104},
	 {51, 98},
	 {19, 116},
	 {19, 109},
	 {19, 102},
	 {19, 98},
	 {19, 93},
	 {171, 113},
	 {171, 107},
	 {171, 99},
	 {139, 120},
	 {139, 113},
	 {139, 107},
	 {139, 99},
	 {107, 120},
	 {107, 113},
	 {107, 107},
	 {107, 99},
	 {75, 120},
	 {75, 113},
	 {75, 107},
	 {75, 99},
	 {43, 120},
	 {43, 113},
	 {43, 107},
	 {43, 99},
	 {11, 120},
	 {11, 113},
	 {11, 107},
	 {11, 99},
	 {131, 107},
	 {131, 99},
	 {99, 120},
	 {99, 113},
	 {99, 107},
	 {99, 99},
	 {67, 120},
	 {67, 113},
	 {67, 107},
	 {67, 99},
	 {35, 120},
	 {35, 113},
	 {35, 107},
	 {35, 99},
	 {3, 120} }		/* 5.x GHz, lowest power */
};
  1326. static inline u8 iwl3945_hw_reg_fix_power_index(int index)
  1327. {
  1328. if (index < 0)
  1329. return 0;
  1330. if (index >= IWL_MAX_GAIN_ENTRIES)
  1331. return IWL_MAX_GAIN_ENTRIES - 1;
  1332. return (u8) index;
  1333. }
  1334. /* Kick off thermal recalibration check every 60 seconds */
  1335. #define REG_RECALIB_PERIOD (60)
  1336. /**
  1337. * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
  1338. *
  1339. * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
  1340. * or 6 Mbit (OFDM) rates.
  1341. */
  1342. static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
  1343. s32 rate_index, const s8 *clip_pwrs,
  1344. struct iwl_channel_info *ch_info,
  1345. int band_index)
  1346. {
  1347. struct iwl3945_scan_power_info *scan_power_info;
  1348. s8 power;
  1349. u8 power_index;
  1350. scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];
  1351. /* use this channel group's 6Mbit clipping/saturation pwr,
  1352. * but cap at regulatory scan power restriction (set during init
  1353. * based on eeprom channel data) for this channel. */
  1354. power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
  1355. /* further limit to user's max power preference.
  1356. * FIXME: Other spectrum management power limitations do not
  1357. * seem to apply?? */
  1358. power = min(power, priv->tx_power_user_lmt);
  1359. scan_power_info->requested_power = power;
  1360. /* find difference between new scan *power* and current "normal"
  1361. * Tx *power* for 6Mb. Use this difference (x2) to adjust the
  1362. * current "normal" temperature-compensated Tx power *index* for
  1363. * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
  1364. * *index*. */
  1365. power_index = ch_info->power_info[rate_index].power_table_index
  1366. - (power - ch_info->power_info
  1367. [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;
  1368. /* store reference index that we use when adjusting *all* scan
  1369. * powers. So we can accommodate user (all channel) or spectrum
  1370. * management (single channel) power changes "between" temperature
  1371. * feedback compensation procedures.
  1372. * don't force fit this reference index into gain table; it may be a
  1373. * negative number. This will help avoid errors when we're at
  1374. * the lower bounds (highest gains, for warmest temperatures)
  1375. * of the table. */
  1376. /* don't exceed table bounds for "real" setting */
  1377. power_index = iwl3945_hw_reg_fix_power_index(power_index);
  1378. scan_power_info->power_table_index = power_index;
  1379. scan_power_info->tpc.tx_gain =
  1380. power_gain_table[band_index][power_index].tx_gain;
  1381. scan_power_info->tpc.dsp_atten =
  1382. power_gain_table[band_index][power_index].dsp_atten;
  1383. }
  1384. /**
  1385. * iwl3945_send_tx_power - fill in Tx Power command with gain settings
  1386. *
  1387. * Configures power settings for all rates for the current channel,
  1388. * using values from channel info struct, and send to NIC
  1389. */
  1390. static int iwl3945_send_tx_power(struct iwl_priv *priv)
  1391. {
  1392. int rate_idx, i;
  1393. const struct iwl_channel_info *ch_info = NULL;
  1394. struct iwl3945_txpowertable_cmd txpower = {
  1395. .channel = priv->active_rxon.channel,
  1396. };
  1397. txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
  1398. ch_info = iwl_get_channel_info(priv,
  1399. priv->band,
  1400. le16_to_cpu(priv->active_rxon.channel));
  1401. if (!ch_info) {
  1402. IWL_ERR(priv,
  1403. "Failed to get channel info for channel %d [%d]\n",
  1404. le16_to_cpu(priv->active_rxon.channel), priv->band);
  1405. return -EINVAL;
  1406. }
  1407. if (!is_channel_valid(ch_info)) {
  1408. IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
  1409. "non-Tx channel.\n");
  1410. return 0;
  1411. }
  1412. /* fill cmd with power settings for all rates for current channel */
  1413. /* Fill OFDM rate */
  1414. for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
  1415. rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {
  1416. txpower.power[i].tpc = ch_info->power_info[i].tpc;
  1417. txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
  1418. IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
  1419. le16_to_cpu(txpower.channel),
  1420. txpower.band,
  1421. txpower.power[i].tpc.tx_gain,
  1422. txpower.power[i].tpc.dsp_atten,
  1423. txpower.power[i].rate);
  1424. }
  1425. /* Fill CCK rates */
  1426. for (rate_idx = IWL_FIRST_CCK_RATE;
  1427. rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
  1428. txpower.power[i].tpc = ch_info->power_info[i].tpc;
  1429. txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
  1430. IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
  1431. le16_to_cpu(txpower.channel),
  1432. txpower.band,
  1433. txpower.power[i].tpc.tx_gain,
  1434. txpower.power[i].tpc.dsp_atten,
  1435. txpower.power[i].rate);
  1436. }
  1437. return iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
  1438. sizeof(struct iwl3945_txpowertable_cmd),
  1439. &txpower);
  1440. }
  1441. /**
  1442. * iwl3945_hw_reg_set_new_power - Configures power tables at new levels
  1443. * @ch_info: Channel to update. Uses power_info.requested_power.
  1444. *
  1445. * Replace requested_power and base_power_index ch_info fields for
  1446. * one channel.
  1447. *
  1448. * Called if user or spectrum management changes power preferences.
  1449. * Takes into account h/w and modulation limitations (clip power).
  1450. *
  1451. * This does *not* send anything to NIC, just sets up ch_info for one channel.
  1452. *
1453. * NOTE: iwl3945_hw_reg_comp_txpower_temp() *must* be run after this to
  1454. * properly fill out the scan powers, and actual h/w gain settings,
  1455. * and send changes to NIC
  1456. */
  1457. static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
  1458. struct iwl_channel_info *ch_info)
  1459. {
  1460. struct iwl3945_channel_power_info *power_info;
  1461. int power_changed = 0;
  1462. int i;
  1463. const s8 *clip_pwrs;
  1464. int power;
  1465. /* Get this chnlgrp's rate-to-max/clip-powers table */
  1466. clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
  1467. /* Get this channel's rate-to-current-power settings table */
  1468. power_info = ch_info->power_info;
  1469. /* update OFDM Txpower settings */
  1470. for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
  1471. i++, ++power_info) {
  1472. int delta_idx;
  1473. /* limit new power to be no more than h/w capability */
  1474. power = min(ch_info->curr_txpow, clip_pwrs[i]);
  1475. if (power == power_info->requested_power)
  1476. continue;
  1477. /* find difference between old and new requested powers,
  1478. * update base (non-temp-compensated) power index */
  1479. delta_idx = (power - power_info->requested_power) * 2;
  1480. power_info->base_power_index -= delta_idx;
  1481. /* save new requested power value */
  1482. power_info->requested_power = power;
  1483. power_changed = 1;
  1484. }
  1485. /* update CCK Txpower settings, based on OFDM 12M setting ...
  1486. * ... all CCK power settings for a given channel are the *same*. */
  1487. if (power_changed) {
  1488. power =
  1489. ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
  1490. requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
  1491. /* do all CCK rates' iwl3945_channel_power_info structures */
  1492. for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
  1493. power_info->requested_power = power;
  1494. power_info->base_power_index =
  1495. ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
  1496. base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
  1497. ++power_info;
  1498. }
  1499. }
  1500. return 0;
  1501. }
  1502. /**
  1503. * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
  1504. *
  1505. * NOTE: Returned power limit may be less (but not more) than requested,
  1506. * based strictly on regulatory (eeprom and spectrum mgt) limitations
  1507. * (no consideration for h/w clipping limitations).
  1508. */
  1509. static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
  1510. {
  1511. s8 max_power;
  1512. #if 0
  1513. /* if we're using TGd limits, use lower of TGd or EEPROM */
  1514. if (ch_info->tgd_data.max_power != 0)
  1515. max_power = min(ch_info->tgd_data.max_power,
  1516. ch_info->eeprom.max_power_avg);
  1517. /* else just use EEPROM limits */
  1518. else
  1519. #endif
  1520. max_power = ch_info->eeprom.max_power_avg;
  1521. return min(max_power, ch_info->max_power_avg);
  1522. }
  1523. /**
  1524. * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
  1525. *
  1526. * Compensate txpower settings of *all* channels for temperature.
  1527. * This only accounts for the difference between current temperature
  1528. * and the factory calibration temperatures, and bases the new settings
  1529. * on the channel's base_power_index.
  1530. *
  1531. * If RxOn is "associated", this sends the new Txpower to NIC!
  1532. */
  1533. static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
  1534. {
  1535. struct iwl_channel_info *ch_info = NULL;
  1536. struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
  1537. int delta_index;
  1538. const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
  1539. u8 a_band;
  1540. u8 rate_index;
  1541. u8 scan_tbl_index;
  1542. u8 i;
  1543. int ref_temp;
  1544. int temperature = priv->temperature;
  1545. if (priv->disable_tx_power_cal ||
  1546. test_bit(STATUS_SCANNING, &priv->status)) {
  1547. /* do not perform tx power calibration */
  1548. return 0;
  1549. }
  1550. /* set up new Tx power info for each and every channel, 2.4 and 5.x */
  1551. for (i = 0; i < priv->channel_count; i++) {
  1552. ch_info = &priv->channel_info[i];
  1553. a_band = is_channel_a_band(ch_info);
  1554. /* Get this chnlgrp's factory calibration temperature */
  1555. ref_temp = (s16)eeprom->groups[ch_info->group_index].
  1556. temperature;
  1557. /* get power index adjustment based on current and factory
  1558. * temps */
  1559. delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
  1560. ref_temp);
  1561. /* set tx power value for all rates, OFDM and CCK */
  1562. for (rate_index = 0; rate_index < IWL_RATE_COUNT;
  1563. rate_index++) {
  1564. int power_idx =
  1565. ch_info->power_info[rate_index].base_power_index;
  1566. /* temperature compensate */
  1567. power_idx += delta_index;
  1568. /* stay within table range */
  1569. power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
  1570. ch_info->power_info[rate_index].
  1571. power_table_index = (u8) power_idx;
  1572. ch_info->power_info[rate_index].tpc =
  1573. power_gain_table[a_band][power_idx];
  1574. }
  1575. /* Get this chnlgrp's rate-to-max/clip-powers table */
  1576. clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
  1577. /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
  1578. for (scan_tbl_index = 0;
  1579. scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
  1580. s32 actual_index = (scan_tbl_index == 0) ?
  1581. IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
  1582. iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
  1583. actual_index, clip_pwrs,
  1584. ch_info, a_band);
  1585. }
  1586. }
  1587. /* send Txpower command for current channel to ucode */
  1588. return priv->cfg->ops->lib->send_tx_power(priv);
  1589. }
  1590. int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
  1591. {
  1592. struct iwl_channel_info *ch_info;
  1593. s8 max_power;
  1594. u8 a_band;
  1595. u8 i;
  1596. if (priv->tx_power_user_lmt == power) {
  1597. IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
  1598. "limit: %ddBm.\n", power);
  1599. return 0;
  1600. }
  1601. IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
  1602. priv->tx_power_user_lmt = power;
  1603. /* set up new Tx powers for each and every channel, 2.4 and 5.x */
  1604. for (i = 0; i < priv->channel_count; i++) {
  1605. ch_info = &priv->channel_info[i];
  1606. a_band = is_channel_a_band(ch_info);
  1607. /* find minimum power of all user and regulatory constraints
  1608. * (does not consider h/w clipping limitations) */
  1609. max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
  1610. max_power = min(power, max_power);
  1611. if (max_power != ch_info->curr_txpow) {
  1612. ch_info->curr_txpow = max_power;
  1613. /* this considers the h/w clipping limitations */
  1614. iwl3945_hw_reg_set_new_power(priv, ch_info);
  1615. }
  1616. }
  1617. /* update txpower settings for all channels,
  1618. * send to NIC if associated. */
  1619. is_temp_calib_needed(priv);
  1620. iwl3945_hw_reg_comp_txpower_temp(priv);
  1621. return 0;
  1622. }
  1623. static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
  1624. {
  1625. int rc = 0;
  1626. struct iwl_rx_packet *pkt;
  1627. struct iwl3945_rxon_assoc_cmd rxon_assoc;
  1628. struct iwl_host_cmd cmd = {
  1629. .id = REPLY_RXON_ASSOC,
  1630. .len = sizeof(rxon_assoc),
  1631. .flags = CMD_WANT_SKB,
  1632. .data = &rxon_assoc,
  1633. };
  1634. const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
  1635. const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
  1636. if ((rxon1->flags == rxon2->flags) &&
  1637. (rxon1->filter_flags == rxon2->filter_flags) &&
  1638. (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
  1639. (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
  1640. IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
  1641. return 0;
  1642. }
  1643. rxon_assoc.flags = priv->staging_rxon.flags;
  1644. rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
  1645. rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
  1646. rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
  1647. rxon_assoc.reserved = 0;
  1648. rc = iwl_send_cmd_sync(priv, &cmd);
  1649. if (rc)
  1650. return rc;
  1651. pkt = (struct iwl_rx_packet *)cmd.reply_page;
  1652. if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
  1653. IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
  1654. rc = -EIO;
  1655. }
  1656. iwl_free_pages(priv, cmd.reply_page);
  1657. return rc;
  1658. }
  1659. /**
  1660. * iwl3945_commit_rxon - commit staging_rxon to hardware
  1661. *
  1662. * The RXON command in staging_rxon is committed to the hardware and
  1663. * the active_rxon structure is updated with the new data. This
  1664. * function correctly transitions out of the RXON_ASSOC_MSK state if
  1665. * a HW tune is required based on the RXON structure changes.
  1666. */
  1667. static int iwl3945_commit_rxon(struct iwl_priv *priv)
  1668. {
  1669. /* cast away the const for active_rxon in this function */
  1670. struct iwl3945_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
  1671. struct iwl3945_rxon_cmd *staging_rxon = (void *)&priv->staging_rxon;
  1672. int rc = 0;
  1673. bool new_assoc =
  1674. !!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK);
  1675. if (!iwl_is_alive(priv))
  1676. return -1;
  1677. /* always get timestamp with Rx frame */
  1678. staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
  1679. /* select antenna */
  1680. staging_rxon->flags &=
  1681. ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
  1682. staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
  1683. rc = iwl_check_rxon_cmd(priv);
  1684. if (rc) {
  1685. IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
  1686. return -EINVAL;
  1687. }
  1688. /* If we don't need to send a full RXON, we can use
  1689. * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
  1690. * and other flags for the current radio configuration. */
  1691. if (!iwl_full_rxon_required(priv)) {
  1692. rc = iwl_send_rxon_assoc(priv);
  1693. if (rc) {
  1694. IWL_ERR(priv, "Error setting RXON_ASSOC "
  1695. "configuration (%d).\n", rc);
  1696. return rc;
  1697. }
  1698. memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
  1699. return 0;
  1700. }
1701. /* If we are currently associated and the new config wants the
1702. * association (RXON_FILTER_ASSOC_MSK) enabled, we must clear the
1703. * ASSOC bit from the active configuration
1704. * before we apply the new config */
  1705. if (iwl_is_associated(priv) && new_assoc) {
  1706. IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
  1707. active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
  1708. /*
  1709. * reserved4 and 5 could have been filled by the iwlcore code.
  1710. * Let's clear them before pushing to the 3945.
  1711. */
  1712. active_rxon->reserved4 = 0;
  1713. active_rxon->reserved5 = 0;
  1714. rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
  1715. sizeof(struct iwl3945_rxon_cmd),
  1716. &priv->active_rxon);
  1717. /* If the mask clearing failed then we set
  1718. * active_rxon back to what it was previously */
  1719. if (rc) {
  1720. active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
  1721. IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
  1722. "configuration (%d).\n", rc);
  1723. return rc;
  1724. }
  1725. iwl_clear_ucode_stations(priv);
  1726. iwl_restore_stations(priv);
  1727. }
  1728. IWL_DEBUG_INFO(priv, "Sending RXON\n"
  1729. "* with%s RXON_FILTER_ASSOC_MSK\n"
  1730. "* channel = %d\n"
  1731. "* bssid = %pM\n",
  1732. (new_assoc ? "" : "out"),
  1733. le16_to_cpu(staging_rxon->channel),
  1734. staging_rxon->bssid_addr);
  1735. /*
  1736. * reserved4 and 5 could have been filled by the iwlcore code.
  1737. * Let's clear them before pushing to the 3945.
  1738. */
  1739. staging_rxon->reserved4 = 0;
  1740. staging_rxon->reserved5 = 0;
  1741. iwl_set_rxon_hwcrypto(priv, !iwl3945_mod_params.sw_crypto);
  1742. /* Apply the new configuration */
  1743. rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
  1744. sizeof(struct iwl3945_rxon_cmd),
  1745. staging_rxon);
  1746. if (rc) {
  1747. IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
  1748. return rc;
  1749. }
  1750. memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
  1751. if (!new_assoc) {
  1752. iwl_clear_ucode_stations(priv);
  1753. iwl_restore_stations(priv);
  1754. }
  1755. /* If we issue a new RXON command which required a tune then we must
  1756. * send a new TXPOWER command or we won't be able to Tx any frames */
  1757. rc = priv->cfg->ops->lib->send_tx_power(priv);
  1758. if (rc) {
  1759. IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
  1760. return rc;
  1761. }
  1762. /* Init the hardware's rate fallback order based on the band */
  1763. rc = iwl3945_init_hw_rate_table(priv);
  1764. if (rc) {
  1765. IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
  1766. return -EIO;
  1767. }
  1768. return 0;
  1769. }
  1770. /**
1771. * iwl3945_reg_txpower_periodic - called when it is time to check our temperature.
  1772. *
  1773. * -- reset periodic timer
  1774. * -- see if temp has changed enough to warrant re-calibration ... if so:
  1775. * -- correct coeffs for temp (can reset temp timer)
  1776. * -- save this temp as "last",
  1777. * -- send new set of gain settings to NIC
  1778. * NOTE: This should continue working, even when we're not associated,
  1779. * so we can keep our internal table of scan powers current. */
  1780. void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
  1781. {
  1782. /* This will kick in the "brute force"
  1783. * iwl3945_hw_reg_comp_txpower_temp() below */
  1784. if (!is_temp_calib_needed(priv))
  1785. goto reschedule;
  1786. /* Set up a new set of temp-adjusted TxPowers, send to NIC.
  1787. * This is based *only* on current temperature,
  1788. * ignoring any previous power measurements */
  1789. iwl3945_hw_reg_comp_txpower_temp(priv);
  1790. reschedule:
  1791. queue_delayed_work(priv->workqueue,
  1792. &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
  1793. }
  1794. static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
  1795. {
  1796. struct iwl_priv *priv = container_of(work, struct iwl_priv,
  1797. _3945.thermal_periodic.work);
  1798. if (test_bit(STATUS_EXIT_PENDING, &priv->status))
  1799. return;
  1800. mutex_lock(&priv->mutex);
  1801. iwl3945_reg_txpower_periodic(priv);
  1802. mutex_unlock(&priv->mutex);
  1803. }
  1804. /**
  1805. * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
  1806. * for the channel.
  1807. *
  1808. * This function is used when initializing channel-info structs.
  1809. *
  1810. * NOTE: These channel groups do *NOT* match the bands above!
  1811. * These channel groups are based on factory-tested channels;
  1812. * on A-band, EEPROM's "group frequency" entries represent the top
1813. * channel in each group 1-4. All B/G channels are in group 0.
  1814. */
  1815. static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
  1816. const struct iwl_channel_info *ch_info)
  1817. {
  1818. struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
  1819. struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
  1820. u8 group;
  1821. u16 group_index = 0; /* based on factory calib frequencies */
  1822. u8 grp_channel;
  1823. /* Find the group index for the channel ... don't use index 1(?) */
  1824. if (is_channel_a_band(ch_info)) {
  1825. for (group = 1; group < 5; group++) {
  1826. grp_channel = ch_grp[group].group_channel;
  1827. if (ch_info->channel <= grp_channel) {
  1828. group_index = group;
  1829. break;
  1830. }
  1831. }
  1832. /* group 4 has a few channels *above* its factory cal freq */
  1833. if (group == 5)
  1834. group_index = 4;
  1835. } else
  1836. group_index = 0; /* 2.4 GHz, group 0 */
  1837. IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
  1838. group_index);
  1839. return group_index;
  1840. }
  1841. /**
  1842. * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
  1843. *
  1844. * Interpolate to get nominal (i.e. at factory calibration temperature) index
  1845. * into radio/DSP gain settings table for requested power.
  1846. */
  1847. static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
  1848. s8 requested_power,
  1849. s32 setting_index, s32 *new_index)
  1850. {
  1851. const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
  1852. struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
  1853. s32 index0, index1;
  1854. s32 power = 2 * requested_power;
  1855. s32 i;
  1856. const struct iwl3945_eeprom_txpower_sample *samples;
  1857. s32 gains0, gains1;
  1858. s32 res;
  1859. s32 denominator;
  1860. chnl_grp = &eeprom->groups[setting_index];
  1861. samples = chnl_grp->samples;
  1862. for (i = 0; i < 5; i++) {
  1863. if (power == samples[i].power) {
  1864. *new_index = samples[i].gain_index;
  1865. return 0;
  1866. }
  1867. }
  1868. if (power > samples[1].power) {
  1869. index0 = 0;
  1870. index1 = 1;
  1871. } else if (power > samples[2].power) {
  1872. index0 = 1;
  1873. index1 = 2;
  1874. } else if (power > samples[3].power) {
  1875. index0 = 2;
  1876. index1 = 3;
  1877. } else {
  1878. index0 = 3;
  1879. index1 = 4;
  1880. }
  1881. denominator = (s32) samples[index1].power - (s32) samples[index0].power;
  1882. if (denominator == 0)
  1883. return -EINVAL;
  1884. gains0 = (s32) samples[index0].gain_index * (1 << 19);
  1885. gains1 = (s32) samples[index1].gain_index * (1 << 19);
  1886. res = gains0 + (gains1 - gains0) *
  1887. ((s32) power - (s32) samples[index0].power) / denominator +
  1888. (1 << 18);
  1889. *new_index = res >> 19;
  1890. return 0;
  1891. }
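/* Worked example (illustrative, not part of the driver): the interpolation
 * above runs in fixed point, scaling gain indices by 2^19 and adding
 * (1 << 18) to round to the nearest integer before the final shift. The
 * sample points and requested power below are made up for illustration. */
#if 0	/* sketch only, never compiled */
static int example_interpolated_gain_index(void)
{
	int p0 = 30, g0 = 20;	/* sample 0: power (half-dBm), gain index */
	int p1 = 20, g1 = 40;	/* sample 1: lower power, higher gain index */
	int power = 24;		/* requested power, between the two samples */
	int gains0 = g0 * (1 << 19);
	int gains1 = g1 * (1 << 19);
	int res = gains0 + (gains1 - gains0) * (power - p0) / (p1 - p0) +
		  (1 << 18);

	return res >> 19;	/* 32: 3/5 of the way from g0 toward g1 */
}
#endif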
  1892. static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
  1893. {
  1894. u32 i;
  1895. s32 rate_index;
  1896. struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
  1897. const struct iwl3945_eeprom_txpower_group *group;
  1898. IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
  1899. for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
  1900. s8 *clip_pwrs; /* table of power levels for each rate */
  1901. s8 satur_pwr; /* saturation power for each chnl group */
  1902. group = &eeprom->groups[i];
  1903. /* sanity check on factory saturation power value */
  1904. if (group->saturation_power < 40) {
  1905. IWL_WARN(priv, "Error: saturation power is %d, "
  1906. "less than minimum expected 40\n",
  1907. group->saturation_power);
  1908. return;
  1909. }
  1910. /*
  1911. * Derive requested power levels for each rate, based on
  1912. * hardware capabilities (saturation power for band).
  1913. * Basic value is 3dB down from saturation, with further
  1914. * power reductions for highest 3 data rates. These
  1915. * backoffs provide headroom for high rate modulation
  1916. * power peaks, without too much distortion (clipping).
  1917. */
  1918. /* we'll fill in this array with h/w max power levels */
  1919. clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
  1920. /* divide factory saturation power by 2 to find -3dB level */
  1921. satur_pwr = (s8) (group->saturation_power >> 1);
  1922. /* fill in channel group's nominal powers for each rate */
  1923. for (rate_index = 0;
  1924. rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
  1925. switch (rate_index) {
  1926. case IWL_RATE_36M_INDEX_TABLE:
  1927. if (i == 0) /* B/G */
  1928. *clip_pwrs = satur_pwr;
  1929. else /* A */
  1930. *clip_pwrs = satur_pwr - 5;
  1931. break;
  1932. case IWL_RATE_48M_INDEX_TABLE:
  1933. if (i == 0)
  1934. *clip_pwrs = satur_pwr - 7;
  1935. else
  1936. *clip_pwrs = satur_pwr - 10;
  1937. break;
  1938. case IWL_RATE_54M_INDEX_TABLE:
  1939. if (i == 0)
  1940. *clip_pwrs = satur_pwr - 9;
  1941. else
  1942. *clip_pwrs = satur_pwr - 12;
  1943. break;
  1944. default:
  1945. *clip_pwrs = satur_pwr;
  1946. break;
  1947. }
  1948. }
  1949. }
  1950. }
  1951. /**
  1952. * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
  1953. *
  1954. * Second pass (during init) to set up priv->channel_info
  1955. *
  1956. * Set up Tx-power settings in our channel info database for each VALID
  1957. * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
  1958. * and current temperature.
  1959. *
  1960. * Since this is based on current temperature (at init time), these values may
  1961. * not be valid for very long, but it gives us a starting/default point,
1962. * and allows us to perform active (i.e. Tx-based) scans.
  1963. *
  1964. * This does *not* write values to NIC, just sets up our internal table.
  1965. */
  1966. int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
  1967. {
  1968. struct iwl_channel_info *ch_info = NULL;
  1969. struct iwl3945_channel_power_info *pwr_info;
  1970. struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
  1971. int delta_index;
  1972. u8 rate_index;
  1973. u8 scan_tbl_index;
  1974. const s8 *clip_pwrs; /* array of power levels for each rate */
  1975. u8 gain, dsp_atten;
  1976. s8 power;
  1977. u8 pwr_index, base_pwr_index, a_band;
  1978. u8 i;
  1979. int temperature;
  1980. /* save temperature reference,
  1981. * so we can determine next time to calibrate */
  1982. temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
  1983. priv->last_temperature = temperature;
  1984. iwl3945_hw_reg_init_channel_groups(priv);
  1985. /* initialize Tx power info for each and every channel, 2.4 and 5.x */
  1986. for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
  1987. i++, ch_info++) {
  1988. a_band = is_channel_a_band(ch_info);
  1989. if (!is_channel_valid(ch_info))
  1990. continue;
  1991. /* find this channel's channel group (*not* "band") index */
  1992. ch_info->group_index =
  1993. iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
  1994. /* Get this chnlgrp's rate->max/clip-powers table */
  1995. clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
  1996. /* calculate power index *adjustment* value according to
  1997. * diff between current temperature and factory temperature */
  1998. delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
  1999. eeprom->groups[ch_info->group_index].
  2000. temperature);
  2001. IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
  2002. ch_info->channel, delta_index, temperature +
  2003. IWL_TEMP_CONVERT);
  2004. /* set tx power value for all OFDM rates */
  2005. for (rate_index = 0; rate_index < IWL_OFDM_RATES;
  2006. rate_index++) {
  2007. s32 uninitialized_var(power_idx);
  2008. int rc;
  2009. /* use channel group's clip-power table,
  2010. * but don't exceed channel's max power */
  2011. s8 pwr = min(ch_info->max_power_avg,
  2012. clip_pwrs[rate_index]);
  2013. pwr_info = &ch_info->power_info[rate_index];
  2014. /* get base (i.e. at factory-measured temperature)
  2015. * power table index for this rate's power */
  2016. rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
  2017. ch_info->group_index,
  2018. &power_idx);
  2019. if (rc) {
  2020. IWL_ERR(priv, "Invalid power index\n");
  2021. return rc;
  2022. }
  2023. pwr_info->base_power_index = (u8) power_idx;
  2024. /* temperature compensate */
  2025. power_idx += delta_index;
  2026. /* stay within range of gain table */
  2027. power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
  2028. /* fill 1 OFDM rate's iwl3945_channel_power_info struct */
  2029. pwr_info->requested_power = pwr;
  2030. pwr_info->power_table_index = (u8) power_idx;
  2031. pwr_info->tpc.tx_gain =
  2032. power_gain_table[a_band][power_idx].tx_gain;
  2033. pwr_info->tpc.dsp_atten =
  2034. power_gain_table[a_band][power_idx].dsp_atten;
  2035. }
  2036. /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
  2037. pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
  2038. power = pwr_info->requested_power +
  2039. IWL_CCK_FROM_OFDM_POWER_DIFF;
  2040. pwr_index = pwr_info->power_table_index +
  2041. IWL_CCK_FROM_OFDM_INDEX_DIFF;
  2042. base_pwr_index = pwr_info->base_power_index +
  2043. IWL_CCK_FROM_OFDM_INDEX_DIFF;
  2044. /* stay within table range */
  2045. pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
  2046. gain = power_gain_table[a_band][pwr_index].tx_gain;
  2047. dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;
  2048. /* fill each CCK rate's iwl3945_channel_power_info structure
  2049. * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
  2050. * NOTE: CCK rates start at end of OFDM rates! */
  2051. for (rate_index = 0;
  2052. rate_index < IWL_CCK_RATES; rate_index++) {
  2053. pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
  2054. pwr_info->requested_power = power;
  2055. pwr_info->power_table_index = pwr_index;
  2056. pwr_info->base_power_index = base_pwr_index;
  2057. pwr_info->tpc.tx_gain = gain;
  2058. pwr_info->tpc.dsp_atten = dsp_atten;
  2059. }
  2060. /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
  2061. for (scan_tbl_index = 0;
  2062. scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
  2063. s32 actual_index = (scan_tbl_index == 0) ?
  2064. IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
  2065. iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
  2066. actual_index, clip_pwrs, ch_info, a_band);
  2067. }
  2068. }
  2069. return 0;
  2070. }
  2071. int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
  2072. {
  2073. int rc;
  2074. iwl_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
  2075. rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
  2076. FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
  2077. if (rc < 0)
  2078. IWL_ERR(priv, "Can't stop Rx DMA.\n");
  2079. return 0;
  2080. }
  2081. int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
  2082. {
  2083. int txq_id = txq->q.id;
  2084. struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
  2085. shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
  2086. iwl_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
  2087. iwl_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
  2088. iwl_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
  2089. FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
  2090. FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
  2091. FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
  2092. FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
  2093. FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
  2094. /* fake read to flush all prev. writes */
  2095. iwl_read32(priv, FH39_TSSR_CBB_BASE);
  2096. return 0;
  2097. }
  2098. /*
  2099. * HCMD utils
  2100. */
  2101. static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
  2102. {
  2103. switch (cmd_id) {
  2104. case REPLY_RXON:
  2105. return sizeof(struct iwl3945_rxon_cmd);
  2106. case POWER_TABLE_CMD:
  2107. return sizeof(struct iwl3945_powertable_cmd);
  2108. default:
  2109. return len;
  2110. }
  2111. }
  2112. static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
  2113. {
  2114. struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
  2115. addsta->mode = cmd->mode;
  2116. memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
  2117. memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
  2118. addsta->station_flags = cmd->station_flags;
  2119. addsta->station_flags_msk = cmd->station_flags_msk;
  2120. addsta->tid_disable_tx = cpu_to_le16(0);
  2121. addsta->rate_n_flags = cmd->rate_n_flags;
  2122. addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
  2123. addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
  2124. addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
  2125. return (u16)sizeof(struct iwl3945_addsta_cmd);
  2126. }
  2127. static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
  2128. struct ieee80211_vif *vif, bool add)
  2129. {
  2130. struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
  2131. int ret;
  2132. if (add) {
  2133. ret = iwl_add_bssid_station(priv, vif->bss_conf.bssid, false,
  2134. &vif_priv->ibss_bssid_sta_id);
  2135. if (ret)
  2136. return ret;
  2137. iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
  2138. (priv->band == IEEE80211_BAND_5GHZ) ?
  2139. IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
  2140. CMD_ASYNC);
  2141. iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
  2142. return 0;
  2143. }
  2144. return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
  2145. vif->bss_conf.bssid);
  2146. }
  2147. /**
  2148. * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
  2149. */
  2150. int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
  2151. {
  2152. int rc, i, index, prev_index;
  2153. struct iwl3945_rate_scaling_cmd rate_cmd = {
  2154. .reserved = {0, 0, 0},
  2155. };
  2156. struct iwl3945_rate_scaling_info *table = rate_cmd.table;
  2157. for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
  2158. index = iwl3945_rates[i].table_rs_index;
  2159. table[index].rate_n_flags =
  2160. iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
  2161. table[index].try_cnt = priv->retry_rate;
  2162. prev_index = iwl3945_get_prev_ieee_rate(i);
  2163. table[index].next_rate_index =
  2164. iwl3945_rates[prev_index].table_rs_index;
  2165. }
  2166. switch (priv->band) {
  2167. case IEEE80211_BAND_5GHZ:
  2168. IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
  2169. /* If one of the following CCK rates is used,
  2170. * have it fall back to the 6M OFDM rate */
  2171. for (i = IWL_RATE_1M_INDEX_TABLE;
  2172. i <= IWL_RATE_11M_INDEX_TABLE; i++)
  2173. table[i].next_rate_index =
  2174. iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
  2175. /* Don't fall back to CCK rates */
  2176. table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
  2177. IWL_RATE_9M_INDEX_TABLE;
  2178. /* Don't drop out of OFDM rates */
  2179. table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
  2180. iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
  2181. break;
  2182. case IEEE80211_BAND_2GHZ:
  2183. IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
  2184. /* If an OFDM rate is used, have it fall back to the
  2185. * 1M CCK rates */
  2186. if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
  2187. iwl_is_associated(priv)) {
  2188. index = IWL_FIRST_CCK_RATE;
  2189. for (i = IWL_RATE_6M_INDEX_TABLE;
  2190. i <= IWL_RATE_54M_INDEX_TABLE; i++)
  2191. table[i].next_rate_index =
  2192. iwl3945_rates[index].table_rs_index;
  2193. index = IWL_RATE_11M_INDEX_TABLE;
  2194. /* CCK shouldn't fall back to OFDM... */
  2195. table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
  2196. }
  2197. break;
  2198. default:
  2199. WARN_ON(1);
  2200. break;
  2201. }
  2202. /* Update the rate scaling for control frame Tx */
  2203. rate_cmd.table_id = 0;
  2204. rc = iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
  2205. &rate_cmd);
  2206. if (rc)
  2207. return rc;
  2208. /* Update the rate scaling for data frame Tx */
  2209. rate_cmd.table_id = 1;
  2210. return iwl_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
  2211. &rate_cmd);
  2212. }
  2213. /* Called when initializing driver */
  2214. int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
  2215. {
  2216. memset((void *)&priv->hw_params, 0,
  2217. sizeof(struct iwl_hw_params));
  2218. priv->_3945.shared_virt =
  2219. dma_alloc_coherent(&priv->pci_dev->dev,
  2220. sizeof(struct iwl3945_shared),
  2221. &priv->_3945.shared_phys, GFP_KERNEL);
  2222. if (!priv->_3945.shared_virt) {
  2223. IWL_ERR(priv, "failed to allocate pci memory\n");
  2224. return -ENOMEM;
  2225. }
  2226. /* Assign number of Usable TX queues */
  2227. priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
  2228. priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
  2229. priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
  2230. priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
  2231. priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
  2232. priv->hw_params.max_stations = IWL3945_STATION_COUNT;
  2233. priv->hw_params.bcast_sta_id = IWL3945_BROADCAST_ID;
  2234. priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
  2235. priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
  2236. return 0;
  2237. }
  2238. unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
  2239. struct iwl3945_frame *frame, u8 rate)
  2240. {
  2241. struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
  2242. unsigned int frame_size;
  2243. tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
  2244. memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
  2245. tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
  2246. tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
  2247. frame_size = iwl3945_fill_beacon_frame(priv,
  2248. tx_beacon_cmd->frame,
  2249. sizeof(frame->u) - sizeof(*tx_beacon_cmd));
  2250. BUG_ON(frame_size > MAX_MPDU_SIZE);
  2251. tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
  2252. tx_beacon_cmd->tx.rate = rate;
  2253. tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
  2254. TX_CMD_FLG_TSF_MSK);
2255. /* supp_rates[0] == OFDM rates, starting at IWL_FIRST_OFDM_RATE */
  2256. tx_beacon_cmd->tx.supp_rates[0] =
  2257. (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
  2258. tx_beacon_cmd->tx.supp_rates[1] =
  2259. (IWL_CCK_BASIC_RATES_MASK & 0xF);
  2260. return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
  2261. }
  2262. void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
  2263. {
  2264. priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
  2265. priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
  2266. }
  2267. void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
  2268. {
  2269. INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
  2270. iwl3945_bg_reg_txpower_periodic);
  2271. }
  2272. void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
  2273. {
  2274. cancel_delayed_work(&priv->_3945.thermal_periodic);
  2275. }
  2276. /* check contents of special bootstrap uCode SRAM */
  2277. static int iwl3945_verify_bsm(struct iwl_priv *priv)
  2278. {
  2279. __le32 *image = priv->ucode_boot.v_addr;
  2280. u32 len = priv->ucode_boot.len;
  2281. u32 reg;
  2282. u32 val;
  2283. IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
  2284. /* verify BSM SRAM contents */
  2285. val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
  2286. for (reg = BSM_SRAM_LOWER_BOUND;
  2287. reg < BSM_SRAM_LOWER_BOUND + len;
  2288. reg += sizeof(u32), image++) {
  2289. val = iwl_read_prph(priv, reg);
  2290. if (val != le32_to_cpu(*image)) {
  2291. IWL_ERR(priv, "BSM uCode verification failed at "
  2292. "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
  2293. BSM_SRAM_LOWER_BOUND,
  2294. reg - BSM_SRAM_LOWER_BOUND, len,
  2295. val, le32_to_cpu(*image));
  2296. return -EIO;
  2297. }
  2298. }
  2299. IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
  2300. return 0;
  2301. }
  2302. /******************************************************************************
  2303. *
  2304. * EEPROM related functions
  2305. *
  2306. ******************************************************************************/
  2307. /*
  2308. * Clear the OWNER_MSK, to establish driver (instead of uCode running on
  2309. * embedded controller) as EEPROM reader; each read is a series of pulses
  2310. * to/from the EEPROM chip, not a single event, so even reads could conflict
  2311. * if they weren't arbitrated by some ownership mechanism. Here, the driver
  2312. * simply claims ownership, which should be safe when this function is called
  2313. * (i.e. before loading uCode!).
  2314. */
  2315. static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
  2316. {
  2317. _iwl_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
  2318. return 0;
  2319. }
  2320. static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
  2321. {
  2322. return;
  2323. }
  2324. /**
  2325. * iwl3945_load_bsm - Load bootstrap instructions
  2326. *
  2327. * BSM operation:
  2328. *
  2329. * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
  2330. * in special SRAM that does not power down during RFKILL. When powering back
  2331. * up after power-saving sleeps (or during initial uCode load), the BSM loads
  2332. * the bootstrap program into the on-board processor, and starts it.
  2333. *
  2334. * The bootstrap program loads (via DMA) instructions and data for a new
  2335. * program from host DRAM locations indicated by the host driver in the
  2336. * BSM_DRAM_* registers. Once the new program is loaded, it starts
  2337. * automatically.
  2338. *
  2339. * When initializing the NIC, the host driver points the BSM to the
  2340. * "initialize" uCode image. This uCode sets up some internal data, then
  2341. * notifies host via "initialize alive" that it is complete.
  2342. *
  2343. * The host then replaces the BSM_DRAM_* pointer values to point to the
  2344. * normal runtime uCode instructions and a backup uCode data cache buffer
  2345. * (filled initially with starting data values for the on-board processor),
  2346. * then triggers the "initialize" uCode to load and launch the runtime uCode,
  2347. * which begins normal operation.
  2348. *
  2349. * When doing a power-save shutdown, runtime uCode saves data SRAM into
  2350. * the backup data cache in DRAM before SRAM is powered down.
  2351. *
  2352. * When powering back up, the BSM loads the bootstrap program. This reloads
  2353. * the runtime uCode instructions and the backup data cache into SRAM,
  2354. * and re-launches the runtime uCode from where it left off.
  2355. */
  2356. static int iwl3945_load_bsm(struct iwl_priv *priv)
  2357. {
  2358. __le32 *image = priv->ucode_boot.v_addr;
  2359. u32 len = priv->ucode_boot.len;
  2360. dma_addr_t pinst;
  2361. dma_addr_t pdata;
  2362. u32 inst_len;
  2363. u32 data_len;
  2364. int rc;
  2365. int i;
  2366. u32 done;
  2367. u32 reg_offset;
  2368. IWL_DEBUG_INFO(priv, "Begin load bsm\n");
  2369. /* make sure bootstrap program is no larger than BSM's SRAM size */
  2370. if (len > IWL39_MAX_BSM_SIZE)
  2371. return -EINVAL;
  2372. /* Tell bootstrap uCode where to find the "Initialize" uCode
  2373. * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
  2374. * NOTE: iwl3945_initialize_alive_start() will replace these values,
  2375. * after the "initialize" uCode has run, to point to
  2376. * runtime/protocol instructions and backup data cache. */
  2377. pinst = priv->ucode_init.p_addr;
  2378. pdata = priv->ucode_init_data.p_addr;
  2379. inst_len = priv->ucode_init.len;
  2380. data_len = priv->ucode_init_data.len;
  2381. iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
  2382. iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
  2383. iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
  2384. iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
  2385. /* Fill BSM memory with bootstrap instructions */
  2386. for (reg_offset = BSM_SRAM_LOWER_BOUND;
  2387. reg_offset < BSM_SRAM_LOWER_BOUND + len;
  2388. reg_offset += sizeof(u32), image++)
  2389. _iwl_write_prph(priv, reg_offset,
  2390. le32_to_cpu(*image));
  2391. rc = iwl3945_verify_bsm(priv);
  2392. if (rc)
  2393. return rc;
  2394. /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
  2395. iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
  2396. iwl_write_prph(priv, BSM_WR_MEM_DST_REG,
  2397. IWL39_RTC_INST_LOWER_BOUND);
  2398. iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
  2399. /* Load bootstrap code into instruction SRAM now,
  2400. * to prepare to load "initialize" uCode */
  2401. iwl_write_prph(priv, BSM_WR_CTRL_REG,
  2402. BSM_WR_CTRL_REG_BIT_START);
  2403. /* Wait for load of bootstrap uCode to finish */
  2404. for (i = 0; i < 100; i++) {
  2405. done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
  2406. if (!(done & BSM_WR_CTRL_REG_BIT_START))
  2407. break;
  2408. udelay(10);
  2409. }
  2410. if (i < 100)
  2411. IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
  2412. else {
  2413. IWL_ERR(priv, "BSM write did not complete!\n");
  2414. return -EIO;
  2415. }
  2416. /* Enable future boot loads whenever power management unit triggers it
  2417. * (e.g. when powering back up after power-save shutdown) */
  2418. iwl_write_prph(priv, BSM_WR_CTRL_REG,
  2419. BSM_WR_CTRL_REG_BIT_START_EN);
  2420. return 0;
  2421. }
  2422. static struct iwl_hcmd_ops iwl3945_hcmd = {
  2423. .rxon_assoc = iwl3945_send_rxon_assoc,
  2424. .commit_rxon = iwl3945_commit_rxon,
  2425. .send_bt_config = iwl_send_bt_config,
  2426. };
  2427. static struct iwl_lib_ops iwl3945_lib = {
  2428. .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
  2429. .txq_free_tfd = iwl3945_hw_txq_free_tfd,
  2430. .txq_init = iwl3945_hw_tx_queue_init,
  2431. .load_ucode = iwl3945_load_bsm,
  2432. .dump_nic_event_log = iwl3945_dump_nic_event_log,
  2433. .dump_nic_error_log = iwl3945_dump_nic_error_log,
  2434. .apm_ops = {
  2435. .init = iwl3945_apm_init,
  2436. .stop = iwl_apm_stop,
  2437. .config = iwl3945_nic_config,
  2438. .set_pwr_src = iwl3945_set_pwr_src,
  2439. },
  2440. .eeprom_ops = {
  2441. .regulatory_bands = {
  2442. EEPROM_REGULATORY_BAND_1_CHANNELS,
  2443. EEPROM_REGULATORY_BAND_2_CHANNELS,
  2444. EEPROM_REGULATORY_BAND_3_CHANNELS,
  2445. EEPROM_REGULATORY_BAND_4_CHANNELS,
  2446. EEPROM_REGULATORY_BAND_5_CHANNELS,
  2447. EEPROM_REGULATORY_BAND_NO_HT40,
  2448. EEPROM_REGULATORY_BAND_NO_HT40,
  2449. },
  2450. .verify_signature = iwlcore_eeprom_verify_signature,
  2451. .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
  2452. .release_semaphore = iwl3945_eeprom_release_semaphore,
  2453. .query_addr = iwlcore_eeprom_query_addr,
  2454. },
  2455. .send_tx_power = iwl3945_send_tx_power,
  2456. .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
  2457. .post_associate = iwl3945_post_associate,
  2458. .isr = iwl_isr_legacy,
  2459. .config_ap = iwl3945_config_ap,
  2460. .manage_ibss_station = iwl3945_manage_ibss_station,
  2461. .recover_from_tx_stall = iwl_bg_monitor_recover,
  2462. .check_plcp_health = iwl3945_good_plcp_health,
  2463. .debugfs_ops = {
  2464. .rx_stats_read = iwl3945_ucode_rx_stats_read,
  2465. .tx_stats_read = iwl3945_ucode_tx_stats_read,
  2466. .general_stats_read = iwl3945_ucode_general_stats_read,
  2467. },
  2468. };
  2469. static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
  2470. .get_hcmd_size = iwl3945_get_hcmd_size,
  2471. .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
  2472. .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
  2473. .request_scan = iwl3945_request_scan,
  2474. };
  2475. static const struct iwl_ops iwl3945_ops = {
  2476. .lib = &iwl3945_lib,
  2477. .hcmd = &iwl3945_hcmd,
  2478. .utils = &iwl3945_hcmd_utils,
  2479. .led = &iwl3945_led_ops,
  2480. };
  2481. static struct iwl_cfg iwl3945_bg_cfg = {
  2482. .name = "3945BG",
  2483. .fw_name_pre = IWL3945_FW_PRE,
  2484. .ucode_api_max = IWL3945_UCODE_API_MAX,
  2485. .ucode_api_min = IWL3945_UCODE_API_MIN,
  2486. .sku = IWL_SKU_G,
  2487. .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
  2488. .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
  2489. .ops = &iwl3945_ops,
  2490. .num_of_queues = IWL39_NUM_QUEUES,
  2491. .mod_params = &iwl3945_mod_params,
  2492. .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
  2493. .set_l0s = false,
  2494. .use_bsm = true,
  2495. .use_isr_legacy = true,
  2496. .ht_greenfield_support = false,
  2497. .led_compensation = 64,
  2498. .broken_powersave = true,
  2499. .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
  2500. .monitor_recover_period = IWL_MONITORING_PERIOD,
  2501. .max_event_log_size = 512,
  2502. .tx_power_by_driver = true,
  2503. };
  2504. static struct iwl_cfg iwl3945_abg_cfg = {
  2505. .name = "3945ABG",
  2506. .fw_name_pre = IWL3945_FW_PRE,
  2507. .ucode_api_max = IWL3945_UCODE_API_MAX,
  2508. .ucode_api_min = IWL3945_UCODE_API_MIN,
  2509. .sku = IWL_SKU_A|IWL_SKU_G,
  2510. .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
  2511. .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
  2512. .ops = &iwl3945_ops,
  2513. .num_of_queues = IWL39_NUM_QUEUES,
  2514. .mod_params = &iwl3945_mod_params,
  2515. .use_isr_legacy = true,
  2516. .ht_greenfield_support = false,
  2517. .led_compensation = 64,
  2518. .broken_powersave = true,
  2519. .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
  2520. .monitor_recover_period = IWL_MONITORING_PERIOD,
  2521. .max_event_log_size = 512,
  2522. .tx_power_by_driver = true,
  2523. };
  2524. DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
  2525. {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
  2526. {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
  2527. {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
  2528. {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
  2529. {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
  2530. {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
  2531. {0}
  2532. };
  2533. MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);