ipw2200.c
  1. /******************************************************************************
  2. Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
  3. 802.11 status code portion of this file from ethereal-0.10.6:
  4. Copyright 2000, Axis Communications AB
  5. Ethereal - Network traffic analyzer
  6. By Gerald Combs <gerald@ethereal.com>
  7. Copyright 1998 Gerald Combs
  8. This program is free software; you can redistribute it and/or modify it
  9. under the terms of version 2 of the GNU General Public License as
  10. published by the Free Software Foundation.
  11. This program is distributed in the hope that it will be useful, but WITHOUT
  12. ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  13. FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  14. more details.
  15. You should have received a copy of the GNU General Public License along with
  16. this program; if not, write to the Free Software Foundation, Inc., 59
  17. Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  18. The full GNU General Public License is included in this distribution in the
  19. file called LICENSE.
  20. Contact Information:
  21. James P. Ketrenos <ipw2100-admin@linux.intel.com>
  22. Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  23. ******************************************************************************/
  24. #include "ipw2200.h"
  25. #define IPW2200_VERSION "1.0.0"
  26. #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
  27. #define DRV_COPYRIGHT "Copyright(c) 2003-2004 Intel Corporation"
  28. #define DRV_VERSION IPW2200_VERSION
  29. MODULE_DESCRIPTION(DRV_DESCRIPTION);
  30. MODULE_VERSION(DRV_VERSION);
  31. MODULE_AUTHOR(DRV_COPYRIGHT);
  32. MODULE_LICENSE("GPL");
/* Module-level configuration knobs (presumably exposed as module
 * parameters via module_param() calls elsewhere in this file --
 * TODO confirm against the option-parsing code). */
static int debug = 0;		/* debug output disabled by default */
static int channel = 0;		/* 0 = no channel forced on the driver */
static char *ifname;		/* optional interface-name override */
static int mode = 0;		/* operating mode selector; 0 is the default -- semantics set where it is consumed */
static u32 ipw_debug_level;	/* bitmask gating debug output (see printk_buf) */
static int associate = 1;	/* attempt association by default */
static int auto_create = 1;	/* auto-create behavior enabled by default */
static int disable = 0;		/* 0 = device not disabled at load */

/* One-character labels for the driver's modes; '?' = unknown/other. */
static const char ipw_modes[] = {
	'a', 'b', 'g', '?'
};
/* Forward declarations for routines defined later in this file. */
static void ipw_rx(struct ipw_priv *priv);
static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
				struct clx2_tx_queue *txq, int qindex);
static int ipw_queue_reset(struct ipw_priv *priv);
static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
			     int len, int sync);
/* RX/TX queue lifecycle helpers. */
static void ipw_tx_queue_free(struct ipw_priv *);
static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
static void ipw_rx_queue_replenish(void *);
/* Device bring-up / tear-down / configuration. */
static int ipw_up(struct ipw_priv *);
static void ipw_down(struct ipw_priv *);
static int ipw_config(struct ipw_priv *);
static int init_supported_rates(struct ipw_priv *priv,
				struct ipw_supported_rates *prates);
/* Channel numbers accepted for 802.11b/g; 0-terminated (see
 * is_valid_channel, which scans the full array -- the trailing 0
 * never matches because a channel of 0 is rejected up front). */
static u8 band_b_active_channel[MAX_B_CHANNELS] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0
};

/* Channel numbers accepted for 802.11a; 0-terminated as above. */
static u8 band_a_active_channel[MAX_A_CHANNELS] = {
	36, 40, 44, 48, 149, 153, 157, 161, 165, 52, 56, 60, 64, 0
};
  65. static int is_valid_channel(int mode_mask, int channel)
  66. {
  67. int i;
  68. if (!channel)
  69. return 0;
  70. if (mode_mask & IEEE_A)
  71. for (i = 0; i < MAX_A_CHANNELS; i++)
  72. if (band_a_active_channel[i] == channel)
  73. return IEEE_A;
  74. if (mode_mask & (IEEE_B | IEEE_G))
  75. for (i = 0; i < MAX_B_CHANNELS; i++)
  76. if (band_b_active_channel[i] == channel)
  77. return mode_mask & (IEEE_B | IEEE_G);
  78. return 0;
  79. }
  80. static char *snprint_line(char *buf, size_t count,
  81. const u8 * data, u32 len, u32 ofs)
  82. {
  83. int out, i, j, l;
  84. char c;
  85. out = snprintf(buf, count, "%08X", ofs);
  86. for (l = 0, i = 0; i < 2; i++) {
  87. out += snprintf(buf + out, count - out, " ");
  88. for (j = 0; j < 8 && l < len; j++, l++)
  89. out += snprintf(buf + out, count - out, "%02X ",
  90. data[(i * 8 + j)]);
  91. for (; j < 8; j++)
  92. out += snprintf(buf + out, count - out, " ");
  93. }
  94. out += snprintf(buf + out, count - out, " ");
  95. for (l = 0, i = 0; i < 2; i++) {
  96. out += snprintf(buf + out, count - out, " ");
  97. for (j = 0; j < 8 && l < len; j++, l++) {
  98. c = data[(i * 8 + j)];
  99. if (!isascii(c) || !isprint(c))
  100. c = '.';
  101. out += snprintf(buf + out, count - out, "%c", c);
  102. }
  103. for (; j < 8; j++)
  104. out += snprintf(buf + out, count - out, " ");
  105. }
  106. return buf;
  107. }
  108. static void printk_buf(int level, const u8 * data, u32 len)
  109. {
  110. char line[81];
  111. u32 ofs = 0;
  112. if (!(ipw_debug_level & level))
  113. return;
  114. while (len) {
  115. printk(KERN_DEBUG "%s\n",
  116. snprint_line(line, sizeof(line), &data[ofs],
  117. min(len, 16U), ofs));
  118. ofs += 16;
  119. len -= min(len, 16U);
  120. }
  121. }
/* 32-bit indirect read (for SRAM/registers above 4K); debug alias below. */
static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)

/* 8-bit indirect read; debug alias below. */
static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
  126. static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
  127. static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
  128. {
  129. IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
  130. __LINE__, (u32) (b), (u32) (c));
  131. _ipw_write_reg8(a, b, c);
  132. }
  133. static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
  134. static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
  135. {
  136. IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
  137. __LINE__, (u32) (b), (u32) (c));
  138. _ipw_write_reg16(a, b, c);
  139. }
  140. static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
  141. static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
  142. {
  143. IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
  144. __LINE__, (u32) (b), (u32) (c));
  145. _ipw_write_reg32(a, b, c);
  146. }
  147. #define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
  148. #define ipw_write8(ipw, ofs, val) \
  149. IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
  150. _ipw_write8(ipw, ofs, val)
  151. #define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
  152. #define ipw_write16(ipw, ofs, val) \
  153. IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
  154. _ipw_write16(ipw, ofs, val)
  155. #define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
  156. #define ipw_write32(ipw, ofs, val) \
  157. IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
  158. _ipw_write32(ipw, ofs, val)
  159. #define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
  160. static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
  161. {
  162. IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
  163. return _ipw_read8(ipw, ofs);
  164. }
  165. #define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
  166. #define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
  167. static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
  168. {
  169. IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
  170. return _ipw_read16(ipw, ofs);
  171. }
  172. #define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
  173. #define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
  174. static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
  175. {
  176. IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
  177. return _ipw_read32(ipw, ofs);
  178. }
  179. #define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
  180. static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
  181. #define ipw_read_indirect(a, b, c, d) \
  182. IPW_DEBUG_IO("%s %d: read_inddirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
  183. _ipw_read_indirect(a, b, c, d)
  184. static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
  185. int num);
  186. #define ipw_write_indirect(a, b, c, d) \
  187. IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
  188. _ipw_write_indirect(a, b, c, d)
/* indirect writes */
/*
 * 32-bit indirect write: latch the target address into the INDIRECT_ADDR
 * register, then write the value through INDIRECT_DATA.  The two-step
 * order is required by the hardware.
 */
static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
{
	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
	_ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
	_ipw_write32(priv, CX2_INDIRECT_DATA, value);
}
  196. static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
  197. {
  198. IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
  199. _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
  200. _ipw_write8(priv, CX2_INDIRECT_DATA, value);
  201. IPW_DEBUG_IO(" reg = 0x%8lX : value = 0x%8X\n",
  202. (unsigned long)(priv->hw_base + CX2_INDIRECT_DATA), value);
  203. }
  204. static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
  205. {
  206. IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
  207. _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
  208. _ipw_write16(priv, CX2_INDIRECT_DATA, value);
  209. }
/* indirect reads */
/*
 * 8-bit indirect read: latch the dword-aligned address, read the whole
 * dword, then shift out the byte selected by the low two address bits.
 */
static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
{
	u32 word;
	_ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
	IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
	word = _ipw_read32(priv, CX2_INDIRECT_DATA);
	/* pick the requested byte lane out of the dword */
	return (word >> ((reg & 0x3) * 8)) & 0xff;
}
/*
 * 32-bit indirect read: latch the address, then read the value back
 * through the INDIRECT_DATA window.
 */
static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
{
	u32 value;
	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
	_ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
	value = _ipw_read32(priv, CX2_INDIRECT_DATA);
	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
	return value;
}
  228. /* iterative/auto-increment 32 bit reads and writes */
  229. static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
  230. int num)
  231. {
  232. u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
  233. u32 dif_len = addr - aligned_addr;
  234. u32 aligned_len;
  235. u32 i;
  236. IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
  237. /* Read the first nibble byte by byte */
  238. if (unlikely(dif_len)) {
  239. /* Start reading at aligned_addr + dif_len */
  240. _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
  241. for (i = dif_len; i < 4; i++, buf++)
  242. *buf = _ipw_read8(priv, CX2_INDIRECT_DATA + i);
  243. num -= dif_len;
  244. aligned_addr += 4;
  245. }
  246. /* Read DWs through autoinc register */
  247. _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
  248. aligned_len = num & CX2_INDIRECT_ADDR_MASK;
  249. for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
  250. *(u32 *) buf = ipw_read32(priv, CX2_AUTOINC_DATA);
  251. /* Copy the last nibble */
  252. dif_len = num - aligned_len;
  253. _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
  254. for (i = 0; i < dif_len; i++, buf++)
  255. *buf = ipw_read8(priv, CX2_INDIRECT_DATA + i);
  256. }
  257. static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
  258. int num)
  259. {
  260. u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
  261. u32 dif_len = addr - aligned_addr;
  262. u32 aligned_len;
  263. u32 i;
  264. IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
  265. /* Write the first nibble byte by byte */
  266. if (unlikely(dif_len)) {
  267. /* Start writing at aligned_addr + dif_len */
  268. _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
  269. for (i = dif_len; i < 4; i++, buf++)
  270. _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
  271. num -= dif_len;
  272. aligned_addr += 4;
  273. }
  274. /* Write DWs through autoinc register */
  275. _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
  276. aligned_len = num & CX2_INDIRECT_ADDR_MASK;
  277. for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
  278. _ipw_write32(priv, CX2_AUTOINC_DATA, *(u32 *) buf);
  279. /* Copy the last nibble */
  280. dif_len = num - aligned_len;
  281. _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
  282. for (i = 0; i < dif_len; i++, buf++)
  283. _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
  284. }
/* Bulk copy @num bytes from @buf straight into mapped device memory at
 * @addr (no indirect-address window involved). */
static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
			     int num)
{
	memcpy_toio((priv->hw_base + addr), buf, num);
}
  290. static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
  291. {
  292. ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
  293. }
  294. static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
  295. {
  296. ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
  297. }
  298. static inline void ipw_enable_interrupts(struct ipw_priv *priv)
  299. {
  300. if (priv->status & STATUS_INT_ENABLED)
  301. return;
  302. priv->status |= STATUS_INT_ENABLED;
  303. ipw_write32(priv, CX2_INTA_MASK_R, CX2_INTA_MASK_ALL);
  304. }
  305. static inline void ipw_disable_interrupts(struct ipw_priv *priv)
  306. {
  307. if (!(priv->status & STATUS_INT_ENABLED))
  308. return;
  309. priv->status &= ~STATUS_INT_ENABLED;
  310. ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
  311. }
  312. static char *ipw_error_desc(u32 val)
  313. {
  314. switch (val) {
  315. case IPW_FW_ERROR_OK:
  316. return "ERROR_OK";
  317. case IPW_FW_ERROR_FAIL:
  318. return "ERROR_FAIL";
  319. case IPW_FW_ERROR_MEMORY_UNDERFLOW:
  320. return "MEMORY_UNDERFLOW";
  321. case IPW_FW_ERROR_MEMORY_OVERFLOW:
  322. return "MEMORY_OVERFLOW";
  323. case IPW_FW_ERROR_BAD_PARAM:
  324. return "ERROR_BAD_PARAM";
  325. case IPW_FW_ERROR_BAD_CHECKSUM:
  326. return "ERROR_BAD_CHECKSUM";
  327. case IPW_FW_ERROR_NMI_INTERRUPT:
  328. return "ERROR_NMI_INTERRUPT";
  329. case IPW_FW_ERROR_BAD_DATABASE:
  330. return "ERROR_BAD_DATABASE";
  331. case IPW_FW_ERROR_ALLOC_FAIL:
  332. return "ERROR_ALLOC_FAIL";
  333. case IPW_FW_ERROR_DMA_UNDERRUN:
  334. return "ERROR_DMA_UNDERRUN";
  335. case IPW_FW_ERROR_DMA_STATUS:
  336. return "ERROR_DMA_STATUS";
  337. case IPW_FW_ERROR_DINOSTATUS_ERROR:
  338. return "ERROR_DINOSTATUS_ERROR";
  339. case IPW_FW_ERROR_EEPROMSTATUS_ERROR:
  340. return "ERROR_EEPROMSTATUS_ERROR";
  341. case IPW_FW_ERROR_SYSASSERT:
  342. return "ERROR_SYSASSERT";
  343. case IPW_FW_ERROR_FATAL_ERROR:
  344. return "ERROR_FATALSTATUS_ERROR";
  345. default:
  346. return "UNKNOWNSTATUS_ERROR";
  347. }
  348. }
/*
 * Dump the firmware error log from NIC SRAM.  The log's base address is
 * read from IPWSTATUS_ERROR_LOG; the first word at the base is the entry
 * count, followed by 7-dword entries (desc, time, blink1/2, ilink1/2,
 * idata).
 */
static void ipw_dump_nic_error_log(struct ipw_priv *priv)
{
	u32 desc, time, blink1, blink2, ilink1, ilink2, idata, i, count, base;
	base = ipw_read32(priv, IPWSTATUS_ERROR_LOG);
	count = ipw_read_reg32(priv, base);
	/* only print the banner when there is at least one full entry */
	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
		IPW_ERROR("Start IPW Error Log Dump:\n");
		IPW_ERROR("Status: 0x%08X, Config: %08X\n",
			  priv->status, priv->config);
	}
	/* NOTE(review): the `<=` bound reads one element past count*SIZE when
	 * START_OFFSET aligns with the element size — confirm against the
	 * firmware log layout before changing. */
	for (i = ERROR_START_OFFSET;
	     i <= count * ERROR_ELEM_SIZE; i += ERROR_ELEM_SIZE) {
		desc = ipw_read_reg32(priv, base + i);
		time = ipw_read_reg32(priv, base + i + 1 * sizeof(u32));
		blink1 = ipw_read_reg32(priv, base + i + 2 * sizeof(u32));
		blink2 = ipw_read_reg32(priv, base + i + 3 * sizeof(u32));
		ilink1 = ipw_read_reg32(priv, base + i + 4 * sizeof(u32));
		ilink2 = ipw_read_reg32(priv, base + i + 5 * sizeof(u32));
		idata = ipw_read_reg32(priv, base + i + 6 * sizeof(u32));
		IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			  ipw_error_desc(desc), time, blink1, blink2,
			  ilink1, ilink2, idata);
	}
}
/*
 * Dump the firmware event log from NIC SRAM.  Layout mirrors the error
 * log: count word at the base, then 3-dword entries (event, time, data).
 * Entries are read from the hardware even when CONFIG_IPW_DEBUG is off;
 * only the printout is compiled out.
 */
static void ipw_dump_nic_event_log(struct ipw_priv *priv)
{
	u32 ev, time, data, i, count, base;
	base = ipw_read32(priv, IPW_EVENT_LOG);
	count = ipw_read_reg32(priv, base);
	if (EVENT_START_OFFSET <= count * EVENT_ELEM_SIZE)
		IPW_ERROR("Start IPW Event Log Dump:\n");
	/* NOTE(review): same `<=` boundary question as the error-log dump. */
	for (i = EVENT_START_OFFSET;
	     i <= count * EVENT_ELEM_SIZE; i += EVENT_ELEM_SIZE) {
		ev = ipw_read_reg32(priv, base + i);
		time = ipw_read_reg32(priv, base + i + 1 * sizeof(u32));
		data = ipw_read_reg32(priv, base + i + 2 * sizeof(u32));
#ifdef CONFIG_IPW_DEBUG
		IPW_ERROR("%i\t0x%08x\t%i\n", time, data, ev);
#endif
	}
}
/*
 * Fetch the value of device "ordinal" @ord into @val.
 *
 * On entry *@len is the size of the buffer at @val; on success it is
 * updated to the number of bytes actually stored.  Returns 0 on success
 * or -EINVAL on bad arguments, uninitialized tables, out-of-range
 * ordinals, or a too-small buffer (for table 2 the required size is
 * written back through @len before returning -EINVAL).
 */
static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
{
	u32 addr, field_info, field_len, field_count, total_len;
	IPW_DEBUG_ORD("ordinal = %i\n", ord);
	if (!priv || !val || !len) {
		IPW_DEBUG_ORD("Invalid argument\n");
		return -EINVAL;
	}
	/* verify device ordinal tables have been initialized */
	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
		IPW_DEBUG_ORD("Access ordinals before initialization\n");
		return -EINVAL;
	}
	switch (IPW_ORD_TABLE_ID_MASK & ord) {
	case IPW_ORD_TABLE_0_MASK:
		/*
		 * TABLE 0: Direct access to a table of 32 bit values
		 *
		 * This is a very simple table with the data directly
		 * read from the table
		 */
		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;
		/* boundary check */
		if (ord > priv->table0_len) {
			IPW_DEBUG_ORD("ordinal value (%i) longer then "
				      "max (%i)\n", ord, priv->table0_len);
			return -EINVAL;
		}
		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}
		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
			      ord, priv->table0_addr + (ord << 2));
		*len = sizeof(u32);
		/* table 0 entries are dwords: index scaled by 4, read direct */
		ord <<= 2;
		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
		break;
	case IPW_ORD_TABLE_1_MASK:
		/*
		 * TABLE 1: Indirect access to a table of 32 bit values
		 *
		 * This is a fairly large table of u32 values each
		 * representing starting addr for the data (which is
		 * also a u32)
		 */
		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;
		/* boundary check */
		if (ord > priv->table1_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
			return -EINVAL;
		}
		/* verify we have enough room to store the value */
		if (*len < sizeof(u32)) {
			IPW_DEBUG_ORD("ordinal buffer length too small, "
				      "need %zd\n", sizeof(u32));
			return -EINVAL;
		}
		/* table 1 entries live above 4K: use the indirect read */
		*((u32 *) val) =
		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
		*len = sizeof(u32);
		break;
	case IPW_ORD_TABLE_2_MASK:
		/*
		 * TABLE 2: Indirect access to a table of variable sized values
		 *
		 * This table consist of six values, each containing
		 * - dword containing the starting offset of the data
		 * - dword containing the lengh in the first 16bits
		 * and the count in the second 16bits
		 */
		/* remove the table id from the ordinal */
		ord &= IPW_ORD_TABLE_VALUE_MASK;
		/* boundary check */
		if (ord > priv->table2_len) {
			IPW_DEBUG_ORD("ordinal value too long\n");
			return -EINVAL;
		}
		/* get the address of statistic */
		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
		/* get the second DW of statistics ;
		 * two 16-bit words - first is length, second is count */
		field_info =
		    ipw_read_reg32(priv,
				   priv->table2_addr + (ord << 3) +
				   sizeof(u32));
		/* get each entry length */
		field_len = *((u16 *) & field_info);
		/* get number of entries */
		field_count = *(((u16 *) & field_info) + 1);
		/* abort if not enought memory */
		total_len = field_len * field_count;
		if (total_len > *len) {
			/* report the size the caller must supply */
			*len = total_len;
			return -EINVAL;
		}
		*len = total_len;
		if (!total_len)
			return 0;
		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
			      "field_info = 0x%08x\n",
			      addr, total_len, field_info);
		ipw_read_indirect(priv, addr, val, total_len);
		break;
	default:
		IPW_DEBUG_ORD("Invalid ordinal!\n");
		return -EINVAL;
	}
	return 0;
}
/*
 * Cache the base addresses and lengths of the three device ordinal
 * tables.  Table 0 sits at a fixed location and is read directly; tables
 * 1 and 2 are located via pointers and read through the indirect window.
 */
static void ipw_init_ordinals(struct ipw_priv *priv)
{
	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
	priv->table0_len = ipw_read32(priv, priv->table0_addr);
	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
		      priv->table0_addr, priv->table0_len);
	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
		      priv->table1_addr, priv->table1_len);
	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
	priv->table2_len &= 0x0000ffff;	/* use first two bytes */
	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
		      priv->table2_addr, priv->table2_len);
}
/*
 * The following adds a new attribute to the sysfs representation
 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
 * used for controlling the debug level.
 *
 * See the level definitions in ipw for details.
 */
/* sysfs read: report the current debug-level bitmask in hex. */
static ssize_t show_debug_level(struct device_driver *d, char *buf)
{
	return sprintf(buf, "0x%08X\n", ipw_debug_level);
}

/* sysfs write: parse a new debug-level bitmask.  Accepts "0x..", "x..",
 * or plain decimal.  Invalid input is logged and ignored. */
static ssize_t store_debug_level(struct device_driver *d,
				 const char *buf, size_t count)
{
	char *p = (char *)buf;
	u32 val;
	/* detect an optional hex prefix; p[1] is safe to read because sysfs
	 * hands us a NUL-terminated page buffer */
	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
		p++;
		if (p[0] == 'x' || p[0] == 'X')
			p++;
		val = simple_strtoul(p, &p, 16);
	} else
		val = simple_strtoul(p, &p, 10);
	if (p == buf)
		/* nothing was consumed -> not a number */
		printk(KERN_INFO DRV_NAME
		       ": %s is not in hex or decimal form.\n", buf);
	else
		ipw_debug_level = val;
	return strnlen(buf, count);
}

static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
		   show_debug_level, store_debug_level);
  552. static ssize_t show_status(struct device *d,
  553. struct device_attribute *attr, char *buf)
  554. {
  555. struct ipw_priv *p = d->driver_data;
  556. return sprintf(buf, "0x%08x\n", (int)p->status);
  557. }
  558. static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
  559. static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
  560. char *buf)
  561. {
  562. struct ipw_priv *p = d->driver_data;
  563. return sprintf(buf, "0x%08x\n", (int)p->config);
  564. }
  565. static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
  566. static ssize_t show_nic_type(struct device *d,
  567. struct device_attribute *attr, char *buf)
  568. {
  569. struct ipw_priv *p = d->driver_data;
  570. u8 type = p->eeprom[EEPROM_NIC_TYPE];
  571. switch (type) {
  572. case EEPROM_NIC_TYPE_STANDARD:
  573. return sprintf(buf, "STANDARD\n");
  574. case EEPROM_NIC_TYPE_DELL:
  575. return sprintf(buf, "DELL\n");
  576. case EEPROM_NIC_TYPE_FUJITSU:
  577. return sprintf(buf, "FUJITSU\n");
  578. case EEPROM_NIC_TYPE_IBM:
  579. return sprintf(buf, "IBM\n");
  580. case EEPROM_NIC_TYPE_HP:
  581. return sprintf(buf, "HP\n");
  582. }
  583. return sprintf(buf, "UNKNOWN\n");
  584. }
  585. static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
  586. static ssize_t dump_error_log(struct device *d,
  587. struct device_attribute *attr, const char *buf,
  588. size_t count)
  589. {
  590. char *p = (char *)buf;
  591. if (p[0] == '1')
  592. ipw_dump_nic_error_log((struct ipw_priv *)d->driver_data);
  593. return strnlen(buf, count);
  594. }
  595. static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
  596. static ssize_t dump_event_log(struct device *d,
  597. struct device_attribute *attr, const char *buf,
  598. size_t count)
  599. {
  600. char *p = (char *)buf;
  601. if (p[0] == '1')
  602. ipw_dump_nic_event_log((struct ipw_priv *)d->driver_data);
  603. return strnlen(buf, count);
  604. }
  605. static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
  606. static ssize_t show_ucode_version(struct device *d,
  607. struct device_attribute *attr, char *buf)
  608. {
  609. u32 len = sizeof(u32), tmp = 0;
  610. struct ipw_priv *p = d->driver_data;
  611. if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
  612. return 0;
  613. return sprintf(buf, "0x%08x\n", tmp);
  614. }
  615. static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
  616. static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
  617. char *buf)
  618. {
  619. u32 len = sizeof(u32), tmp = 0;
  620. struct ipw_priv *p = d->driver_data;
  621. if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
  622. return 0;
  623. return sprintf(buf, "0x%08x\n", tmp);
  624. }
  625. static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
  626. /*
  627. * Add a device attribute to view/control the delay between eeprom
  628. * operations.
  629. */
  630. static ssize_t show_eeprom_delay(struct device *d,
  631. struct device_attribute *attr, char *buf)
  632. {
  633. int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
  634. return sprintf(buf, "%i\n", n);
  635. }
  636. static ssize_t store_eeprom_delay(struct device *d,
  637. struct device_attribute *attr,
  638. const char *buf, size_t count)
  639. {
  640. struct ipw_priv *p = d->driver_data;
  641. sscanf(buf, "%i", &p->eeprom_delay);
  642. return strnlen(buf, count);
  643. }
  644. static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
  645. show_eeprom_delay, store_eeprom_delay);
  646. static ssize_t show_command_event_reg(struct device *d,
  647. struct device_attribute *attr, char *buf)
  648. {
  649. u32 reg = 0;
  650. struct ipw_priv *p = d->driver_data;
  651. reg = ipw_read_reg32(p, CX2_INTERNAL_CMD_EVENT);
  652. return sprintf(buf, "0x%08x\n", reg);
  653. }
  654. static ssize_t store_command_event_reg(struct device *d,
  655. struct device_attribute *attr,
  656. const char *buf, size_t count)
  657. {
  658. u32 reg;
  659. struct ipw_priv *p = d->driver_data;
  660. sscanf(buf, "%x", &reg);
  661. ipw_write_reg32(p, CX2_INTERNAL_CMD_EVENT, reg);
  662. return strnlen(buf, count);
  663. }
  664. static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
  665. show_command_event_reg, store_command_event_reg);
  666. static ssize_t show_mem_gpio_reg(struct device *d,
  667. struct device_attribute *attr, char *buf)
  668. {
  669. u32 reg = 0;
  670. struct ipw_priv *p = d->driver_data;
  671. reg = ipw_read_reg32(p, 0x301100);
  672. return sprintf(buf, "0x%08x\n", reg);
  673. }
  674. static ssize_t store_mem_gpio_reg(struct device *d,
  675. struct device_attribute *attr,
  676. const char *buf, size_t count)
  677. {
  678. u32 reg;
  679. struct ipw_priv *p = d->driver_data;
  680. sscanf(buf, "%x", &reg);
  681. ipw_write_reg32(p, 0x301100, reg);
  682. return strnlen(buf, count);
  683. }
  684. static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
  685. show_mem_gpio_reg, store_mem_gpio_reg);
  686. static ssize_t show_indirect_dword(struct device *d,
  687. struct device_attribute *attr, char *buf)
  688. {
  689. u32 reg = 0;
  690. struct ipw_priv *priv = d->driver_data;
  691. if (priv->status & STATUS_INDIRECT_DWORD)
  692. reg = ipw_read_reg32(priv, priv->indirect_dword);
  693. else
  694. reg = 0;
  695. return sprintf(buf, "0x%08x\n", reg);
  696. }
  697. static ssize_t store_indirect_dword(struct device *d,
  698. struct device_attribute *attr,
  699. const char *buf, size_t count)
  700. {
  701. struct ipw_priv *priv = d->driver_data;
  702. sscanf(buf, "%x", &priv->indirect_dword);
  703. priv->status |= STATUS_INDIRECT_DWORD;
  704. return strnlen(buf, count);
  705. }
  706. static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
  707. show_indirect_dword, store_indirect_dword);
  708. static ssize_t show_indirect_byte(struct device *d,
  709. struct device_attribute *attr, char *buf)
  710. {
  711. u8 reg = 0;
  712. struct ipw_priv *priv = d->driver_data;
  713. if (priv->status & STATUS_INDIRECT_BYTE)
  714. reg = ipw_read_reg8(priv, priv->indirect_byte);
  715. else
  716. reg = 0;
  717. return sprintf(buf, "0x%02x\n", reg);
  718. }
  719. static ssize_t store_indirect_byte(struct device *d,
  720. struct device_attribute *attr,
  721. const char *buf, size_t count)
  722. {
  723. struct ipw_priv *priv = d->driver_data;
  724. sscanf(buf, "%x", &priv->indirect_byte);
  725. priv->status |= STATUS_INDIRECT_BYTE;
  726. return strnlen(buf, count);
  727. }
  728. static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
  729. show_indirect_byte, store_indirect_byte);
  730. static ssize_t show_direct_dword(struct device *d,
  731. struct device_attribute *attr, char *buf)
  732. {
  733. u32 reg = 0;
  734. struct ipw_priv *priv = d->driver_data;
  735. if (priv->status & STATUS_DIRECT_DWORD)
  736. reg = ipw_read32(priv, priv->direct_dword);
  737. else
  738. reg = 0;
  739. return sprintf(buf, "0x%08x\n", reg);
  740. }
  741. static ssize_t store_direct_dword(struct device *d,
  742. struct device_attribute *attr,
  743. const char *buf, size_t count)
  744. {
  745. struct ipw_priv *priv = d->driver_data;
  746. sscanf(buf, "%x", &priv->direct_dword);
  747. priv->status |= STATUS_DIRECT_DWORD;
  748. return strnlen(buf, count);
  749. }
  750. static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
  751. show_direct_dword, store_direct_dword);
  752. static inline int rf_kill_active(struct ipw_priv *priv)
  753. {
  754. if (0 == (ipw_read32(priv, 0x30) & 0x10000))
  755. priv->status |= STATUS_RF_KILL_HW;
  756. else
  757. priv->status &= ~STATUS_RF_KILL_HW;
  758. return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
  759. }
  760. static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
  761. char *buf)
  762. {
  763. /* 0 - RF kill not enabled
  764. 1 - SW based RF kill active (sysfs)
  765. 2 - HW based RF kill active
  766. 3 - Both HW and SW baed RF kill active */
  767. struct ipw_priv *priv = d->driver_data;
  768. int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
  769. (rf_kill_active(priv) ? 0x2 : 0x0);
  770. return sprintf(buf, "%i\n", val);
  771. }
  772. static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
  773. {
  774. if ((disable_radio ? 1 : 0) ==
  775. (priv->status & STATUS_RF_KILL_SW ? 1 : 0))
  776. return 0;
  777. IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
  778. disable_radio ? "OFF" : "ON");
  779. if (disable_radio) {
  780. priv->status |= STATUS_RF_KILL_SW;
  781. if (priv->workqueue) {
  782. cancel_delayed_work(&priv->request_scan);
  783. }
  784. wake_up_interruptible(&priv->wait_command_queue);
  785. queue_work(priv->workqueue, &priv->down);
  786. } else {
  787. priv->status &= ~STATUS_RF_KILL_SW;
  788. if (rf_kill_active(priv)) {
  789. IPW_DEBUG_RF_KILL("Can not turn radio back on - "
  790. "disabled by HW switch\n");
  791. /* Make sure the RF_KILL check timer is running */
  792. cancel_delayed_work(&priv->rf_kill);
  793. queue_delayed_work(priv->workqueue, &priv->rf_kill,
  794. 2 * HZ);
  795. } else
  796. queue_work(priv->workqueue, &priv->up);
  797. }
  798. return 1;
  799. }
  800. static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
  801. const char *buf, size_t count)
  802. {
  803. struct ipw_priv *priv = d->driver_data;
  804. ipw_radio_kill_sw(priv, buf[0] == '1');
  805. return count;
  806. }
  807. static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
  808. static void ipw_irq_tasklet(struct ipw_priv *priv)
  809. {
  810. u32 inta, inta_mask, handled = 0;
  811. unsigned long flags;
  812. int rc = 0;
  813. spin_lock_irqsave(&priv->lock, flags);
  814. inta = ipw_read32(priv, CX2_INTA_RW);
  815. inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
  816. inta &= (CX2_INTA_MASK_ALL & inta_mask);
  817. /* Add any cached INTA values that need to be handled */
  818. inta |= priv->isr_inta;
  819. /* handle all the justifications for the interrupt */
  820. if (inta & CX2_INTA_BIT_RX_TRANSFER) {
  821. ipw_rx(priv);
  822. handled |= CX2_INTA_BIT_RX_TRANSFER;
  823. }
  824. if (inta & CX2_INTA_BIT_TX_CMD_QUEUE) {
  825. IPW_DEBUG_HC("Command completed.\n");
  826. rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
  827. priv->status &= ~STATUS_HCMD_ACTIVE;
  828. wake_up_interruptible(&priv->wait_command_queue);
  829. handled |= CX2_INTA_BIT_TX_CMD_QUEUE;
  830. }
  831. if (inta & CX2_INTA_BIT_TX_QUEUE_1) {
  832. IPW_DEBUG_TX("TX_QUEUE_1\n");
  833. rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
  834. handled |= CX2_INTA_BIT_TX_QUEUE_1;
  835. }
  836. if (inta & CX2_INTA_BIT_TX_QUEUE_2) {
  837. IPW_DEBUG_TX("TX_QUEUE_2\n");
  838. rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
  839. handled |= CX2_INTA_BIT_TX_QUEUE_2;
  840. }
  841. if (inta & CX2_INTA_BIT_TX_QUEUE_3) {
  842. IPW_DEBUG_TX("TX_QUEUE_3\n");
  843. rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
  844. handled |= CX2_INTA_BIT_TX_QUEUE_3;
  845. }
  846. if (inta & CX2_INTA_BIT_TX_QUEUE_4) {
  847. IPW_DEBUG_TX("TX_QUEUE_4\n");
  848. rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
  849. handled |= CX2_INTA_BIT_TX_QUEUE_4;
  850. }
  851. if (inta & CX2_INTA_BIT_STATUS_CHANGE) {
  852. IPW_WARNING("STATUS_CHANGE\n");
  853. handled |= CX2_INTA_BIT_STATUS_CHANGE;
  854. }
  855. if (inta & CX2_INTA_BIT_BEACON_PERIOD_EXPIRED) {
  856. IPW_WARNING("TX_PERIOD_EXPIRED\n");
  857. handled |= CX2_INTA_BIT_BEACON_PERIOD_EXPIRED;
  858. }
  859. if (inta & CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
  860. IPW_WARNING("HOST_CMD_DONE\n");
  861. handled |= CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
  862. }
  863. if (inta & CX2_INTA_BIT_FW_INITIALIZATION_DONE) {
  864. IPW_WARNING("FW_INITIALIZATION_DONE\n");
  865. handled |= CX2_INTA_BIT_FW_INITIALIZATION_DONE;
  866. }
  867. if (inta & CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
  868. IPW_WARNING("PHY_OFF_DONE\n");
  869. handled |= CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
  870. }
  871. if (inta & CX2_INTA_BIT_RF_KILL_DONE) {
  872. IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
  873. priv->status |= STATUS_RF_KILL_HW;
  874. wake_up_interruptible(&priv->wait_command_queue);
  875. netif_carrier_off(priv->net_dev);
  876. netif_stop_queue(priv->net_dev);
  877. cancel_delayed_work(&priv->request_scan);
  878. queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
  879. handled |= CX2_INTA_BIT_RF_KILL_DONE;
  880. }
  881. if (inta & CX2_INTA_BIT_FATAL_ERROR) {
  882. IPW_ERROR("Firmware error detected. Restarting.\n");
  883. #ifdef CONFIG_IPW_DEBUG
  884. if (ipw_debug_level & IPW_DL_FW_ERRORS) {
  885. ipw_dump_nic_error_log(priv);
  886. ipw_dump_nic_event_log(priv);
  887. }
  888. #endif
  889. queue_work(priv->workqueue, &priv->adapter_restart);
  890. handled |= CX2_INTA_BIT_FATAL_ERROR;
  891. }
  892. if (inta & CX2_INTA_BIT_PARITY_ERROR) {
  893. IPW_ERROR("Parity error\n");
  894. handled |= CX2_INTA_BIT_PARITY_ERROR;
  895. }
  896. if (handled != inta) {
  897. IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
  898. }
  899. /* enable all interrupts */
  900. ipw_enable_interrupts(priv);
  901. spin_unlock_irqrestore(&priv->lock, flags);
  902. }
#ifdef CONFIG_IPW_DEBUG
/* Expands to a switch case mapping the id IPW_CMD_<x> to the string "<x>". */
#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
/* Translate a host-command id into a printable name for debug logging.
 * Returns "UNKNOWN" for any id not listed below.  Debug builds only. */
static char *get_cmd_string(u8 cmd)
{
	switch (cmd) {
		IPW_CMD(HOST_COMPLETE);
		IPW_CMD(POWER_DOWN);
		IPW_CMD(SYSTEM_CONFIG);
		IPW_CMD(MULTICAST_ADDRESS);
		IPW_CMD(SSID);
		IPW_CMD(ADAPTER_ADDRESS);
		IPW_CMD(PORT_TYPE);
		IPW_CMD(RTS_THRESHOLD);
		IPW_CMD(FRAG_THRESHOLD);
		IPW_CMD(POWER_MODE);
		IPW_CMD(WEP_KEY);
		IPW_CMD(TGI_TX_KEY);
		IPW_CMD(SCAN_REQUEST);
		IPW_CMD(SCAN_REQUEST_EXT);
		IPW_CMD(ASSOCIATE);
		IPW_CMD(SUPPORTED_RATES);
		IPW_CMD(SCAN_ABORT);
		IPW_CMD(TX_FLUSH);
		IPW_CMD(QOS_PARAMETERS);
		IPW_CMD(DINO_CONFIG);
		IPW_CMD(RSN_CAPABILITIES);
		IPW_CMD(RX_KEY);
		IPW_CMD(CARD_DISABLE);
		IPW_CMD(SEED_NUMBER);
		IPW_CMD(TX_POWER);
		IPW_CMD(COUNTRY_INFO);
		IPW_CMD(AIRONET_INFO);
		IPW_CMD(AP_TX_POWER);
		IPW_CMD(CCKM_INFO);
		IPW_CMD(CCX_VER_INFO);
		IPW_CMD(SET_CALIBRATION);
		IPW_CMD(SENSITIVITY_CALIB);
		IPW_CMD(RETRY_LIMIT);
		IPW_CMD(IPW_PRE_POWER_DOWN);
		IPW_CMD(VAP_BEACON_TEMPLATE);
		IPW_CMD(VAP_DTIM_PERIOD);
		IPW_CMD(EXT_SUPPORTED_RATES);
		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
		IPW_CMD(VAP_QUIET_INTERVALS);
		IPW_CMD(VAP_CHANNEL_SWITCH);
		IPW_CMD(VAP_MANDATORY_CHANNELS);
		IPW_CMD(VAP_CELL_PWR_LIMIT);
		IPW_CMD(VAP_CF_PARAM_SET);
		IPW_CMD(VAP_SET_BEACONING_STATE);
		IPW_CMD(MEASUREMENT);
		IPW_CMD(POWER_CAPABILITY);
		IPW_CMD(SUPPORTED_CHANNELS);
		IPW_CMD(TPC_REPORT);
		IPW_CMD(WME_INFO);
		IPW_CMD(PRODUCTION_COMMAND);
	default:
		return "UNKNOWN";
	}
}
#endif				/* CONFIG_IPW_DEBUG */
  963. #define HOST_COMPLETE_TIMEOUT HZ
  964. static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
  965. {
  966. int rc = 0;
  967. if (priv->status & STATUS_HCMD_ACTIVE) {
  968. IPW_ERROR("Already sending a command\n");
  969. return -1;
  970. }
  971. priv->status |= STATUS_HCMD_ACTIVE;
  972. IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n",
  973. get_cmd_string(cmd->cmd), cmd->cmd, cmd->len);
  974. printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
  975. rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0);
  976. if (rc)
  977. return rc;
  978. rc = wait_event_interruptible_timeout(priv->wait_command_queue,
  979. !(priv->
  980. status & STATUS_HCMD_ACTIVE),
  981. HOST_COMPLETE_TIMEOUT);
  982. if (rc == 0) {
  983. IPW_DEBUG_INFO("Command completion failed out after %dms.\n",
  984. jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
  985. priv->status &= ~STATUS_HCMD_ACTIVE;
  986. return -EIO;
  987. }
  988. if (priv->status & STATUS_RF_KILL_MASK) {
  989. IPW_DEBUG_INFO("Command aborted due to RF Kill Switch\n");
  990. return -EIO;
  991. }
  992. return 0;
  993. }
  994. static int ipw_send_host_complete(struct ipw_priv *priv)
  995. {
  996. struct host_cmd cmd = {
  997. .cmd = IPW_CMD_HOST_COMPLETE,
  998. .len = 0
  999. };
  1000. if (!priv) {
  1001. IPW_ERROR("Invalid args\n");
  1002. return -1;
  1003. }
  1004. if (ipw_send_cmd(priv, &cmd)) {
  1005. IPW_ERROR("failed to send HOST_COMPLETE command\n");
  1006. return -1;
  1007. }
  1008. return 0;
  1009. }
  1010. static int ipw_send_system_config(struct ipw_priv *priv,
  1011. struct ipw_sys_config *config)
  1012. {
  1013. struct host_cmd cmd = {
  1014. .cmd = IPW_CMD_SYSTEM_CONFIG,
  1015. .len = sizeof(*config)
  1016. };
  1017. if (!priv || !config) {
  1018. IPW_ERROR("Invalid args\n");
  1019. return -1;
  1020. }
  1021. memcpy(&cmd.param, config, sizeof(*config));
  1022. if (ipw_send_cmd(priv, &cmd)) {
  1023. IPW_ERROR("failed to send SYSTEM_CONFIG command\n");
  1024. return -1;
  1025. }
  1026. return 0;
  1027. }
  1028. static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
  1029. {
  1030. struct host_cmd cmd = {
  1031. .cmd = IPW_CMD_SSID,
  1032. .len = min(len, IW_ESSID_MAX_SIZE)
  1033. };
  1034. if (!priv || !ssid) {
  1035. IPW_ERROR("Invalid args\n");
  1036. return -1;
  1037. }
  1038. memcpy(&cmd.param, ssid, cmd.len);
  1039. if (ipw_send_cmd(priv, &cmd)) {
  1040. IPW_ERROR("failed to send SSID command\n");
  1041. return -1;
  1042. }
  1043. return 0;
  1044. }
  1045. static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
  1046. {
  1047. struct host_cmd cmd = {
  1048. .cmd = IPW_CMD_ADAPTER_ADDRESS,
  1049. .len = ETH_ALEN
  1050. };
  1051. if (!priv || !mac) {
  1052. IPW_ERROR("Invalid args\n");
  1053. return -1;
  1054. }
  1055. IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
  1056. priv->net_dev->name, MAC_ARG(mac));
  1057. memcpy(&cmd.param, mac, ETH_ALEN);
  1058. if (ipw_send_cmd(priv, &cmd)) {
  1059. IPW_ERROR("failed to send ADAPTER_ADDRESS command\n");
  1060. return -1;
  1061. }
  1062. return 0;
  1063. }
  1064. static void ipw_adapter_restart(void *adapter)
  1065. {
  1066. struct ipw_priv *priv = adapter;
  1067. if (priv->status & STATUS_RF_KILL_MASK)
  1068. return;
  1069. ipw_down(priv);
  1070. if (ipw_up(priv)) {
  1071. IPW_ERROR("Failed to up device\n");
  1072. return;
  1073. }
  1074. }
  1075. #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
  1076. static void ipw_scan_check(void *data)
  1077. {
  1078. struct ipw_priv *priv = data;
  1079. if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
  1080. IPW_DEBUG_SCAN("Scan completion watchdog resetting "
  1081. "adapter (%dms).\n",
  1082. IPW_SCAN_CHECK_WATCHDOG / 100);
  1083. ipw_adapter_restart(priv);
  1084. }
  1085. }
  1086. static int ipw_send_scan_request_ext(struct ipw_priv *priv,
  1087. struct ipw_scan_request_ext *request)
  1088. {
  1089. struct host_cmd cmd = {
  1090. .cmd = IPW_CMD_SCAN_REQUEST_EXT,
  1091. .len = sizeof(*request)
  1092. };
  1093. if (!priv || !request) {
  1094. IPW_ERROR("Invalid args\n");
  1095. return -1;
  1096. }
  1097. memcpy(&cmd.param, request, sizeof(*request));
  1098. if (ipw_send_cmd(priv, &cmd)) {
  1099. IPW_ERROR("failed to send SCAN_REQUEST_EXT command\n");
  1100. return -1;
  1101. }
  1102. queue_delayed_work(priv->workqueue, &priv->scan_check,
  1103. IPW_SCAN_CHECK_WATCHDOG);
  1104. return 0;
  1105. }
  1106. static int ipw_send_scan_abort(struct ipw_priv *priv)
  1107. {
  1108. struct host_cmd cmd = {
  1109. .cmd = IPW_CMD_SCAN_ABORT,
  1110. .len = 0
  1111. };
  1112. if (!priv) {
  1113. IPW_ERROR("Invalid args\n");
  1114. return -1;
  1115. }
  1116. if (ipw_send_cmd(priv, &cmd)) {
  1117. IPW_ERROR("failed to send SCAN_ABORT command\n");
  1118. return -1;
  1119. }
  1120. return 0;
  1121. }
  1122. static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
  1123. {
  1124. struct host_cmd cmd = {
  1125. .cmd = IPW_CMD_SENSITIVITY_CALIB,
  1126. .len = sizeof(struct ipw_sensitivity_calib)
  1127. };
  1128. struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *)
  1129. &cmd.param;
  1130. calib->beacon_rssi_raw = sens;
  1131. if (ipw_send_cmd(priv, &cmd)) {
  1132. IPW_ERROR("failed to send SENSITIVITY CALIB command\n");
  1133. return -1;
  1134. }
  1135. return 0;
  1136. }
  1137. static int ipw_send_associate(struct ipw_priv *priv,
  1138. struct ipw_associate *associate)
  1139. {
  1140. struct host_cmd cmd = {
  1141. .cmd = IPW_CMD_ASSOCIATE,
  1142. .len = sizeof(*associate)
  1143. };
  1144. if (!priv || !associate) {
  1145. IPW_ERROR("Invalid args\n");
  1146. return -1;
  1147. }
  1148. memcpy(&cmd.param, associate, sizeof(*associate));
  1149. if (ipw_send_cmd(priv, &cmd)) {
  1150. IPW_ERROR("failed to send ASSOCIATE command\n");
  1151. return -1;
  1152. }
  1153. return 0;
  1154. }
  1155. static int ipw_send_supported_rates(struct ipw_priv *priv,
  1156. struct ipw_supported_rates *rates)
  1157. {
  1158. struct host_cmd cmd = {
  1159. .cmd = IPW_CMD_SUPPORTED_RATES,
  1160. .len = sizeof(*rates)
  1161. };
  1162. if (!priv || !rates) {
  1163. IPW_ERROR("Invalid args\n");
  1164. return -1;
  1165. }
  1166. memcpy(&cmd.param, rates, sizeof(*rates));
  1167. if (ipw_send_cmd(priv, &cmd)) {
  1168. IPW_ERROR("failed to send SUPPORTED_RATES command\n");
  1169. return -1;
  1170. }
  1171. return 0;
  1172. }
  1173. static int ipw_set_random_seed(struct ipw_priv *priv)
  1174. {
  1175. struct host_cmd cmd = {
  1176. .cmd = IPW_CMD_SEED_NUMBER,
  1177. .len = sizeof(u32)
  1178. };
  1179. if (!priv) {
  1180. IPW_ERROR("Invalid args\n");
  1181. return -1;
  1182. }
  1183. get_random_bytes(&cmd.param, sizeof(u32));
  1184. if (ipw_send_cmd(priv, &cmd)) {
  1185. IPW_ERROR("failed to send SEED_NUMBER command\n");
  1186. return -1;
  1187. }
  1188. return 0;
  1189. }
#if 0
/* Send CARD_DISABLE to the firmware with a 32-bit phy_off flag word.
 * Currently compiled out (#if 0) — unused in this driver revision. */
static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
{
	struct host_cmd cmd = {
		.cmd = IPW_CMD_CARD_DISABLE,
		.len = sizeof(u32)
	};
	if (!priv) {
		IPW_ERROR("Invalid args\n");
		return -1;
	}
	/* store the flag word directly in the command parameter buffer */
	*((u32 *) & cmd.param) = phy_off;
	if (ipw_send_cmd(priv, &cmd)) {
		IPW_ERROR("failed to send CARD_DISABLE command\n");
		return -1;
	}
	return 0;
}
#endif
  1209. static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
  1210. {
  1211. struct host_cmd cmd = {
  1212. .cmd = IPW_CMD_TX_POWER,
  1213. .len = sizeof(*power)
  1214. };
  1215. if (!priv || !power) {
  1216. IPW_ERROR("Invalid args\n");
  1217. return -1;
  1218. }
  1219. memcpy(&cmd.param, power, sizeof(*power));
  1220. if (ipw_send_cmd(priv, &cmd)) {
  1221. IPW_ERROR("failed to send TX_POWER command\n");
  1222. return -1;
  1223. }
  1224. return 0;
  1225. }
  1226. static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
  1227. {
  1228. struct ipw_rts_threshold rts_threshold = {
  1229. .rts_threshold = rts,
  1230. };
  1231. struct host_cmd cmd = {
  1232. .cmd = IPW_CMD_RTS_THRESHOLD,
  1233. .len = sizeof(rts_threshold)
  1234. };
  1235. if (!priv) {
  1236. IPW_ERROR("Invalid args\n");
  1237. return -1;
  1238. }
  1239. memcpy(&cmd.param, &rts_threshold, sizeof(rts_threshold));
  1240. if (ipw_send_cmd(priv, &cmd)) {
  1241. IPW_ERROR("failed to send RTS_THRESHOLD command\n");
  1242. return -1;
  1243. }
  1244. return 0;
  1245. }
  1246. static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
  1247. {
  1248. struct ipw_frag_threshold frag_threshold = {
  1249. .frag_threshold = frag,
  1250. };
  1251. struct host_cmd cmd = {
  1252. .cmd = IPW_CMD_FRAG_THRESHOLD,
  1253. .len = sizeof(frag_threshold)
  1254. };
  1255. if (!priv) {
  1256. IPW_ERROR("Invalid args\n");
  1257. return -1;
  1258. }
  1259. memcpy(&cmd.param, &frag_threshold, sizeof(frag_threshold));
  1260. if (ipw_send_cmd(priv, &cmd)) {
  1261. IPW_ERROR("failed to send FRAG_THRESHOLD command\n");
  1262. return -1;
  1263. }
  1264. return 0;
  1265. }
  1266. static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
  1267. {
  1268. struct host_cmd cmd = {
  1269. .cmd = IPW_CMD_POWER_MODE,
  1270. .len = sizeof(u32)
  1271. };
  1272. u32 *param = (u32 *) (&cmd.param);
  1273. if (!priv) {
  1274. IPW_ERROR("Invalid args\n");
  1275. return -1;
  1276. }
  1277. /* If on battery, set to 3, if AC set to CAM, else user
  1278. * level */
  1279. switch (mode) {
  1280. case IPW_POWER_BATTERY:
  1281. *param = IPW_POWER_INDEX_3;
  1282. break;
  1283. case IPW_POWER_AC:
  1284. *param = IPW_POWER_MODE_CAM;
  1285. break;
  1286. default:
  1287. *param = mode;
  1288. break;
  1289. }
  1290. if (ipw_send_cmd(priv, &cmd)) {
  1291. IPW_ERROR("failed to send POWER_MODE command\n");
  1292. return -1;
  1293. }
  1294. return 0;
  1295. }
  1296. /*
  1297. * The IPW device contains a Microwire compatible EEPROM that stores
  1298. * various data like the MAC address. Usually the firmware has exclusive
  1299. * access to the eeprom, but during device initialization (before the
  1300. * device driver has sent the HostComplete command to the firmware) the
  1301. * device driver has read access to the EEPROM by way of indirect addressing
  1302. * through a couple of memory mapped registers.
  1303. *
 * The following is a simplified implementation for pulling data out of
 * the eeprom, along with some helper functions to find information in
  1306. * the per device private data's copy of the eeprom.
  1307. *
  1308. * NOTE: To better understand how these functions work (i.e what is a chip
  1309. * select and why do have to keep driving the eeprom clock?), read
  1310. * just about any data sheet for a Microwire compatible EEPROM.
  1311. */
  1312. /* write a 32 bit value into the indirect accessor register */
  1313. static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
  1314. {
  1315. ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
  1316. /* the eeprom requires some time to complete the operation */
  1317. udelay(p->eeprom_delay);
  1318. return;
  1319. }
/* perform a chip select operation: raise CS, clock once with CS held,
 * leaving CS asserted ready for an opcode (sequence is order-critical) */
static inline void eeprom_cs(struct ipw_priv *priv)
{
	eeprom_write_reg(priv, 0);
	eeprom_write_reg(priv, EEPROM_BIT_CS);
	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
	eeprom_write_reg(priv, EEPROM_BIT_CS);
}
/* release the chip select, ending the EEPROM transaction
 * (the original comment said "perform a chip select" — copy/paste) */
static inline void eeprom_disable_cs(struct ipw_priv *priv)
{
	eeprom_write_reg(priv, EEPROM_BIT_CS);
	eeprom_write_reg(priv, 0);
	eeprom_write_reg(priv, EEPROM_BIT_SK);
}
  1335. /* push a single bit down to the eeprom */
  1336. static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
  1337. {
  1338. int d = (bit ? EEPROM_BIT_DI : 0);
  1339. eeprom_write_reg(p, EEPROM_BIT_CS | d);
  1340. eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
  1341. }
  1342. /* push an opcode followed by an address down to the eeprom */
  1343. static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
  1344. {
  1345. int i;
  1346. eeprom_cs(priv);
  1347. eeprom_write_bit(priv, 1);
  1348. eeprom_write_bit(priv, op & 2);
  1349. eeprom_write_bit(priv, op & 1);
  1350. for (i = 7; i >= 0; i--) {
  1351. eeprom_write_bit(priv, addr & (1 << i));
  1352. }
  1353. }
/* pull 16 bits off the eeprom, one bit at a time */
static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
{
	int i;
	u16 r = 0;
	/* Send READ Opcode */
	eeprom_op(priv, EEPROM_CMD_READ, addr);
	/* Send dummy bit */
	eeprom_write_reg(priv, EEPROM_BIT_CS);
	/* Read the data off the eeprom one bit at a time: pulse the clock
	 * (SK) high then low, then sample the DO line.  MSB arrives first. */
	for (i = 0; i < 16; i++) {
		u32 data = 0;
		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
		eeprom_write_reg(priv, EEPROM_BIT_CS);
		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
	}
	/* Send another dummy bit */
	eeprom_write_reg(priv, 0);
	eeprom_disable_cs(priv);
	return r;
}
  1376. /* helper function for pulling the mac address out of the private */
  1377. /* data's copy of the eeprom data */
  1378. static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
  1379. {
  1380. u8 *ee = (u8 *) priv->eeprom;
  1381. memcpy(mac, &ee[EEPROM_MAC_ADDRESS], 6);
  1382. }
  1383. /*
  1384. * Either the device driver (i.e. the host) or the firmware can
  1385. * load eeprom data into the designated region in SRAM. If neither
  1386. * happens then the FW will shutdown with a fatal error.
  1387. *
 * In order to signal the FW to load the EEPROM itself, the
 * EEPROM_LOAD_DISABLE word in the shared SRAM region needs to be non-zero.
  1390. */
  1391. static void ipw_eeprom_init_sram(struct ipw_priv *priv)
  1392. {
  1393. int i;
  1394. u16 *eeprom = (u16 *) priv->eeprom;
  1395. IPW_DEBUG_TRACE(">>\n");
  1396. /* read entire contents of eeprom into private buffer */
  1397. for (i = 0; i < 128; i++)
  1398. eeprom[i] = eeprom_read_u16(priv, (u8) i);
  1399. /*
  1400. If the data looks correct, then copy it to our private
  1401. copy. Otherwise let the firmware know to perform the operation
  1402. on it's own
  1403. */
  1404. if ((priv->eeprom + EEPROM_VERSION) != 0) {
  1405. IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
  1406. /* write the eeprom data to sram */
  1407. for (i = 0; i < CX2_EEPROM_IMAGE_SIZE; i++)
  1408. ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
  1409. /* Do not load eeprom data on fatal error or suspend */
  1410. ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
  1411. } else {
  1412. IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
  1413. /* Load eeprom data on fatal error or suspend */
  1414. ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
  1415. }
  1416. IPW_DEBUG_TRACE("<<\n");
  1417. }
  1418. static inline void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
  1419. {
  1420. count >>= 2;
  1421. if (!count)
  1422. return;
  1423. _ipw_write32(priv, CX2_AUTOINC_ADDR, start);
  1424. while (count--)
  1425. _ipw_write32(priv, CX2_AUTOINC_DATA, 0);
  1426. }
  1427. static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
  1428. {
  1429. ipw_zero_memory(priv, CX2_SHARED_SRAM_DMA_CONTROL,
  1430. CB_NUMBER_OF_ELEMENTS_SMALL *
  1431. sizeof(struct command_block));
  1432. }
  1433. static int ipw_fw_dma_enable(struct ipw_priv *priv)
  1434. { /* start dma engine but no transfers yet */
  1435. IPW_DEBUG_FW(">> : \n");
  1436. /* Start the dma */
  1437. ipw_fw_dma_reset_command_blocks(priv);
  1438. /* Write CB base address */
  1439. ipw_write_reg32(priv, CX2_DMA_I_CB_BASE, CX2_SHARED_SRAM_DMA_CONTROL);
  1440. IPW_DEBUG_FW("<< : \n");
  1441. return 0;
  1442. }
  1443. static void ipw_fw_dma_abort(struct ipw_priv *priv)
  1444. {
  1445. u32 control = 0;
  1446. IPW_DEBUG_FW(">> :\n");
  1447. //set the Stop and Abort bit
  1448. control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
  1449. ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
  1450. priv->sram_desc.last_cb_index = 0;
  1451. IPW_DEBUG_FW("<< \n");
  1452. }
  1453. static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
  1454. struct command_block *cb)
  1455. {
  1456. u32 address =
  1457. CX2_SHARED_SRAM_DMA_CONTROL +
  1458. (sizeof(struct command_block) * index);
  1459. IPW_DEBUG_FW(">> :\n");
  1460. ipw_write_indirect(priv, address, (u8 *) cb,
  1461. (int)sizeof(struct command_block));
  1462. IPW_DEBUG_FW("<< :\n");
  1463. return 0;
  1464. }
  1465. static int ipw_fw_dma_kick(struct ipw_priv *priv)
  1466. {
  1467. u32 control = 0;
  1468. u32 index = 0;
  1469. IPW_DEBUG_FW(">> :\n");
  1470. for (index = 0; index < priv->sram_desc.last_cb_index; index++)
  1471. ipw_fw_dma_write_command_block(priv, index,
  1472. &priv->sram_desc.cb_list[index]);
  1473. /* Enable the DMA in the CSR register */
  1474. ipw_clear_bit(priv, CX2_RESET_REG,
  1475. CX2_RESET_REG_MASTER_DISABLED |
  1476. CX2_RESET_REG_STOP_MASTER);
  1477. /* Set the Start bit. */
  1478. control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
  1479. ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
  1480. IPW_DEBUG_FW("<< :\n");
  1481. return 0;
  1482. }
  1483. static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
  1484. {
  1485. u32 address;
  1486. u32 register_value = 0;
  1487. u32 cb_fields_address = 0;
  1488. IPW_DEBUG_FW(">> :\n");
  1489. address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
  1490. IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
  1491. /* Read the DMA Controlor register */
  1492. register_value = ipw_read_reg32(priv, CX2_DMA_I_DMA_CONTROL);
  1493. IPW_DEBUG_FW_INFO("CX2_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
  1494. /* Print the CB values */
  1495. cb_fields_address = address;
  1496. register_value = ipw_read_reg32(priv, cb_fields_address);
  1497. IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
  1498. cb_fields_address += sizeof(u32);
  1499. register_value = ipw_read_reg32(priv, cb_fields_address);
  1500. IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
  1501. cb_fields_address += sizeof(u32);
  1502. register_value = ipw_read_reg32(priv, cb_fields_address);
  1503. IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
  1504. register_value);
  1505. cb_fields_address += sizeof(u32);
  1506. register_value = ipw_read_reg32(priv, cb_fields_address);
  1507. IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
  1508. IPW_DEBUG_FW(">> :\n");
  1509. }
  1510. static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
  1511. {
  1512. u32 current_cb_address = 0;
  1513. u32 current_cb_index = 0;
  1514. IPW_DEBUG_FW("<< :\n");
  1515. current_cb_address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
  1516. current_cb_index = (current_cb_address - CX2_SHARED_SRAM_DMA_CONTROL) /
  1517. sizeof(struct command_block);
  1518. IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
  1519. current_cb_index, current_cb_address);
  1520. IPW_DEBUG_FW(">> :\n");
  1521. return current_cb_index;
  1522. }
/* Append one DMA command block to the in-memory descriptor list
 * (priv->sram_desc).  Returns 0 on success, -1 when the list is full.
 * Note the deliberate field order: status/addresses first, control word
 * last (CB_VALID lives in the control word). */
static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
					u32 src_address,
					u32 dest_address,
					u32 length,
					int interrupt_enabled, int is_last)
{
	/* Base control word: valid CB, little-endian long transfers with
	 * auto-incrementing source and destination. */
	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
	    CB_DEST_SIZE_LONG;
	struct command_block *cb;
	u32 last_cb_element = 0;
	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
			  src_address, dest_address, length);
	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
		return -1;
	/* claim the next free command block */
	last_cb_element = priv->sram_desc.last_cb_index;
	cb = &priv->sram_desc.cb_list[last_cb_element];
	priv->sram_desc.last_cb_index++;
	/* Calculate the new CB control word */
	if (interrupt_enabled)
		control |= CB_INT_ENABLED;
	if (is_last)
		control |= CB_LAST_VALID;
	control |= length;
	/* Calculate the CB Element's checksum value */
	cb->status = control ^ src_address ^ dest_address;
	/* Copy the Source and Destination addresses */
	cb->dest_addr = dest_address;
	cb->source_addr = src_address;
	/* Copy the Control Word last */
	cb->control = control;
	return 0;
}
  1556. static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
  1557. u32 src_phys, u32 dest_address, u32 length)
  1558. {
  1559. u32 bytes_left = length;
  1560. u32 src_offset = 0;
  1561. u32 dest_offset = 0;
  1562. int status = 0;
  1563. IPW_DEBUG_FW(">> \n");
  1564. IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
  1565. src_phys, dest_address, length);
  1566. while (bytes_left > CB_MAX_LENGTH) {
  1567. status = ipw_fw_dma_add_command_block(priv,
  1568. src_phys + src_offset,
  1569. dest_address +
  1570. dest_offset,
  1571. CB_MAX_LENGTH, 0, 0);
  1572. if (status) {
  1573. IPW_DEBUG_FW_INFO(": Failed\n");
  1574. return -1;
  1575. } else
  1576. IPW_DEBUG_FW_INFO(": Added new cb\n");
  1577. src_offset += CB_MAX_LENGTH;
  1578. dest_offset += CB_MAX_LENGTH;
  1579. bytes_left -= CB_MAX_LENGTH;
  1580. }
  1581. /* add the buffer tail */
  1582. if (bytes_left > 0) {
  1583. status =
  1584. ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
  1585. dest_address + dest_offset,
  1586. bytes_left, 0, 0);
  1587. if (status) {
  1588. IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
  1589. return -1;
  1590. } else
  1591. IPW_DEBUG_FW_INFO
  1592. (": Adding new cb - the buffer tail\n");
  1593. }
  1594. IPW_DEBUG_FW("<< \n");
  1595. return 0;
  1596. }
  1597. static int ipw_fw_dma_wait(struct ipw_priv *priv)
  1598. {
  1599. u32 current_index = 0;
  1600. u32 watchdog = 0;
  1601. IPW_DEBUG_FW(">> : \n");
  1602. current_index = ipw_fw_dma_command_block_index(priv);
  1603. IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n",
  1604. (int)priv->sram_desc.last_cb_index);
  1605. while (current_index < priv->sram_desc.last_cb_index) {
  1606. udelay(50);
  1607. current_index = ipw_fw_dma_command_block_index(priv);
  1608. watchdog++;
  1609. if (watchdog > 400) {
  1610. IPW_DEBUG_FW_INFO("Timeout\n");
  1611. ipw_fw_dma_dump_command_block(priv);
  1612. ipw_fw_dma_abort(priv);
  1613. return -1;
  1614. }
  1615. }
  1616. ipw_fw_dma_abort(priv);
  1617. /*Disable the DMA in the CSR register */
  1618. ipw_set_bit(priv, CX2_RESET_REG,
  1619. CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER);
  1620. IPW_DEBUG_FW("<< dmaWaitSync \n");
  1621. return 0;
  1622. }
  1623. static void ipw_remove_current_network(struct ipw_priv *priv)
  1624. {
  1625. struct list_head *element, *safe;
  1626. struct ieee80211_network *network = NULL;
  1627. list_for_each_safe(element, safe, &priv->ieee->network_list) {
  1628. network = list_entry(element, struct ieee80211_network, list);
  1629. if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
  1630. list_del(element);
  1631. list_add_tail(&network->list,
  1632. &priv->ieee->network_free_list);
  1633. }
  1634. }
  1635. }
  1636. /**
  1637. * Check that card is still alive.
  1638. * Reads debug register from domain0.
  1639. * If card is present, pre-defined value should
  1640. * be found there.
  1641. *
  1642. * @param priv
  1643. * @return 1 if card is present, 0 otherwise
  1644. */
  1645. static inline int ipw_alive(struct ipw_priv *priv)
  1646. {
  1647. return ipw_read32(priv, 0x90) == 0xd55555d5;
  1648. }
  1649. static inline int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
  1650. int timeout)
  1651. {
  1652. int i = 0;
  1653. do {
  1654. if ((ipw_read32(priv, addr) & mask) == mask)
  1655. return i;
  1656. mdelay(10);
  1657. i += 10;
  1658. } while (i < timeout);
  1659. return -ETIME;
  1660. }
  1661. /* These functions load the firmware and micro code for the operation of
  1662. * the ipw hardware. It assumes the buffer has all the bits for the
  1663. * image and the caller is handling the memory allocation and clean up.
  1664. */
  1665. static int ipw_stop_master(struct ipw_priv *priv)
  1666. {
  1667. int rc;
  1668. IPW_DEBUG_TRACE(">> \n");
  1669. /* stop master. typical delay - 0 */
  1670. ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
  1671. rc = ipw_poll_bit(priv, CX2_RESET_REG,
  1672. CX2_RESET_REG_MASTER_DISABLED, 100);
  1673. if (rc < 0) {
  1674. IPW_ERROR("stop master failed in 10ms\n");
  1675. return -1;
  1676. }
  1677. IPW_DEBUG_INFO("stop master %dms\n", rc);
  1678. return rc;
  1679. }
  1680. static void ipw_arc_release(struct ipw_priv *priv)
  1681. {
  1682. IPW_DEBUG_TRACE(">> \n");
  1683. mdelay(5);
  1684. ipw_clear_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
  1685. /* no one knows timing, for safety add some delay */
  1686. mdelay(5);
  1687. }
/* Header at the front of a firmware image: the packed version word
 * (see IPW_FW_MAJOR/IPW_FW_MINOR) and a mode word. */
struct fw_header {
	u32 version;
	u32 mode;
};
/* Per-chunk header inside a firmware image: the device address the
 * chunk's payload should be DMA'd to, and the payload length in bytes
 * (consumed by ipw_load_firmware()). */
struct fw_chunk {
	u32 address;
	u32 length;
};
  1696. #define IPW_FW_MAJOR_VERSION 2
  1697. #define IPW_FW_MINOR_VERSION 2
  1698. #define IPW_FW_MINOR(x) ((x & 0xff) >> 8)
  1699. #define IPW_FW_MAJOR(x) (x & 0xff)
  1700. #define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | \
  1701. IPW_FW_MAJOR_VERSION)
  1702. #define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \
  1703. "." __stringify(IPW_FW_MINOR_VERSION) "-"
  1704. #if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0
  1705. #define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw"
  1706. #else
  1707. #define IPW_FW_NAME(x) "ipw2200_" x ".fw"
  1708. #endif
/* Load the DINO microcode image (`data`, an array of 16-bit words of
 * total byte length `len`) into the baseband processor and wait for its
 * "alive" response.  Returns 0 on success, -EINVAL when the response is
 * not a valid alive message, -ETIME when no response arrives within
 * ~100 ms, or the error from ipw_stop_master().
 * NOTE(review): the register write sequence below is order- and
 * delay-sensitive; do not reorder. */
static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
{
	int rc = 0, i, addr;
	u8 cr = 0;
	u16 *image;
	image = (u16 *) data;
	IPW_DEBUG_TRACE(">> \n");
	rc = ipw_stop_master(priv);
	if (rc < 0)
		return rc;
//      spin_lock_irqsave(&priv->lock, flags);
	/* zero the whole shared register domain before loading */
	for (addr = CX2_SHARED_LOWER_BOUND;
	     addr < CX2_REGISTER_DOMAIN1_END; addr += 4) {
		ipw_write32(priv, addr, 0);
	}
	/* no ucode (yet) */
	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
	/* destroy DMA queues */
	/* reset sequence */
	ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_ON);
	ipw_arc_release(priv);
	ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_OFF);
	mdelay(1);
	/* reset PHY */
	ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, CX2_BASEBAND_POWER_DOWN);
	mdelay(1);
	ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, 0);
	mdelay(1);
	/* enable ucode store */
	ipw_write_reg8(priv, DINO_CONTROL_REG, 0x0);
	ipw_write_reg8(priv, DINO_CONTROL_REG, DINO_ENABLE_CS);
	mdelay(1);
	/* write ucode */
	/**
	 * @bug
	 * Do NOT set indirect address register once and then
	 * store data to indirect data register in the loop.
	 * It seems very reasonable, but in this case DINO do not
	 * accept ucode. It is essential to set address each time.
	 */
	/* load new ipw uCode, one 16-bit word at a time */
	for (i = 0; i < len / 2; i++)
		ipw_write_reg16(priv, CX2_BASEBAND_CONTROL_STORE, image[i]);
	/* enable DINO */
	ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
	ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
	/* this is where the igx / win driver deveates from the VAP driver. */
	/* wait (up to 100 x 1ms) for the alive response to appear */
	for (i = 0; i < 100; i++) {
		/* poll for incoming data */
		cr = ipw_read_reg8(priv, CX2_BASEBAND_CONTROL_STATUS);
		if (cr & DINO_RXFIFO_DATA)
			break;
		mdelay(1);
	}
	if (cr & DINO_RXFIFO_DATA) {
		/* alive_command_responce size is NOT multiple of 4 */
		u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
		/* drain the RX FIFO into a word-aligned buffer */
		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
			response_buffer[i] =
			    ipw_read_reg32(priv, CX2_BASEBAND_RX_FIFO_READ);
		memcpy(&priv->dino_alive, response_buffer,
		       sizeof(priv->dino_alive));
		if (priv->dino_alive.alive_command == 1
		    && priv->dino_alive.ucode_valid == 1) {
			rc = 0;
			IPW_DEBUG_INFO
			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
			     "of %02d/%02d/%02d %02d:%02d\n",
			     priv->dino_alive.software_revision,
			     priv->dino_alive.software_revision,
			     priv->dino_alive.device_identifier,
			     priv->dino_alive.device_identifier,
			     priv->dino_alive.time_stamp[0],
			     priv->dino_alive.time_stamp[1],
			     priv->dino_alive.time_stamp[2],
			     priv->dino_alive.time_stamp[3],
			     priv->dino_alive.time_stamp[4]);
		} else {
			IPW_DEBUG_INFO("Microcode is not alive\n");
			rc = -EINVAL;
		}
	} else {
		IPW_DEBUG_INFO("No alive response from DINO\n");
		rc = -ETIME;
	}
	/* disable DINO, otherwise for some reason
	   firmware have problem getting alive resp. */
	ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
//      spin_unlock_irqrestore(&priv->lock, flags);
	return rc;
}
/* DMA a firmware image (a sequence of struct fw_chunk headers, each
 * immediately followed by its payload) from host memory into the device.
 *
 * The whole image is first copied into one coherent DMA buffer so each
 * chunk payload can be handed to the firmware-DMA engine directly at
 * shared_phys + offset; the engine is then kicked and waited upon.
 *
 * @param data  image bytes (fw_chunk headers + payloads), fw_header already
 *              stripped by the caller
 * @param len   number of bytes at @data
 * @return 0 on success, negative errno on failure
 */
static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
{
	int rc = -1;
	int offset = 0;
	struct fw_chunk *chunk;
	dma_addr_t shared_phys;
	u8 *shared_virt;

	IPW_DEBUG_TRACE("<< : \n");
	shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);

	if (!shared_virt)
		return -ENOMEM;

	memmove(shared_virt, data, len);

	/* Start the Dma */
	rc = ipw_fw_dma_enable(priv);

	if (priv->sram_desc.last_cb_index > 0) {
		/* the DMA is already ready this would be a bug. */
		BUG();
		goto out;
	}

	do {
		/* Walk the image: header first, then its payload. */
		chunk = (struct fw_chunk *)(data + offset);
		offset += sizeof(struct fw_chunk);
		/* build DMA packet and queue up for sending */
		/* dma to chunk->address, the chunk->length bytes from data +
		 * offset */
		/* Dma loading */
		rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
					   chunk->address, chunk->length);
		if (rc) {
			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
			goto out;
		}

		offset += chunk->length;
	} while (offset < len);

	/* Run the DMA and wait for the answer */
	rc = ipw_fw_dma_kick(priv);
	if (rc) {
		IPW_ERROR("dmaKick Failed\n");
		goto out;
	}

	rc = ipw_fw_dma_wait(priv);
	if (rc) {
		IPW_ERROR("dmaWaitSync Failed\n");
		goto out;
	}
      out:
	/* Coherent buffer is always released, success or failure. */
	pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
	return rc;
}
/* stop nic */
static int ipw_stop_nic(struct ipw_priv *priv)
{
	int rc = 0;

	/* stop: request the master to halt, then poll until the device
	 * acknowledges by raising MASTER_DISABLED (timeout arg 500 —
	 * units per ipw_poll_bit). */
	ipw_write32(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);

	rc = ipw_poll_bit(priv, CX2_RESET_REG,
			  CX2_RESET_REG_MASTER_DISABLED, 500);
	if (rc < 0) {
		IPW_ERROR("wait for reg master disabled failed\n");
		return rc;
	}

	/* assert the Princeton reset bit while the NIC is stopped */
	ipw_set_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);

	return rc;
}
/* Release the NIC from reset/stop and allow hardware power management. */
static void ipw_start_nic(struct ipw_priv *priv)
{
	IPW_DEBUG_TRACE(">>\n");

	/* prvHwStartNic  release ARC */
	/* clear all three hold bits in one write */
	ipw_clear_bit(priv, CX2_RESET_REG,
		      CX2_RESET_REG_MASTER_DISABLED |
		      CX2_RESET_REG_STOP_MASTER |
		      CBD_RESET_REG_PRINCETON_RESET);

	/* enable power management */
	ipw_set_bit(priv, CX2_GP_CNTRL_RW,
		    CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);

	IPW_DEBUG_TRACE("<<\n");
}
  1878. static int ipw_init_nic(struct ipw_priv *priv)
  1879. {
  1880. int rc;
  1881. IPW_DEBUG_TRACE(">>\n");
  1882. /* reset */
  1883. /*prvHwInitNic */
  1884. /* set "initialization complete" bit to move adapter to D0 state */
  1885. ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
  1886. /* low-level PLL activation */
  1887. ipw_write32(priv, CX2_READ_INT_REGISTER,
  1888. CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
  1889. /* wait for clock stabilization */
  1890. rc = ipw_poll_bit(priv, CX2_GP_CNTRL_RW,
  1891. CX2_GP_CNTRL_BIT_CLOCK_READY, 250);
  1892. if (rc < 0)
  1893. IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
  1894. /* assert SW reset */
  1895. ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_SW_RESET);
  1896. udelay(10);
  1897. /* set "initialization complete" bit to move adapter to D0 state */
  1898. ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
  1899. IPW_DEBUG_TRACE(">>\n");
  1900. return 0;
  1901. }
/* Call this function from process context, it will sleep in request_firmware.
 * Probe is an ok place to call this from.
 *
 * NOTE(review): this function itself does not call request_firmware;
 * the sleeping happens in the ipw_load()/ipw_get_fw() path — confirm
 * the comment belongs here.
 */
static int ipw_reset_nic(struct ipw_priv *priv)
{
	int rc = 0;

	IPW_DEBUG_TRACE(">>\n");

	rc = ipw_init_nic(priv);

	/* Clear the 'host command active' bit... */
	priv->status &= ~STATUS_HCMD_ACTIVE;
	/* ...and release anyone blocked on the command queue, since the
	 * reset has invalidated any in-flight host command. */
	wake_up_interruptible(&priv->wait_command_queue);

	IPW_DEBUG_TRACE("<<\n");
	return rc;
}
/* Fetch a named firmware image via firmware_class and validate the
 * major version in its fw_header.
 *
 * @param fw    out: the loaded image on success; caller must release it
 *              with release_firmware() (including on the -EINVAL path
 *              below, where *fw is already set)
 * @param name  firmware file name to request
 * @return 0 on success, negative errno on load or version failure
 */
static int ipw_get_fw(struct ipw_priv *priv,
		      const struct firmware **fw, const char *name)
{
	struct fw_header *header;
	int rc;

	/* ask firmware_class module to get the boot firmware off disk */
	rc = request_firmware(fw, name, &priv->pci_dev->dev);
	if (rc < 0) {
		IPW_ERROR("%s load failed: Reason %d\n", name, rc);
		return rc;
	}

	header = (struct fw_header *)(*fw)->data;
	/* only the major number must match the driver's build-time version */
	if (IPW_FW_MAJOR(header->version) != IPW_FW_MAJOR_VERSION) {
		IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n",
			  name,
			  IPW_FW_MAJOR(header->version), IPW_FW_MAJOR_VERSION);
		return -EINVAL;
	}

	IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n",
		       name,
		       IPW_FW_MAJOR(header->version),
		       IPW_FW_MINOR(header->version),
		       (*fw)->size - sizeof(struct fw_header));
	return 0;
}
  1941. #define CX2_RX_BUF_SIZE (3000)
/* Return every Rx buffer to the rx_used list, unmapping and freeing any
 * SKB still attached, and reset the queue indexes to "all consumed, none
 * restocked" so a subsequent replenish refills from scratch. */
static inline void ipw_rx_queue_reset(struct ipw_priv *priv,
				      struct ipw_rx_queue *rxq)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&rxq->lock, flags);

	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].skb != NULL) {
			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
					 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
			/* NOTE(review): pool[i].skb is not cleared after the
			 * free — presumably replenish overwrites it before the
			 * next reset; verify to rule out a double free. */
			dev_kfree_skb(rxq->pool[i].skb);
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->processed = RX_QUEUE_SIZE - 1;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
#ifdef CONFIG_PM
/* Firmware images are cached in module globals so that resume does not
 * have to re-read them from the filesystem (see ipw_load()).
 * NOTE(review): module-global, hence shared by all adapter instances —
 * confirm the single-device assumption. */
static int fw_loaded = 0;
static const struct firmware *bootfw = NULL;
static const struct firmware *firmware = NULL;
static const struct firmware *ucode = NULL;
#endif
  1974. static int ipw_load(struct ipw_priv *priv)
  1975. {
  1976. #ifndef CONFIG_PM
  1977. const struct firmware *bootfw = NULL;
  1978. const struct firmware *firmware = NULL;
  1979. const struct firmware *ucode = NULL;
  1980. #endif
  1981. int rc = 0, retries = 3;
  1982. #ifdef CONFIG_PM
  1983. if (!fw_loaded) {
  1984. #endif
  1985. rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot"));
  1986. if (rc)
  1987. goto error;
  1988. switch (priv->ieee->iw_mode) {
  1989. case IW_MODE_ADHOC:
  1990. rc = ipw_get_fw(priv, &ucode,
  1991. IPW_FW_NAME("ibss_ucode"));
  1992. if (rc)
  1993. goto error;
  1994. rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss"));
  1995. break;
  1996. #ifdef CONFIG_IPW_PROMISC
  1997. case IW_MODE_MONITOR:
  1998. rc = ipw_get_fw(priv, &ucode,
  1999. IPW_FW_NAME("ibss_ucode"));
  2000. if (rc)
  2001. goto error;
  2002. rc = ipw_get_fw(priv, &firmware,
  2003. IPW_FW_NAME("sniffer"));
  2004. break;
  2005. #endif
  2006. case IW_MODE_INFRA:
  2007. rc = ipw_get_fw(priv, &ucode, IPW_FW_NAME("bss_ucode"));
  2008. if (rc)
  2009. goto error;
  2010. rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss"));
  2011. break;
  2012. default:
  2013. rc = -EINVAL;
  2014. }
  2015. if (rc)
  2016. goto error;
  2017. #ifdef CONFIG_PM
  2018. fw_loaded = 1;
  2019. }
  2020. #endif
  2021. if (!priv->rxq)
  2022. priv->rxq = ipw_rx_queue_alloc(priv);
  2023. else
  2024. ipw_rx_queue_reset(priv, priv->rxq);
  2025. if (!priv->rxq) {
  2026. IPW_ERROR("Unable to initialize Rx queue\n");
  2027. goto error;
  2028. }
  2029. retry:
  2030. /* Ensure interrupts are disabled */
  2031. ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
  2032. priv->status &= ~STATUS_INT_ENABLED;
  2033. /* ack pending interrupts */
  2034. ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
  2035. ipw_stop_nic(priv);
  2036. rc = ipw_reset_nic(priv);
  2037. if (rc) {
  2038. IPW_ERROR("Unable to reset NIC\n");
  2039. goto error;
  2040. }
  2041. ipw_zero_memory(priv, CX2_NIC_SRAM_LOWER_BOUND,
  2042. CX2_NIC_SRAM_UPPER_BOUND - CX2_NIC_SRAM_LOWER_BOUND);
  2043. /* DMA the initial boot firmware into the device */
  2044. rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header),
  2045. bootfw->size - sizeof(struct fw_header));
  2046. if (rc < 0) {
  2047. IPW_ERROR("Unable to load boot firmware\n");
  2048. goto error;
  2049. }
  2050. /* kick start the device */
  2051. ipw_start_nic(priv);
  2052. /* wait for the device to finish it's initial startup sequence */
  2053. rc = ipw_poll_bit(priv, CX2_INTA_RW,
  2054. CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
  2055. if (rc < 0) {
  2056. IPW_ERROR("device failed to boot initial fw image\n");
  2057. goto error;
  2058. }
  2059. IPW_DEBUG_INFO("initial device response after %dms\n", rc);
  2060. /* ack fw init done interrupt */
  2061. ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
  2062. /* DMA the ucode into the device */
  2063. rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header),
  2064. ucode->size - sizeof(struct fw_header));
  2065. if (rc < 0) {
  2066. IPW_ERROR("Unable to load ucode\n");
  2067. goto error;
  2068. }
  2069. /* stop nic */
  2070. ipw_stop_nic(priv);
  2071. /* DMA bss firmware into the device */
  2072. rc = ipw_load_firmware(priv, firmware->data +
  2073. sizeof(struct fw_header),
  2074. firmware->size - sizeof(struct fw_header));
  2075. if (rc < 0) {
  2076. IPW_ERROR("Unable to load firmware\n");
  2077. goto error;
  2078. }
  2079. ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
  2080. rc = ipw_queue_reset(priv);
  2081. if (rc) {
  2082. IPW_ERROR("Unable to initialize queues\n");
  2083. goto error;
  2084. }
  2085. /* Ensure interrupts are disabled */
  2086. ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
  2087. /* kick start the device */
  2088. ipw_start_nic(priv);
  2089. if (ipw_read32(priv, CX2_INTA_RW) & CX2_INTA_BIT_PARITY_ERROR) {
  2090. if (retries > 0) {
  2091. IPW_WARNING("Parity error. Retrying init.\n");
  2092. retries--;
  2093. goto retry;
  2094. }
  2095. IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
  2096. rc = -EIO;
  2097. goto error;
  2098. }
  2099. /* wait for the device */
  2100. rc = ipw_poll_bit(priv, CX2_INTA_RW,
  2101. CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
  2102. if (rc < 0) {
  2103. IPW_ERROR("device failed to start after 500ms\n");
  2104. goto error;
  2105. }
  2106. IPW_DEBUG_INFO("device response after %dms\n", rc);
  2107. /* ack fw init done interrupt */
  2108. ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
  2109. /* read eeprom data and initialize the eeprom region of sram */
  2110. priv->eeprom_delay = 1;
  2111. ipw_eeprom_init_sram(priv);
  2112. /* enable interrupts */
  2113. ipw_enable_interrupts(priv);
  2114. /* Ensure our queue has valid packets */
  2115. ipw_rx_queue_replenish(priv);
  2116. ipw_write32(priv, CX2_RX_READ_INDEX, priv->rxq->read);
  2117. /* ack pending interrupts */
  2118. ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
  2119. #ifndef CONFIG_PM
  2120. release_firmware(bootfw);
  2121. release_firmware(ucode);
  2122. release_firmware(firmware);
  2123. #endif
  2124. return 0;
  2125. error:
  2126. if (priv->rxq) {
  2127. ipw_rx_queue_free(priv, priv->rxq);
  2128. priv->rxq = NULL;
  2129. }
  2130. ipw_tx_queue_free(priv);
  2131. if (bootfw)
  2132. release_firmware(bootfw);
  2133. if (ucode)
  2134. release_firmware(ucode);
  2135. if (firmware)
  2136. release_firmware(firmware);
  2137. #ifdef CONFIG_PM
  2138. fw_loaded = 0;
  2139. bootfw = ucode = firmware = NULL;
  2140. #endif
  2141. return rc;
  2142. }
  2143. /**
  2144. * DMA services
  2145. *
  2146. * Theory of operation
  2147. *
* A queue is a circular buffer with 'Read' and 'Write' pointers.
* Two empty entries are always kept in the buffer to protect it from overflow.
  2150. *
* Each Tx queue has low-mark and high-mark limits.  If, after queuing a
* packet for Tx, the free space drops below the low mark, the Tx queue is
* stopped.  When packets are reclaimed (on the 'tx done' IRQ) and the free
* space rises above the high mark, the Tx queue is resumed.
  2155. *
  2156. * The IPW operates with six queues, one receive queue in the device's
  2157. * sram, one transmit queue for sending commands to the device firmware,
  2158. * and four transmit queues for data.
  2159. *
  2160. * The four transmit queues allow for performing quality of service (qos)
  2161. * transmissions as per the 802.11 protocol. Currently Linux does not
  2162. * provide a mechanism to the user for utilizing prioritized queues, so
  2163. * we only utilize the first data transmit queue (queue1).
  2164. */
/**
 * Return the free space in a DMA queue, keeping a two-entry reserve so a
 * completely full ring is never mistaken for an empty one.
 * (The previous comment, "Driver allocates buffers of this size for Rx",
 * described CX2_RX_BUF_SIZE and was misplaced here.)
 */
  2168. static inline int ipw_queue_space(const struct clx2_queue *q)
  2169. {
  2170. int s = q->last_used - q->first_empty;
  2171. if (s <= 0)
  2172. s += q->n_bd;
  2173. s -= 2; /* keep some reserve to not confuse empty and full situations */
  2174. if (s < 0)
  2175. s = 0;
  2176. return s;
  2177. }
  2178. static inline int ipw_queue_inc_wrap(int index, int n_bd)
  2179. {
  2180. return (++index == n_bd) ? 0 : index;
  2181. }
/**
 * Initialize common DMA queue structure
 *
 * @param q                queue to init
 * @param count            Number of BD's to allocate. Should be power of 2
 * @param read_register    Address for 'read' register
 *                         (not offset within BAR, full address)
 * @param write_register   Address for 'write' register
 *                         (not offset within BAR, full address)
 * @param base_register    Address for 'base' register
 *                         (not offset within BAR, full address)
 * @param size             Address for 'size' register
 *                         (not offset within BAR, full address)
 */
static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
			   int count, u32 read, u32 write, u32 base, u32 size)
{
	q->n_bd = count;

	/* Watermarks for stopping/resuming the Tx queue.
	 * NOTE(review): high_mark (n_bd/8) ends up below low_mark (n_bd/4);
	 * the names suggest the opposite relationship — confirm the
	 * intended hysteresis against the queue start/stop logic. */
	q->low_mark = q->n_bd / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_bd / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->first_empty = q->last_used = 0;
	q->reg_r = read;
	q->reg_w = write;

	/* program ring base + size into the device and zero both
	 * hardware indexes */
	ipw_write32(priv, base, q->dma_addr);
	ipw_write32(priv, size, count);
	ipw_write32(priv, read, 0);
	ipw_write32(priv, write, 0);

	/* read back — presumably to flush the posted writes; confirm */
	_ipw_read32(priv, 0x90);
}
  2215. static int ipw_queue_tx_init(struct ipw_priv *priv,
  2216. struct clx2_tx_queue *q,
  2217. int count, u32 read, u32 write, u32 base, u32 size)
  2218. {
  2219. struct pci_dev *dev = priv->pci_dev;
  2220. q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
  2221. if (!q->txb) {
  2222. IPW_ERROR("vmalloc for auxilary BD structures failed\n");
  2223. return -ENOMEM;
  2224. }
  2225. q->bd =
  2226. pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
  2227. if (!q->bd) {
  2228. IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
  2229. sizeof(q->bd[0]) * count);
  2230. kfree(q->txb);
  2231. q->txb = NULL;
  2232. return -ENOMEM;
  2233. }
  2234. ipw_queue_init(priv, &q->q, count, read, write, base, size);
  2235. return 0;
  2236. }
/**
 * Free one TFD, those at index [txq->q.last_used].
 * Do NOT advance any indexes
 *
 * @param dev
 * @param txq
 */
static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
				  struct clx2_tx_queue *txq)
{
	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
	struct pci_dev *dev = priv->pci_dev;
	int i;

	/* classify bd */
	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
		/* nothing to cleanup after for host commands */
		return;

	/* sanity check */
	if (bd->u.data.num_chunks > NUM_TFD_CHUNKS) {
		IPW_ERROR("Too many chunks: %i\n", bd->u.data.num_chunks);
		/** @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* unmap chunks if any */
	for (i = 0; i < bd->u.data.num_chunks; i++) {
		pci_unmap_single(dev, bd->u.data.chunk_ptr[i],
				 bd->u.data.chunk_len[i], PCI_DMA_TODEVICE);
		/* NOTE(review): the txb free sits inside the chunk loop but
		 * is guarded by the NULL check, so it runs at most once for
		 * this TFD — hoisting it out of the loop would be clearer. */
		if (txq->txb[txq->q.last_used]) {
			ieee80211_txb_free(txq->txb[txq->q.last_used]);
			txq->txb[txq->q.last_used] = NULL;
		}
	}
}
/**
 * Deallocate DMA queue.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 *
 * @param dev
 * @param q
 */
static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
{
	struct clx2_queue *q = &txq->q;
	struct pci_dev *dev = priv->pci_dev;

	/* n_bd == 0 means never initialized (or already freed); together
	 * with the final memset this makes the function idempotent */
	if (q->n_bd == 0)
		return;

	/* first, empty all BD's */
	for (; q->first_empty != q->last_used;
	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
		ipw_queue_tx_free_tfd(priv, txq);
	}

	/* free buffers belonging to queue itself */
	pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
			    q->dma_addr);
	kfree(txq->txb);

	/* 0 fill whole structure */
	memset(txq, 0, sizeof(*txq));
}
  2297. /**
  2298. * Destroy all DMA queues and structures
  2299. *
  2300. * @param priv
  2301. */
  2302. static void ipw_tx_queue_free(struct ipw_priv *priv)
  2303. {
  2304. /* Tx CMD queue */
  2305. ipw_queue_tx_free(priv, &priv->txq_cmd);
  2306. /* Tx queues */
  2307. ipw_queue_tx_free(priv, &priv->txq[0]);
  2308. ipw_queue_tx_free(priv, &priv->txq[1]);
  2309. ipw_queue_tx_free(priv, &priv->txq[2]);
  2310. ipw_queue_tx_free(priv, &priv->txq[3]);
  2311. }
  2312. static void inline __maybe_wake_tx(struct ipw_priv *priv)
  2313. {
  2314. if (netif_running(priv->net_dev)) {
  2315. switch (priv->port_type) {
  2316. case DCR_TYPE_MU_BSS:
  2317. case DCR_TYPE_MU_IBSS:
  2318. if (!(priv->status & STATUS_ASSOCIATED)) {
  2319. return;
  2320. }
  2321. }
  2322. netif_wake_queue(priv->net_dev);
  2323. }
  2324. }
  2325. static inline void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
  2326. {
  2327. /* First 3 bytes are manufacturer */
  2328. bssid[0] = priv->mac_addr[0];
  2329. bssid[1] = priv->mac_addr[1];
  2330. bssid[2] = priv->mac_addr[2];
  2331. /* Last bytes are random */
  2332. get_random_bytes(&bssid[3], ETH_ALEN - 3);
  2333. bssid[0] &= 0xfe; /* clear multicast bit */
  2334. bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
  2335. }
/* Look up @bssid in the driver's station table, adding it (both in the
 * host table and mirrored into device SRAM) if not already present.
 *
 * @return the station index, or IPW_INVALID_STATION when the table is
 *         full (num_stations has reached MAX_STATIONS)
 */
static inline u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
{
	struct ipw_station_entry entry;
	int i;

	for (i = 0; i < priv->num_stations; i++) {
		if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
			/* Another node is active in network */
			priv->missed_adhoc_beacons = 0;
			if (!(priv->config & CFG_STATIC_CHANNEL))
				/* when other nodes drop out, we drop out */
				priv->config &= ~CFG_ADHOC_PERSIST;

			return i;
		}
	}

	/* i == num_stations here; equal to MAX_STATIONS means table full */
	if (i == MAX_STATIONS)
		return IPW_INVALID_STATION;

	IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));

	entry.reserved = 0;
	entry.support_mode = 0;
	memcpy(entry.mac_addr, bssid, ETH_ALEN);
	memcpy(priv->stations[i], bssid, ETH_ALEN);
	/* mirror the new entry into the device's station table */
	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
			 &entry, sizeof(entry));
	priv->num_stations++;
	return i;
}
  2362. static inline u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
  2363. {
  2364. int i;
  2365. for (i = 0; i < priv->num_stations; i++)
  2366. if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
  2367. return i;
  2368. return IPW_INVALID_STATION;
  2369. }
  2370. static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
  2371. {
  2372. int err;
  2373. if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))) {
  2374. IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
  2375. return;
  2376. }
  2377. IPW_DEBUG_ASSOC("Disassocation attempt from " MAC_FMT " "
  2378. "on channel %d.\n",
  2379. MAC_ARG(priv->assoc_request.bssid),
  2380. priv->assoc_request.channel);
  2381. priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
  2382. priv->status |= STATUS_DISASSOCIATING;
  2383. if (quiet)
  2384. priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
  2385. else
  2386. priv->assoc_request.assoc_type = HC_DISASSOCIATE;
  2387. err = ipw_send_associate(priv, &priv->assoc_request);
  2388. if (err) {
  2389. IPW_DEBUG_HC("Attempt to send [dis]associate command "
  2390. "failed.\n");
  2391. return;
  2392. }
  2393. }
  2394. static void ipw_disassociate(void *data)
  2395. {
  2396. ipw_send_disassociate(data, 0);
  2397. }
  2398. static void notify_wx_assoc_event(struct ipw_priv *priv)
  2399. {
  2400. union iwreq_data wrqu;
  2401. wrqu.ap_addr.sa_family = ARPHRD_ETHER;
  2402. if (priv->status & STATUS_ASSOCIATED)
  2403. memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
  2404. else
  2405. memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
  2406. wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
  2407. }
/* Reason strings for 802.11 status codes, consumed by
 * ipw_get_status_code() when decoding management responses for debug
 * output. */
struct ipw_status_code {
	u16 status;		/* 802.11 status code value */
	const char *reason;	/* human-readable description */
};

static const struct ipw_status_code ipw_status_codes[] = {
	{0x00, "Successful"},
	{0x01, "Unspecified failure"},
	{0x0A, "Cannot support all requested capabilities in the "
	 "Capability information field"},
	{0x0B, "Reassociation denied due to inability to confirm that "
	 "association exists"},
	{0x0C, "Association denied due to reason outside the scope of this "
	 "standard"},
	{0x0D,
	 "Responding station does not support the specified authentication "
	 "algorithm"},
	{0x0E,
	 "Received an Authentication frame with authentication sequence "
	 "transaction sequence number out of expected sequence"},
	{0x0F, "Authentication rejected because of challenge failure"},
	{0x10, "Authentication rejected due to timeout waiting for next "
	 "frame in sequence"},
	{0x11, "Association denied because AP is unable to handle additional "
	 "associated stations"},
	{0x12,
	 "Association denied due to requesting station not supporting all "
	 "of the datarates in the BSSBasicServiceSet Parameter"},
	{0x13,
	 "Association denied due to requesting station not supporting "
	 "short preamble operation"},
	{0x14,
	 "Association denied due to requesting station not supporting "
	 "PBCC encoding"},
	{0x15,
	 "Association denied due to requesting station not supporting "
	 "channel agility"},
	{0x19,
	 "Association denied due to requesting station not supporting "
	 "short slot operation"},
	{0x1A,
	 "Association denied due to requesting station not supporting "
	 "DSSS-OFDM operation"},
	{0x28, "Invalid Information Element"},
	{0x29, "Group Cipher is not valid"},
	{0x2A, "Pairwise Cipher is not valid"},
	{0x2B, "AKMP is not valid"},
	{0x2C, "Unsupported RSN IE version"},
	{0x2D, "Invalid RSN IE Capabilities"},
	{0x2E, "Cipher suite is rejected per security policy"},
};
  2458. #ifdef CONFIG_IPW_DEBUG
  2459. static const char *ipw_get_status_code(u16 status)
  2460. {
  2461. int i;
  2462. for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
  2463. if (ipw_status_codes[i].status == status)
  2464. return ipw_status_codes[i].reason;
  2465. return "Unknown status value.";
  2466. }
  2467. #endif
/* Reset a sliding-window average to its empty state (zero sum, position
 * zero, init flag cleared). */
static void inline average_init(struct average *avg)
{
	memset(avg, 0, sizeof(*avg));
}
  2472. static void inline average_add(struct average *avg, s16 val)
  2473. {
  2474. avg->sum -= avg->entries[avg->pos];
  2475. avg->sum += val;
  2476. avg->entries[avg->pos++] = val;
  2477. if (unlikely(avg->pos == AVG_ENTRIES)) {
  2478. avg->init = 1;
  2479. avg->pos = 0;
  2480. }
  2481. }
  2482. static s16 inline average_value(struct average *avg)
  2483. {
  2484. if (!unlikely(avg->init)) {
  2485. if (avg->pos)
  2486. return avg->sum / avg->pos;
  2487. return 0;
  2488. }
  2489. return avg->sum / AVG_ENTRIES;
  2490. }
/* Zero the driver's link-quality statistics and re-baseline the
 * firmware-managed counters (CRC errors, Tx failures), which persist
 * until the NIC restarts and therefore must be sampled, not cleared. */
static void ipw_reset_stats(struct ipw_priv *priv)
{
	u32 len = sizeof(u32);

	priv->quality = 0;

	average_init(&priv->average_missed_beacons);
	average_init(&priv->average_rssi);
	average_init(&priv->average_noise);

	priv->last_rate = 0;
	priv->last_missed_beacons = 0;
	priv->last_rx_packets = 0;
	priv->last_tx_packets = 0;
	priv->last_tx_failures = 0;

	/* Firmware managed, reset only when NIC is restarted, so we have to
	 * normalize on the current value */
	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
			&priv->last_rx_err, &len);
	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
			&priv->last_tx_failures, &len);

	/* Driver managed, reset with each association */
	priv->missed_adhoc_beacons = 0;
	priv->missed_beacons = 0;
	priv->tx_packets = 0;
	priv->rx_packets = 0;
}
/* Return the highest enabled rate in bits/sec from priv->rates_mask,
 * restricted to CCK rates when the association is 802.11b.  Falls back
 * to the mode maximum (11M for B, 54M otherwise) when no recognized
 * rate bit is set. */
static inline u32 ipw_get_max_rate(struct ipw_priv *priv)
{
	u32 i = 0x80000000;
	u32 mask = priv->rates_mask;

	/* If currently associated in B mode, restrict the maximum
	 * rate match to B rates */
	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
		mask &= IEEE80211_CCK_RATES_MASK;

	/* TODO: Verify that the rate is supported by the current rates
	 * list. */

	/* scan from the highest bit down for the first enabled rate */
	while (i && !(mask & i))
		i >>= 1;
	switch (i) {
	case IEEE80211_CCK_RATE_1MB_MASK: return 1000000;
	case IEEE80211_CCK_RATE_2MB_MASK: return 2000000;
	case IEEE80211_CCK_RATE_5MB_MASK: return 5500000;
	case IEEE80211_OFDM_RATE_6MB_MASK: return 6000000;
	case IEEE80211_OFDM_RATE_9MB_MASK: return 9000000;
	case IEEE80211_CCK_RATE_11MB_MASK: return 11000000;
	case IEEE80211_OFDM_RATE_12MB_MASK: return 12000000;
	case IEEE80211_OFDM_RATE_18MB_MASK: return 18000000;
	case IEEE80211_OFDM_RATE_24MB_MASK: return 24000000;
	case IEEE80211_OFDM_RATE_36MB_MASK: return 36000000;
	case IEEE80211_OFDM_RATE_48MB_MASK: return 48000000;
	case IEEE80211_OFDM_RATE_54MB_MASK: return 54000000;
	}

	if (priv->ieee->mode == IEEE_B)
		return 11000000;
	else
		return 54000000;
}
/* Return the current Tx rate in bits/sec: 0 when not associated; the
 * configured maximum until enough packets have been sent
 * (IPW_REAL_RATE_RX_PACKET_THRESHOLD) for the firmware's measured rate
 * ordinal to be meaningful; otherwise the firmware's current Tx rate
 * code translated to bits/sec (0 for an unknown code). */
static u32 ipw_get_current_rate(struct ipw_priv *priv)
{
	u32 rate, len = sizeof(rate);
	int err;

	if (!(priv->status & STATUS_ASSOCIATED))
		return 0;

	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
				      &len);
		if (err) {
			IPW_DEBUG_INFO("failed querying ordinals.\n");
			return 0;
		}
	} else
		return ipw_get_max_rate(priv);

	switch (rate) {
	case IPW_TX_RATE_1MB: return 1000000;
	case IPW_TX_RATE_2MB: return 2000000;
	case IPW_TX_RATE_5MB: return 5500000;
	case IPW_TX_RATE_6MB: return 6000000;
	case IPW_TX_RATE_9MB: return 9000000;
	case IPW_TX_RATE_11MB: return 11000000;
	case IPW_TX_RATE_12MB: return 12000000;
	case IPW_TX_RATE_18MB: return 18000000;
	case IPW_TX_RATE_24MB: return 24000000;
	case IPW_TX_RATE_36MB: return 36000000;
	case IPW_TX_RATE_48MB: return 48000000;
	case IPW_TX_RATE_54MB: return 54000000;
	}

	return 0;
}
  2577. #define PERFECT_RSSI (-50)
  2578. #define WORST_RSSI (-85)
  2579. #define IPW_STATS_INTERVAL (2 * HZ)
/* Periodic link-quality sampler: pulls the firmware counters, computes
 * per-interval deltas, derives beacon/rate/Rx/Tx/signal quality scores,
 * stores the minimum in priv->quality, and re-arms itself on the
 * driver's workqueue every IPW_STATS_INTERVAL. */
static void ipw_gather_stats(struct ipw_priv *priv)
{
	u32 rx_err, rx_err_delta, rx_packets_delta;
	u32 tx_failures, tx_failures_delta, tx_packets_delta;
	u32 missed_beacons_percent, missed_beacons_delta;
	u32 quality = 0;
	u32 len = sizeof(u32);
	s16 rssi;
	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
	    rate_quality;

	if (!(priv->status & STATUS_ASSOCIATED)) {
		priv->quality = 0;
		return;
	}

	/* Update the statistics */
	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
			&priv->missed_beacons, &len);
	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
	priv->last_missed_beacons = priv->missed_beacons;
	if (priv->assoc_request.beacon_interval) {
		missed_beacons_percent = missed_beacons_delta *
		    (HZ * priv->assoc_request.beacon_interval) /
		    (IPW_STATS_INTERVAL * 10);
	} else {
		missed_beacons_percent = 0;
	}
	average_add(&priv->average_missed_beacons, missed_beacons_percent);

	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
	rx_err_delta = rx_err - priv->last_rx_err;
	priv->last_rx_err = rx_err;

	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
	tx_failures_delta = tx_failures - priv->last_tx_failures;
	priv->last_tx_failures = tx_failures;

	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
	priv->last_rx_packets = priv->rx_packets;

	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
	priv->last_tx_packets = priv->tx_packets;

	/* Calculate quality based on the following:
	 *
	 * Missed beacon: 100% = 0, 0% = 70% missed
	 * Rate: 60% = 1Mbs, 100% = Max
	 * Rx and Tx errors represent a straight % of total Rx/Tx
	 * RSSI: 100% = > -50,  0% = < -80
	 * Rx errors: 100% = 0, 0% = 50% missed
	 *
	 * The lowest computed quality is used.
	 *
	 */
#define BEACON_THRESHOLD 5
	beacon_quality = 100 - missed_beacons_percent;
	if (beacon_quality < BEACON_THRESHOLD)
		beacon_quality = 0;
	else
		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
		    (100 - BEACON_THRESHOLD);
	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
			beacon_quality, missed_beacons_percent);

	priv->last_rate = ipw_get_current_rate(priv);
	/* NOTE(review): this expression parses as
	 * (last_rate * 40 / last_rate) + 60, i.e. always 100 whenever
	 * last_rate != 0, and it divides by zero when last_rate == 0
	 * (possible if the rate query above failed).  Per the comment
	 * block ("60% = 1Mbs, 100% = Max") the denominator was probably
	 * meant to be the maximum rate — confirm intent before fixing. */
	rate_quality = priv->last_rate * 40 / priv->last_rate + 60;
	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
			rate_quality, priv->last_rate / 1000000);

	/* the "+ rx_err_delta" term guards the division below; redundant
	 * when rx_packets_delta > 100 but harmless */
	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
		rx_quality = 100 - (rx_err_delta * 100) /
		    (rx_packets_delta + rx_err_delta);
	else
		rx_quality = 100;
	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
			rx_quality, rx_err_delta, rx_packets_delta);

	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
		tx_quality = 100 - (tx_failures_delta * 100) /
		    (tx_packets_delta + tx_failures_delta);
	else
		tx_quality = 100;
	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
			tx_quality, tx_failures_delta, tx_packets_delta);

	rssi = average_value(&priv->average_rssi);
	if (rssi > PERFECT_RSSI)
		signal_quality = 100;
	else if (rssi < WORST_RSSI)
		signal_quality = 0;
	else
		signal_quality = (rssi - WORST_RSSI) * 100 /
		    (PERFECT_RSSI - WORST_RSSI);
	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
			signal_quality, rssi);

	/* overall quality is the worst of the five component scores */
	quality = min(beacon_quality,
		      min(rate_quality,
			  min(tx_quality, min(rx_quality, signal_quality))));
	if (quality == beacon_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
				quality);
	if (quality == rate_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
				quality);
	if (quality == tx_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
				quality);
	if (quality == rx_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
				quality);
	if (quality == signal_quality)
		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
				quality);

	priv->quality = quality;

	/* re-arm: sample again in IPW_STATS_INTERVAL jiffies */
	queue_delayed_work(priv->workqueue, &priv->gather_stats,
			   IPW_STATS_INTERVAL);
}
/**
 * Handle host notification packet.
 * Called from interrupt routine
 *
 * Dispatches on notif->subtype.  Most handlers only emit debug output;
 * the association/authentication and scan-completed handlers also update
 * priv->status and queue deferred work (scan, associate, roam, ...) on
 * priv->workqueue, since we must not block in interrupt context.
 */
static inline void ipw_rx_notification(struct ipw_priv *priv,
				       struct ipw_rx_notification *notif)
{
	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, notif->size);

	switch (notif->subtype) {
	case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
			/* Association state machine progress from firmware. */
			struct notif_association *assoc = &notif->u.assoc;

			switch (assoc->state) {
			case CMAS_ASSOCIATED:{
					/* Fully associated: record BSSID, bring
					 * the link up and restart statistics. */
					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
						  IPW_DL_ASSOC,
						  "associated: '%s' " MAC_FMT
						  " \n",
						  escape_essid(priv->essid,
							       priv->essid_len),
						  MAC_ARG(priv->bssid));

					switch (priv->ieee->iw_mode) {
					case IW_MODE_INFRA:
						memcpy(priv->ieee->bssid,
						       priv->bssid, ETH_ALEN);
						break;

					case IW_MODE_ADHOC:
						memcpy(priv->ieee->bssid,
						       priv->bssid, ETH_ALEN);

						/* clear out the station table */
						priv->num_stations = 0;

						IPW_DEBUG_ASSOC
						    ("queueing adhoc check\n");
						queue_delayed_work(priv->
								   workqueue,
								   &priv->
								   adhoc_check,
								   priv->
								   assoc_request.
								   beacon_interval);
						break;
					}

					priv->status &= ~STATUS_ASSOCIATING;
					priv->status |= STATUS_ASSOCIATED;

					netif_carrier_on(priv->net_dev);
					if (netif_queue_stopped(priv->net_dev)) {
						IPW_DEBUG_NOTIF
						    ("waking queue\n");
						netif_wake_queue(priv->net_dev);
					} else {
						IPW_DEBUG_NOTIF
						    ("starting queue\n");
						netif_start_queue(priv->
								  net_dev);
					}

					ipw_reset_stats(priv);
					/* Ensure the rate is updated immediately */
					priv->last_rate =
					    ipw_get_current_rate(priv);
					schedule_work(&priv->gather_stats);
					notify_wx_assoc_event(priv);

					/* queue_delayed_work(priv->workqueue,
					   &priv->request_scan,
					   SCAN_ASSOCIATED_INTERVAL);
					 */
					break;
				}

			case CMAS_AUTHENTICATED:{
					/* If we believed we were associated or
					 * authenticated, this state actually
					 * signals a deauthentication; otherwise
					 * it is plain forward progress. */
					if (priv->
					    status & (STATUS_ASSOCIATED |
						      STATUS_AUTH)) {
#ifdef CONFIG_IPW_DEBUG
						struct notif_authenticate *auth
						    = &notif->u.auth;
						IPW_DEBUG(IPW_DL_NOTIF |
							  IPW_DL_STATE |
							  IPW_DL_ASSOC,
							  "deauthenticated: '%s' "
							  MAC_FMT
							  ": (0x%04X) - %s \n",
							  escape_essid(priv->
								       essid,
								       priv->
								       essid_len),
							  MAC_ARG(priv->bssid),
							  ntohs(auth->status),
							  ipw_get_status_code
							  (ntohs
							   (auth->status)));
#endif
						priv->status &=
						    ~(STATUS_ASSOCIATING |
						      STATUS_AUTH |
						      STATUS_ASSOCIATED);

						netif_carrier_off(priv->
								  net_dev);
						netif_stop_queue(priv->net_dev);
						queue_work(priv->workqueue,
							   &priv->request_scan);
						notify_wx_assoc_event(priv);
						break;
					}

					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
						  IPW_DL_ASSOC,
						  "authenticated: '%s' " MAC_FMT
						  "\n",
						  escape_essid(priv->essid,
							       priv->essid_len),
						  MAC_ARG(priv->bssid));
					break;
				}

			case CMAS_INIT:{
					/* Back to INIT: we are disassociated.
					 * Clear link state and, unless we are
					 * roaming, cancel pending work and
					 * kick off a fresh scan. */
					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
						  IPW_DL_ASSOC,
						  "disassociated: '%s' " MAC_FMT
						  " \n",
						  escape_essid(priv->essid,
							       priv->essid_len),
						  MAC_ARG(priv->bssid));

					priv->status &=
					    ~(STATUS_DISASSOCIATING |
					      STATUS_ASSOCIATING |
					      STATUS_ASSOCIATED | STATUS_AUTH);

					netif_stop_queue(priv->net_dev);
					if (!(priv->status & STATUS_ROAMING)) {
						netif_carrier_off(priv->
								  net_dev);
						notify_wx_assoc_event(priv);

						/* Cancel any queued work ... */
						cancel_delayed_work(&priv->
								    request_scan);
						cancel_delayed_work(&priv->
								    adhoc_check);

						/* Queue up another scan... */
						queue_work(priv->workqueue,
							   &priv->request_scan);

						cancel_delayed_work(&priv->
								    gather_stats);
					} else {
						/* NOTE(review): STATUS_ROAMING
						 * is already set on this path,
						 * so this |= is a no-op --
						 * confirm original intent. */
						priv->status |= STATUS_ROAMING;
						queue_work(priv->workqueue,
							   &priv->request_scan);
					}

					ipw_reset_stats(priv);
					break;
				}

			default:
				IPW_ERROR("assoc: unknown (%d)\n",
					  assoc->state);
				break;
			}

			break;
		}

	case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
			/* 802.11 authentication handshake progress; most
			 * intermediate states are debug-only. */
			struct notif_authenticate *auth = &notif->u.auth;
			switch (auth->state) {
			case CMAS_AUTHENTICATED:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
					  "authenticated: '%s' " MAC_FMT " \n",
					  escape_essid(priv->essid,
						       priv->essid_len),
					  MAC_ARG(priv->bssid));
				priv->status |= STATUS_AUTH;
				break;

			case CMAS_INIT:
				/* Dropped back to INIT: deauthenticated.  If
				 * we had STATUS_AUTH, report the failure code
				 * carried in the notification. */
				if (priv->status & STATUS_AUTH) {
					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
						  IPW_DL_ASSOC,
						  "authentication failed (0x%04X): %s\n",
						  ntohs(auth->status),
						  ipw_get_status_code(ntohs
								      (auth->
								       status)));
				}
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC,
					  "deauthenticated: '%s' " MAC_FMT "\n",
					  escape_essid(priv->essid,
						       priv->essid_len),
					  MAC_ARG(priv->bssid));

				priv->status &= ~(STATUS_ASSOCIATING |
						  STATUS_AUTH |
						  STATUS_ASSOCIATED);

				netif_carrier_off(priv->net_dev);
				netif_stop_queue(priv->net_dev);
				queue_work(priv->workqueue,
					   &priv->request_scan);
				notify_wx_assoc_event(priv);
				break;

			case CMAS_TX_AUTH_SEQ_1:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_1\n");
				break;
			case CMAS_RX_AUTH_SEQ_2:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_2\n");
				break;
			case CMAS_AUTH_SEQ_1_PASS:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
				break;
			case CMAS_AUTH_SEQ_1_FAIL:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
				break;
			case CMAS_TX_AUTH_SEQ_3:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_3\n");
				break;
			case CMAS_RX_AUTH_SEQ_4:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
				break;
			case CMAS_AUTH_SEQ_2_PASS:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
				break;
			case CMAS_AUTH_SEQ_2_FAIL:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
				break;
			case CMAS_TX_ASSOC:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "TX_ASSOC\n");
				break;
			case CMAS_RX_ASSOC_RESP:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
				break;
			case CMAS_ASSOCIATED:
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
					  IPW_DL_ASSOC, "ASSOCIATED\n");
				break;
			default:
				IPW_DEBUG_NOTIF("auth: failure - %d\n",
						auth->state);
				break;
			}
			break;
		}

	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
			/* Per-channel scan progress (debug only). */
			struct notif_channel_result *x =
			    &notif->u.channel_result;

			if (notif->size == sizeof(*x)) {
				IPW_DEBUG_SCAN("Scan result for channel %d\n",
					       x->channel_num);
			} else {
				IPW_DEBUG_SCAN("Scan result of wrong size %d "
					       "(should be %zd)\n",
					       notif->size, sizeof(*x));
			}
			break;
		}

	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
			/* Scan finished: clear scanning flags and decide the
			 * follow-up action -- associate when idle, roam when
			 * this scan was the roam scan, or start the pending
			 * scan request. */
			struct notif_scan_complete *x = &notif->u.scan_complete;
			if (notif->size == sizeof(*x)) {
				IPW_DEBUG_SCAN
				    ("Scan completed: type %d, %d channels, "
				     "%d status\n", x->scan_type,
				     x->num_channels, x->status);
			} else {
				IPW_ERROR("Scan completed of wrong size %d "
					  "(should be %zd)\n",
					  notif->size, sizeof(*x));
			}

			priv->status &=
			    ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);

			cancel_delayed_work(&priv->scan_check);

			if (!(priv->status & (STATUS_ASSOCIATED |
					      STATUS_ASSOCIATING |
					      STATUS_ROAMING |
					      STATUS_DISASSOCIATING)))
				queue_work(priv->workqueue, &priv->associate);
			else if (priv->status & STATUS_ROAMING) {
				/* If a scan completed and we are in roam mode, then
				 * the scan that completed was the one requested as a
				 * result of entering roam... so, schedule the
				 * roam work */
				queue_work(priv->workqueue, &priv->roam);
			} else if (priv->status & STATUS_SCAN_PENDING)
				queue_work(priv->workqueue,
					   &priv->request_scan);

			priv->ieee->scans++;
			break;
		}

	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
			/* Fragmentation threshold report (logged only). */
			struct notif_frag_length *x = &notif->u.frag_len;

			if (notif->size == sizeof(*x)) {
				IPW_ERROR("Frag length: %d\n", x->frag_length);
			} else {
				IPW_ERROR("Frag length of wrong size %d "
					  "(should be %zd)\n",
					  notif->size, sizeof(*x));
			}
			break;
		}

	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
			/* Firmware reports degrading link quality; snapshot
			 * the payload for later inspection. */
			struct notif_link_deterioration *x =
			    &notif->u.link_deterioration;
			if (notif->size == sizeof(*x)) {
				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
					  "link deterioration: '%s' " MAC_FMT
					  " \n", escape_essid(priv->essid,
							      priv->essid_len),
					  MAC_ARG(priv->bssid));
				memcpy(&priv->last_link_deterioration, x,
				       sizeof(*x));
			} else {
				IPW_ERROR("Link Deterioration of wrong size %d "
					  "(should be %zd)\n",
					  notif->size, sizeof(*x));
			}
			break;
		}

	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
			/* Response to a DINO_CONFIG host command; only
			 * sanity-checked against the outstanding command. */
			IPW_ERROR("Dino config\n");
			if (priv->hcmd
			    && priv->hcmd->cmd == HOST_CMD_DINO_CONFIG) {
				/* TODO: Do anything special? */
			} else {
				IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
			}
			break;
		}

	case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
			/* Missed-beacon accounting: abort an in-flight scan,
			 * then disassociate or roam depending on how many
			 * beacons were missed relative to the thresholds. */
			struct notif_beacon_state *x = &notif->u.beacon_state;
			if (notif->size != sizeof(*x)) {
				IPW_ERROR
				    ("Beacon state of wrong size %d (should "
				     "be %zd)\n", notif->size, sizeof(*x));
				break;
			}

			if (x->state == HOST_NOTIFICATION_STATUS_BEACON_MISSING) {
				if (priv->status & STATUS_SCANNING) {
					/* Stop scan to keep fw from getting
					 * stuck... */
					queue_work(priv->workqueue,
						   &priv->abort_scan);
				}

				if (x->number > priv->missed_beacon_threshold &&
				    priv->status & STATUS_ASSOCIATED) {
					IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
						  IPW_DL_STATE,
						  "Missed beacon: %d - disassociate\n",
						  x->number);
					queue_work(priv->workqueue,
						   &priv->disassociate);
				} else if (x->number > priv->roaming_threshold) {
					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
						  "Missed beacon: %d - initiate "
						  "roaming\n", x->number);
					queue_work(priv->workqueue,
						   &priv->roam);
				} else {
					IPW_DEBUG_NOTIF("Missed beacon: %d\n",
							x->number);
				}

				priv->notif_missed_beacons = x->number;
			}

			break;
		}

	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
			/* TGi (802.11i) transmit key state (logged only). */
			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
			if (notif->size == sizeof(*x)) {
				IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
					  "0x%02x station %d\n",
					  x->key_state, x->security_type,
					  x->station_index);
				break;
			}

			IPW_ERROR
			    ("TGi Tx Key of wrong size %d (should be %zd)\n",
			     notif->size, sizeof(*x));
			break;
		}

	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
			/* Save firmware calibration data for later use. */
			struct notif_calibration *x = &notif->u.calibration;

			if (notif->size == sizeof(*x)) {
				memcpy(&priv->calib, x, sizeof(*x));
				IPW_DEBUG_INFO("TODO: Calibration\n");
				break;
			}

			IPW_ERROR
			    ("Calibration of wrong size %d (should be %zd)\n",
			     notif->size, sizeof(*x));
			break;
		}

	case HOST_NOTIFICATION_NOISE_STATS:{
			/* Fold the latest noise sample (low byte of a u32
			 * payload) into the running average. */
			if (notif->size == sizeof(u32)) {
				priv->last_noise =
				    (u8) (notif->u.noise.value & 0xff);
				average_add(&priv->average_noise,
					    priv->last_noise);
				break;
			}

			IPW_ERROR
			    ("Noise stat is wrong size %d (should be %zd)\n",
			     notif->size, sizeof(u32));
			break;
		}

	default:
		IPW_ERROR("Unknown notification: "
			  "subtype=%d,flags=0x%2x,size=%d\n",
			  notif->subtype, notif->flags, notif->size);
	}
}
  3092. /**
  3093. * Destroys all DMA structures and initialise them again
  3094. *
  3095. * @param priv
  3096. * @return error code
  3097. */
  3098. static int ipw_queue_reset(struct ipw_priv *priv)
  3099. {
  3100. int rc = 0;
  3101. /** @todo customize queue sizes */
  3102. int nTx = 64, nTxCmd = 8;
  3103. ipw_tx_queue_free(priv);
  3104. /* Tx CMD queue */
  3105. rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
  3106. CX2_TX_CMD_QUEUE_READ_INDEX,
  3107. CX2_TX_CMD_QUEUE_WRITE_INDEX,
  3108. CX2_TX_CMD_QUEUE_BD_BASE,
  3109. CX2_TX_CMD_QUEUE_BD_SIZE);
  3110. if (rc) {
  3111. IPW_ERROR("Tx Cmd queue init failed\n");
  3112. goto error;
  3113. }
  3114. /* Tx queue(s) */
  3115. rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
  3116. CX2_TX_QUEUE_0_READ_INDEX,
  3117. CX2_TX_QUEUE_0_WRITE_INDEX,
  3118. CX2_TX_QUEUE_0_BD_BASE, CX2_TX_QUEUE_0_BD_SIZE);
  3119. if (rc) {
  3120. IPW_ERROR("Tx 0 queue init failed\n");
  3121. goto error;
  3122. }
  3123. rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
  3124. CX2_TX_QUEUE_1_READ_INDEX,
  3125. CX2_TX_QUEUE_1_WRITE_INDEX,
  3126. CX2_TX_QUEUE_1_BD_BASE, CX2_TX_QUEUE_1_BD_SIZE);
  3127. if (rc) {
  3128. IPW_ERROR("Tx 1 queue init failed\n");
  3129. goto error;
  3130. }
  3131. rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
  3132. CX2_TX_QUEUE_2_READ_INDEX,
  3133. CX2_TX_QUEUE_2_WRITE_INDEX,
  3134. CX2_TX_QUEUE_2_BD_BASE, CX2_TX_QUEUE_2_BD_SIZE);
  3135. if (rc) {
  3136. IPW_ERROR("Tx 2 queue init failed\n");
  3137. goto error;
  3138. }
  3139. rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
  3140. CX2_TX_QUEUE_3_READ_INDEX,
  3141. CX2_TX_QUEUE_3_WRITE_INDEX,
  3142. CX2_TX_QUEUE_3_BD_BASE, CX2_TX_QUEUE_3_BD_SIZE);
  3143. if (rc) {
  3144. IPW_ERROR("Tx 3 queue init failed\n");
  3145. goto error;
  3146. }
  3147. /* statistics */
  3148. priv->rx_bufs_min = 0;
  3149. priv->rx_pend_max = 0;
  3150. return rc;
  3151. error:
  3152. ipw_tx_queue_free(priv);
  3153. return rc;
  3154. }
  3155. /**
  3156. * Reclaim Tx queue entries no more used by NIC.
  3157. *
  3158. * When FW adwances 'R' index, all entries between old and
  3159. * new 'R' index need to be reclaimed. As result, some free space
  3160. * forms. If there is enough free space (> low mark), wake Tx queue.
  3161. *
  3162. * @note Need to protect against garbage in 'R' index
  3163. * @param priv
  3164. * @param txq
  3165. * @param qindex
  3166. * @return Number of used entries remains in the queue
  3167. */
  3168. static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
  3169. struct clx2_tx_queue *txq, int qindex)
  3170. {
  3171. u32 hw_tail;
  3172. int used;
  3173. struct clx2_queue *q = &txq->q;
  3174. hw_tail = ipw_read32(priv, q->reg_r);
  3175. if (hw_tail >= q->n_bd) {
  3176. IPW_ERROR
  3177. ("Read index for DMA queue (%d) is out of range [0-%d)\n",
  3178. hw_tail, q->n_bd);
  3179. goto done;
  3180. }
  3181. for (; q->last_used != hw_tail;
  3182. q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
  3183. ipw_queue_tx_free_tfd(priv, txq);
  3184. priv->tx_packets++;
  3185. }
  3186. done:
  3187. if (ipw_queue_space(q) > q->low_mark && qindex >= 0) {
  3188. __maybe_wake_tx(priv);
  3189. }
  3190. used = q->first_empty - q->last_used;
  3191. if (used < 0)
  3192. used += q->n_bd;
  3193. return used;
  3194. }
  3195. static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
  3196. int len, int sync)
  3197. {
  3198. struct clx2_tx_queue *txq = &priv->txq_cmd;
  3199. struct clx2_queue *q = &txq->q;
  3200. struct tfd_frame *tfd;
  3201. if (ipw_queue_space(q) < (sync ? 1 : 2)) {
  3202. IPW_ERROR("No space for Tx\n");
  3203. return -EBUSY;
  3204. }
  3205. tfd = &txq->bd[q->first_empty];
  3206. txq->txb[q->first_empty] = NULL;
  3207. memset(tfd, 0, sizeof(*tfd));
  3208. tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
  3209. tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
  3210. priv->hcmd_seq++;
  3211. tfd->u.cmd.index = hcmd;
  3212. tfd->u.cmd.length = len;
  3213. memcpy(tfd->u.cmd.payload, buf, len);
  3214. q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
  3215. ipw_write32(priv, q->reg_w, q->first_empty);
  3216. _ipw_read32(priv, 0x90);
  3217. return 0;
  3218. }
  3219. /*
  3220. * Rx theory of operation
  3221. *
  3222. * The host allocates 32 DMA target addresses and passes the host address
  3223. * to the firmware at register CX2_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
  3224. * 0 to 31
  3225. *
  3226. * Rx Queue Indexes
  3227. * The host/firmware share two index registers for managing the Rx buffers.
  3228. *
  3229. * The READ index maps to the first position that the firmware may be writing
  3230. * to -- the driver can read up to (but not including) this position and get
  3231. * good data.
  3232. * The READ index is managed by the firmware once the card is enabled.
  3233. *
  3234. * The WRITE index maps to the last position the driver has read from -- the
  3235. * position preceding WRITE is the last slot the firmware can place a packet.
  3236. *
  3237. * The queue is empty (no good data) if WRITE = READ - 1, and is full if
  3238. * WRITE = READ.
  3239. *
  3240. * During initialization the host sets up the READ queue position to the first
  3241. * INDEX position, and WRITE to the last (READ - 1 wrapped)
  3242. *
  3243. * When the firmware places a packet in a buffer it will advance the READ index
  3244. * and fire the RX interrupt. The driver can then query the READ index and
  3245. * process as many packets as possible, moving the WRITE index forward as it
  3246. * resets the Rx queue buffers with new memory.
  3247. *
  3248. * The management in the driver is as follows:
  3249. * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
  3250. * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 * to replenish the ipw->rxq->rx_free.
  3252. * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
  3253. * ipw->rxq is replenished and the READ INDEX is updated (updating the
  3254. * 'processed' and 'read' driver indexes as well)
  3255. * + A received packet is processed and handed to the kernel network stack,
  3256. * detached from the ipw->rxq. The driver 'processed' index is updated.
  3257. * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
  3258. * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
  3259. * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
  3260. * were enough free buffers and RX_STALLED is set it is cleared.
  3261. *
  3262. *
  3263. * Driver sequence:
  3264. *
  3265. * ipw_rx_queue_alloc() Allocates rx_free
  3266. * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
  3267. * ipw_rx_queue_restock
  3268. * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
  3269. * queue, updates firmware pointers, and updates
  3270. * the WRITE index. If insufficient rx_free buffers
  3271. * are available, schedules ipw_rx_queue_replenish
  3272. *
  3273. * -- enable interrupts --
  3274. * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
  3275. * READ INDEX, detaching the SKB from the pool.
  3276. * Moves the packet buffer from queue to rx_used.
  3277. * Calls ipw_rx_queue_restock to refill any empty
  3278. * slots.
  3279. * ...
  3280. *
  3281. */
  3282. /*
  3283. * If there are slots in the RX queue that need to be restocked,
  3284. * and we have free pre-allocated buffers, fill the ranks as much
  3285. * as we can pulling from rx_free.
  3286. *
  3287. * This moves the 'write' index forward to catch up with 'processed', and
  3288. * also updates the memory address in the firmware to reference the new
  3289. * target buffer.
  3290. */
  3291. static void ipw_rx_queue_restock(struct ipw_priv *priv)
  3292. {
  3293. struct ipw_rx_queue *rxq = priv->rxq;
  3294. struct list_head *element;
  3295. struct ipw_rx_mem_buffer *rxb;
  3296. unsigned long flags;
  3297. int write;
  3298. spin_lock_irqsave(&rxq->lock, flags);
  3299. write = rxq->write;
  3300. while ((rxq->write != rxq->processed) && (rxq->free_count)) {
  3301. element = rxq->rx_free.next;
  3302. rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
  3303. list_del(element);
  3304. ipw_write32(priv, CX2_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
  3305. rxb->dma_addr);
  3306. rxq->queue[rxq->write] = rxb;
  3307. rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
  3308. rxq->free_count--;
  3309. }
  3310. spin_unlock_irqrestore(&rxq->lock, flags);
  3311. /* If the pre-allocated buffer pool is dropping low, schedule to
  3312. * refill it */
  3313. if (rxq->free_count <= RX_LOW_WATERMARK)
  3314. queue_work(priv->workqueue, &priv->rx_replenish);
  3315. /* If we've added more space for the firmware to place data, tell it */
  3316. if (write != rxq->write)
  3317. ipw_write32(priv, CX2_RX_WRITE_INDEX, rxq->write);
  3318. }
  3319. /*
  3320. * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
  3321. * Also restock the Rx queue via ipw_rx_queue_restock.
  3322. *
  3323. * This is called as a scheduled work item (except for during intialization)
  3324. */
  3325. static void ipw_rx_queue_replenish(void *data)
  3326. {
  3327. struct ipw_priv *priv = data;
  3328. struct ipw_rx_queue *rxq = priv->rxq;
  3329. struct list_head *element;
  3330. struct ipw_rx_mem_buffer *rxb;
  3331. unsigned long flags;
  3332. spin_lock_irqsave(&rxq->lock, flags);
  3333. while (!list_empty(&rxq->rx_used)) {
  3334. element = rxq->rx_used.next;
  3335. rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
  3336. rxb->skb = alloc_skb(CX2_RX_BUF_SIZE, GFP_ATOMIC);
  3337. if (!rxb->skb) {
  3338. printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
  3339. priv->net_dev->name);
  3340. /* We don't reschedule replenish work here -- we will
  3341. * call the restock method and if it still needs
  3342. * more buffers it will schedule replenish */
  3343. break;
  3344. }
  3345. list_del(element);
  3346. rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
  3347. rxb->dma_addr =
  3348. pci_map_single(priv->pci_dev, rxb->skb->data,
  3349. CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
  3350. list_add_tail(&rxb->list, &rxq->rx_free);
  3351. rxq->free_count++;
  3352. }
  3353. spin_unlock_irqrestore(&rxq->lock, flags);
  3354. ipw_rx_queue_restock(priv);
  3355. }
  3356. /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
  3357. * If an SKB has been detached, the POOL needs to have it's SKB set to NULL
  3358. * This free routine walks the list of POOL entries and if SKB is set to
  3359. * non NULL it is unmapped and freed
  3360. */
  3361. static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
  3362. {
  3363. int i;
  3364. if (!rxq)
  3365. return;
  3366. for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
  3367. if (rxq->pool[i].skb != NULL) {
  3368. pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
  3369. CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
  3370. dev_kfree_skb(rxq->pool[i].skb);
  3371. }
  3372. }
  3373. kfree(rxq);
  3374. }
  3375. static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
  3376. {
  3377. struct ipw_rx_queue *rxq;
  3378. int i;
  3379. rxq = (struct ipw_rx_queue *)kmalloc(sizeof(*rxq), GFP_KERNEL);
  3380. memset(rxq, 0, sizeof(*rxq));
  3381. spin_lock_init(&rxq->lock);
  3382. INIT_LIST_HEAD(&rxq->rx_free);
  3383. INIT_LIST_HEAD(&rxq->rx_used);
  3384. /* Fill the rx_used queue with _all_ of the Rx buffers */
  3385. for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
  3386. list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
  3387. /* Set us so that we have processed and used all buffers, but have
  3388. * not restocked the Rx queue with fresh buffers */
  3389. rxq->read = rxq->write = 0;
  3390. rxq->processed = RX_QUEUE_SIZE - 1;
  3391. rxq->free_count = 0;
  3392. return rxq;
  3393. }
  3394. static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
  3395. {
  3396. rate &= ~IEEE80211_BASIC_RATE_MASK;
  3397. if (ieee_mode == IEEE_A) {
  3398. switch (rate) {
  3399. case IEEE80211_OFDM_RATE_6MB:
  3400. return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
  3401. 1 : 0;
  3402. case IEEE80211_OFDM_RATE_9MB:
  3403. return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
  3404. 1 : 0;
  3405. case IEEE80211_OFDM_RATE_12MB:
  3406. return priv->
  3407. rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
  3408. case IEEE80211_OFDM_RATE_18MB:
  3409. return priv->
  3410. rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
  3411. case IEEE80211_OFDM_RATE_24MB:
  3412. return priv->
  3413. rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
  3414. case IEEE80211_OFDM_RATE_36MB:
  3415. return priv->
  3416. rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
  3417. case IEEE80211_OFDM_RATE_48MB:
  3418. return priv->
  3419. rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
  3420. case IEEE80211_OFDM_RATE_54MB:
  3421. return priv->
  3422. rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
  3423. default:
  3424. return 0;
  3425. }
  3426. }
  3427. /* B and G mixed */
  3428. switch (rate) {
  3429. case IEEE80211_CCK_RATE_1MB:
  3430. return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
  3431. case IEEE80211_CCK_RATE_2MB:
  3432. return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
  3433. case IEEE80211_CCK_RATE_5MB:
  3434. return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
  3435. case IEEE80211_CCK_RATE_11MB:
  3436. return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
  3437. }
  3438. /* If we are limited to B modulations, bail at this point */
  3439. if (ieee_mode == IEEE_B)
  3440. return 0;
  3441. /* G */
  3442. switch (rate) {
  3443. case IEEE80211_OFDM_RATE_6MB:
  3444. return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
  3445. case IEEE80211_OFDM_RATE_9MB:
  3446. return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
  3447. case IEEE80211_OFDM_RATE_12MB:
  3448. return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
  3449. case IEEE80211_OFDM_RATE_18MB:
  3450. return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
  3451. case IEEE80211_OFDM_RATE_24MB:
  3452. return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
  3453. case IEEE80211_OFDM_RATE_36MB:
  3454. return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
  3455. case IEEE80211_OFDM_RATE_48MB:
  3456. return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
  3457. case IEEE80211_OFDM_RATE_54MB:
  3458. return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
  3459. }
  3460. return 0;
  3461. }
  3462. static int ipw_compatible_rates(struct ipw_priv *priv,
  3463. const struct ieee80211_network *network,
  3464. struct ipw_supported_rates *rates)
  3465. {
  3466. int num_rates, i;
  3467. memset(rates, 0, sizeof(*rates));
  3468. num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
  3469. rates->num_rates = 0;
  3470. for (i = 0; i < num_rates; i++) {
  3471. if (!ipw_is_rate_in_mask
  3472. (priv, network->mode, network->rates[i])) {
  3473. IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
  3474. network->rates[i], priv->rates_mask);
  3475. continue;
  3476. }
  3477. rates->supported_rates[rates->num_rates++] = network->rates[i];
  3478. }
  3479. num_rates =
  3480. min(network->rates_ex_len, (u8) (IPW_MAX_RATES - num_rates));
  3481. for (i = 0; i < num_rates; i++) {
  3482. if (!ipw_is_rate_in_mask
  3483. (priv, network->mode, network->rates_ex[i])) {
  3484. IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
  3485. network->rates_ex[i], priv->rates_mask);
  3486. continue;
  3487. }
  3488. rates->supported_rates[rates->num_rates++] =
  3489. network->rates_ex[i];
  3490. }
  3491. return rates->num_rates;
  3492. }
  3493. static inline void ipw_copy_rates(struct ipw_supported_rates *dest,
  3494. const struct ipw_supported_rates *src)
  3495. {
  3496. u8 i;
  3497. for (i = 0; i < src->num_rates; i++)
  3498. dest->supported_rates[i] = src->supported_rates[i];
  3499. dest->num_rates = src->num_rates;
  3500. }
  3501. /* TODO: Look at sniffed packets in the air to determine if the basic rate
  3502. * mask should ever be used -- right now all callers to add the scan rates are
  3503. * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
  3504. static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
  3505. u8 modulation, u32 rate_mask)
  3506. {
  3507. u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
  3508. IEEE80211_BASIC_RATE_MASK : 0;
  3509. if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
  3510. rates->supported_rates[rates->num_rates++] =
  3511. IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
  3512. if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
  3513. rates->supported_rates[rates->num_rates++] =
  3514. IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
  3515. if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
  3516. rates->supported_rates[rates->num_rates++] = basic_mask |
  3517. IEEE80211_CCK_RATE_5MB;
  3518. if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
  3519. rates->supported_rates[rates->num_rates++] = basic_mask |
  3520. IEEE80211_CCK_RATE_11MB;
  3521. }
  3522. static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
  3523. u8 modulation, u32 rate_mask)
  3524. {
  3525. u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
  3526. IEEE80211_BASIC_RATE_MASK : 0;
  3527. if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
  3528. rates->supported_rates[rates->num_rates++] = basic_mask |
  3529. IEEE80211_OFDM_RATE_6MB;
  3530. if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
  3531. rates->supported_rates[rates->num_rates++] =
  3532. IEEE80211_OFDM_RATE_9MB;
  3533. if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
  3534. rates->supported_rates[rates->num_rates++] = basic_mask |
  3535. IEEE80211_OFDM_RATE_12MB;
  3536. if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
  3537. rates->supported_rates[rates->num_rates++] =
  3538. IEEE80211_OFDM_RATE_18MB;
  3539. if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
  3540. rates->supported_rates[rates->num_rates++] = basic_mask |
  3541. IEEE80211_OFDM_RATE_24MB;
  3542. if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
  3543. rates->supported_rates[rates->num_rates++] =
  3544. IEEE80211_OFDM_RATE_36MB;
  3545. if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
  3546. rates->supported_rates[rates->num_rates++] =
  3547. IEEE80211_OFDM_RATE_48MB;
  3548. if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
  3549. rates->supported_rates[rates->num_rates++] =
  3550. IEEE80211_OFDM_RATE_54MB;
  3551. }
/* Running result of a network-selection pass (see ipw_best_network):
 * the best candidate seen so far and the subset of its advertised
 * rates that are compatible with our configuration. */
struct ipw_network_match {
    struct ieee80211_network *network;  /* best candidate; NULL if none yet */
    struct ipw_supported_rates rates;   /* rates usable with that network */
};
  3556. static int ipw_best_network(struct ipw_priv *priv,
  3557. struct ipw_network_match *match,
  3558. struct ieee80211_network *network, int roaming)
  3559. {
  3560. struct ipw_supported_rates rates;
  3561. /* Verify that this network's capability is compatible with the
  3562. * current mode (AdHoc or Infrastructure) */
  3563. if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
  3564. !(network->capability & WLAN_CAPABILITY_ESS)) ||
  3565. (priv->ieee->iw_mode == IW_MODE_ADHOC &&
  3566. !(network->capability & WLAN_CAPABILITY_IBSS))) {
  3567. IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
  3568. "capability mismatch.\n",
  3569. escape_essid(network->ssid, network->ssid_len),
  3570. MAC_ARG(network->bssid));
  3571. return 0;
  3572. }
  3573. /* If we do not have an ESSID for this AP, we can not associate with
  3574. * it */
  3575. if (network->flags & NETWORK_EMPTY_ESSID) {
  3576. IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
  3577. "because of hidden ESSID.\n",
  3578. escape_essid(network->ssid, network->ssid_len),
  3579. MAC_ARG(network->bssid));
  3580. return 0;
  3581. }
  3582. if (unlikely(roaming)) {
  3583. /* If we are roaming, then ensure check if this is a valid
  3584. * network to try and roam to */
  3585. if ((network->ssid_len != match->network->ssid_len) ||
  3586. memcmp(network->ssid, match->network->ssid,
  3587. network->ssid_len)) {
  3588. IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
  3589. "because of non-network ESSID.\n",
  3590. escape_essid(network->ssid,
  3591. network->ssid_len),
  3592. MAC_ARG(network->bssid));
  3593. return 0;
  3594. }
  3595. } else {
  3596. /* If an ESSID has been configured then compare the broadcast
  3597. * ESSID to ours */
  3598. if ((priv->config & CFG_STATIC_ESSID) &&
  3599. ((network->ssid_len != priv->essid_len) ||
  3600. memcmp(network->ssid, priv->essid,
  3601. min(network->ssid_len, priv->essid_len)))) {
  3602. char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
  3603. strncpy(escaped,
  3604. escape_essid(network->ssid, network->ssid_len),
  3605. sizeof(escaped));
  3606. IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
  3607. "because of ESSID mismatch: '%s'.\n",
  3608. escaped, MAC_ARG(network->bssid),
  3609. escape_essid(priv->essid,
  3610. priv->essid_len));
  3611. return 0;
  3612. }
  3613. }
  3614. /* If the old network rate is better than this one, don't bother
  3615. * testing everything else. */
  3616. if (match->network && match->network->stats.rssi > network->stats.rssi) {
  3617. char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
  3618. strncpy(escaped,
  3619. escape_essid(network->ssid, network->ssid_len),
  3620. sizeof(escaped));
  3621. IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
  3622. "'%s (" MAC_FMT ")' has a stronger signal.\n",
  3623. escaped, MAC_ARG(network->bssid),
  3624. escape_essid(match->network->ssid,
  3625. match->network->ssid_len),
  3626. MAC_ARG(match->network->bssid));
  3627. return 0;
  3628. }
  3629. /* If this network has already had an association attempt within the
  3630. * last 3 seconds, do not try and associate again... */
  3631. if (network->last_associate &&
  3632. time_after(network->last_associate + (HZ * 5UL), jiffies)) {
  3633. IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
  3634. "because of storming (%lu since last "
  3635. "assoc attempt).\n",
  3636. escape_essid(network->ssid, network->ssid_len),
  3637. MAC_ARG(network->bssid),
  3638. (jiffies - network->last_associate) / HZ);
  3639. return 0;
  3640. }
  3641. /* Now go through and see if the requested network is valid... */
  3642. if (priv->ieee->scan_age != 0 &&
  3643. jiffies - network->last_scanned > priv->ieee->scan_age) {
  3644. IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
  3645. "because of age: %lums.\n",
  3646. escape_essid(network->ssid, network->ssid_len),
  3647. MAC_ARG(network->bssid),
  3648. (jiffies - network->last_scanned) / (HZ / 100));
  3649. return 0;
  3650. }
  3651. if ((priv->config & CFG_STATIC_CHANNEL) &&
  3652. (network->channel != priv->channel)) {
  3653. IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
  3654. "because of channel mismatch: %d != %d.\n",
  3655. escape_essid(network->ssid, network->ssid_len),
  3656. MAC_ARG(network->bssid),
  3657. network->channel, priv->channel);
  3658. return 0;
  3659. }
  3660. /* Verify privacy compatability */
  3661. if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
  3662. ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
  3663. IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
  3664. "because of privacy mismatch: %s != %s.\n",
  3665. escape_essid(network->ssid, network->ssid_len),
  3666. MAC_ARG(network->bssid),
  3667. priv->capability & CAP_PRIVACY_ON ? "on" :
  3668. "off",
  3669. network->capability &
  3670. WLAN_CAPABILITY_PRIVACY ? "on" : "off");
  3671. return 0;
  3672. }
  3673. if ((priv->config & CFG_STATIC_BSSID) &&
  3674. memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
  3675. IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
  3676. "because of BSSID mismatch: " MAC_FMT ".\n",
  3677. escape_essid(network->ssid, network->ssid_len),
  3678. MAC_ARG(network->bssid), MAC_ARG(priv->bssid));
  3679. return 0;
  3680. }
  3681. /* Filter out any incompatible freq / mode combinations */
  3682. if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
  3683. IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
  3684. "because of invalid frequency/mode "
  3685. "combination.\n",
  3686. escape_essid(network->ssid, network->ssid_len),
  3687. MAC_ARG(network->bssid));
  3688. return 0;
  3689. }
  3690. ipw_compatible_rates(priv, network, &rates);
  3691. if (rates.num_rates == 0) {
  3692. IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
  3693. "because of no compatible rates.\n",
  3694. escape_essid(network->ssid, network->ssid_len),
  3695. MAC_ARG(network->bssid));
  3696. return 0;
  3697. }
  3698. /* TODO: Perform any further minimal comparititive tests. We do not
  3699. * want to put too much policy logic here; intelligent scan selection
  3700. * should occur within a generic IEEE 802.11 user space tool. */
  3701. /* Set up 'new' AP to this network */
  3702. ipw_copy_rates(&match->rates, &rates);
  3703. match->network = network;
  3704. IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
  3705. escape_essid(network->ssid, network->ssid_len),
  3706. MAC_ARG(network->bssid));
  3707. return 1;
  3708. }
  3709. static void ipw_adhoc_create(struct ipw_priv *priv,
  3710. struct ieee80211_network *network)
  3711. {
  3712. /*
  3713. * For the purposes of scanning, we can set our wireless mode
  3714. * to trigger scans across combinations of bands, but when it
  3715. * comes to creating a new ad-hoc network, we have tell the FW
  3716. * exactly which band to use.
  3717. *
  3718. * We also have the possibility of an invalid channel for the
  3719. * chossen band. Attempting to create a new ad-hoc network
  3720. * with an invalid channel for wireless mode will trigger a
  3721. * FW fatal error.
  3722. */
  3723. network->mode = is_valid_channel(priv->ieee->mode, priv->channel);
  3724. if (network->mode) {
  3725. network->channel = priv->channel;
  3726. } else {
  3727. IPW_WARNING("Overriding invalid channel\n");
  3728. if (priv->ieee->mode & IEEE_A) {
  3729. network->mode = IEEE_A;
  3730. priv->channel = band_a_active_channel[0];
  3731. } else if (priv->ieee->mode & IEEE_G) {
  3732. network->mode = IEEE_G;
  3733. priv->channel = band_b_active_channel[0];
  3734. } else {
  3735. network->mode = IEEE_B;
  3736. priv->channel = band_b_active_channel[0];
  3737. }
  3738. }
  3739. network->channel = priv->channel;
  3740. priv->config |= CFG_ADHOC_PERSIST;
  3741. ipw_create_bssid(priv, network->bssid);
  3742. network->ssid_len = priv->essid_len;
  3743. memcpy(network->ssid, priv->essid, priv->essid_len);
  3744. memset(&network->stats, 0, sizeof(network->stats));
  3745. network->capability = WLAN_CAPABILITY_IBSS;
  3746. if (priv->capability & CAP_PRIVACY_ON)
  3747. network->capability |= WLAN_CAPABILITY_PRIVACY;
  3748. network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
  3749. memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
  3750. network->rates_ex_len = priv->rates.num_rates - network->rates_len;
  3751. memcpy(network->rates_ex,
  3752. &priv->rates.supported_rates[network->rates_len],
  3753. network->rates_ex_len);
  3754. network->last_scanned = 0;
  3755. network->flags = 0;
  3756. network->last_associate = 0;
  3757. network->time_stamp[0] = 0;
  3758. network->time_stamp[1] = 0;
  3759. network->beacon_interval = 100; /* Default */
  3760. network->listen_interval = 10; /* Default */
  3761. network->atim_window = 0; /* Default */
  3762. #ifdef CONFIG_IEEE80211_WPA
  3763. network->wpa_ie_len = 0;
  3764. network->rsn_ie_len = 0;
  3765. #endif /* CONFIG_IEEE80211_WPA */
  3766. }
  3767. static void ipw_send_wep_keys(struct ipw_priv *priv)
  3768. {
  3769. struct ipw_wep_key *key;
  3770. int i;
  3771. struct host_cmd cmd = {
  3772. .cmd = IPW_CMD_WEP_KEY,
  3773. .len = sizeof(*key)
  3774. };
  3775. key = (struct ipw_wep_key *)&cmd.param;
  3776. key->cmd_id = DINO_CMD_WEP_KEY;
  3777. key->seq_num = 0;
  3778. for (i = 0; i < 4; i++) {
  3779. key->key_index = i;
  3780. if (!(priv->sec.flags & (1 << i))) {
  3781. key->key_size = 0;
  3782. } else {
  3783. key->key_size = priv->sec.key_sizes[i];
  3784. memcpy(key->key, priv->sec.keys[i], key->key_size);
  3785. }
  3786. if (ipw_send_cmd(priv, &cmd)) {
  3787. IPW_ERROR("failed to send WEP_KEY command\n");
  3788. return;
  3789. }
  3790. }
  3791. }
  3792. static void ipw_adhoc_check(void *data)
  3793. {
  3794. struct ipw_priv *priv = data;
  3795. if (priv->missed_adhoc_beacons++ > priv->missed_beacon_threshold &&
  3796. !(priv->config & CFG_ADHOC_PERSIST)) {
  3797. IPW_DEBUG_SCAN("Disassociating due to missed beacons\n");
  3798. ipw_remove_current_network(priv);
  3799. ipw_disassociate(priv);
  3800. return;
  3801. }
  3802. queue_delayed_work(priv->workqueue, &priv->adhoc_check,
  3803. priv->assoc_request.beacon_interval);
  3804. }
  3805. #ifdef CONFIG_IPW_DEBUG
  3806. static void ipw_debug_config(struct ipw_priv *priv)
  3807. {
  3808. IPW_DEBUG_INFO("Scan completed, no valid APs matched "
  3809. "[CFG 0x%08X]\n", priv->config);
  3810. if (priv->config & CFG_STATIC_CHANNEL)
  3811. IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
  3812. else
  3813. IPW_DEBUG_INFO("Channel unlocked.\n");
  3814. if (priv->config & CFG_STATIC_ESSID)
  3815. IPW_DEBUG_INFO("ESSID locked to '%s'\n",
  3816. escape_essid(priv->essid, priv->essid_len));
  3817. else
  3818. IPW_DEBUG_INFO("ESSID unlocked.\n");
  3819. if (priv->config & CFG_STATIC_BSSID)
  3820. IPW_DEBUG_INFO("BSSID locked to %d\n", priv->channel);
  3821. else
  3822. IPW_DEBUG_INFO("BSSID unlocked.\n");
  3823. if (priv->capability & CAP_PRIVACY_ON)
  3824. IPW_DEBUG_INFO("PRIVACY on\n");
  3825. else
  3826. IPW_DEBUG_INFO("PRIVACY off\n");
  3827. IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
  3828. }
  3829. #else
  3830. #define ipw_debug_config(x) do {} while (0)
  3831. #endif
/*
 * Program the firmware's fixed Tx rate override from priv->rates_mask.
 *
 * The user-space rate mask is band-agnostic, so it is first translated
 * into the representation the current FW band expects; a mask that
 * contains rates invalid for the band is cleared to 0 (no override).
 */
static inline void ipw_set_fixed_rate(struct ipw_priv *priv,
                                      struct ieee80211_network *network)
{
    /* TODO: Verify that this works... */
    struct ipw_fixed_rate fr = {
        .tx_rates = priv->rates_mask
    };
    u32 reg;
    u16 mask = 0;

    /* Identify 'current FW band' and match it with the fixed
     * Tx rates */
    switch (priv->ieee->freq_band) {
    case IEEE80211_52GHZ_BAND: /* A only */
        /* IEEE_A */
        if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
            /* Invalid fixed rate mask */
            fr.tx_rates = 0;
            break;
        }
        /* Shift the OFDM bits down into the A-band position the FW
         * expects. */
        fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
        break;
    default: /* 2.4Ghz or Mixed */
        /* IEEE_B */
        if (network->mode == IEEE_B) {
            if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
                /* Invalid fixed rate mask */
                fr.tx_rates = 0;
            }
            break;
        }
        /* IEEE_G */
        if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
                            IEEE80211_OFDM_RATES_MASK)) {
            /* Invalid fixed rate mask */
            fr.tx_rates = 0;
            break;
        }
        /* For G mode the 6/9/12 Mb OFDM bits are repacked one bit
         * lower; collect the shifted bits in 'mask' and clear the
         * originals, then merge. */
        if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
            mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
            fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
        }
        if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
            mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
            fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
        }
        if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
            mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
            fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
        }
        fr.tx_rates |= mask;
        break;
    }

    /* The override location is itself stored in device memory; read the
     * target address, then write the rate structure there.
     * NOTE(review): *(u32 *)&fr type-puns the struct to a raw word --
     * assumes struct ipw_fixed_rate is exactly 32 bits; verify. */
    reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
    ipw_write_reg32(priv, reg, *(u32 *) & fr);
}
  3887. static int ipw_associate_network(struct ipw_priv *priv,
  3888. struct ieee80211_network *network,
  3889. struct ipw_supported_rates *rates, int roaming)
  3890. {
  3891. int err;
  3892. if (priv->config & CFG_FIXED_RATE)
  3893. ipw_set_fixed_rate(priv, network);
  3894. if (!(priv->config & CFG_STATIC_ESSID)) {
  3895. priv->essid_len = min(network->ssid_len,
  3896. (u8) IW_ESSID_MAX_SIZE);
  3897. memcpy(priv->essid, network->ssid, priv->essid_len);
  3898. }
  3899. network->last_associate = jiffies;
  3900. memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
  3901. priv->assoc_request.channel = network->channel;
  3902. if ((priv->capability & CAP_PRIVACY_ON) &&
  3903. (priv->capability & CAP_SHARED_KEY)) {
  3904. priv->assoc_request.auth_type = AUTH_SHARED_KEY;
  3905. priv->assoc_request.auth_key = priv->sec.active_key;
  3906. } else {
  3907. priv->assoc_request.auth_type = AUTH_OPEN;
  3908. priv->assoc_request.auth_key = 0;
  3909. }
  3910. if (priv->capability & CAP_PRIVACY_ON)
  3911. ipw_send_wep_keys(priv);
  3912. /*
  3913. * It is valid for our ieee device to support multiple modes, but
  3914. * when it comes to associating to a given network we have to choose
  3915. * just one mode.
  3916. */
  3917. if (network->mode & priv->ieee->mode & IEEE_A)
  3918. priv->assoc_request.ieee_mode = IPW_A_MODE;
  3919. else if (network->mode & priv->ieee->mode & IEEE_G)
  3920. priv->assoc_request.ieee_mode = IPW_G_MODE;
  3921. else if (network->mode & priv->ieee->mode & IEEE_B)
  3922. priv->assoc_request.ieee_mode = IPW_B_MODE;
  3923. IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
  3924. "802.11%c [%d], enc=%s%s%s%c%c\n",
  3925. roaming ? "Rea" : "A",
  3926. escape_essid(priv->essid, priv->essid_len),
  3927. network->channel,
  3928. ipw_modes[priv->assoc_request.ieee_mode],
  3929. rates->num_rates,
  3930. priv->capability & CAP_PRIVACY_ON ? "on " : "off",
  3931. priv->capability & CAP_PRIVACY_ON ?
  3932. (priv->capability & CAP_SHARED_KEY ? "(shared)" :
  3933. "(open)") : "",
  3934. priv->capability & CAP_PRIVACY_ON ? " key=" : "",
  3935. priv->capability & CAP_PRIVACY_ON ?
  3936. '1' + priv->sec.active_key : '.',
  3937. priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
  3938. priv->assoc_request.beacon_interval = network->beacon_interval;
  3939. if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
  3940. (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
  3941. priv->assoc_request.assoc_type = HC_IBSS_START;
  3942. priv->assoc_request.assoc_tsf_msw = 0;
  3943. priv->assoc_request.assoc_tsf_lsw = 0;
  3944. } else {
  3945. if (unlikely(roaming))
  3946. priv->assoc_request.assoc_type = HC_REASSOCIATE;
  3947. else
  3948. priv->assoc_request.assoc_type = HC_ASSOCIATE;
  3949. priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
  3950. priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
  3951. }
  3952. memcpy(&priv->assoc_request.bssid, network->bssid, ETH_ALEN);
  3953. if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
  3954. memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
  3955. priv->assoc_request.atim_window = network->atim_window;
  3956. } else {
  3957. memcpy(&priv->assoc_request.dest, network->bssid, ETH_ALEN);
  3958. priv->assoc_request.atim_window = 0;
  3959. }
  3960. priv->assoc_request.capability = network->capability;
  3961. priv->assoc_request.listen_interval = network->listen_interval;
  3962. err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
  3963. if (err) {
  3964. IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
  3965. return err;
  3966. }
  3967. rates->ieee_mode = priv->assoc_request.ieee_mode;
  3968. rates->purpose = IPW_RATE_CONNECT;
  3969. ipw_send_supported_rates(priv, rates);
  3970. if (priv->assoc_request.ieee_mode == IPW_G_MODE)
  3971. priv->sys_config.dot11g_auto_detection = 1;
  3972. else
  3973. priv->sys_config.dot11g_auto_detection = 0;
  3974. err = ipw_send_system_config(priv, &priv->sys_config);
  3975. if (err) {
  3976. IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
  3977. return err;
  3978. }
  3979. IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
  3980. err = ipw_set_sensitivity(priv, network->stats.rssi);
  3981. if (err) {
  3982. IPW_DEBUG_HC("Attempt to send associate command failed.\n");
  3983. return err;
  3984. }
  3985. /*
  3986. * If preemption is enabled, it is possible for the association
  3987. * to complete before we return from ipw_send_associate. Therefore
  3988. * we have to be sure and update our priviate data first.
  3989. */
  3990. priv->channel = network->channel;
  3991. memcpy(priv->bssid, network->bssid, ETH_ALEN);
  3992. priv->status |= STATUS_ASSOCIATING;
  3993. priv->status &= ~STATUS_SECURITY_UPDATED;
  3994. priv->assoc_network = network;
  3995. err = ipw_send_associate(priv, &priv->assoc_request);
  3996. if (err) {
  3997. IPW_DEBUG_HC("Attempt to send associate command failed.\n");
  3998. return err;
  3999. }
  4000. IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
  4001. escape_essid(priv->essid, priv->essid_len),
  4002. MAC_ARG(priv->bssid));
  4003. return 0;
  4004. }
/* Two-pass roaming work handler; see the numbered protocol below. */
static void ipw_roam(void *data)
{
    struct ipw_priv *priv = data;
    struct ieee80211_network *network = NULL;
    /* Seed the match with the current AP so candidates must beat it. */
    struct ipw_network_match match = {
        .network = priv->assoc_network
    };

    /* The roaming process is as follows:
     *
     * 1.  Missed beacon threshold triggers the roaming process by
     *     setting the status ROAM bit and requesting a scan.
     * 2.  When the scan completes, it schedules the ROAM work
     * 3.  The ROAM work looks at all of the known networks for one that
     *     is a better network than the currently associated.  If none
     *     found, the ROAM process is over (ROAM bit cleared)
     * 4.  If a better network is found, a disassociation request is
     *     sent.
     * 5.  When the disassociation completes, the roam work is again
     *     scheduled.  The second time through, the driver is no longer
     *     associated, and the newly selected network is sent an
     *     association request.
     * 6.  At this point, the roaming process is complete and the ROAM
     *     status bit is cleared.
     */

    /* If we are no longer associated, and the roaming bit is no longer
     * set, then we are not actively roaming, so just return */
    if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
        return;

    if (priv->status & STATUS_ASSOCIATED) {
        /* First pass through ROAM process -- look for a better
         * network.  The current AP's RSSI is temporarily forced to
         * -128 so every candidate compares as stronger, then the
         * saved value is restored afterwards. */
        u8 rssi = priv->assoc_network->stats.rssi;
        priv->assoc_network->stats.rssi = -128;
        list_for_each_entry(network, &priv->ieee->network_list, list) {
            if (network != priv->assoc_network)
                ipw_best_network(priv, &match, network, 1);
        }
        priv->assoc_network->stats.rssi = rssi;

        /* match still pointing at the current AP means no candidate
         * survived the filters -- roaming is over. */
        if (match.network == priv->assoc_network) {
            IPW_DEBUG_ASSOC("No better APs in this network to "
                            "roam to.\n");
            priv->status &= ~STATUS_ROAMING;
            ipw_debug_config(priv);
            return;
        }

        /* Drop the current association; the disassoc completion
         * re-schedules this work for the second pass. */
        ipw_send_disassociate(priv, 1);
        priv->assoc_network = match.network;

        return;
    }

    /* Second pass through ROAM process -- request association */
    ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
    ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
    priv->status &= ~STATUS_ROAMING;
}
  4059. static void ipw_associate(void *data)
  4060. {
  4061. struct ipw_priv *priv = data;
  4062. struct ieee80211_network *network = NULL;
  4063. struct ipw_network_match match = {
  4064. .network = NULL
  4065. };
  4066. struct ipw_supported_rates *rates;
  4067. struct list_head *element;
  4068. if (!(priv->config & CFG_ASSOCIATE) &&
  4069. !(priv->config & (CFG_STATIC_ESSID |
  4070. CFG_STATIC_CHANNEL | CFG_STATIC_BSSID))) {
  4071. IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
  4072. return;
  4073. }
  4074. list_for_each_entry(network, &priv->ieee->network_list, list)
  4075. ipw_best_network(priv, &match, network, 0);
  4076. network = match.network;
  4077. rates = &match.rates;
  4078. if (network == NULL &&
  4079. priv->ieee->iw_mode == IW_MODE_ADHOC &&
  4080. priv->config & CFG_ADHOC_CREATE &&
  4081. priv->config & CFG_STATIC_ESSID &&
  4082. !list_empty(&priv->ieee->network_free_list)) {
  4083. element = priv->ieee->network_free_list.next;
  4084. network = list_entry(element, struct ieee80211_network, list);
  4085. ipw_adhoc_create(priv, network);
  4086. rates = &priv->rates;
  4087. list_del(element);
  4088. list_add_tail(&network->list, &priv->ieee->network_list);
  4089. }
  4090. /* If we reached the end of the list, then we don't have any valid
  4091. * matching APs */
  4092. if (!network) {
  4093. ipw_debug_config(priv);
  4094. queue_delayed_work(priv->workqueue, &priv->request_scan,
  4095. SCAN_INTERVAL);
  4096. return;
  4097. }
  4098. ipw_associate_network(priv, network, rates, 0);
  4099. }
  4100. static inline void ipw_handle_data_packet(struct ipw_priv *priv,
  4101. struct ipw_rx_mem_buffer *rxb,
  4102. struct ieee80211_rx_stats *stats)
  4103. {
  4104. struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
  4105. /* We received data from the HW, so stop the watchdog */
  4106. priv->net_dev->trans_start = jiffies;
  4107. /* We only process data packets if the
  4108. * interface is open */
  4109. if (unlikely((pkt->u.frame.length + IPW_RX_FRAME_SIZE) >
  4110. skb_tailroom(rxb->skb))) {
  4111. priv->ieee->stats.rx_errors++;
  4112. priv->wstats.discard.misc++;
  4113. IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
  4114. return;
  4115. } else if (unlikely(!netif_running(priv->net_dev))) {
  4116. priv->ieee->stats.rx_dropped++;
  4117. priv->wstats.discard.misc++;
  4118. IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
  4119. return;
  4120. }
  4121. /* Advance skb->data to the start of the actual payload */
  4122. skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
  4123. /* Set the size of the skb to the size of the frame */
  4124. skb_put(rxb->skb, pkt->u.frame.length);
  4125. IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
  4126. if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
  4127. priv->ieee->stats.rx_errors++;
  4128. else /* ieee80211_rx succeeded, so it now owns the SKB */
  4129. rxb->skb = NULL;
  4130. }
/*
 * Main entry function for receiving a packet with 80211 headers.  This
 * should be called whenever the FW has notified us that there is a new
 * skb in the receive queue.
 *
 * Walks the RX ring from the last processed slot up to the hardware
 * read index, dispatching each packet by its message type, then
 * recycles the buffers and restocks the ring.
 */
static void ipw_rx(struct ipw_priv *priv)
{
    struct ipw_rx_mem_buffer *rxb;
    struct ipw_rx_packet *pkt;
    struct ieee80211_hdr *header;
    u32 r, w, i;
    u8 network_packet;

    r = ipw_read32(priv, CX2_RX_READ_INDEX);
    /* NOTE(review): the write index is read but never used below --
     * possibly only for its hardware read side effect; verify. */
    w = ipw_read32(priv, CX2_RX_WRITE_INDEX);
    i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;

    while (i != r) {
        rxb = priv->rxq->queue[i];
#ifdef CONFIG_IPW_DEBUG
        if (unlikely(rxb == NULL)) {
            printk(KERN_CRIT "Queue not allocated!\n");
            break;
        }
#endif
        priv->rxq->queue[i] = NULL;

        /* Make the DMA'd buffer visible to the CPU before parsing. */
        pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
                                    CX2_RX_BUF_SIZE,
                                    PCI_DMA_FROMDEVICE);

        pkt = (struct ipw_rx_packet *)rxb->skb->data;
        IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
                     pkt->header.message_type,
                     pkt->header.rx_seq_num, pkt->header.control_bits);

        switch (pkt->header.message_type) {
        case RX_FRAME_TYPE:    /* 802.11 frame */  {
                /* Collect per-frame radio statistics; bit 0 of the
                 * control field selects the 2.4GHz band. */
                struct ieee80211_rx_stats stats = {
                    .rssi = pkt->u.frame.rssi_dbm -
                        IPW_RSSI_TO_DBM,
                    .signal = pkt->u.frame.signal,
                    .rate = pkt->u.frame.rate,
                    .mac_time = jiffies,
                    .received_channel =
                        pkt->u.frame.received_channel,
                    .freq =
                        (pkt->u.frame.
                         control & (1 << 0)) ?
                        IEEE80211_24GHZ_BAND :
                        IEEE80211_52GHZ_BAND,
                    .len = pkt->u.frame.length,
                };

                /* Only advertise statistics that carry a value. */
                if (stats.rssi != 0)
                    stats.mask |= IEEE80211_STATMASK_RSSI;
                if (stats.signal != 0)
                    stats.mask |= IEEE80211_STATMASK_SIGNAL;
                if (stats.rate != 0)
                    stats.mask |= IEEE80211_STATMASK_RATE;

                priv->rx_packets++;

#ifdef CONFIG_IPW_PROMISC
                /* Monitor mode: pass everything straight up. */
                if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
                    ipw_handle_data_packet(priv, rxb,
                                           &stats);
                    break;
                }
#endif

                header =
                    (struct ieee80211_hdr *)(rxb->skb->data +
                                             IPW_RX_FRAME_SIZE);
                /* TODO: Check Ad-Hoc dest/source and make sure
                 * that we are actually parsing these packets
                 * correctly -- we should probably use the
                 * frame control of the packet and disregard
                 * the current iw_mode */

                /* Decide whether this frame is addressed to our
                 * network: to our MAC, our BSSID, or broadcast/
                 * multicast. */
                switch (priv->ieee->iw_mode) {
                case IW_MODE_ADHOC:
                    network_packet =
                        !memcmp(header->addr1,
                                priv->net_dev->dev_addr,
                                ETH_ALEN) ||
                        !memcmp(header->addr3,
                                priv->bssid, ETH_ALEN) ||
                        is_broadcast_ether_addr(header->addr1)
                        || is_multicast_ether_addr(header->addr1);
                    break;

                case IW_MODE_INFRA:
                default:
                    network_packet =
                        !memcmp(header->addr3,
                                priv->bssid, ETH_ALEN) ||
                        !memcmp(header->addr1,
                                priv->net_dev->dev_addr,
                                ETH_ALEN) ||
                        is_broadcast_ether_addr(header->addr1)
                        || is_multicast_ether_addr(header->addr1);
                    break;
                }

                /* Track the associated network's signal level. */
                if (network_packet && priv->assoc_network) {
                    priv->assoc_network->stats.rssi =
                        stats.rssi;
                    average_add(&priv->average_rssi,
                                stats.rssi);
                    priv->last_rx_rssi = stats.rssi;
                }

                IPW_DEBUG_RX("Frame: len=%u\n",
                             pkt->u.frame.length);

                /* Frames shorter than their own 802.11 header are
                 * garbage. */
                if (pkt->u.frame.length < frame_hdr_len(header)) {
                    IPW_DEBUG_DROP
                        ("Received packet is too small. "
                         "Dropping.\n");
                    priv->ieee->stats.rx_errors++;
                    priv->wstats.discard.misc++;
                    break;
                }

                switch (WLAN_FC_GET_TYPE(header->frame_ctl)) {
                case IEEE80211_FTYPE_MGMT:
                    ieee80211_rx_mgt(priv->ieee, header,
                                     &stats);
                    /* In Ad-Hoc, learn peers from beacons and
                     * probe responses carrying our BSSID. */
                    if (priv->ieee->iw_mode == IW_MODE_ADHOC
                        &&
                        ((WLAN_FC_GET_STYPE
                          (header->frame_ctl) ==
                          IEEE80211_STYPE_PROBE_RESP)
                         ||
                         (WLAN_FC_GET_STYPE
                          (header->frame_ctl) ==
                          IEEE80211_STYPE_BEACON))
                        && !memcmp(header->addr3,
                                   priv->bssid, ETH_ALEN))
                        ipw_add_station(priv,
                                        header->addr2);
                    break;

                case IEEE80211_FTYPE_CTL:
                    break;

                case IEEE80211_FTYPE_DATA:
                    if (network_packet)
                        ipw_handle_data_packet(priv,
                                               rxb,
                                               &stats);
                    else
                        IPW_DEBUG_DROP("Dropping: "
                                       MAC_FMT ", "
                                       MAC_FMT ", "
                                       MAC_FMT "\n",
                                       MAC_ARG(header->addr1),
                                       MAC_ARG(header->addr2),
                                       MAC_ARG(header->addr3));
                    break;
                }
                break;
            }

        case RX_HOST_NOTIFICATION_TYPE:{
                IPW_DEBUG_RX
                    ("Notification: subtype=%02X flags=%02X size=%d\n",
                     pkt->u.notification.subtype,
                     pkt->u.notification.flags,
                     pkt->u.notification.size);
                ipw_rx_notification(priv, &pkt->u.notification);
                break;
            }

        default:
            IPW_DEBUG_RX("Bad Rx packet of type %d\n",
                         pkt->header.message_type);
            break;
        }

        /* For now we just don't re-use anything.  We can tweak this
         * later to try and re-use notification packets and SKBs that
         * fail to Rx correctly */
        if (rxb->skb != NULL) {
            dev_kfree_skb_any(rxb->skb);
            rxb->skb = NULL;
        }

        pci_unmap_single(priv->pci_dev, rxb->dma_addr,
                         CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
        list_add_tail(&rxb->list, &priv->rxq->rx_used);

        i = (i + 1) % RX_QUEUE_SIZE;
    }

    /* Backtrack one entry */
    priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;

    ipw_rx_queue_restock(priv);
}
  4315. static void ipw_abort_scan(struct ipw_priv *priv)
  4316. {
  4317. int err;
  4318. if (priv->status & STATUS_SCAN_ABORTING) {
  4319. IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
  4320. return;
  4321. }
  4322. priv->status |= STATUS_SCAN_ABORTING;
  4323. err = ipw_send_scan_abort(priv);
  4324. if (err)
  4325. IPW_DEBUG_HC("Request to abort scan failed.\n");
  4326. }
/* Build and send an extended scan request to the firmware.
 *
 * The request is deferred (STATUS_SCAN_PENDING is set, 0 returned)
 * when the driver is shutting down, already scanning, waiting on a
 * scan abort, or RF-killed.  Returns a negative error code only when
 * a firmware command fails. */
static int ipw_request_scan(struct ipw_priv *priv)
{
	struct ipw_scan_request_ext scan;
	int channel_index = 0;
	int i, err, scan_type;

	if (priv->status & STATUS_EXIT_PENDING) {
		IPW_DEBUG_SCAN("Aborting scan due to device shutdown\n");
		priv->status |= STATUS_SCAN_PENDING;
		return 0;
	}

	if (priv->status & STATUS_SCANNING) {
		IPW_DEBUG_HC("Concurrent scan requested. Aborting first.\n");
		priv->status |= STATUS_SCAN_PENDING;
		ipw_abort_scan(priv);
		return 0;
	}

	if (priv->status & STATUS_SCAN_ABORTING) {
		IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
		priv->status |= STATUS_SCAN_PENDING;
		return 0;
	}

	if (priv->status & STATUS_RF_KILL_MASK) {
		IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
		priv->status |= STATUS_SCAN_PENDING;
		return 0;
	}

	memset(&scan, 0, sizeof(scan));

	/* Per-scan-type dwell times.
	 * NOTE(review): presumably milliseconds per channel -- confirm
	 * against the firmware command specification. */
	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 20;
	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 20;
	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 20;

	scan.full_scan_index = ieee80211_get_scans(priv->ieee);

	/* If we are roaming, then make this a directed scan for the current
	 * network. Otherwise, ensure that every other scan is a fast
	 * channel hop scan */
	if ((priv->status & STATUS_ROAMING)
	    || (!(priv->status & STATUS_ASSOCIATED)
		&& (priv->config & CFG_STATIC_ESSID)
		&& (scan.full_scan_index % 2))) {
		err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
		if (err) {
			IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
			return err;
		}

		scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
	} else {
		scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
	}

	/* Each band occupies a run of channels_list[]: the slot at
	 * 'start' is filled in afterwards with a band tag plus the
	 * number of channels that follow it, which is why channel_index
	 * is advanced before each channel is stored. */
	if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
		int start = channel_index;
		for (i = 0; i < MAX_A_CHANNELS; i++) {
			/* The active-channel tables are 0 terminated. */
			if (band_a_active_channel[i] == 0)
				break;
			/* Don't scan the channel we are associated on. */
			if ((priv->status & STATUS_ASSOCIATED) &&
			    band_a_active_channel[i] == priv->channel)
				continue;
			channel_index++;
			scan.channels_list[channel_index] =
			    band_a_active_channel[i];
			ipw_set_scan_type(&scan, channel_index, scan_type);
		}

		if (start != channel_index) {
			/* Band header: mode tag in the top bits, count
			 * of following channels in the low bits. */
			scan.channels_list[start] = (u8) (IPW_A_MODE << 6) |
			    (channel_index - start);
			channel_index++;
		}
	}

	if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
		int start = channel_index;
		for (i = 0; i < MAX_B_CHANNELS; i++) {
			if (band_b_active_channel[i] == 0)
				break;
			/* Don't scan the channel we are associated on. */
			if ((priv->status & STATUS_ASSOCIATED) &&
			    band_b_active_channel[i] == priv->channel)
				continue;
			channel_index++;
			scan.channels_list[channel_index] =
			    band_b_active_channel[i];
			ipw_set_scan_type(&scan, channel_index, scan_type);
		}

		if (start != channel_index) {
			scan.channels_list[start] = (u8) (IPW_B_MODE << 6) |
			    (channel_index - start);
		}
	}

	err = ipw_send_scan_request_ext(priv, &scan);
	if (err) {
		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
		return -EIO;
	}

	priv->status |= STATUS_SCANNING;
	priv->status &= ~STATUS_SCAN_PENDING;

	return 0;
}
/*
 * This file defines the Wireless Extension handlers.  It does not
 * define any methods of hardware manipulation and relies on the
 * functions defined in ipw_main to provide the HW interaction.
 *
 * The exception to this is the use of the ipw_get_ordinal()
 * function used to poll the hardware vs. making unnecessary calls.
 */
  4429. static int ipw_wx_get_name(struct net_device *dev,
  4430. struct iw_request_info *info,
  4431. union iwreq_data *wrqu, char *extra)
  4432. {
  4433. struct ipw_priv *priv = ieee80211_priv(dev);
  4434. if (!(priv->status & STATUS_ASSOCIATED))
  4435. strcpy(wrqu->name, "unassociated");
  4436. else
  4437. snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
  4438. ipw_modes[priv->assoc_request.ieee_mode]);
  4439. IPW_DEBUG_WX("Name: %s\n", wrqu->name);
  4440. return 0;
  4441. }
  4442. static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
  4443. {
  4444. if (channel == 0) {
  4445. IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
  4446. priv->config &= ~CFG_STATIC_CHANNEL;
  4447. if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
  4448. STATUS_ASSOCIATING))) {
  4449. IPW_DEBUG_ASSOC("Attempting to associate with new "
  4450. "parameters.\n");
  4451. ipw_associate(priv);
  4452. }
  4453. return 0;
  4454. }
  4455. priv->config |= CFG_STATIC_CHANNEL;
  4456. if (priv->channel == channel) {
  4457. IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
  4458. channel);
  4459. return 0;
  4460. }
  4461. IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
  4462. priv->channel = channel;
  4463. /* If we are currently associated, or trying to associate
  4464. * then see if this is a new channel (causing us to disassociate) */
  4465. if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
  4466. IPW_DEBUG_ASSOC("Disassociating due to channel change.\n");
  4467. ipw_disassociate(priv);
  4468. } else {
  4469. ipw_associate(priv);
  4470. }
  4471. return 0;
  4472. }
  4473. static int ipw_wx_set_freq(struct net_device *dev,
  4474. struct iw_request_info *info,
  4475. union iwreq_data *wrqu, char *extra)
  4476. {
  4477. struct ipw_priv *priv = ieee80211_priv(dev);
  4478. struct iw_freq *fwrq = &wrqu->freq;
  4479. /* if setting by freq convert to channel */
  4480. if (fwrq->e == 1) {
  4481. if ((fwrq->m >= (int)2.412e8 && fwrq->m <= (int)2.487e8)) {
  4482. int f = fwrq->m / 100000;
  4483. int c = 0;
  4484. while ((c < REG_MAX_CHANNEL) &&
  4485. (f != ipw_frequencies[c]))
  4486. c++;
  4487. /* hack to fall through */
  4488. fwrq->e = 0;
  4489. fwrq->m = c + 1;
  4490. }
  4491. }
  4492. if (fwrq->e > 0 || fwrq->m > 1000)
  4493. return -EOPNOTSUPP;
  4494. IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
  4495. return ipw_set_channel(priv, (u8) fwrq->m);
  4496. return 0;
  4497. }
  4498. static int ipw_wx_get_freq(struct net_device *dev,
  4499. struct iw_request_info *info,
  4500. union iwreq_data *wrqu, char *extra)
  4501. {
  4502. struct ipw_priv *priv = ieee80211_priv(dev);
  4503. wrqu->freq.e = 0;
  4504. /* If we are associated, trying to associate, or have a statically
  4505. * configured CHANNEL then return that; otherwise return ANY */
  4506. if (priv->config & CFG_STATIC_CHANNEL ||
  4507. priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
  4508. wrqu->freq.m = priv->channel;
  4509. else
  4510. wrqu->freq.m = 0;
  4511. IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
  4512. return 0;
  4513. }
/* SIOCSIWMODE: switch between infrastructure, ad-hoc and (optionally)
 * monitor mode.  A mode change releases any cached firmware images and
 * restarts the adapter so the matching firmware is reloaded. */
static int ipw_wx_set_mode(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int err = 0;

	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);

	/* No-op when the mode is unchanged. */
	if (wrqu->mode == priv->ieee->iw_mode)
		return 0;

	switch (wrqu->mode) {
#ifdef CONFIG_IPW_PROMISC
	case IW_MODE_MONITOR:
#endif
	case IW_MODE_ADHOC:
	case IW_MODE_INFRA:
		break;
	case IW_MODE_AUTO:
		/* AUTO is treated as infrastructure. */
		wrqu->mode = IW_MODE_INFRA;
		break;
	default:
		return -EINVAL;
	}

#ifdef CONFIG_IPW_PROMISC
	/* Keep the net_device link type in sync with monitor mode. */
	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
		priv->net_dev->type = ARPHRD_ETHER;

	if (wrqu->mode == IW_MODE_MONITOR)
		priv->net_dev->type = ARPHRD_IEEE80211;
#endif				/* CONFIG_IPW_PROMISC */

#ifdef CONFIG_PM
	/* Free the existing firmware and reset the fw_loaded
	 * flag so ipw_load() will bring in the new firmware.
	 * NOTE(review): fw_loaded/bootfw/ucode/firmware are file-scope
	 * state defined elsewhere in this file. */
	if (fw_loaded) {
		fw_loaded = 0;
	}

	release_firmware(bootfw);
	release_firmware(ucode);
	release_firmware(firmware);
	bootfw = ucode = firmware = NULL;
#endif

	priv->ieee->iw_mode = wrqu->mode;
	/* Restart so the new mode (and firmware) takes effect. */
	ipw_adapter_restart(priv);

	return err;
}
  4557. static int ipw_wx_get_mode(struct net_device *dev,
  4558. struct iw_request_info *info,
  4559. union iwreq_data *wrqu, char *extra)
  4560. {
  4561. struct ipw_priv *priv = ieee80211_priv(dev);
  4562. wrqu->mode = priv->ieee->iw_mode;
  4563. IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
  4564. return 0;
  4565. }
/* Wireless Extension limits and defaults (RTS threshold, beacon
 * interval, retry limits). */
#define DEFAULT_RTS_THRESHOLD 2304U
#define MIN_RTS_THRESHOLD 1U
#define MAX_RTS_THRESHOLD 2304U
#define DEFAULT_BEACON_INTERVAL 100U
#define DEFAULT_SHORT_RETRY_LIMIT 7U
#define DEFAULT_LONG_RETRY_LIMIT 4U

/* Values are in microsecond */
/* Power-save timeouts.
 * NOTE(review): indexed by power level - 1, based on the use in
 * ipw_wx_get_powermode() (timeout_duration[level - 1]). */
static const s32 timeout_duration[] = {
	350000,
	250000,
	75000,
	37000,
	25000,
};

/* Power-save periods; same indexing as timeout_duration. */
static const s32 period_duration[] = {
	400000,
	700000,
	1000000,
	1000000,
	1000000
};
/* SIOCGIWRANGE: fill in the iw_range structure describing the
 * capabilities of the device (rates, thresholds, WEP key sizes,
 * frequency list) for userspace tools. */
static int ipw_wx_get_range(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	struct iw_range *range = (struct iw_range *)extra;
	u16 val;
	int i;

	wrqu->data.length = sizeof(*range);
	memset(range, 0, sizeof(*range));

	/* 54Mbs == ~27 Mb/s real (802.11g) */
	range->throughput = 27 * 1000 * 1000;

	range->max_qual.qual = 100;
	/* TODO: Find real max RSSI and stick here */
	range->max_qual.level = 0;
	range->max_qual.noise = 0;
	range->max_qual.updated = 7;	/* Updated all three */

	range->avg_qual.qual = 70;
	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
	range->avg_qual.level = 0;	/* FIXME to real average level */
	range->avg_qual.noise = 0;
	range->avg_qual.updated = 7;	/* Updated all three */

	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);

	/* Rates are stored in 500 kb/s units; the top bit (basic-rate
	 * flag) is masked off before conversion to b/s. */
	for (i = 0; i < range->num_bitrates; i++)
		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
		    500000;

	range->max_rts = DEFAULT_RTS_THRESHOLD;
	range->min_frag = MIN_FRAG_THRESHOLD;
	range->max_frag = MAX_FRAG_THRESHOLD;

	/* WEP key sizes: 40-bit (5 byte) and 104-bit (13 byte). */
	range->encoding_size[0] = 5;
	range->encoding_size[1] = 13;
	range->num_encoding_sizes = 2;
	range->max_encoding_tokens = WEP_KEYS;

	/* Set the Wireless Extension versions */
	range->we_version_compiled = WIRELESS_EXT;
	range->we_version_source = 16;

	range->num_channels = FREQ_COUNT;

	/* Export the frequency table, capped at IW_MAX_FREQUENCIES. */
	val = 0;
	for (i = 0; i < FREQ_COUNT; i++) {
		range->freq[val].i = i + 1;
		range->freq[val].m = ipw_frequencies[i] * 100000;
		range->freq[val].e = 1;
		val++;

		if (val == IW_MAX_FREQUENCIES)
			break;
	}
	range->num_frequency = val;

	IPW_DEBUG_WX("GET Range\n");
	return 0;
}
/* SIOCSIWAP: pin the association to a specific BSSID, or clear the
 * restriction when the all-ones/all-zeroes sentinel address is given.
 * A BSSID change while associated/associating disassociates. */
static int ipw_wx_set_wap(struct net_device *dev,
			  struct iw_request_info *info,
			  union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);

	/* Sentinel addresses: both mean "no fixed BSSID". */
	static const unsigned char any[] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	static const unsigned char off[] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
		return -EINVAL;

	if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
	    !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
		/* we disable mandatory BSSID association */
		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
		priv->config &= ~CFG_STATIC_BSSID;
		/* Kick an association only if the device is idle. */
		if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
				      STATUS_ASSOCIATING))) {
			IPW_DEBUG_ASSOC("Attempting to associate with new "
					"parameters.\n");
			ipw_associate(priv);
		}

		return 0;
	}

	priv->config |= CFG_STATIC_BSSID;
	if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
		return 0;
	}

	IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
		     MAC_ARG(wrqu->ap_addr.sa_data));

	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);

	/* If we are currently associated, or trying to associate
	 * then see if this is a new BSSID (causing us to disassociate) */
	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
		IPW_DEBUG_ASSOC("Disassociating due to BSSID change.\n");
		ipw_disassociate(priv);
	} else {
		ipw_associate(priv);
	}

	return 0;
}
  4681. static int ipw_wx_get_wap(struct net_device *dev,
  4682. struct iw_request_info *info,
  4683. union iwreq_data *wrqu, char *extra)
  4684. {
  4685. struct ipw_priv *priv = ieee80211_priv(dev);
  4686. /* If we are associated, trying to associate, or have a statically
  4687. * configured BSSID then return that; otherwise return ANY */
  4688. if (priv->config & CFG_STATIC_BSSID ||
  4689. priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
  4690. wrqu->ap_addr.sa_family = ARPHRD_ETHER;
  4691. memcpy(wrqu->ap_addr.sa_data, &priv->bssid, ETH_ALEN);
  4692. } else
  4693. memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
  4694. IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
  4695. MAC_ARG(wrqu->ap_addr.sa_data));
  4696. return 0;
  4697. }
/* SIOCSIWESSID: set a static ESSID, or clear it (associate with any
 * network) when an empty ESSID is given.  An ESSID change while
 * associated/associating disassociates. */
static int ipw_wx_set_essid(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	char *essid = "";	/* ANY */
	int length = 0;

	/* NOTE(review): the '- 1' assumes pre-WE-21 semantics where
	 * essid.length includes the trailing NUL -- confirm against the
	 * wireless extensions version in use (we_version_source = 16). */
	if (wrqu->essid.flags && wrqu->essid.length) {
		length = wrqu->essid.length - 1;
		essid = extra;
	}

	if (length == 0) {
		IPW_DEBUG_WX("Setting ESSID to ANY\n");
		priv->config &= ~CFG_STATIC_ESSID;
		/* Kick an association only if the device is idle. */
		if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
				      STATUS_ASSOCIATING))) {
			IPW_DEBUG_ASSOC("Attempting to associate with new "
					"parameters.\n");
			ipw_associate(priv);
		}

		return 0;
	}

	length = min(length, IW_ESSID_MAX_SIZE);

	priv->config |= CFG_STATIC_ESSID;

	/* Nothing to do if the ESSID is unchanged. */
	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
		return 0;
	}

	IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
		     length);

	priv->essid_len = length;
	memcpy(priv->essid, essid, priv->essid_len);

	/* If we are currently associated, or trying to associate
	 * then see if this is a new ESSID (causing us to disassociate) */
	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
		IPW_DEBUG_ASSOC("Disassociating due to ESSID change.\n");
		ipw_disassociate(priv);
	} else {
		ipw_associate(priv);
	}

	return 0;
}
  4740. static int ipw_wx_get_essid(struct net_device *dev,
  4741. struct iw_request_info *info,
  4742. union iwreq_data *wrqu, char *extra)
  4743. {
  4744. struct ipw_priv *priv = ieee80211_priv(dev);
  4745. /* If we are associated, trying to associate, or have a statically
  4746. * configured ESSID then return that; otherwise return ANY */
  4747. if (priv->config & CFG_STATIC_ESSID ||
  4748. priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
  4749. IPW_DEBUG_WX("Getting essid: '%s'\n",
  4750. escape_essid(priv->essid, priv->essid_len));
  4751. memcpy(extra, priv->essid, priv->essid_len);
  4752. wrqu->essid.length = priv->essid_len;
  4753. wrqu->essid.flags = 1; /* active */
  4754. } else {
  4755. IPW_DEBUG_WX("Getting essid: ANY\n");
  4756. wrqu->essid.length = 0;
  4757. wrqu->essid.flags = 0; /* active */
  4758. }
  4759. return 0;
  4760. }
  4761. static int ipw_wx_set_nick(struct net_device *dev,
  4762. struct iw_request_info *info,
  4763. union iwreq_data *wrqu, char *extra)
  4764. {
  4765. struct ipw_priv *priv = ieee80211_priv(dev);
  4766. IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
  4767. if (wrqu->data.length > IW_ESSID_MAX_SIZE)
  4768. return -E2BIG;
  4769. wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
  4770. memset(priv->nick, 0, sizeof(priv->nick));
  4771. memcpy(priv->nick, extra, wrqu->data.length);
  4772. IPW_DEBUG_TRACE("<<\n");
  4773. return 0;
  4774. }
  4775. static int ipw_wx_get_nick(struct net_device *dev,
  4776. struct iw_request_info *info,
  4777. union iwreq_data *wrqu, char *extra)
  4778. {
  4779. struct ipw_priv *priv = ieee80211_priv(dev);
  4780. IPW_DEBUG_WX("Getting nick\n");
  4781. wrqu->data.length = strlen(priv->nick) + 1;
  4782. memcpy(extra, priv->nick, wrqu->data.length);
  4783. wrqu->data.flags = 1; /* active */
  4784. return 0;
  4785. }
/* SIOCSIWRATE: setting a fixed bit rate is not supported. */
static int ipw_wx_set_rate(struct net_device *dev,
			   struct iw_request_info *info,
			   union iwreq_data *wrqu, char *extra)
{
	IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
	return -EOPNOTSUPP;
}
  4793. static int ipw_wx_get_rate(struct net_device *dev,
  4794. struct iw_request_info *info,
  4795. union iwreq_data *wrqu, char *extra)
  4796. {
  4797. struct ipw_priv *priv = ieee80211_priv(dev);
  4798. wrqu->bitrate.value = priv->last_rate;
  4799. IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
  4800. return 0;
  4801. }
  4802. static int ipw_wx_set_rts(struct net_device *dev,
  4803. struct iw_request_info *info,
  4804. union iwreq_data *wrqu, char *extra)
  4805. {
  4806. struct ipw_priv *priv = ieee80211_priv(dev);
  4807. if (wrqu->rts.disabled)
  4808. priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
  4809. else {
  4810. if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
  4811. wrqu->rts.value > MAX_RTS_THRESHOLD)
  4812. return -EINVAL;
  4813. priv->rts_threshold = wrqu->rts.value;
  4814. }
  4815. ipw_send_rts_threshold(priv, priv->rts_threshold);
  4816. IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
  4817. return 0;
  4818. }
  4819. static int ipw_wx_get_rts(struct net_device *dev,
  4820. struct iw_request_info *info,
  4821. union iwreq_data *wrqu, char *extra)
  4822. {
  4823. struct ipw_priv *priv = ieee80211_priv(dev);
  4824. wrqu->rts.value = priv->rts_threshold;
  4825. wrqu->rts.fixed = 0; /* no auto select */
  4826. wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
  4827. IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
  4828. return 0;
  4829. }
/* SIOCSIWTXPOW: set the transmit power (dBm only, range [-12, 20])
 * on both the G and B band channel tables, and honour the "disabled"
 * flag as a software RF-kill toggle. */
static int ipw_wx_set_txpow(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	struct ipw_tx_power tx_power;
	int i;

	/* power.disabled doubles as the software RF-kill switch. */
	if (ipw_radio_kill_sw(priv, wrqu->power.disabled))
		return -EINPROGRESS;

	/* Only absolute dBm requests are supported. */
	if (wrqu->power.flags != IW_TXPOW_DBM)
		return -EINVAL;

	if ((wrqu->power.value > 20) || (wrqu->power.value < -12))
		return -EINVAL;

	priv->tx_power = wrqu->power.value;

	memset(&tx_power, 0, sizeof(tx_power));

	/* configure device for 'G' band */
	tx_power.ieee_mode = IPW_G_MODE;
	tx_power.num_channels = 11;
	/* Apply the same power to channels 1..11. */
	for (i = 0; i < 11; i++) {
		tx_power.channels_tx_power[i].channel_number = i + 1;
		tx_power.channels_tx_power[i].tx_power = priv->tx_power;
	}

	if (ipw_send_tx_power(priv, &tx_power))
		goto error;

	/* configure device to also handle 'B' band */
	tx_power.ieee_mode = IPW_B_MODE;
	if (ipw_send_tx_power(priv, &tx_power))
		goto error;

	return 0;

      error:
	return -EIO;
}
  4862. static int ipw_wx_get_txpow(struct net_device *dev,
  4863. struct iw_request_info *info,
  4864. union iwreq_data *wrqu, char *extra)
  4865. {
  4866. struct ipw_priv *priv = ieee80211_priv(dev);
  4867. wrqu->power.value = priv->tx_power;
  4868. wrqu->power.fixed = 1;
  4869. wrqu->power.flags = IW_TXPOW_DBM;
  4870. wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
  4871. IPW_DEBUG_WX("GET TX Power -> %s %d \n",
  4872. wrqu->power.disabled ? "ON" : "OFF", wrqu->power.value);
  4873. return 0;
  4874. }
  4875. static int ipw_wx_set_frag(struct net_device *dev,
  4876. struct iw_request_info *info,
  4877. union iwreq_data *wrqu, char *extra)
  4878. {
  4879. struct ipw_priv *priv = ieee80211_priv(dev);
  4880. if (wrqu->frag.disabled)
  4881. priv->ieee->fts = DEFAULT_FTS;
  4882. else {
  4883. if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
  4884. wrqu->frag.value > MAX_FRAG_THRESHOLD)
  4885. return -EINVAL;
  4886. priv->ieee->fts = wrqu->frag.value & ~0x1;
  4887. }
  4888. ipw_send_frag_threshold(priv, wrqu->frag.value);
  4889. IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
  4890. return 0;
  4891. }
  4892. static int ipw_wx_get_frag(struct net_device *dev,
  4893. struct iw_request_info *info,
  4894. union iwreq_data *wrqu, char *extra)
  4895. {
  4896. struct ipw_priv *priv = ieee80211_priv(dev);
  4897. wrqu->frag.value = priv->ieee->fts;
  4898. wrqu->frag.fixed = 0; /* no auto select */
  4899. wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
  4900. IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
  4901. return 0;
  4902. }
/* SIOCSIWRETRY: configuring retry limits is not supported. */
static int ipw_wx_set_retry(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
	return -EOPNOTSUPP;
}
/* SIOCGIWRETRY: querying retry limits is not supported. */
static int ipw_wx_get_retry(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
	return -EOPNOTSUPP;
}
  4917. static int ipw_wx_set_scan(struct net_device *dev,
  4918. struct iw_request_info *info,
  4919. union iwreq_data *wrqu, char *extra)
  4920. {
  4921. struct ipw_priv *priv = ieee80211_priv(dev);
  4922. IPW_DEBUG_WX("Start scan\n");
  4923. if (ipw_request_scan(priv))
  4924. return -EIO;
  4925. return 0;
  4926. }
  4927. static int ipw_wx_get_scan(struct net_device *dev,
  4928. struct iw_request_info *info,
  4929. union iwreq_data *wrqu, char *extra)
  4930. {
  4931. struct ipw_priv *priv = ieee80211_priv(dev);
  4932. return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
  4933. }
  4934. static int ipw_wx_set_encode(struct net_device *dev,
  4935. struct iw_request_info *info,
  4936. union iwreq_data *wrqu, char *key)
  4937. {
  4938. struct ipw_priv *priv = ieee80211_priv(dev);
  4939. return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
  4940. }
  4941. static int ipw_wx_get_encode(struct net_device *dev,
  4942. struct iw_request_info *info,
  4943. union iwreq_data *wrqu, char *key)
  4944. {
  4945. struct ipw_priv *priv = ieee80211_priv(dev);
  4946. return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
  4947. }
/* SIOCSIWPOWER: enable or disable 802.11 power management.
 * Disabling drops the device back to CAM (constantly awake mode)
 * while remembering the configured level for re-enable. */
static int ipw_wx_set_power(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int err;

	if (wrqu->power.disabled) {
		/* Keep only the level bits; the enable bit is cleared. */
		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
		if (err) {
			IPW_DEBUG_WX("failed setting power mode.\n");
			return err;
		}

		IPW_DEBUG_WX("SET Power Management Mode -> off\n");

		return 0;
	}

	switch (wrqu->power.flags & IW_POWER_MODE) {
	case IW_POWER_ON:	/* If not specified */
	case IW_POWER_MODE:	/* If set all mask */
	case IW_POWER_ALL_R:	/* If explicitly state all */
		break;
	default:		/* Otherwise we don't support it */
		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
			     wrqu->power.flags);
		return -EOPNOTSUPP;
	}

	/* If the user hasn't specified a power management mode yet, default
	 * to BATTERY */
	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
	else
		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;

	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
	if (err) {
		IPW_DEBUG_WX("failed setting power mode.\n");
		return err;
	}

	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);

	return 0;
}
  4988. static int ipw_wx_get_power(struct net_device *dev,
  4989. struct iw_request_info *info,
  4990. union iwreq_data *wrqu, char *extra)
  4991. {
  4992. struct ipw_priv *priv = ieee80211_priv(dev);
  4993. if (!(priv->power_mode & IPW_POWER_ENABLED)) {
  4994. wrqu->power.disabled = 1;
  4995. } else {
  4996. wrqu->power.disabled = 0;
  4997. }
  4998. IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
  4999. return 0;
  5000. }
/* Private ioctl "set_power": select a specific power-save level;
 * out-of-range requests fall back to AC mode. */
static int ipw_wx_set_powermode(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int mode = *(int *)extra;
	int err;

	if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
		mode = IPW_POWER_AC;
		priv->power_mode = mode;
	} else {
		priv->power_mode = IPW_POWER_ENABLED | mode;
	}

	/* NOTE(review): in the AC fallback branch above priv->power_mode
	 * was just set equal to mode, so this comparison is false there
	 * and no command is sent in that case -- verify that is the
	 * intended behavior. */
	if (priv->power_mode != mode) {
		err = ipw_send_power_mode(priv, mode);
		if (err) {
			IPW_DEBUG_WX("failed setting power mode.\n");
			return err;
		}
	}

	return 0;
}
/* Upper bound (bytes, including NUL) for strings returned by the
 * private get_* ioctls. */
#define MAX_WX_STRING 80
/* Private ioctl "get_power": format a human-readable description of
 * the current power-save level into 'extra'. */
static int ipw_wx_get_powermode(struct net_device *dev,
				struct iw_request_info *info,
				union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int level = IPW_POWER_LEVEL(priv->power_mode);
	char *p = extra;

	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);

	switch (level) {
	case IPW_POWER_AC:
		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
		break;
	case IPW_POWER_BATTERY:
		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
		break;
	default:
		/* Other levels index the duration tables (level - 1). */
		p += snprintf(p, MAX_WX_STRING - (p - extra),
			      "(Timeout %dms, Period %dms)",
			      timeout_duration[level - 1] / 1000,
			      period_duration[level - 1] / 1000);
	}

	/* Append " OFF" when power management is not enabled. */
	if (!(priv->power_mode & IPW_POWER_ENABLED))
		p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");

	/* Length includes the terminating NUL. */
	wrqu->data.length = p - extra + 1;

	return 0;
}
/* Private ioctl "set_mode": select the 802.11 band/modulation mix via
 * the IEEE_A/IEEE_B/IEEE_G bit mask.  802.11a is only permitted on
 * the 2915ABG adapter. */
static int ipw_wx_set_wireless_mode(struct net_device *dev,
				    struct iw_request_info *info,
				    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int mode = *(int *)extra;
	u8 band = 0, modulation = 0;

	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
		return -EINVAL;
	}

	/* 'abg_ture' (sic) is the field's actual name in the ieee
	 * structure; it is cleared whenever any of a/b/g is absent. */
	if (priv->adapter == IPW_2915ABG) {
		priv->ieee->abg_ture = 1;
		if (mode & IEEE_A) {
			band |= IEEE80211_52GHZ_BAND;
			modulation |= IEEE80211_OFDM_MODULATION;
		} else
			priv->ieee->abg_ture = 0;
	} else {
		if (mode & IEEE_A) {
			IPW_WARNING("Attempt to set 2200BG into "
				    "802.11a mode\n");
			return -EINVAL;
		}

		priv->ieee->abg_ture = 0;
	}

	if (mode & IEEE_B) {
		band |= IEEE80211_24GHZ_BAND;
		modulation |= IEEE80211_CCK_MODULATION;
	} else
		priv->ieee->abg_ture = 0;

	if (mode & IEEE_G) {
		band |= IEEE80211_24GHZ_BAND;
		modulation |= IEEE80211_OFDM_MODULATION;
	} else
		priv->ieee->abg_ture = 0;

	priv->ieee->mode = mode;
	priv->ieee->freq_band = band;
	priv->ieee->modulation = modulation;
	/* Rebuild the supported-rates table for the new band mix. */
	init_supported_rates(priv, &priv->rates);

	/* If we are currently associated, or trying to associate
	 * then see if this is a new configuration (causing us to
	 * disassociate) */
	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
		/* The resulting association will trigger
		 * the new rates to be sent to the device */
		IPW_DEBUG_ASSOC("Disassociating due to mode change.\n");
		ipw_disassociate(priv);
	} else
		ipw_send_supported_rates(priv, &priv->rates);

	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
		     mode & IEEE_A ? 'a' : '.',
		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
	return 0;
}
/* Private ioctl "get_mode": map the configured band/modulation
 * combination onto a human-readable "802.11x (n)" string, where n is
 * the IEEE_A/B/G bit-mask value accepted by the set handler. */
static int ipw_wx_get_wireless_mode(struct net_device *dev,
				    struct iw_request_info *info,
				    union iwreq_data *wrqu, char *extra)
{
	struct ipw_priv *priv = ieee80211_priv(dev);

	switch (priv->ieee->freq_band) {
	case IEEE80211_24GHZ_BAND:
		switch (priv->ieee->modulation) {
		case IEEE80211_CCK_MODULATION:
			strncpy(extra, "802.11b (2)", MAX_WX_STRING);
			break;
		case IEEE80211_OFDM_MODULATION:
			strncpy(extra, "802.11g (4)", MAX_WX_STRING);
			break;
		default:	/* both CCK and OFDM set */
			strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
			break;
		}
		break;

	case IEEE80211_52GHZ_BAND:
		strncpy(extra, "802.11a (1)", MAX_WX_STRING);
		break;

	default:		/* Mixed Band */
		switch (priv->ieee->modulation) {
		case IEEE80211_CCK_MODULATION:
			strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
			break;
		case IEEE80211_OFDM_MODULATION:
			strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
			break;
		default:	/* both CCK and OFDM set */
			strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
			break;
		}
		break;
	}

	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);

	/* Length includes the terminating NUL. */
	wrqu->data.length = strlen(extra) + 1;

	return 0;
}
  5145. #ifdef CONFIG_IPW_PROMISC
  5146. static int ipw_wx_set_promisc(struct net_device *dev,
  5147. struct iw_request_info *info,
  5148. union iwreq_data *wrqu, char *extra)
  5149. {
  5150. struct ipw_priv *priv = ieee80211_priv(dev);
  5151. int *parms = (int *)extra;
  5152. int enable = (parms[0] > 0);
  5153. IPW_DEBUG_WX("SET PROMISC: %d %d\n", enable, parms[1]);
  5154. if (enable) {
  5155. if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
  5156. priv->net_dev->type = ARPHRD_IEEE80211;
  5157. ipw_adapter_restart(priv);
  5158. }
  5159. ipw_set_channel(priv, parms[1]);
  5160. } else {
  5161. if (priv->ieee->iw_mode != IW_MODE_MONITOR)
  5162. return 0;
  5163. priv->net_dev->type = ARPHRD_ETHER;
  5164. ipw_adapter_restart(priv);
  5165. }
  5166. return 0;
  5167. }
  5168. static int ipw_wx_reset(struct net_device *dev,
  5169. struct iw_request_info *info,
  5170. union iwreq_data *wrqu, char *extra)
  5171. {
  5172. struct ipw_priv *priv = ieee80211_priv(dev);
  5173. IPW_DEBUG_WX("RESET\n");
  5174. ipw_adapter_restart(priv);
  5175. return 0;
  5176. }
  5177. #endif // CONFIG_IPW_PROMISC
/* Rebase the WE IOCTLs to zero for the handler array */
#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
/* Dispatch table for the standard Wireless Extension ioctls, indexed
 * by ioctl number relative to SIOCSIWCOMMIT.  Unlisted ioctls are
 * NULL and rejected by the WE core. */
static iw_handler ipw_wx_handlers[] = {
	IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
	IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
	IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
	IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
	IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
	IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
	IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
	IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
	IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
	IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
	IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
	IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
	IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
	IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
	IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
	IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
	IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
	IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
	IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
	IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
	IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
	IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
	IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
	IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
	IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
	IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
	IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
	IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
};
  5210. #define IPW_PRIV_SET_POWER SIOCIWFIRSTPRIV
  5211. #define IPW_PRIV_GET_POWER SIOCIWFIRSTPRIV+1
  5212. #define IPW_PRIV_SET_MODE SIOCIWFIRSTPRIV+2
  5213. #define IPW_PRIV_GET_MODE SIOCIWFIRSTPRIV+3
  5214. #define IPW_PRIV_SET_PROMISC SIOCIWFIRSTPRIV+4
  5215. #define IPW_PRIV_RESET SIOCIWFIRSTPRIV+5
  5216. static struct iw_priv_args ipw_priv_args[] = {
  5217. {
  5218. .cmd = IPW_PRIV_SET_POWER,
  5219. .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
  5220. .name = "set_power"},
  5221. {
  5222. .cmd = IPW_PRIV_GET_POWER,
  5223. .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
  5224. .name = "get_power"},
  5225. {
  5226. .cmd = IPW_PRIV_SET_MODE,
  5227. .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
  5228. .name = "set_mode"},
  5229. {
  5230. .cmd = IPW_PRIV_GET_MODE,
  5231. .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
  5232. .name = "get_mode"},
  5233. #ifdef CONFIG_IPW_PROMISC
  5234. {
  5235. IPW_PRIV_SET_PROMISC,
  5236. IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
  5237. {
  5238. IPW_PRIV_RESET,
  5239. IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
  5240. #endif /* CONFIG_IPW_PROMISC */
  5241. };
/* Private ioctl handler table.  Index order must match the
 * IPW_PRIV_* numbering above: handler index == cmd - SIOCIWFIRSTPRIV. */
static iw_handler ipw_priv_handler[] = {
	ipw_wx_set_powermode,
	ipw_wx_get_powermode,
	ipw_wx_set_wireless_mode,
	ipw_wx_get_wireless_mode,
#ifdef CONFIG_IPW_PROMISC
	ipw_wx_set_promisc,
	ipw_wx_reset,
#endif
};
/* Top-level wireless-extensions descriptor handed to the net device;
 * ties the standard and private handler tables together. */
static struct iw_handler_def ipw_wx_handler_def = {
	.standard = ipw_wx_handlers,
	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
	.num_private = ARRAY_SIZE(ipw_priv_handler),
	.num_private_args = ARRAY_SIZE(ipw_priv_args),
	.private = ipw_priv_handler,
	.private_args = ipw_priv_args,
};
  5260. /*
  5261. * Get wireless statistics.
  5262. * Called by /proc/net/wireless
  5263. * Also called by SIOCGIWSTATS
  5264. */
static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	struct iw_statistics *wstats;

	wstats = &priv->wstats;

	/* if hw is disabled, then ipw2100_get_ordinal() can't be called.
	 * ipw2100_wx_wireless_stats seems to be called before fw is
	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
	 * and associated; if not associated, the values are all meaningless
	 * anyway, so set them all to NULL and INVALID */
	if (!(priv->status & STATUS_ASSOCIATED)) {
		wstats->miss.beacon = 0;
		wstats->discard.retries = 0;
		wstats->qual.qual = 0;
		wstats->qual.level = 0;
		wstats->qual.noise = 0;
		/* NOTE(review): magic 7 sets the three *_UPDATED bits before
		 * the INVALID flags are OR-ed in -- confirm this encoding is
		 * intentional rather than a leftover. */
		wstats->qual.updated = 7;
		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
		return wstats;
	}

	/* Associated: report the driver's running averages. */
	wstats->qual.qual = priv->quality;
	wstats->qual.level = average_value(&priv->average_rssi);
	wstats->qual.noise = average_value(&priv->average_noise);
	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
	    IW_QUAL_NOISE_UPDATED;

	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
	wstats->discard.retries = priv->last_tx_failures;
	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;

/*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
	goto fail_get_ordinal;
	wstats->discard.retries += tx_retry; */

	return wstats;
}
  5299. /* net device stuff */
/* Fill in the default firmware system-configuration block.  The
 * explicit zero assignments after the memset are redundant but kept
 * to document each field's default value. */
static inline void init_sys_config(struct ipw_sys_config *sys_config)
{
	memset(sys_config, 0, sizeof(struct ipw_sys_config));
	sys_config->bt_coexistence = 1;	/* We may need to look into prvStaBtConfig */
	sys_config->answer_broadcast_ssid_probe = 0;
	sys_config->accept_all_data_frames = 0;
	sys_config->accept_non_directed_frames = 1;
	sys_config->exclude_unicast_unencrypted = 0;
	sys_config->disable_unicast_decryption = 1;
	sys_config->exclude_multicast_unencrypted = 0;
	sys_config->disable_multicast_decryption = 1;
	sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH;
	sys_config->pass_crc_to_host = 0;	/* TODO: See if 1 gives us FCS */
	sys_config->dot11g_auto_detection = 0;
	sys_config->enable_cts_to_self = 0;
	sys_config->bt_coexist_collision_thr = 0;
	sys_config->pass_noise_stats_to_host = 1;
}
  5318. static int ipw_net_open(struct net_device *dev)
  5319. {
  5320. struct ipw_priv *priv = ieee80211_priv(dev);
  5321. IPW_DEBUG_INFO("dev->open\n");
  5322. /* we should be verifying the device is ready to be opened */
  5323. if (!(priv->status & STATUS_RF_KILL_MASK) &&
  5324. (priv->status & STATUS_ASSOCIATED))
  5325. netif_start_queue(dev);
  5326. return 0;
  5327. }
/* net_device->stop handler: just quiesce the Tx queue; full adapter
 * shutdown is handled separately by ipw_down(). */
static int ipw_net_stop(struct net_device *dev)
{
	IPW_DEBUG_INFO("dev->close\n");
	netif_stop_queue(dev);
	return 0;
}
/*
TODO: modify to send one TFD per fragment instead of using chunking;
otherwise we need to heavily modify ieee80211_skb_to_txb.
*/
/* Queue one ieee80211_txb on Tx ring 0 as a single TFD.  Caller holds
 * priv->lock (see ipw_net_hard_start_xmit). */
static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)
	    txb->fragments[0]->data;
	int i = 0;
	struct tfd_frame *tfd;
	struct clx2_tx_queue *txq = &priv->txq[0];
	struct clx2_queue *q = &txq->q;
	u8 id, hdr_len, unicast;
	/* NOTE(review): assigned below but shadowed by the local in the
	 * chunk-overflow branch, so effectively unused here. */
	u16 remaining_bytes;

	/* Pick the destination station id and whether an ACK is expected. */
	switch (priv->ieee->iw_mode) {
	case IW_MODE_ADHOC:
		hdr_len = IEEE80211_3ADDR_LEN;
		unicast = !is_broadcast_ether_addr(hdr->addr1) &&
		    !is_multicast_ether_addr(hdr->addr1);
		id = ipw_find_station(priv, hdr->addr1);
		if (id == IPW_INVALID_STATION) {
			id = ipw_add_station(priv, hdr->addr1);
			if (id == IPW_INVALID_STATION) {
				IPW_WARNING("Attempt to send data to "
					    "invalid cell: " MAC_FMT "\n",
					    MAC_ARG(hdr->addr1));
				goto drop;
			}
		}
		break;

	case IW_MODE_INFRA:
	default:
		unicast = !is_broadcast_ether_addr(hdr->addr3) &&
		    !is_multicast_ether_addr(hdr->addr3);
		hdr_len = IEEE80211_3ADDR_LEN;
		id = 0;
		break;
	}

	/* Claim the next free TFD slot and describe the frame. */
	tfd = &txq->bd[q->first_empty];
	txq->txb[q->first_empty] = txb;
	memset(tfd, 0, sizeof(*tfd));
	tfd->u.data.station_number = id;

	tfd->control_flags.message_type = TX_FRAME_TYPE;
	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;

	tfd->u.data.cmd_id = DINO_CMD_TX;
	tfd->u.data.len = txb->payload_size;
	remaining_bytes = txb->payload_size;
	if (unlikely(!unicast))
		tfd->u.data.tx_flags = DCT_FLAG_NO_WEP;
	else
		tfd->u.data.tx_flags = DCT_FLAG_NO_WEP | DCT_FLAG_ACK_REQD;

	/* Modulation/preamble flags follow the current association. */
	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
		tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_CCK;
	else
		tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_OFDM;

	if (priv->config & CFG_PREAMBLE)
		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREMBL;

	/* The 802.11 header travels inside the TFD itself. */
	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);

	/* payload: DMA-map each fragment minus the header copied above */
	tfd->u.data.num_chunks = min((u8) (NUM_TFD_CHUNKS - 2), txb->nr_frags);
	for (i = 0; i < tfd->u.data.num_chunks; i++) {
		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
			     i, tfd->u.data.num_chunks,
			     txb->fragments[i]->len - hdr_len);
		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
			   txb->fragments[i]->len - hdr_len);

		tfd->u.data.chunk_ptr[i] =
		    pci_map_single(priv->pci_dev,
				   txb->fragments[i]->data + hdr_len,
				   txb->fragments[i]->len - hdr_len,
				   PCI_DMA_TODEVICE);
		tfd->u.data.chunk_len[i] = txb->fragments[i]->len - hdr_len;
	}

	if (i != txb->nr_frags) {
		/* More fragments than chunk slots: coalesce the remainder
		 * into one freshly allocated skb occupying the last slot. */
		struct sk_buff *skb;
		u16 remaining_bytes = 0;
		int j;

		for (j = i; j < txb->nr_frags; j++)
			remaining_bytes += txb->fragments[j]->len - hdr_len;

		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
		       remaining_bytes);
		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
		if (skb != NULL) {
			tfd->u.data.chunk_len[i] = remaining_bytes;
			for (j = i; j < txb->nr_frags; j++) {
				int size = txb->fragments[j]->len - hdr_len;
				printk(KERN_INFO "Adding frag %d %d...\n",
				       j, size);
				memcpy(skb_put(skb, size),
				       txb->fragments[j]->data + hdr_len, size);
			}
			dev_kfree_skb_any(txb->fragments[i]);
			txb->fragments[i] = skb;
			tfd->u.data.chunk_ptr[i] =
			    pci_map_single(priv->pci_dev, skb->data,
					   tfd->u.data.chunk_len[i],
					   PCI_DMA_TODEVICE);
			tfd->u.data.num_chunks++;
		}
		/* NOTE(review): if alloc_skb() fails, the trailing fragments
		 * are silently omitted while the frame is still sent. */
	}

	/* kick DMA */
	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
	ipw_write32(priv, q->reg_w, q->first_empty);

	/* Throttle the stack when the ring is nearly full. */
	if (ipw_queue_space(q) < q->high_mark)
		netif_stop_queue(priv->net_dev);

	return;

      drop:
	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
	ieee80211_txb_free(txb);
}
  5445. static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
  5446. struct net_device *dev)
  5447. {
  5448. struct ipw_priv *priv = ieee80211_priv(dev);
  5449. unsigned long flags;
  5450. IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
  5451. spin_lock_irqsave(&priv->lock, flags);
  5452. if (!(priv->status & STATUS_ASSOCIATED)) {
  5453. IPW_DEBUG_INFO("Tx attempt while not associated.\n");
  5454. priv->ieee->stats.tx_carrier_errors++;
  5455. netif_stop_queue(dev);
  5456. goto fail_unlock;
  5457. }
  5458. ipw_tx_skb(priv, txb);
  5459. spin_unlock_irqrestore(&priv->lock, flags);
  5460. return 0;
  5461. fail_unlock:
  5462. spin_unlock_irqrestore(&priv->lock, flags);
  5463. return 1;
  5464. }
  5465. static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
  5466. {
  5467. struct ipw_priv *priv = ieee80211_priv(dev);
  5468. priv->ieee->stats.tx_packets = priv->tx_packets;
  5469. priv->ieee->stats.rx_packets = priv->rx_packets;
  5470. return &priv->ieee->stats;
  5471. }
/* net_device->set_multicast_list: deliberate no-op -- this driver does
 * not reprogram a hardware multicast filter from the device list. */
static void ipw_net_set_multicast_list(struct net_device *dev)
{
}
  5475. static int ipw_net_set_mac_address(struct net_device *dev, void *p)
  5476. {
  5477. struct ipw_priv *priv = ieee80211_priv(dev);
  5478. struct sockaddr *addr = p;
  5479. if (!is_valid_ether_addr(addr->sa_data))
  5480. return -EADDRNOTAVAIL;
  5481. priv->config |= CFG_CUSTOM_MAC;
  5482. memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
  5483. printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
  5484. priv->net_dev->name, MAC_ARG(priv->mac_addr));
  5485. ipw_adapter_restart(priv);
  5486. return 0;
  5487. }
/* ethtool -i support: driver name/version plus firmware version and
 * build date read back from device ordinals. */
static void ipw_ethtool_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	struct ipw_priv *p = ieee80211_priv(dev);
	char vers[64];
	char date[32];
	u32 len;

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);

	/* NOTE(review): assumes the ordinal data is NUL-terminated within
	 * the buffer -- confirm against ipw_get_ordinal(). */
	len = sizeof(vers);
	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
	len = sizeof(date);
	ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);

	snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
		 vers, date);
	strcpy(info->bus_info, pci_name(p->pci_dev));
	info->eedump_len = CX2_EEPROM_IMAGE_SIZE;
}
  5506. static u32 ipw_ethtool_get_link(struct net_device *dev)
  5507. {
  5508. struct ipw_priv *priv = ieee80211_priv(dev);
  5509. return (priv->status & STATUS_ASSOCIATED) != 0;
  5510. }
/* ethtool: size of the EEPROM image exposed via get/set_eeprom. */
static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
{
	return CX2_EEPROM_IMAGE_SIZE;
}
  5515. static int ipw_ethtool_get_eeprom(struct net_device *dev,
  5516. struct ethtool_eeprom *eeprom, u8 * bytes)
  5517. {
  5518. struct ipw_priv *p = ieee80211_priv(dev);
  5519. if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
  5520. return -EINVAL;
  5521. memcpy(bytes, &((u8 *) p->eeprom)[eeprom->offset], eeprom->len);
  5522. return 0;
  5523. }
  5524. static int ipw_ethtool_set_eeprom(struct net_device *dev,
  5525. struct ethtool_eeprom *eeprom, u8 * bytes)
  5526. {
  5527. struct ipw_priv *p = ieee80211_priv(dev);
  5528. int i;
  5529. if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
  5530. return -EINVAL;
  5531. memcpy(&((u8 *) p->eeprom)[eeprom->offset], bytes, eeprom->len);
  5532. for (i = IPW_EEPROM_DATA;
  5533. i < IPW_EEPROM_DATA + CX2_EEPROM_IMAGE_SIZE; i++)
  5534. ipw_write8(p, i, p->eeprom[i]);
  5535. return 0;
  5536. }
/* ethtool entry points exported by this driver. */
static struct ethtool_ops ipw_ethtool_ops = {
	.get_link = ipw_ethtool_get_link,
	.get_drvinfo = ipw_ethtool_get_drvinfo,
	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
	.get_eeprom = ipw_ethtool_get_eeprom,
	.set_eeprom = ipw_ethtool_set_eeprom,
};
/* Top-half interrupt handler: mask and acknowledge the interrupt,
 * cache the INTA bits, and defer all real work to the tasklet. */
static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
{
	struct ipw_priv *priv = data;
	u32 inta, inta_mask;

	if (!priv)
		return IRQ_NONE;

	spin_lock(&priv->lock);

	if (!(priv->status & STATUS_INT_ENABLED)) {
		/* Shared IRQ */
		goto none;
	}

	inta = ipw_read32(priv, CX2_INTA_RW);
	inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);

	if (inta == 0xFFFFFFFF) {
		/* Hardware disappeared; reads of a dead device return
		 * all-ones. */
		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
		goto none;
	}

	if (!(inta & (CX2_INTA_MASK_ALL & inta_mask))) {
		/* Shared interrupt */
		goto none;
	}

	/* tell the device to stop sending interrupts */
	ipw_disable_interrupts(priv);

	/* ack current interrupts */
	inta &= (CX2_INTA_MASK_ALL & inta_mask);
	ipw_write32(priv, CX2_INTA_RW, inta);

	/* Cache INTA value for our tasklet */
	priv->isr_inta = inta;

	tasklet_schedule(&priv->irq_tasklet);

	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
      none:
	spin_unlock(&priv->lock);
	return IRQ_NONE;
}
/* Deferred work: poll the hardware rf-kill switch.  While the switch
 * is active we re-arm ourselves every two seconds; once it clears
 * (and no software kill remains) an adapter restart is scheduled. */
static void ipw_rf_kill(void *adapter)
{
	struct ipw_priv *priv = adapter;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	if (rf_kill_active(priv)) {
		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
		if (priv->workqueue)
			queue_delayed_work(priv->workqueue,
					   &priv->rf_kill, 2 * HZ);
		goto exit_unlock;
	}

	/* RF Kill is now disabled, so bring the device back up */
	if (!(priv->status & STATUS_RF_KILL_MASK)) {
		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
				  "device\n");
		/* we can not do an adapter restart while inside an irq lock */
		queue_work(priv->workqueue, &priv->adapter_restart);
	} else
		IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
				  "enabled\n");

      exit_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}
  5604. static int ipw_setup_deferred_work(struct ipw_priv *priv)
  5605. {
  5606. int ret = 0;
  5607. priv->workqueue = create_workqueue(DRV_NAME);
  5608. init_waitqueue_head(&priv->wait_command_queue);
  5609. INIT_WORK(&priv->adhoc_check, ipw_adhoc_check, priv);
  5610. INIT_WORK(&priv->associate, ipw_associate, priv);
  5611. INIT_WORK(&priv->disassociate, ipw_disassociate, priv);
  5612. INIT_WORK(&priv->rx_replenish, ipw_rx_queue_replenish, priv);
  5613. INIT_WORK(&priv->adapter_restart, ipw_adapter_restart, priv);
  5614. INIT_WORK(&priv->rf_kill, ipw_rf_kill, priv);
  5615. INIT_WORK(&priv->up, (void (*)(void *))ipw_up, priv);
  5616. INIT_WORK(&priv->down, (void (*)(void *))ipw_down, priv);
  5617. INIT_WORK(&priv->request_scan,
  5618. (void (*)(void *))ipw_request_scan, priv);
  5619. INIT_WORK(&priv->gather_stats,
  5620. (void (*)(void *))ipw_gather_stats, priv);
  5621. INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_abort_scan, priv);
  5622. INIT_WORK(&priv->roam, ipw_roam, priv);
  5623. INIT_WORK(&priv->scan_check, ipw_scan_check, priv);
  5624. tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
  5625. ipw_irq_tasklet, (unsigned long)priv);
  5626. return ret;
  5627. }
/* ieee80211 stack callback: copy security settings into the driver's
 * private state, setting STATUS_SECURITY_UPDATED for each field that
 * actually changed so the next association picks them up. */
static void shim__set_security(struct net_device *dev,
			       struct ieee80211_security *sec)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int i;

	/* WEP keys: flags bits 0-3 select which of the four key slots the
	 * caller is updating; a zero length clears the slot's flag. */
	for (i = 0; i < 4; i++) {
		if (sec->flags & (1 << i)) {
			priv->sec.key_sizes[i] = sec->key_sizes[i];
			if (sec->key_sizes[i] == 0)
				priv->sec.flags &= ~(1 << i);
			else
				memcpy(priv->sec.keys[i], sec->keys[i],
				       sec->key_sizes[i]);
			priv->sec.flags |= (1 << i);
			priv->status |= STATUS_SECURITY_UPDATED;
		}
	}

	/* Default (active) transmit key index, valid range 0-3. */
	if ((sec->flags & SEC_ACTIVE_KEY) &&
	    priv->sec.active_key != sec->active_key) {
		if (sec->active_key <= 3) {
			priv->sec.active_key = sec->active_key;
			priv->sec.flags |= SEC_ACTIVE_KEY;
		} else
			priv->sec.flags &= ~SEC_ACTIVE_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	/* Open vs. shared-key authentication. */
	if ((sec->flags & SEC_AUTH_MODE) &&
	    (priv->sec.auth_mode != sec->auth_mode)) {
		priv->sec.auth_mode = sec->auth_mode;
		priv->sec.flags |= SEC_AUTH_MODE;
		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
			priv->capability |= CAP_SHARED_KEY;
		else
			priv->capability &= ~CAP_SHARED_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	/* Privacy (encryption) enable/disable toggles CAP_PRIVACY_ON. */
	if (sec->flags & SEC_ENABLED && priv->sec.enabled != sec->enabled) {
		priv->sec.flags |= SEC_ENABLED;
		priv->sec.enabled = sec->enabled;
		priv->status |= STATUS_SECURITY_UPDATED;
		if (sec->enabled)
			priv->capability |= CAP_PRIVACY_ON;
		else
			priv->capability &= ~CAP_PRIVACY_ON;
	}

	if (sec->flags & SEC_LEVEL && priv->sec.level != sec->level) {
		priv->sec.level = sec->level;
		priv->sec.flags |= SEC_LEVEL;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	/* To match current functionality of ipw2100 (which works well w/
	 * various supplicants, we don't force a disassociate if the
	 * privacy capability changes ... */
#if 0
	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
	    (((priv->assoc_request.capability &
	       WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
	     (!(priv->assoc_request.capability &
		WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
		IPW_DEBUG_ASSOC("Disassociating due to capability "
				"change.\n");
		ipw_disassociate(priv);
	}
#endif
}
/* Build the supported-rates capability block for the current band.
 * Always returns 0. */
static int init_supported_rates(struct ipw_priv *priv,
				struct ipw_supported_rates *rates)
{
	/* TODO: Mask out rates based on priv->rates_mask */
	memset(rates, 0, sizeof(*rates));
	/* configure supported rates */
	switch (priv->ieee->freq_band) {
	case IEEE80211_52GHZ_BAND:
		rates->ieee_mode = IPW_A_MODE;
		rates->purpose = IPW_RATE_CAPABILITIES;
		/* NOTE(review): CCK modulation flag passed to the OFDM rate
		 * helper for the OFDM-only 5.2GHz band -- verify against
		 * ipw_add_ofdm_scan_rates(). */
		ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
					IEEE80211_OFDM_DEFAULT_RATES_MASK);
		break;

	default:		/* Mixed or 2.4Ghz */
		rates->ieee_mode = IPW_G_MODE;
		rates->purpose = IPW_RATE_CAPABILITIES;
		ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
				       IEEE80211_CCK_DEFAULT_RATES_MASK);
		if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
			ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
						IEEE80211_OFDM_DEFAULT_RATES_MASK);
		}
		break;
	}

	return 0;
}
/* Push the post-firmware-load configuration sequence to the device.
 * Any failed command aborts the sequence; returns 0 or -EIO. */
static int ipw_config(struct ipw_priv *priv)
{
	int i;
	struct ipw_tx_power tx_power;

	memset(&priv->sys_config, 0, sizeof(priv->sys_config));
	memset(&tx_power, 0, sizeof(tx_power));

	/* This is only called from ipw_up, which resets/reloads the firmware
	   so, we don't need to first disable the card before we configure
	   it */

	/* configure device for 'G' band (channels 1-11) */
	tx_power.ieee_mode = IPW_G_MODE;
	tx_power.num_channels = 11;
	for (i = 0; i < 11; i++) {
		tx_power.channels_tx_power[i].channel_number = i + 1;
		tx_power.channels_tx_power[i].tx_power = priv->tx_power;
	}
	if (ipw_send_tx_power(priv, &tx_power))
		goto error;

	/* configure device to also handle 'B' band */
	tx_power.ieee_mode = IPW_B_MODE;
	if (ipw_send_tx_power(priv, &tx_power))
		goto error;

	/* initialize adapter address */
	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
		goto error;

	/* set basic system config settings */
	init_sys_config(&priv->sys_config);
	if (ipw_send_system_config(priv, &priv->sys_config))
		goto error;

	init_supported_rates(priv, &priv->rates);
	if (ipw_send_supported_rates(priv, &priv->rates))
		goto error;

	/* Set request-to-send threshold */
	if (priv->rts_threshold) {
		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
			goto error;
	}

	if (ipw_set_random_seed(priv))
		goto error;

	/* final state transition to the RUN state */
	if (ipw_send_host_complete(priv))
		goto error;

	/* If configured to try and auto-associate, kick off a scan */
	if ((priv->config & CFG_ASSOCIATE) && ipw_request_scan(priv))
		goto error;

	return 0;

      error:
	return -EIO;
}
#define MAX_HW_RESTARTS 5

/* Bring the adapter up: load firmware, establish the MAC address, and
 * push the device configuration, retrying the whole sequence up to
 * MAX_HW_RESTARTS times.  Returns 0 on success (including when
 * rf-kill keeps the radio down), negative error otherwise. */
static int ipw_up(struct ipw_priv *priv)
{
	int rc, i;

	if (priv->status & STATUS_EXIT_PENDING)
		return -EIO;

	for (i = 0; i < MAX_HW_RESTARTS; i++) {
		/* Load the microcode, firmware, and eeprom.
		 * Also start the clocks. */
		rc = ipw_load(priv);
		if (rc) {
			IPW_ERROR("Unable to load firmware: 0x%08X\n", rc);
			return rc;
		}

		ipw_init_ordinals(priv);
		/* Use the EEPROM MAC unless the user overrode it. */
		if (!(priv->config & CFG_CUSTOM_MAC))
			eeprom_parse_mac(priv, priv->mac_addr);
		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);

		/* rf-kill active: leave the device down, report success. */
		if (priv->status & STATUS_RF_KILL_MASK)
			return 0;

		rc = ipw_config(priv);
		if (!rc) {
			IPW_DEBUG_INFO("Configured device on count %i\n", i);
			priv->notif_missed_beacons = 0;
			netif_start_queue(priv->net_dev);
			return 0;
		} else {
			IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n",
				       rc);
		}

		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
			       i, MAX_HW_RESTARTS);

		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		ipw_down(priv);
	}

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
/* Take the adapter down: silence interrupts, clear all status bits
 * except the rf-kill state, quiesce networking, and stop the NIC. */
static void ipw_down(struct ipw_priv *priv)
{
	/* Attempt to disable the card */
#if 0
	ipw_send_card_disable(priv, 0);
#endif

	/* tell the device to stop sending interrupts */
	ipw_disable_interrupts(priv);

	/* Clear all bits but the RF Kill */
	priv->status &= STATUS_RF_KILL_MASK;

	netif_carrier_off(priv->net_dev);
	netif_stop_queue(priv->net_dev);

	ipw_stop_nic(priv);
}
  5823. /* Called by register_netdev() */
  5824. static int ipw_net_init(struct net_device *dev)
  5825. {
  5826. struct ipw_priv *priv = ieee80211_priv(dev);
  5827. if (priv->status & STATUS_RF_KILL_SW) {
  5828. IPW_WARNING("Radio disabled by module parameter.\n");
  5829. return 0;
  5830. } else if (rf_kill_active(priv)) {
  5831. IPW_WARNING("Radio Frequency Kill Switch is On:\n"
  5832. "Kill switch must be turned off for "
  5833. "wireless networking to work.\n");
  5834. queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
  5835. return 0;
  5836. }
  5837. if (ipw_up(priv))
  5838. return -EIO;
  5839. return 0;
  5840. }
  5841. /* PCI driver stuff */
/* PCI IDs claimed by this driver.  Entries with explicit subsystem
 * IDs pin specific 2200BG/2915ABG OEM variants; the PCI_ANY_ID rows
 * match entire device families. */
static struct pci_device_id card_ids[] = {
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
	{PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* 2225BG */
	{PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */
	{PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, card_ids);
/* Debug/diagnostic sysfs attributes attached to the PCI device
 * directory via ipw_attribute_group.  NULL-terminated. */
static struct attribute *ipw_sysfs_entries[] = {
	&dev_attr_rf_kill.attr,
	&dev_attr_direct_dword.attr,
	&dev_attr_indirect_byte.attr,
	&dev_attr_indirect_dword.attr,
	&dev_attr_mem_gpio_reg.attr,
	&dev_attr_command_event_reg.attr,
	&dev_attr_nic_type.attr,
	&dev_attr_status.attr,
	&dev_attr_cfg.attr,
	&dev_attr_dump_errors.attr,
	&dev_attr_dump_events.attr,
	&dev_attr_eeprom_delay.attr,
	&dev_attr_ucode_version.attr,
	&dev_attr_rtc.attr,
	NULL
};
/* Sysfs group published directly in the device's directory. */
static struct attribute_group ipw_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = ipw_sysfs_entries,
};
  5890. static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  5891. {
  5892. int err = 0;
  5893. struct net_device *net_dev;
  5894. void __iomem *base;
  5895. u32 length, val;
  5896. struct ipw_priv *priv;
  5897. int band, modulation;
  5898. net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
  5899. if (net_dev == NULL) {
  5900. err = -ENOMEM;
  5901. goto out;
  5902. }
  5903. priv = ieee80211_priv(net_dev);
  5904. priv->ieee = netdev_priv(net_dev);
  5905. priv->net_dev = net_dev;
  5906. priv->pci_dev = pdev;
  5907. #ifdef CONFIG_IPW_DEBUG
  5908. ipw_debug_level = debug;
  5909. #endif
  5910. spin_lock_init(&priv->lock);
  5911. if (pci_enable_device(pdev)) {
  5912. err = -ENODEV;
  5913. goto out_free_ieee80211;
  5914. }
  5915. pci_set_master(pdev);
  5916. err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
  5917. if (!err)
  5918. err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
  5919. if (err) {
  5920. printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
  5921. goto out_pci_disable_device;
  5922. }
  5923. pci_set_drvdata(pdev, priv);
  5924. err = pci_request_regions(pdev, DRV_NAME);
  5925. if (err)
  5926. goto out_pci_disable_device;
  5927. /* We disable the RETRY_TIMEOUT register (0x41) to keep
  5928. * PCI Tx retries from interfering with C3 CPU state */
  5929. pci_read_config_dword(pdev, 0x40, &val);
  5930. if ((val & 0x0000ff00) != 0)
  5931. pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
  5932. length = pci_resource_len(pdev, 0);
  5933. priv->hw_len = length;
  5934. base = ioremap_nocache(pci_resource_start(pdev, 0), length);
  5935. if (!base) {
  5936. err = -ENODEV;
  5937. goto out_pci_release_regions;
  5938. }
  5939. priv->hw_base = base;
  5940. IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
  5941. IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
  5942. err = ipw_setup_deferred_work(priv);
  5943. if (err) {
  5944. IPW_ERROR("Unable to setup deferred work\n");
  5945. goto out_iounmap;
  5946. }
  5947. /* Initialize module parameter values here */
  5948. if (ifname)
  5949. strncpy(net_dev->name, ifname, IFNAMSIZ);
  5950. if (associate)
  5951. priv->config |= CFG_ASSOCIATE;
  5952. else
  5953. IPW_DEBUG_INFO("Auto associate disabled.\n");
  5954. if (auto_create)
  5955. priv->config |= CFG_ADHOC_CREATE;
  5956. else
  5957. IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
  5958. if (disable) {
  5959. priv->status |= STATUS_RF_KILL_SW;
  5960. IPW_DEBUG_INFO("Radio disabled.\n");
  5961. }
  5962. if (channel != 0) {
  5963. priv->config |= CFG_STATIC_CHANNEL;
  5964. priv->channel = channel;
  5965. IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
  5966. IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
  5967. /* TODO: Validate that provided channel is in range */
  5968. }
  5969. switch (mode) {
  5970. case 1:
  5971. priv->ieee->iw_mode = IW_MODE_ADHOC;
  5972. break;
  5973. #ifdef CONFIG_IPW_PROMISC
  5974. case 2:
  5975. priv->ieee->iw_mode = IW_MODE_MONITOR;
  5976. break;
  5977. #endif
  5978. default:
  5979. case 0:
  5980. priv->ieee->iw_mode = IW_MODE_INFRA;
  5981. break;
  5982. }
  5983. if ((priv->pci_dev->device == 0x4223) ||
  5984. (priv->pci_dev->device == 0x4224)) {
  5985. printk(KERN_INFO DRV_NAME
  5986. ": Detected Intel PRO/Wireless 2915ABG Network "
  5987. "Connection\n");
  5988. priv->ieee->abg_ture = 1;
  5989. band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
  5990. modulation = IEEE80211_OFDM_MODULATION |
  5991. IEEE80211_CCK_MODULATION;
  5992. priv->adapter = IPW_2915ABG;
  5993. priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
  5994. } else {
  5995. if (priv->pci_dev->device == 0x4221)
  5996. printk(KERN_INFO DRV_NAME
  5997. ": Detected Intel PRO/Wireless 2225BG Network "
  5998. "Connection\n");
  5999. else
  6000. printk(KERN_INFO DRV_NAME
  6001. ": Detected Intel PRO/Wireless 2200BG Network "
  6002. "Connection\n");
  6003. priv->ieee->abg_ture = 0;
  6004. band = IEEE80211_24GHZ_BAND;
  6005. modulation = IEEE80211_OFDM_MODULATION |
  6006. IEEE80211_CCK_MODULATION;
  6007. priv->adapter = IPW_2200BG;
  6008. priv->ieee->mode = IEEE_G | IEEE_B;
  6009. }
  6010. priv->ieee->freq_band = band;
  6011. priv->ieee->modulation = modulation;
  6012. priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
  6013. priv->missed_beacon_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
  6014. priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
  6015. priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
  6016. /* If power management is turned on, default to AC mode */
  6017. priv->power_mode = IPW_POWER_AC;
  6018. priv->tx_power = IPW_DEFAULT_TX_POWER;
  6019. err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME, priv);
  6020. if (err) {
  6021. IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
  6022. goto out_destroy_workqueue;
  6023. }
  6024. SET_MODULE_OWNER(net_dev);
  6025. SET_NETDEV_DEV(net_dev, &pdev->dev);
  6026. priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
  6027. priv->ieee->set_security = shim__set_security;
  6028. net_dev->open = ipw_net_open;
  6029. net_dev->stop = ipw_net_stop;
  6030. net_dev->init = ipw_net_init;
  6031. net_dev->get_stats = ipw_net_get_stats;
  6032. net_dev->set_multicast_list = ipw_net_set_multicast_list;
  6033. net_dev->set_mac_address = ipw_net_set_mac_address;
  6034. net_dev->get_wireless_stats = ipw_get_wireless_stats;
  6035. net_dev->wireless_handlers = &ipw_wx_handler_def;
  6036. net_dev->ethtool_ops = &ipw_ethtool_ops;
  6037. net_dev->irq = pdev->irq;
  6038. net_dev->base_addr = (unsigned long)priv->hw_base;
  6039. net_dev->mem_start = pci_resource_start(pdev, 0);
  6040. net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
  6041. err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
  6042. if (err) {
  6043. IPW_ERROR("failed to create sysfs device attributes\n");
  6044. goto out_release_irq;
  6045. }
  6046. err = register_netdev(net_dev);
  6047. if (err) {
  6048. IPW_ERROR("failed to register network device\n");
  6049. goto out_remove_group;
  6050. }
  6051. return 0;
  6052. out_remove_group:
  6053. sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
  6054. out_release_irq:
  6055. free_irq(pdev->irq, priv);
  6056. out_destroy_workqueue:
  6057. destroy_workqueue(priv->workqueue);
  6058. priv->workqueue = NULL;
  6059. out_iounmap:
  6060. iounmap(priv->hw_base);
  6061. out_pci_release_regions:
  6062. pci_release_regions(pdev);
  6063. out_pci_disable_device:
  6064. pci_disable_device(pdev);
  6065. pci_set_drvdata(pdev, NULL);
  6066. out_free_ieee80211:
  6067. free_ieee80211(priv->net_dev);
  6068. out:
  6069. return err;
  6070. }
/* PCI hot-unplug / module-unload teardown.  Order matters here and is the
 * reverse of the probe path: flag exit, detach sysfs, power the hardware
 * down, unregister the net device, free queues, drain and destroy the
 * workqueue, then release IRQ / MMIO / PCI resources. */
static void ipw_pci_remove(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	if (!priv)
		return;
	/* Tell any in-flight work items to bail out early. */
	priv->status |= STATUS_EXIT_PENDING;
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
	ipw_down(priv);
	unregister_netdev(priv->net_dev);
	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);
	/* ipw_down will ensure that there is no more pending work
	 * in the workqueue's, so we can safely remove them now. */
	if (priv->workqueue) {
		cancel_delayed_work(&priv->adhoc_check);
		cancel_delayed_work(&priv->gather_stats);
		cancel_delayed_work(&priv->request_scan);
		cancel_delayed_work(&priv->rf_kill);
		cancel_delayed_work(&priv->scan_check);
		destroy_workqueue(priv->workqueue);
		priv->workqueue = NULL;
	}
	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_ieee80211(priv->net_dev);
#ifdef CONFIG_PM
	/* Firmware images are module-global; drop them on last remove. */
	if (fw_loaded) {
		release_firmware(bootfw);
		release_firmware(ucode);
		release_firmware(firmware);
		fw_loaded = 0;
	}
#endif
}
  6111. #ifdef CONFIG_PM
  6112. static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
  6113. {
  6114. struct ipw_priv *priv = pci_get_drvdata(pdev);
  6115. struct net_device *dev = priv->net_dev;
  6116. printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
  6117. /* Take down the device; powers it off, etc. */
  6118. ipw_down(priv);
  6119. /* Remove the PRESENT state of the device */
  6120. netif_device_detach(dev);
  6121. pci_save_state(pdev);
  6122. pci_disable_device(pdev);
  6123. pci_set_power_state(pdev, pci_choose_state(pdev, state));
  6124. return 0;
  6125. }
/* PM callback: return the adapter to D0, restore PCI config space, and
 * schedule the deferred bring-up.  Counterpart of ipw_pci_suspend().
 * Always returns 0. */
static int ipw_pci_resume(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;
	u32 val;
	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
	/* NOTE(review): pci_enable_device() may fail, but its return value is
	 * ignored here — consider failing the resume on error. */
	pci_set_power_state(pdev, 0);
	pci_enable_device(pdev);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
	/* Pre-2.6.10 kernels restore from an explicit saved-state buffer. */
	pci_restore_state(pdev, priv->pm_state);
#else
	pci_restore_state(pdev);
#endif
	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	/* Set the device back into the PRESENT state; this will also wake
	 * the queue if needed */
	netif_device_attach(dev);
	/* Bring the device back up (deferred to the driver workqueue). */
	queue_work(priv->workqueue, &priv->up);
	return 0;
}
  6155. #endif
/* PCI driver glue: binds probe/remove (and, when CONFIG_PM is set, the
 * suspend/resume callbacks) to the device IDs listed in card_ids. */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = __devexit_p(ipw_pci_remove),
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
};
  6167. static int __init ipw_init(void)
  6168. {
  6169. int ret;
  6170. printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
  6171. printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
  6172. ret = pci_module_init(&ipw_driver);
  6173. if (ret) {
  6174. IPW_ERROR("Unable to initialize PCI module\n");
  6175. return ret;
  6176. }
  6177. ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
  6178. if (ret) {
  6179. IPW_ERROR("Unable to create driver sysfs file\n");
  6180. pci_unregister_driver(&ipw_driver);
  6181. return ret;
  6182. }
  6183. return ret;
  6184. }
/* Module exit point: remove the debug_level sysfs attribute first, then
 * unregister the driver (reverse order of ipw_init). */
static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}
  6190. module_param(disable, int, 0444);
  6191. MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
  6192. module_param(associate, int, 0444);
  6193. MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
  6194. module_param(auto_create, int, 0444);
  6195. MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
  6196. module_param(debug, int, 0444);
  6197. MODULE_PARM_DESC(debug, "debug output mask");
  6198. module_param(channel, int, 0444);
  6199. MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
  6200. module_param(ifname, charp, 0444);
  6201. MODULE_PARM_DESC(ifname, "network device name (default eth%d)");
  6202. #ifdef CONFIG_IPW_PROMISC
  6203. module_param(mode, int, 0444);
  6204. MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
  6205. #else
  6206. module_param(mode, int, 0444);
  6207. MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
  6208. #endif
  6209. module_exit(ipw_exit);
  6210. module_init(ipw_init);