/******************************************************************************

  Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.

  802.11 status code portion of this file from ethereal-0.10.6:
    Copyright 2000, Axis Communications AB
    Ethereal - Network traffic analyzer
    By Gerald Combs <gerald@ethereal.com>
    Copyright 1998 Gerald Combs

  This program is free software; you can redistribute it and/or modify it
  under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA  02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  James P. Ketrenos <ipw2100-admin@linux.intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

******************************************************************************/

#include "ipw2200.h"

#ifndef KBUILD_EXTMOD
#define VK "k"
#else
#define VK
#endif

#ifdef CONFIG_IPW2200_DEBUG
#define VD "d"
#else
#define VD
#endif

#ifdef CONFIG_IPW2200_MONITOR
#define VM "m"
#else
#define VM
#endif

#ifdef CONFIG_IPW2200_PROMISCUOUS
#define VP "p"
#else
#define VP
#endif

#ifdef CONFIG_IPW2200_RADIOTAP
#define VR "r"
#else
#define VR
#endif

#ifdef CONFIG_IPW2200_QOS
#define VQ "q"
#else
#define VQ
#endif

#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
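/*
 * The single-letter macros above encode the enabled build options into the
 * version string: an in-tree build with every CONFIG_IPW2200_* option on
 * reports "1.2.2kdmprq", while an external-module build with no options
 * reports plain "1.2.2".
 */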
#define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
#define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
#define DRV_VERSION     IPW2200_VERSION

#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");

static int cmdlog = 0;
static int debug = 0;
static int channel = 0;
static int mode = 0;

static u32 ipw_debug_level;
static int associate;
static int auto_create = 1;
static int led = 0;
static int disable = 0;
static int bt_coexist = 0;
static int hwcrypto = 0;
static int roaming = 1;
static const char ipw_modes[] = {
	'a', 'b', 'g', '?'
};
static int antenna = CFG_SYS_ANTENNA_BOTH;

#ifdef CONFIG_IPW2200_PROMISCUOUS
static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
#endif

#ifdef CONFIG_IPW2200_QOS
static int qos_enable = 0;
static int qos_burst_enable = 0;
static int qos_no_ack_mask = 0;
static int burst_duration_CCK = 0;
static int burst_duration_OFDM = 0;

static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
	 QOS_TX3_CW_MIN_OFDM},
	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
	 QOS_TX3_CW_MAX_OFDM},
	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
};

static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
	 QOS_TX3_CW_MIN_CCK},
	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
	 QOS_TX3_CW_MAX_CCK},
	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
	 QOS_TX3_TXOP_LIMIT_CCK}
};

static struct ieee80211_qos_parameters def_parameters_OFDM = {
	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
	 DEF_TX3_CW_MIN_OFDM},
	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
	 DEF_TX3_CW_MAX_OFDM},
	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
};

static struct ieee80211_qos_parameters def_parameters_CCK = {
	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
	 DEF_TX3_CW_MIN_CCK},
	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
	 DEF_TX3_CW_MAX_CCK},
	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
	 DEF_TX3_TXOP_LIMIT_CCK}
};

static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };

static int from_priority_to_tx_queue[] = {
	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
};
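/*
 * Note: the table above is indexed by 802.1d user priority (0-7) and selects
 * one of the four hardware TX queues; e.g. priorities 1 and 2 land in
 * IPW_TX_QUEUE_2 while priorities 6 and 7 land in IPW_TX_QUEUE_4, which is
 * consistent with the usual WMM background/voice access-category split.
 */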
static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);

static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
				       *qos_param);
static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
				     *qos_param);

#endif				/* CONFIG_IPW2200_QOS */

static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
static void ipw_remove_current_network(struct ipw_priv *priv);
static void ipw_rx(struct ipw_priv *priv);
static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
				struct clx2_tx_queue *txq, int qindex);
static int ipw_queue_reset(struct ipw_priv *priv);

static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
			     int len, int sync);

static void ipw_tx_queue_free(struct ipw_priv *);

static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
static void ipw_rx_queue_replenish(void *);
static int ipw_up(struct ipw_priv *);
static void ipw_bg_up(struct work_struct *work);
static void ipw_down(struct ipw_priv *);
static void ipw_bg_down(struct work_struct *work);
static int ipw_config(struct ipw_priv *);
static int init_supported_rates(struct ipw_priv *priv,
				struct ipw_supported_rates *prates);
static void ipw_set_hwcrypto_keys(struct ipw_priv *);
static void ipw_send_wep_keys(struct ipw_priv *, int);

static int snprint_line(char *buf, size_t count,
			const u8 * data, u32 len, u32 ofs)
{
	int out, i, j, l;
	char c;

	out = snprintf(buf, count, "%08X", ofs);

	for (l = 0, i = 0; i < 2; i++) {
		out += snprintf(buf + out, count - out, " ");
		for (j = 0; j < 8 && l < len; j++, l++)
			out += snprintf(buf + out, count - out, "%02X ",
					data[(i * 8 + j)]);
		for (; j < 8; j++)
			out += snprintf(buf + out, count - out, "   ");
	}

	out += snprintf(buf + out, count - out, " ");
	for (l = 0, i = 0; i < 2; i++) {
		out += snprintf(buf + out, count - out, " ");
		for (j = 0; j < 8 && l < len; j++, l++) {
			c = data[(i * 8 + j)];
			if (!isascii(c) || !isprint(c))
				c = '.';

			out += snprintf(buf + out, count - out, "%c", c);
		}

		for (; j < 8; j++)
			out += snprintf(buf + out, count - out, " ");
	}

	return out;
}
  189. static void printk_buf(int level, const u8 * data, u32 len)
  190. {
  191. char line[81];
  192. u32 ofs = 0;
  193. if (!(ipw_debug_level & level))
  194. return;
  195. while (len) {
  196. snprint_line(line, sizeof(line), &data[ofs],
  197. min(len, 16U), ofs);
  198. printk(KERN_DEBUG "%s\n", line);
  199. ofs += 16;
  200. len -= min(len, 16U);
  201. }
  202. }
  203. static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
  204. {
  205. size_t out = size;
  206. u32 ofs = 0;
  207. int total = 0;
  208. while (size && len) {
  209. out = snprint_line(output, size, &data[ofs],
  210. min_t(size_t, len, 16U), ofs);
  211. ofs += 16;
  212. output += out;
  213. size -= out;
  214. len -= min_t(size_t, len, 16U);
  215. total += out;
  216. }
  217. return total;
  218. }
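/*
 * Illustrative sketch, not part of the original driver: how the hex-dump
 * helpers above are typically used.  printk_buf() prints one line per 16
 * bytes in the format produced by snprint_line(): the offset, two groups of
 * eight hex bytes, then the same bytes as ASCII with non-printable bytes
 * shown as '.'.  The helper name and sample buffer below are made up for the
 * example; IPW_DL_INFO is assumed to be defined in ipw2200.h.
 */
#if 0
static void example_dump_bytes(void)
{
	static const u8 sample[] = "ipw2200 example";

	/* Emits nothing unless the IPW_DL_INFO bit is set in ipw_debug_level. */
	printk_buf(IPW_DL_INFO, sample, sizeof(sample));
}
#endif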
  219. /* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
  220. static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
  221. #define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
  222. /* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
  223. static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
  224. #define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
  225. /* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
  226. static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
  227. static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
  228. {
  229. IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
  230. __LINE__, (u32) (b), (u32) (c));
  231. _ipw_write_reg8(a, b, c);
  232. }
  233. /* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
  234. static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
  235. static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
  236. {
  237. IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
  238. __LINE__, (u32) (b), (u32) (c));
  239. _ipw_write_reg16(a, b, c);
  240. }
  241. /* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
  242. static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
  243. static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
  244. {
  245. IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
  246. __LINE__, (u32) (b), (u32) (c));
  247. _ipw_write_reg32(a, b, c);
  248. }
  249. /* 8-bit direct write (low 4K) */
  250. static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
  251. u8 val)
  252. {
  253. writeb(val, ipw->hw_base + ofs);
  254. }
  255. /* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
  256. #define ipw_write8(ipw, ofs, val) do { \
  257. IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
  258. __LINE__, (u32)(ofs), (u32)(val)); \
  259. _ipw_write8(ipw, ofs, val); \
  260. } while (0)
  261. /* 16-bit direct write (low 4K) */
  262. static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
  263. u16 val)
  264. {
  265. writew(val, ipw->hw_base + ofs);
  266. }
  267. /* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
  268. #define ipw_write16(ipw, ofs, val) do { \
  269. IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
  270. __LINE__, (u32)(ofs), (u32)(val)); \
  271. _ipw_write16(ipw, ofs, val); \
  272. } while (0)
  273. /* 32-bit direct write (low 4K) */
  274. static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
  275. u32 val)
  276. {
  277. writel(val, ipw->hw_base + ofs);
  278. }
  279. /* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
  280. #define ipw_write32(ipw, ofs, val) do { \
  281. IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
  282. __LINE__, (u32)(ofs), (u32)(val)); \
  283. _ipw_write32(ipw, ofs, val); \
  284. } while (0)
  285. /* 8-bit direct read (low 4K) */
  286. static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
  287. {
  288. return readb(ipw->hw_base + ofs);
  289. }
  290. /* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
  291. #define ipw_read8(ipw, ofs) ({ \
  292. IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
  293. (u32)(ofs)); \
  294. _ipw_read8(ipw, ofs); \
  295. })
  296. /* 16-bit direct read (low 4K) */
  297. static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
  298. {
  299. return readw(ipw->hw_base + ofs);
  300. }
  301. /* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
  302. #define ipw_read16(ipw, ofs) ({ \
  303. IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
  304. (u32)(ofs)); \
  305. _ipw_read16(ipw, ofs); \
  306. })
  307. /* 32-bit direct read (low 4K) */
  308. static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
  309. {
  310. return readl(ipw->hw_base + ofs);
  311. }
  312. /* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
  313. #define ipw_read32(ipw, ofs) ({ \
  314. IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
  315. (u32)(ofs)); \
  316. _ipw_read32(ipw, ofs); \
  317. })
  318. static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
  319. /* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
  320. #define ipw_read_indirect(a, b, c, d) ({ \
  321. IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
  322. __LINE__, (u32)(b), (u32)(d)); \
  323. _ipw_read_indirect(a, b, c, d); \
  324. })
325. /* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
  326. static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
  327. int num);
  328. #define ipw_write_indirect(a, b, c, d) do { \
  329. IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
  330. __LINE__, (u32)(b), (u32)(d)); \
  331. _ipw_write_indirect(a, b, c, d); \
  332. } while (0)
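/*
 * Illustrative sketch, not part of the original driver: the accessors above
 * split the register map at the 4K boundary.  ipw_read32()/ipw_write32() go
 * straight through the mapped BAR (low 4K), while ipw_read_reg32()/
 * ipw_write_reg32() bounce through the IPW_INDIRECT_ADDR/IPW_INDIRECT_DATA
 * window for addresses above it.  The function name is hypothetical.
 */
#if 0
static void example_register_access(struct ipw_priv *priv)
{
	u32 mask, event;

	/* Low 4K: direct MMIO. */
	mask = ipw_read32(priv, IPW_INTA_MASK_R);
	ipw_write32(priv, IPW_INTA_MASK_R, mask);

	/* Above 4K: two-step access through the indirect window. */
	event = ipw_read_reg32(priv, IPW_EVENT_REG);
	ipw_write_reg32(priv, IPW_EVENT_REG, event);
}
#endif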
  333. /* 32-bit indirect write (above 4K) */
  334. static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
  335. {
  336. IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
  337. _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
  338. _ipw_write32(priv, IPW_INDIRECT_DATA, value);
  339. }
  340. /* 8-bit indirect write (above 4K) */
  341. static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
  342. {
  343. u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
  344. u32 dif_len = reg - aligned_addr;
  345. IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
  346. _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
  347. _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
  348. }
  349. /* 16-bit indirect write (above 4K) */
  350. static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
  351. {
  352. u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
  353. u32 dif_len = (reg - aligned_addr) & (~0x1ul);
  354. IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
  355. _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
  356. _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
  357. }
  358. /* 8-bit indirect read (above 4K) */
  359. static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
  360. {
  361. u32 word;
  362. _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
  363. IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
  364. word = _ipw_read32(priv, IPW_INDIRECT_DATA);
  365. return (word >> ((reg & 0x3) * 8)) & 0xff;
  366. }
  367. /* 32-bit indirect read (above 4K) */
  368. static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
  369. {
  370. u32 value;
  371. IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
  372. _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
  373. value = _ipw_read32(priv, IPW_INDIRECT_DATA);
  374. IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
  375. return value;
  376. }
  377. /* General purpose, no alignment requirement, iterative (multi-byte) read, */
  378. /* for area above 1st 4K of SRAM/reg space */
  379. static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
  380. int num)
  381. {
  382. u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
  383. u32 dif_len = addr - aligned_addr;
  384. u32 i;
  385. IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
  386. if (num <= 0) {
  387. return;
  388. }
  389. /* Read the first dword (or portion) byte by byte */
  390. if (unlikely(dif_len)) {
  391. _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
  392. /* Start reading at aligned_addr + dif_len */
  393. for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
  394. *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
  395. aligned_addr += 4;
  396. }
  397. /* Read all of the middle dwords as dwords, with auto-increment */
  398. _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
  399. for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
  400. *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
  401. /* Read the last dword (or portion) byte by byte */
  402. if (unlikely(num)) {
  403. _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
  404. for (i = 0; num > 0; i++, num--)
405. *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
  406. }
  407. }
  408. /* General purpose, no alignment requirement, iterative (multi-byte) write, */
  409. /* for area above 1st 4K of SRAM/reg space */
  410. static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
  411. int num)
  412. {
  413. u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
  414. u32 dif_len = addr - aligned_addr;
  415. u32 i;
  416. IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
  417. if (num <= 0) {
  418. return;
  419. }
  420. /* Write the first dword (or portion) byte by byte */
  421. if (unlikely(dif_len)) {
  422. _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
  423. /* Start writing at aligned_addr + dif_len */
  424. for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
  425. _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
  426. aligned_addr += 4;
  427. }
  428. /* Write all of the middle dwords as dwords, with auto-increment */
  429. _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
  430. for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
  431. _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
  432. /* Write the last dword (or portion) byte by byte */
  433. if (unlikely(num)) {
  434. _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
  435. for (i = 0; num > 0; i++, num--, buf++)
  436. _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
  437. }
  438. }
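/*
 * Worked example (added for clarity, not in the original source): a 10-byte
 * _ipw_write_indirect() at address 0x00030002 is split into two leading
 * bytes written through IPW_INDIRECT_DATA for the dword at 0x00030000, two
 * full dwords written through the auto-increment window starting at
 * 0x00030004, and no trailing bytes.  _ipw_read_indirect() follows the same
 * pattern.
 */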
  439. /* General purpose, no alignment requirement, iterative (multi-byte) write, */
  440. /* for 1st 4K of SRAM/regs space */
  441. static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
  442. int num)
  443. {
  444. memcpy_toio((priv->hw_base + addr), buf, num);
  445. }
  446. /* Set bit(s) in low 4K of SRAM/regs */
  447. static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
  448. {
  449. ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
  450. }
  451. /* Clear bit(s) in low 4K of SRAM/regs */
  452. static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
  453. {
  454. ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
  455. }
  456. static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
  457. {
  458. if (priv->status & STATUS_INT_ENABLED)
  459. return;
  460. priv->status |= STATUS_INT_ENABLED;
  461. ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
  462. }
  463. static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
  464. {
  465. if (!(priv->status & STATUS_INT_ENABLED))
  466. return;
  467. priv->status &= ~STATUS_INT_ENABLED;
  468. ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
  469. }
  470. static inline void ipw_enable_interrupts(struct ipw_priv *priv)
  471. {
  472. unsigned long flags;
  473. spin_lock_irqsave(&priv->irq_lock, flags);
  474. __ipw_enable_interrupts(priv);
  475. spin_unlock_irqrestore(&priv->irq_lock, flags);
  476. }
  477. static inline void ipw_disable_interrupts(struct ipw_priv *priv)
  478. {
  479. unsigned long flags;
  480. spin_lock_irqsave(&priv->irq_lock, flags);
  481. __ipw_disable_interrupts(priv);
  482. spin_unlock_irqrestore(&priv->irq_lock, flags);
  483. }
  484. static char *ipw_error_desc(u32 val)
  485. {
  486. switch (val) {
  487. case IPW_FW_ERROR_OK:
  488. return "ERROR_OK";
  489. case IPW_FW_ERROR_FAIL:
  490. return "ERROR_FAIL";
  491. case IPW_FW_ERROR_MEMORY_UNDERFLOW:
  492. return "MEMORY_UNDERFLOW";
  493. case IPW_FW_ERROR_MEMORY_OVERFLOW:
  494. return "MEMORY_OVERFLOW";
  495. case IPW_FW_ERROR_BAD_PARAM:
  496. return "BAD_PARAM";
  497. case IPW_FW_ERROR_BAD_CHECKSUM:
  498. return "BAD_CHECKSUM";
  499. case IPW_FW_ERROR_NMI_INTERRUPT:
  500. return "NMI_INTERRUPT";
  501. case IPW_FW_ERROR_BAD_DATABASE:
  502. return "BAD_DATABASE";
  503. case IPW_FW_ERROR_ALLOC_FAIL:
  504. return "ALLOC_FAIL";
  505. case IPW_FW_ERROR_DMA_UNDERRUN:
  506. return "DMA_UNDERRUN";
  507. case IPW_FW_ERROR_DMA_STATUS:
  508. return "DMA_STATUS";
  509. case IPW_FW_ERROR_DINO_ERROR:
  510. return "DINO_ERROR";
  511. case IPW_FW_ERROR_EEPROM_ERROR:
  512. return "EEPROM_ERROR";
  513. case IPW_FW_ERROR_SYSASSERT:
  514. return "SYSASSERT";
  515. case IPW_FW_ERROR_FATAL_ERROR:
  516. return "FATAL_ERROR";
  517. default:
  518. return "UNKNOWN_ERROR";
  519. }
  520. }
  521. static void ipw_dump_error_log(struct ipw_priv *priv,
  522. struct ipw_fw_error *error)
  523. {
  524. u32 i;
  525. if (!error) {
  526. IPW_ERROR("Error allocating and capturing error log. "
  527. "Nothing to dump.\n");
  528. return;
  529. }
  530. IPW_ERROR("Start IPW Error Log Dump:\n");
  531. IPW_ERROR("Status: 0x%08X, Config: %08X\n",
  532. error->status, error->config);
  533. for (i = 0; i < error->elem_len; i++)
  534. IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
  535. ipw_error_desc(error->elem[i].desc),
  536. error->elem[i].time,
  537. error->elem[i].blink1,
  538. error->elem[i].blink2,
  539. error->elem[i].link1,
  540. error->elem[i].link2, error->elem[i].data);
  541. for (i = 0; i < error->log_len; i++)
  542. IPW_ERROR("%i\t0x%08x\t%i\n",
  543. error->log[i].time,
  544. error->log[i].data, error->log[i].event);
  545. }
  546. static inline int ipw_is_init(struct ipw_priv *priv)
  547. {
  548. return (priv->status & STATUS_INIT) ? 1 : 0;
  549. }
  550. static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
  551. {
  552. u32 addr, field_info, field_len, field_count, total_len;
  553. IPW_DEBUG_ORD("ordinal = %i\n", ord);
  554. if (!priv || !val || !len) {
  555. IPW_DEBUG_ORD("Invalid argument\n");
  556. return -EINVAL;
  557. }
  558. /* verify device ordinal tables have been initialized */
  559. if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
  560. IPW_DEBUG_ORD("Access ordinals before initialization\n");
  561. return -EINVAL;
  562. }
  563. switch (IPW_ORD_TABLE_ID_MASK & ord) {
  564. case IPW_ORD_TABLE_0_MASK:
  565. /*
  566. * TABLE 0: Direct access to a table of 32 bit values
  567. *
  568. * This is a very simple table with the data directly
  569. * read from the table
  570. */
  571. /* remove the table id from the ordinal */
  572. ord &= IPW_ORD_TABLE_VALUE_MASK;
  573. /* boundary check */
  574. if (ord > priv->table0_len) {
  575. IPW_DEBUG_ORD("ordinal value (%i) longer then "
  576. "max (%i)\n", ord, priv->table0_len);
  577. return -EINVAL;
  578. }
  579. /* verify we have enough room to store the value */
  580. if (*len < sizeof(u32)) {
  581. IPW_DEBUG_ORD("ordinal buffer length too small, "
  582. "need %zd\n", sizeof(u32));
  583. return -EINVAL;
  584. }
  585. IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
  586. ord, priv->table0_addr + (ord << 2));
  587. *len = sizeof(u32);
  588. ord <<= 2;
  589. *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
  590. break;
  591. case IPW_ORD_TABLE_1_MASK:
  592. /*
  593. * TABLE 1: Indirect access to a table of 32 bit values
  594. *
  595. * This is a fairly large table of u32 values each
  596. * representing starting addr for the data (which is
  597. * also a u32)
  598. */
  599. /* remove the table id from the ordinal */
  600. ord &= IPW_ORD_TABLE_VALUE_MASK;
  601. /* boundary check */
  602. if (ord > priv->table1_len) {
  603. IPW_DEBUG_ORD("ordinal value too long\n");
  604. return -EINVAL;
  605. }
  606. /* verify we have enough room to store the value */
  607. if (*len < sizeof(u32)) {
  608. IPW_DEBUG_ORD("ordinal buffer length too small, "
  609. "need %zd\n", sizeof(u32));
  610. return -EINVAL;
  611. }
  612. *((u32 *) val) =
  613. ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
  614. *len = sizeof(u32);
  615. break;
  616. case IPW_ORD_TABLE_2_MASK:
  617. /*
  618. * TABLE 2: Indirect access to a table of variable sized values
  619. *
620. * This table consists of six values, each containing
621. * - dword containing the starting offset of the data
622. * - dword containing the length in the first 16 bits
623. * and the count in the second 16 bits
  624. */
  625. /* remove the table id from the ordinal */
  626. ord &= IPW_ORD_TABLE_VALUE_MASK;
  627. /* boundary check */
  628. if (ord > priv->table2_len) {
  629. IPW_DEBUG_ORD("ordinal value too long\n");
  630. return -EINVAL;
  631. }
  632. /* get the address of statistic */
  633. addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
  634. /* get the second DW of statistics ;
  635. * two 16-bit words - first is length, second is count */
  636. field_info =
  637. ipw_read_reg32(priv,
  638. priv->table2_addr + (ord << 3) +
  639. sizeof(u32));
  640. /* get each entry length */
  641. field_len = *((u16 *) & field_info);
  642. /* get number of entries */
  643. field_count = *(((u16 *) & field_info) + 1);
644. /* abort if not enough memory */
  645. total_len = field_len * field_count;
  646. if (total_len > *len) {
  647. *len = total_len;
  648. return -EINVAL;
  649. }
  650. *len = total_len;
  651. if (!total_len)
  652. return 0;
  653. IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
  654. "field_info = 0x%08x\n",
  655. addr, total_len, field_info);
  656. ipw_read_indirect(priv, addr, val, total_len);
  657. break;
  658. default:
  659. IPW_DEBUG_ORD("Invalid ordinal!\n");
  660. return -EINVAL;
  661. }
  662. return 0;
  663. }
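/*
 * Illustrative sketch, not part of the original driver: the usual calling
 * pattern for ipw_get_ordinal().  The caller passes a buffer and its size;
 * on success *len is set to the number of bytes actually written, and a
 * too-small buffer for a table-2 ordinal gets -EINVAL with *len set to the
 * required size.  This mirrors the sysfs handlers further below (e.g.
 * show_ucode_version()); the wrapper name is hypothetical.
 */
#if 0
static void example_read_ordinal(struct ipw_priv *priv)
{
	u32 ucode = 0;
	u32 len = sizeof(ucode);

	/* Table-0/1 ordinals are single u32 values. */
	if (!ipw_get_ordinal(priv, IPW_ORD_STAT_UCODE_VERSION, &ucode, &len))
		IPW_DEBUG_INFO("ucode version 0x%08x (%u bytes)\n", ucode, len);
}
#endif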
  664. static void ipw_init_ordinals(struct ipw_priv *priv)
  665. {
  666. priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
  667. priv->table0_len = ipw_read32(priv, priv->table0_addr);
  668. IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
  669. priv->table0_addr, priv->table0_len);
  670. priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
  671. priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
  672. IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
  673. priv->table1_addr, priv->table1_len);
  674. priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
  675. priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
  676. priv->table2_len &= 0x0000ffff; /* use first two bytes */
  677. IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
  678. priv->table2_addr, priv->table2_len);
  679. }
  680. static u32 ipw_register_toggle(u32 reg)
  681. {
  682. reg &= ~IPW_START_STANDBY;
  683. if (reg & IPW_GATE_ODMA)
  684. reg &= ~IPW_GATE_ODMA;
  685. if (reg & IPW_GATE_IDMA)
  686. reg &= ~IPW_GATE_IDMA;
  687. if (reg & IPW_GATE_ADMA)
  688. reg &= ~IPW_GATE_ADMA;
  689. return reg;
  690. }
  691. /*
  692. * LED behavior:
693. * - On radio ON, turn on any LEDs that need to be on during start
  694. * - On initialization, start unassociated blink
  695. * - On association, disable unassociated blink
  696. * - On disassociation, start unassociated blink
  697. * - On radio OFF, turn off any LEDs started during radio on
  698. *
  699. */
  700. #define LD_TIME_LINK_ON msecs_to_jiffies(300)
  701. #define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
  702. #define LD_TIME_ACT_ON msecs_to_jiffies(250)
  703. static void ipw_led_link_on(struct ipw_priv *priv)
  704. {
  705. unsigned long flags;
  706. u32 led;
  707. /* If configured to not use LEDs, or nic_type is 1,
  708. * then we don't toggle a LINK led */
  709. if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
  710. return;
  711. spin_lock_irqsave(&priv->lock, flags);
  712. if (!(priv->status & STATUS_RF_KILL_MASK) &&
  713. !(priv->status & STATUS_LED_LINK_ON)) {
  714. IPW_DEBUG_LED("Link LED On\n");
  715. led = ipw_read_reg32(priv, IPW_EVENT_REG);
  716. led |= priv->led_association_on;
  717. led = ipw_register_toggle(led);
  718. IPW_DEBUG_LED("Reg: 0x%08X\n", led);
  719. ipw_write_reg32(priv, IPW_EVENT_REG, led);
  720. priv->status |= STATUS_LED_LINK_ON;
  721. /* If we aren't associated, schedule turning the LED off */
  722. if (!(priv->status & STATUS_ASSOCIATED))
  723. queue_delayed_work(priv->workqueue,
  724. &priv->led_link_off,
  725. LD_TIME_LINK_ON);
  726. }
  727. spin_unlock_irqrestore(&priv->lock, flags);
  728. }
  729. static void ipw_bg_led_link_on(struct work_struct *work)
  730. {
  731. struct ipw_priv *priv =
  732. container_of(work, struct ipw_priv, led_link_on.work);
  733. mutex_lock(&priv->mutex);
  734. ipw_led_link_on(priv);
  735. mutex_unlock(&priv->mutex);
  736. }
  737. static void ipw_led_link_off(struct ipw_priv *priv)
  738. {
  739. unsigned long flags;
  740. u32 led;
  741. /* If configured not to use LEDs, or nic type is 1,
742. * then we don't toggle the LINK led. */
  743. if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
  744. return;
  745. spin_lock_irqsave(&priv->lock, flags);
  746. if (priv->status & STATUS_LED_LINK_ON) {
  747. led = ipw_read_reg32(priv, IPW_EVENT_REG);
  748. led &= priv->led_association_off;
  749. led = ipw_register_toggle(led);
  750. IPW_DEBUG_LED("Reg: 0x%08X\n", led);
  751. ipw_write_reg32(priv, IPW_EVENT_REG, led);
  752. IPW_DEBUG_LED("Link LED Off\n");
  753. priv->status &= ~STATUS_LED_LINK_ON;
  754. /* If we aren't associated and the radio is on, schedule
  755. * turning the LED on (blink while unassociated) */
  756. if (!(priv->status & STATUS_RF_KILL_MASK) &&
  757. !(priv->status & STATUS_ASSOCIATED))
  758. queue_delayed_work(priv->workqueue, &priv->led_link_on,
  759. LD_TIME_LINK_OFF);
  760. }
  761. spin_unlock_irqrestore(&priv->lock, flags);
  762. }
  763. static void ipw_bg_led_link_off(struct work_struct *work)
  764. {
  765. struct ipw_priv *priv =
  766. container_of(work, struct ipw_priv, led_link_off.work);
  767. mutex_lock(&priv->mutex);
  768. ipw_led_link_off(priv);
  769. mutex_unlock(&priv->mutex);
  770. }
  771. static void __ipw_led_activity_on(struct ipw_priv *priv)
  772. {
  773. u32 led;
  774. if (priv->config & CFG_NO_LED)
  775. return;
  776. if (priv->status & STATUS_RF_KILL_MASK)
  777. return;
  778. if (!(priv->status & STATUS_LED_ACT_ON)) {
  779. led = ipw_read_reg32(priv, IPW_EVENT_REG);
  780. led |= priv->led_activity_on;
  781. led = ipw_register_toggle(led);
  782. IPW_DEBUG_LED("Reg: 0x%08X\n", led);
  783. ipw_write_reg32(priv, IPW_EVENT_REG, led);
  784. IPW_DEBUG_LED("Activity LED On\n");
  785. priv->status |= STATUS_LED_ACT_ON;
  786. cancel_delayed_work(&priv->led_act_off);
  787. queue_delayed_work(priv->workqueue, &priv->led_act_off,
  788. LD_TIME_ACT_ON);
  789. } else {
  790. /* Reschedule LED off for full time period */
  791. cancel_delayed_work(&priv->led_act_off);
  792. queue_delayed_work(priv->workqueue, &priv->led_act_off,
  793. LD_TIME_ACT_ON);
  794. }
  795. }
  796. #if 0
  797. void ipw_led_activity_on(struct ipw_priv *priv)
  798. {
  799. unsigned long flags;
  800. spin_lock_irqsave(&priv->lock, flags);
  801. __ipw_led_activity_on(priv);
  802. spin_unlock_irqrestore(&priv->lock, flags);
  803. }
  804. #endif /* 0 */
  805. static void ipw_led_activity_off(struct ipw_priv *priv)
  806. {
  807. unsigned long flags;
  808. u32 led;
  809. if (priv->config & CFG_NO_LED)
  810. return;
  811. spin_lock_irqsave(&priv->lock, flags);
  812. if (priv->status & STATUS_LED_ACT_ON) {
  813. led = ipw_read_reg32(priv, IPW_EVENT_REG);
  814. led &= priv->led_activity_off;
  815. led = ipw_register_toggle(led);
  816. IPW_DEBUG_LED("Reg: 0x%08X\n", led);
  817. ipw_write_reg32(priv, IPW_EVENT_REG, led);
  818. IPW_DEBUG_LED("Activity LED Off\n");
  819. priv->status &= ~STATUS_LED_ACT_ON;
  820. }
  821. spin_unlock_irqrestore(&priv->lock, flags);
  822. }
  823. static void ipw_bg_led_activity_off(struct work_struct *work)
  824. {
  825. struct ipw_priv *priv =
  826. container_of(work, struct ipw_priv, led_act_off.work);
  827. mutex_lock(&priv->mutex);
  828. ipw_led_activity_off(priv);
  829. mutex_unlock(&priv->mutex);
  830. }
  831. static void ipw_led_band_on(struct ipw_priv *priv)
  832. {
  833. unsigned long flags;
  834. u32 led;
  835. /* Only nic type 1 supports mode LEDs */
  836. if (priv->config & CFG_NO_LED ||
  837. priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
  838. return;
  839. spin_lock_irqsave(&priv->lock, flags);
  840. led = ipw_read_reg32(priv, IPW_EVENT_REG);
  841. if (priv->assoc_network->mode == IEEE_A) {
  842. led |= priv->led_ofdm_on;
  843. led &= priv->led_association_off;
  844. IPW_DEBUG_LED("Mode LED On: 802.11a\n");
  845. } else if (priv->assoc_network->mode == IEEE_G) {
  846. led |= priv->led_ofdm_on;
  847. led |= priv->led_association_on;
  848. IPW_DEBUG_LED("Mode LED On: 802.11g\n");
  849. } else {
  850. led &= priv->led_ofdm_off;
  851. led |= priv->led_association_on;
  852. IPW_DEBUG_LED("Mode LED On: 802.11b\n");
  853. }
  854. led = ipw_register_toggle(led);
  855. IPW_DEBUG_LED("Reg: 0x%08X\n", led);
  856. ipw_write_reg32(priv, IPW_EVENT_REG, led);
  857. spin_unlock_irqrestore(&priv->lock, flags);
  858. }
  859. static void ipw_led_band_off(struct ipw_priv *priv)
  860. {
  861. unsigned long flags;
  862. u32 led;
  863. /* Only nic type 1 supports mode LEDs */
  864. if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
  865. return;
  866. spin_lock_irqsave(&priv->lock, flags);
  867. led = ipw_read_reg32(priv, IPW_EVENT_REG);
  868. led &= priv->led_ofdm_off;
  869. led &= priv->led_association_off;
  870. led = ipw_register_toggle(led);
  871. IPW_DEBUG_LED("Reg: 0x%08X\n", led);
  872. ipw_write_reg32(priv, IPW_EVENT_REG, led);
  873. spin_unlock_irqrestore(&priv->lock, flags);
  874. }
  875. static void ipw_led_radio_on(struct ipw_priv *priv)
  876. {
  877. ipw_led_link_on(priv);
  878. }
  879. static void ipw_led_radio_off(struct ipw_priv *priv)
  880. {
  881. ipw_led_activity_off(priv);
  882. ipw_led_link_off(priv);
  883. }
  884. static void ipw_led_link_up(struct ipw_priv *priv)
  885. {
  886. /* Set the Link Led on for all nic types */
  887. ipw_led_link_on(priv);
  888. }
  889. static void ipw_led_link_down(struct ipw_priv *priv)
  890. {
  891. ipw_led_activity_off(priv);
  892. ipw_led_link_off(priv);
  893. if (priv->status & STATUS_RF_KILL_MASK)
  894. ipw_led_radio_off(priv);
  895. }
  896. static void ipw_led_init(struct ipw_priv *priv)
  897. {
  898. priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
  899. /* Set the default PINs for the link and activity leds */
  900. priv->led_activity_on = IPW_ACTIVITY_LED;
  901. priv->led_activity_off = ~(IPW_ACTIVITY_LED);
  902. priv->led_association_on = IPW_ASSOCIATED_LED;
  903. priv->led_association_off = ~(IPW_ASSOCIATED_LED);
  904. /* Set the default PINs for the OFDM leds */
  905. priv->led_ofdm_on = IPW_OFDM_LED;
  906. priv->led_ofdm_off = ~(IPW_OFDM_LED);
  907. switch (priv->nic_type) {
  908. case EEPROM_NIC_TYPE_1:
  909. /* In this NIC type, the LEDs are reversed.... */
  910. priv->led_activity_on = IPW_ASSOCIATED_LED;
  911. priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
  912. priv->led_association_on = IPW_ACTIVITY_LED;
  913. priv->led_association_off = ~(IPW_ACTIVITY_LED);
  914. if (!(priv->config & CFG_NO_LED))
  915. ipw_led_band_on(priv);
  916. /* And we don't blink link LEDs for this nic, so
  917. * just return here */
  918. return;
  919. case EEPROM_NIC_TYPE_3:
  920. case EEPROM_NIC_TYPE_2:
  921. case EEPROM_NIC_TYPE_4:
  922. case EEPROM_NIC_TYPE_0:
  923. break;
  924. default:
  925. IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
  926. priv->nic_type);
  927. priv->nic_type = EEPROM_NIC_TYPE_0;
  928. break;
  929. }
  930. if (!(priv->config & CFG_NO_LED)) {
  931. if (priv->status & STATUS_ASSOCIATED)
  932. ipw_led_link_on(priv);
  933. else
  934. ipw_led_link_off(priv);
  935. }
  936. }
  937. static void ipw_led_shutdown(struct ipw_priv *priv)
  938. {
  939. ipw_led_activity_off(priv);
  940. ipw_led_link_off(priv);
  941. ipw_led_band_off(priv);
  942. cancel_delayed_work(&priv->led_link_on);
  943. cancel_delayed_work(&priv->led_link_off);
  944. cancel_delayed_work(&priv->led_act_off);
  945. }
  946. /*
  947. * The following adds a new attribute to the sysfs representation
  948. * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
949. * used for controlling the debug level.
  950. *
  951. * See the level definitions in ipw for details.
  952. */
  953. static ssize_t show_debug_level(struct device_driver *d, char *buf)
  954. {
  955. return sprintf(buf, "0x%08X\n", ipw_debug_level);
  956. }
  957. static ssize_t store_debug_level(struct device_driver *d, const char *buf,
  958. size_t count)
  959. {
  960. char *p = (char *)buf;
  961. u32 val;
  962. if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
  963. p++;
  964. if (p[0] == 'x' || p[0] == 'X')
  965. p++;
  966. val = simple_strtoul(p, &p, 16);
  967. } else
  968. val = simple_strtoul(p, &p, 10);
  969. if (p == buf)
  970. printk(KERN_INFO DRV_NAME
  971. ": %s is not in hex or decimal form.\n", buf);
  972. else
  973. ipw_debug_level = val;
  974. return strnlen(buf, count);
  975. }
  976. static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
  977. show_debug_level, store_debug_level);
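/*
 * Illustrative usage from user space (not part of the driver).  The exact
 * sysfs path follows DRV_NAME; store_debug_level() accepts hex (with or
 * without a leading 0x) or decimal:
 *
 *   cat /sys/bus/pci/drivers/ipw2200/debug_level
 *   echo 0x00000004 > /sys/bus/pci/drivers/ipw2200/debug_level
 */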
  978. static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
  979. {
  980. /* length = 1st dword in log */
  981. return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
  982. }
  983. static void ipw_capture_event_log(struct ipw_priv *priv,
  984. u32 log_len, struct ipw_event *log)
  985. {
  986. u32 base;
  987. if (log_len) {
  988. base = ipw_read32(priv, IPW_EVENT_LOG);
  989. ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
  990. (u8 *) log, sizeof(*log) * log_len);
  991. }
  992. }
  993. static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
  994. {
  995. struct ipw_fw_error *error;
  996. u32 log_len = ipw_get_event_log_len(priv);
  997. u32 base = ipw_read32(priv, IPW_ERROR_LOG);
  998. u32 elem_len = ipw_read_reg32(priv, base);
  999. error = kmalloc(sizeof(*error) +
  1000. sizeof(*error->elem) * elem_len +
  1001. sizeof(*error->log) * log_len, GFP_ATOMIC);
  1002. if (!error) {
  1003. IPW_ERROR("Memory allocation for firmware error log "
  1004. "failed.\n");
  1005. return NULL;
  1006. }
  1007. error->jiffies = jiffies;
  1008. error->status = priv->status;
  1009. error->config = priv->config;
  1010. error->elem_len = elem_len;
  1011. error->log_len = log_len;
  1012. error->elem = (struct ipw_error_elem *)error->payload;
  1013. error->log = (struct ipw_event *)(error->elem + elem_len);
  1014. ipw_capture_event_log(priv, log_len, error->log);
  1015. if (elem_len)
  1016. ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
  1017. sizeof(*error->elem) * elem_len);
  1018. return error;
  1019. }
  1020. static ssize_t show_event_log(struct device *d,
  1021. struct device_attribute *attr, char *buf)
  1022. {
  1023. struct ipw_priv *priv = dev_get_drvdata(d);
  1024. u32 log_len = ipw_get_event_log_len(priv);
  1025. u32 log_size;
  1026. struct ipw_event *log;
  1027. u32 len = 0, i;
  1028. /* not using min() because of its strict type checking */
  1029. log_size = PAGE_SIZE / sizeof(*log) > log_len ?
  1030. sizeof(*log) * log_len : PAGE_SIZE;
  1031. log = kzalloc(log_size, GFP_KERNEL);
  1032. if (!log) {
  1033. IPW_ERROR("Unable to allocate memory for log\n");
  1034. return 0;
  1035. }
  1036. log_len = log_size / sizeof(*log);
  1037. ipw_capture_event_log(priv, log_len, log);
  1038. len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
  1039. for (i = 0; i < log_len; i++)
  1040. len += snprintf(buf + len, PAGE_SIZE - len,
  1041. "\n%08X%08X%08X",
  1042. log[i].time, log[i].event, log[i].data);
  1043. len += snprintf(buf + len, PAGE_SIZE - len, "\n");
  1044. kfree(log);
  1045. return len;
  1046. }
  1047. static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
  1048. static ssize_t show_error(struct device *d,
  1049. struct device_attribute *attr, char *buf)
  1050. {
  1051. struct ipw_priv *priv = dev_get_drvdata(d);
  1052. u32 len = 0, i;
  1053. if (!priv->error)
  1054. return 0;
  1055. len += snprintf(buf + len, PAGE_SIZE - len,
  1056. "%08lX%08X%08X%08X",
  1057. priv->error->jiffies,
  1058. priv->error->status,
  1059. priv->error->config, priv->error->elem_len);
  1060. for (i = 0; i < priv->error->elem_len; i++)
  1061. len += snprintf(buf + len, PAGE_SIZE - len,
  1062. "\n%08X%08X%08X%08X%08X%08X%08X",
  1063. priv->error->elem[i].time,
  1064. priv->error->elem[i].desc,
  1065. priv->error->elem[i].blink1,
  1066. priv->error->elem[i].blink2,
  1067. priv->error->elem[i].link1,
  1068. priv->error->elem[i].link2,
  1069. priv->error->elem[i].data);
  1070. len += snprintf(buf + len, PAGE_SIZE - len,
  1071. "\n%08X", priv->error->log_len);
  1072. for (i = 0; i < priv->error->log_len; i++)
  1073. len += snprintf(buf + len, PAGE_SIZE - len,
  1074. "\n%08X%08X%08X",
  1075. priv->error->log[i].time,
  1076. priv->error->log[i].event,
  1077. priv->error->log[i].data);
  1078. len += snprintf(buf + len, PAGE_SIZE - len, "\n");
  1079. return len;
  1080. }
  1081. static ssize_t clear_error(struct device *d,
  1082. struct device_attribute *attr,
  1083. const char *buf, size_t count)
  1084. {
  1085. struct ipw_priv *priv = dev_get_drvdata(d);
  1086. kfree(priv->error);
  1087. priv->error = NULL;
  1088. return count;
  1089. }
  1090. static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
  1091. static ssize_t show_cmd_log(struct device *d,
  1092. struct device_attribute *attr, char *buf)
  1093. {
  1094. struct ipw_priv *priv = dev_get_drvdata(d);
  1095. u32 len = 0, i;
  1096. if (!priv->cmdlog)
  1097. return 0;
  1098. for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
  1099. (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
  1100. i = (i + 1) % priv->cmdlog_len) {
  1101. len +=
  1102. snprintf(buf + len, PAGE_SIZE - len,
  1103. "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
  1104. priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
  1105. priv->cmdlog[i].cmd.len);
  1106. len +=
  1107. snprintk_buf(buf + len, PAGE_SIZE - len,
  1108. (u8 *) priv->cmdlog[i].cmd.param,
  1109. priv->cmdlog[i].cmd.len);
  1110. len += snprintf(buf + len, PAGE_SIZE - len, "\n");
  1111. }
  1112. len += snprintf(buf + len, PAGE_SIZE - len, "\n");
  1113. return len;
  1114. }
  1115. static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
  1116. #ifdef CONFIG_IPW2200_PROMISCUOUS
  1117. static void ipw_prom_free(struct ipw_priv *priv);
  1118. static int ipw_prom_alloc(struct ipw_priv *priv);
  1119. static ssize_t store_rtap_iface(struct device *d,
  1120. struct device_attribute *attr,
  1121. const char *buf, size_t count)
  1122. {
  1123. struct ipw_priv *priv = dev_get_drvdata(d);
  1124. int rc = 0;
  1125. if (count < 1)
  1126. return -EINVAL;
  1127. switch (buf[0]) {
  1128. case '0':
  1129. if (!rtap_iface)
  1130. return count;
  1131. if (netif_running(priv->prom_net_dev)) {
  1132. IPW_WARNING("Interface is up. Cannot unregister.\n");
  1133. return count;
  1134. }
  1135. ipw_prom_free(priv);
  1136. rtap_iface = 0;
  1137. break;
  1138. case '1':
  1139. if (rtap_iface)
  1140. return count;
  1141. rc = ipw_prom_alloc(priv);
  1142. if (!rc)
  1143. rtap_iface = 1;
  1144. break;
  1145. default:
  1146. return -EINVAL;
  1147. }
  1148. if (rc) {
  1149. IPW_ERROR("Failed to register promiscuous network "
  1150. "device (error %d).\n", rc);
  1151. }
  1152. return count;
  1153. }
  1154. static ssize_t show_rtap_iface(struct device *d,
  1155. struct device_attribute *attr,
  1156. char *buf)
  1157. {
  1158. struct ipw_priv *priv = dev_get_drvdata(d);
  1159. if (rtap_iface)
  1160. return sprintf(buf, "%s", priv->prom_net_dev->name);
  1161. else {
  1162. buf[0] = '-';
  1163. buf[1] = '1';
  1164. buf[2] = '\0';
  1165. return 3;
  1166. }
  1167. }
  1168. static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
  1169. store_rtap_iface);
  1170. static ssize_t store_rtap_filter(struct device *d,
  1171. struct device_attribute *attr,
  1172. const char *buf, size_t count)
  1173. {
  1174. struct ipw_priv *priv = dev_get_drvdata(d);
  1175. if (!priv->prom_priv) {
  1176. IPW_ERROR("Attempting to set filter without "
  1177. "rtap_iface enabled.\n");
  1178. return -EPERM;
  1179. }
  1180. priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
  1181. IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
  1182. BIT_ARG16(priv->prom_priv->filter));
  1183. return count;
  1184. }
  1185. static ssize_t show_rtap_filter(struct device *d,
  1186. struct device_attribute *attr,
  1187. char *buf)
  1188. {
  1189. struct ipw_priv *priv = dev_get_drvdata(d);
  1190. return sprintf(buf, "0x%04X",
  1191. priv->prom_priv ? priv->prom_priv->filter : 0);
  1192. }
  1193. static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
  1194. store_rtap_filter);
  1195. #endif
  1196. static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
  1197. char *buf)
  1198. {
  1199. struct ipw_priv *priv = dev_get_drvdata(d);
  1200. return sprintf(buf, "%d\n", priv->ieee->scan_age);
  1201. }
  1202. static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
  1203. const char *buf, size_t count)
  1204. {
  1205. struct ipw_priv *priv = dev_get_drvdata(d);
  1206. struct net_device *dev = priv->net_dev;
  1207. char buffer[] = "00000000";
  1208. unsigned long len =
  1209. (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
  1210. unsigned long val;
  1211. char *p = buffer;
  1212. IPW_DEBUG_INFO("enter\n");
  1213. strncpy(buffer, buf, len);
  1214. buffer[len] = 0;
  1215. if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
  1216. p++;
  1217. if (p[0] == 'x' || p[0] == 'X')
  1218. p++;
  1219. val = simple_strtoul(p, &p, 16);
  1220. } else
  1221. val = simple_strtoul(p, &p, 10);
  1222. if (p == buffer) {
  1223. IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
  1224. } else {
  1225. priv->ieee->scan_age = val;
  1226. IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
  1227. }
  1228. IPW_DEBUG_INFO("exit\n");
  1229. return len;
  1230. }
  1231. static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
  1232. static ssize_t show_led(struct device *d, struct device_attribute *attr,
  1233. char *buf)
  1234. {
  1235. struct ipw_priv *priv = dev_get_drvdata(d);
  1236. return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
  1237. }
  1238. static ssize_t store_led(struct device *d, struct device_attribute *attr,
  1239. const char *buf, size_t count)
  1240. {
  1241. struct ipw_priv *priv = dev_get_drvdata(d);
  1242. IPW_DEBUG_INFO("enter\n");
  1243. if (count == 0)
  1244. return 0;
1245. if (*buf == '0') {
  1246. IPW_DEBUG_LED("Disabling LED control.\n");
  1247. priv->config |= CFG_NO_LED;
  1248. ipw_led_shutdown(priv);
  1249. } else {
  1250. IPW_DEBUG_LED("Enabling LED control.\n");
  1251. priv->config &= ~CFG_NO_LED;
  1252. ipw_led_init(priv);
  1253. }
  1254. IPW_DEBUG_INFO("exit\n");
  1255. return count;
  1256. }
  1257. static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
  1258. static ssize_t show_status(struct device *d,
  1259. struct device_attribute *attr, char *buf)
  1260. {
  1261. struct ipw_priv *p = d->driver_data;
  1262. return sprintf(buf, "0x%08x\n", (int)p->status);
  1263. }
  1264. static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
  1265. static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
  1266. char *buf)
  1267. {
  1268. struct ipw_priv *p = d->driver_data;
  1269. return sprintf(buf, "0x%08x\n", (int)p->config);
  1270. }
  1271. static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
  1272. static ssize_t show_nic_type(struct device *d,
  1273. struct device_attribute *attr, char *buf)
  1274. {
  1275. struct ipw_priv *priv = d->driver_data;
  1276. return sprintf(buf, "TYPE: %d\n", priv->nic_type);
  1277. }
  1278. static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
  1279. static ssize_t show_ucode_version(struct device *d,
  1280. struct device_attribute *attr, char *buf)
  1281. {
  1282. u32 len = sizeof(u32), tmp = 0;
  1283. struct ipw_priv *p = d->driver_data;
  1284. if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
  1285. return 0;
  1286. return sprintf(buf, "0x%08x\n", tmp);
  1287. }
  1288. static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
  1289. static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
  1290. char *buf)
  1291. {
  1292. u32 len = sizeof(u32), tmp = 0;
  1293. struct ipw_priv *p = d->driver_data;
  1294. if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
  1295. return 0;
  1296. return sprintf(buf, "0x%08x\n", tmp);
  1297. }
  1298. static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
  1299. /*
  1300. * Add a device attribute to view/control the delay between eeprom
  1301. * operations.
  1302. */
  1303. static ssize_t show_eeprom_delay(struct device *d,
  1304. struct device_attribute *attr, char *buf)
  1305. {
  1306. int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
  1307. return sprintf(buf, "%i\n", n);
  1308. }
  1309. static ssize_t store_eeprom_delay(struct device *d,
  1310. struct device_attribute *attr,
  1311. const char *buf, size_t count)
  1312. {
  1313. struct ipw_priv *p = d->driver_data;
  1314. sscanf(buf, "%i", &p->eeprom_delay);
  1315. return strnlen(buf, count);
  1316. }
  1317. static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
  1318. show_eeprom_delay, store_eeprom_delay);
  1319. static ssize_t show_command_event_reg(struct device *d,
  1320. struct device_attribute *attr, char *buf)
  1321. {
  1322. u32 reg = 0;
  1323. struct ipw_priv *p = d->driver_data;
  1324. reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
  1325. return sprintf(buf, "0x%08x\n", reg);
  1326. }
  1327. static ssize_t store_command_event_reg(struct device *d,
  1328. struct device_attribute *attr,
  1329. const char *buf, size_t count)
  1330. {
  1331. u32 reg;
  1332. struct ipw_priv *p = d->driver_data;
  1333. sscanf(buf, "%x", &reg);
  1334. ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
  1335. return strnlen(buf, count);
  1336. }
  1337. static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
  1338. show_command_event_reg, store_command_event_reg);
  1339. static ssize_t show_mem_gpio_reg(struct device *d,
  1340. struct device_attribute *attr, char *buf)
  1341. {
  1342. u32 reg = 0;
  1343. struct ipw_priv *p = d->driver_data;
  1344. reg = ipw_read_reg32(p, 0x301100);
  1345. return sprintf(buf, "0x%08x\n", reg);
  1346. }
  1347. static ssize_t store_mem_gpio_reg(struct device *d,
  1348. struct device_attribute *attr,
  1349. const char *buf, size_t count)
  1350. {
  1351. u32 reg;
  1352. struct ipw_priv *p = d->driver_data;
  1353. sscanf(buf, "%x", &reg);
  1354. ipw_write_reg32(p, 0x301100, reg);
  1355. return strnlen(buf, count);
  1356. }
  1357. static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
  1358. show_mem_gpio_reg, store_mem_gpio_reg);
  1359. static ssize_t show_indirect_dword(struct device *d,
  1360. struct device_attribute *attr, char *buf)
  1361. {
  1362. u32 reg = 0;
  1363. struct ipw_priv *priv = d->driver_data;
  1364. if (priv->status & STATUS_INDIRECT_DWORD)
  1365. reg = ipw_read_reg32(priv, priv->indirect_dword);
  1366. else
  1367. reg = 0;
  1368. return sprintf(buf, "0x%08x\n", reg);
  1369. }
  1370. static ssize_t store_indirect_dword(struct device *d,
  1371. struct device_attribute *attr,
  1372. const char *buf, size_t count)
  1373. {
  1374. struct ipw_priv *priv = d->driver_data;
  1375. sscanf(buf, "%x", &priv->indirect_dword);
  1376. priv->status |= STATUS_INDIRECT_DWORD;
  1377. return strnlen(buf, count);
  1378. }
  1379. static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
  1380. show_indirect_dword, store_indirect_dword);
  1381. static ssize_t show_indirect_byte(struct device *d,
  1382. struct device_attribute *attr, char *buf)
  1383. {
  1384. u8 reg = 0;
  1385. struct ipw_priv *priv = d->driver_data;
  1386. if (priv->status & STATUS_INDIRECT_BYTE)
  1387. reg = ipw_read_reg8(priv, priv->indirect_byte);
  1388. else
  1389. reg = 0;
  1390. return sprintf(buf, "0x%02x\n", reg);
  1391. }
  1392. static ssize_t store_indirect_byte(struct device *d,
  1393. struct device_attribute *attr,
  1394. const char *buf, size_t count)
  1395. {
  1396. struct ipw_priv *priv = d->driver_data;
  1397. sscanf(buf, "%x", &priv->indirect_byte);
  1398. priv->status |= STATUS_INDIRECT_BYTE;
  1399. return strnlen(buf, count);
  1400. }
  1401. static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
  1402. show_indirect_byte, store_indirect_byte);
  1403. static ssize_t show_direct_dword(struct device *d,
  1404. struct device_attribute *attr, char *buf)
  1405. {
  1406. u32 reg = 0;
  1407. struct ipw_priv *priv = d->driver_data;
  1408. if (priv->status & STATUS_DIRECT_DWORD)
  1409. reg = ipw_read32(priv, priv->direct_dword);
  1410. else
  1411. reg = 0;
  1412. return sprintf(buf, "0x%08x\n", reg);
  1413. }
  1414. static ssize_t store_direct_dword(struct device *d,
  1415. struct device_attribute *attr,
  1416. const char *buf, size_t count)
  1417. {
  1418. struct ipw_priv *priv = d->driver_data;
  1419. sscanf(buf, "%x", &priv->direct_dword);
  1420. priv->status |= STATUS_DIRECT_DWORD;
  1421. return strnlen(buf, count);
  1422. }
  1423. static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
  1424. show_direct_dword, store_direct_dword);
  1425. static int rf_kill_active(struct ipw_priv *priv)
  1426. {
  1427. if (0 == (ipw_read32(priv, 0x30) & 0x10000))
  1428. priv->status |= STATUS_RF_KILL_HW;
  1429. else
  1430. priv->status &= ~STATUS_RF_KILL_HW;
  1431. return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
  1432. }
  1433. static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
  1434. char *buf)
  1435. {
  1436. /* 0 - RF kill not enabled
  1437. 1 - SW based RF kill active (sysfs)
  1438. 2 - HW based RF kill active
1439. 3 - Both HW and SW based RF kill active */
  1440. struct ipw_priv *priv = d->driver_data;
  1441. int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
  1442. (rf_kill_active(priv) ? 0x2 : 0x0);
  1443. return sprintf(buf, "%i\n", val);
  1444. }
  1445. static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
  1446. {
  1447. if ((disable_radio ? 1 : 0) ==
  1448. ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
  1449. return 0;
  1450. IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
  1451. disable_radio ? "OFF" : "ON");
  1452. if (disable_radio) {
  1453. priv->status |= STATUS_RF_KILL_SW;
  1454. if (priv->workqueue) {
  1455. cancel_delayed_work(&priv->request_scan);
  1456. cancel_delayed_work(&priv->request_direct_scan);
  1457. cancel_delayed_work(&priv->request_passive_scan);
  1458. cancel_delayed_work(&priv->scan_event);
  1459. }
  1460. queue_work(priv->workqueue, &priv->down);
  1461. } else {
  1462. priv->status &= ~STATUS_RF_KILL_SW;
  1463. if (rf_kill_active(priv)) {
  1464. IPW_DEBUG_RF_KILL("Can not turn radio back on - "
  1465. "disabled by HW switch\n");
  1466. /* Make sure the RF_KILL check timer is running */
  1467. cancel_delayed_work(&priv->rf_kill);
  1468. queue_delayed_work(priv->workqueue, &priv->rf_kill,
  1469. round_jiffies_relative(2 * HZ));
  1470. } else
  1471. queue_work(priv->workqueue, &priv->up);
  1472. }
  1473. return 1;
  1474. }
  1475. static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
  1476. const char *buf, size_t count)
  1477. {
  1478. struct ipw_priv *priv = d->driver_data;
  1479. ipw_radio_kill_sw(priv, buf[0] == '1');
  1480. return count;
  1481. }
  1482. static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
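/*
 * Illustrative usage, not part of the driver: writing '1' to rf_kill asserts
 * the software kill switch (ipw_radio_kill_sw() schedules priv->down), any
 * other first character releases it, and reading the attribute reports the
 * 0-3 state described in show_rf_kill() above.
 */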
  1483. static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
  1484. char *buf)
  1485. {
  1486. struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
  1487. int pos = 0, len = 0;
  1488. if (priv->config & CFG_SPEED_SCAN) {
  1489. while (priv->speed_scan[pos] != 0)
  1490. len += sprintf(&buf[len], "%d ",
  1491. priv->speed_scan[pos++]);
  1492. return len + sprintf(&buf[len], "\n");
  1493. }
  1494. return sprintf(buf, "0\n");
  1495. }
  1496. static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
  1497. const char *buf, size_t count)
  1498. {
  1499. struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
  1500. int channel, pos = 0;
  1501. const char *p = buf;
  1502. /* list of space separated channels to scan, optionally ending with 0 */
  1503. while ((channel = simple_strtol(p, NULL, 0))) {
  1504. if (pos == MAX_SPEED_SCAN - 1) {
  1505. priv->speed_scan[pos] = 0;
  1506. break;
  1507. }
  1508. if (ieee80211_is_valid_channel(priv->ieee, channel))
  1509. priv->speed_scan[pos++] = channel;
  1510. else
  1511. IPW_WARNING("Skipping invalid channel request: %d\n",
  1512. channel);
  1513. p = strchr(p, ' ');
  1514. if (!p)
  1515. break;
  1516. while (*p == ' ' || *p == '\t')
  1517. p++;
  1518. }
  1519. if (pos == 0)
  1520. priv->config &= ~CFG_SPEED_SCAN;
  1521. else {
  1522. priv->speed_scan_pos = 0;
  1523. priv->config |= CFG_SPEED_SCAN;
  1524. }
  1525. return count;
  1526. }
  1527. static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
  1528. store_speed_scan);
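/*
 * Illustrative usage, not part of the driver: the attribute takes a space
 * separated channel list, e.g. "echo '1 6 11' > speed_scan" restricts
 * scanning to those channels (invalid ones are skipped with a warning),
 * while "echo 0 > speed_scan" clears CFG_SPEED_SCAN again.
 */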
  1529. static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
  1530. char *buf)
  1531. {
  1532. struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
  1533. return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
  1534. }
  1535. static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
  1536. const char *buf, size_t count)
  1537. {
  1538. struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
  1539. if (buf[0] == '1')
  1540. priv->config |= CFG_NET_STATS;
  1541. else
  1542. priv->config &= ~CFG_NET_STATS;
  1543. return count;
  1544. }
  1545. static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
  1546. show_net_stats, store_net_stats);
  1547. static ssize_t show_channels(struct device *d,
  1548. struct device_attribute *attr,
  1549. char *buf)
  1550. {
  1551. struct ipw_priv *priv = dev_get_drvdata(d);
  1552. const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
  1553. int len = 0, i;
  1554. len = sprintf(&buf[len],
  1555. "Displaying %d channels in 2.4Ghz band "
  1556. "(802.11bg):\n", geo->bg_channels);
  1557. for (i = 0; i < geo->bg_channels; i++) {
  1558. len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
  1559. geo->bg[i].channel,
  1560. geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT ?
  1561. " (radar spectrum)" : "",
  1562. ((geo->bg[i].flags & IEEE80211_CH_NO_IBSS) ||
  1563. (geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT))
  1564. ? "" : ", IBSS",
  1565. geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
  1566. "passive only" : "active/passive",
  1567. geo->bg[i].flags & IEEE80211_CH_B_ONLY ?
  1568. "B" : "B/G");
  1569. }
  1570. len += sprintf(&buf[len],
  1571. "Displaying %d channels in 5.2Ghz band "
  1572. "(802.11a):\n", geo->a_channels);
  1573. for (i = 0; i < geo->a_channels; i++) {
  1574. len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
  1575. geo->a[i].channel,
  1576. geo->a[i].flags & IEEE80211_CH_RADAR_DETECT ?
  1577. " (radar spectrum)" : "",
  1578. ((geo->a[i].flags & IEEE80211_CH_NO_IBSS) ||
  1579. (geo->a[i].flags & IEEE80211_CH_RADAR_DETECT))
  1580. ? "" : ", IBSS",
  1581. geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
  1582. "passive only" : "active/passive");
  1583. }
  1584. return len;
  1585. }
  1586. static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
  1587. static void notify_wx_assoc_event(struct ipw_priv *priv)
  1588. {
  1589. union iwreq_data wrqu;
  1590. wrqu.ap_addr.sa_family = ARPHRD_ETHER;
  1591. if (priv->status & STATUS_ASSOCIATED)
  1592. memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
  1593. else
  1594. memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
  1595. wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
  1596. }
  1597. static void ipw_irq_tasklet(struct ipw_priv *priv)
  1598. {
  1599. u32 inta, inta_mask, handled = 0;
  1600. unsigned long flags;
  1601. int rc = 0;
  1602. spin_lock_irqsave(&priv->irq_lock, flags);
  1603. inta = ipw_read32(priv, IPW_INTA_RW);
  1604. inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
  1605. inta &= (IPW_INTA_MASK_ALL & inta_mask);
  1606. /* Add any cached INTA values that need to be handled */
  1607. inta |= priv->isr_inta;
  1608. spin_unlock_irqrestore(&priv->irq_lock, flags);
  1609. spin_lock_irqsave(&priv->lock, flags);
  1610. /* handle all the justifications for the interrupt */
  1611. if (inta & IPW_INTA_BIT_RX_TRANSFER) {
  1612. ipw_rx(priv);
  1613. handled |= IPW_INTA_BIT_RX_TRANSFER;
  1614. }
  1615. if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
  1616. IPW_DEBUG_HC("Command completed.\n");
  1617. rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
  1618. priv->status &= ~STATUS_HCMD_ACTIVE;
  1619. wake_up_interruptible(&priv->wait_command_queue);
  1620. handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
  1621. }
  1622. if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
  1623. IPW_DEBUG_TX("TX_QUEUE_1\n");
  1624. rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
  1625. handled |= IPW_INTA_BIT_TX_QUEUE_1;
  1626. }
  1627. if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
  1628. IPW_DEBUG_TX("TX_QUEUE_2\n");
  1629. rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
  1630. handled |= IPW_INTA_BIT_TX_QUEUE_2;
  1631. }
  1632. if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
  1633. IPW_DEBUG_TX("TX_QUEUE_3\n");
  1634. rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
  1635. handled |= IPW_INTA_BIT_TX_QUEUE_3;
  1636. }
  1637. if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
  1638. IPW_DEBUG_TX("TX_QUEUE_4\n");
  1639. rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
  1640. handled |= IPW_INTA_BIT_TX_QUEUE_4;
  1641. }
  1642. if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
  1643. IPW_WARNING("STATUS_CHANGE\n");
  1644. handled |= IPW_INTA_BIT_STATUS_CHANGE;
  1645. }
  1646. if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
  1647. IPW_WARNING("TX_PERIOD_EXPIRED\n");
  1648. handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
  1649. }
  1650. if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
  1651. IPW_WARNING("HOST_CMD_DONE\n");
  1652. handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
  1653. }
  1654. if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
  1655. IPW_WARNING("FW_INITIALIZATION_DONE\n");
  1656. handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
  1657. }
  1658. if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
  1659. IPW_WARNING("PHY_OFF_DONE\n");
  1660. handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
  1661. }
  1662. if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
  1663. IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
  1664. priv->status |= STATUS_RF_KILL_HW;
  1665. wake_up_interruptible(&priv->wait_command_queue);
  1666. priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
  1667. cancel_delayed_work(&priv->request_scan);
  1668. cancel_delayed_work(&priv->request_direct_scan);
  1669. cancel_delayed_work(&priv->request_passive_scan);
  1670. cancel_delayed_work(&priv->scan_event);
  1671. schedule_work(&priv->link_down);
  1672. queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
  1673. handled |= IPW_INTA_BIT_RF_KILL_DONE;
  1674. }
  1675. if (inta & IPW_INTA_BIT_FATAL_ERROR) {
  1676. IPW_WARNING("Firmware error detected. Restarting.\n");
  1677. if (priv->error) {
  1678. IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
  1679. if (ipw_debug_level & IPW_DL_FW_ERRORS) {
  1680. struct ipw_fw_error *error =
  1681. ipw_alloc_error_log(priv);
  1682. ipw_dump_error_log(priv, error);
  1683. kfree(error);
  1684. }
  1685. } else {
  1686. priv->error = ipw_alloc_error_log(priv);
  1687. if (priv->error)
  1688. IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
  1689. else
  1690. IPW_DEBUG_FW("Error allocating sysfs 'error' "
  1691. "log.\n");
  1692. if (ipw_debug_level & IPW_DL_FW_ERRORS)
  1693. ipw_dump_error_log(priv, priv->error);
  1694. }
  1695. /* XXX: If hardware encryption is for WPA/WPA2,
  1696. * we have to notify the supplicant. */
  1697. if (priv->ieee->sec.encrypt) {
  1698. priv->status &= ~STATUS_ASSOCIATED;
  1699. notify_wx_assoc_event(priv);
  1700. }
  1701. /* Keep the restart process from trying to send host
  1702. * commands by clearing the INIT status bit */
  1703. priv->status &= ~STATUS_INIT;
  1704. /* Cancel currently queued command. */
  1705. priv->status &= ~STATUS_HCMD_ACTIVE;
  1706. wake_up_interruptible(&priv->wait_command_queue);
  1707. queue_work(priv->workqueue, &priv->adapter_restart);
  1708. handled |= IPW_INTA_BIT_FATAL_ERROR;
  1709. }
  1710. if (inta & IPW_INTA_BIT_PARITY_ERROR) {
  1711. IPW_ERROR("Parity error\n");
  1712. handled |= IPW_INTA_BIT_PARITY_ERROR;
  1713. }
  1714. if (handled != inta) {
  1715. IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
  1716. }
  1717. spin_unlock_irqrestore(&priv->lock, flags);
  1718. /* enable all interrupts */
  1719. ipw_enable_interrupts(priv);
  1720. }
  1721. #define IPW_CMD(x) case IPW_CMD_ ## x : return #x
  1722. static char *get_cmd_string(u8 cmd)
  1723. {
  1724. switch (cmd) {
  1725. IPW_CMD(HOST_COMPLETE);
  1726. IPW_CMD(POWER_DOWN);
  1727. IPW_CMD(SYSTEM_CONFIG);
  1728. IPW_CMD(MULTICAST_ADDRESS);
  1729. IPW_CMD(SSID);
  1730. IPW_CMD(ADAPTER_ADDRESS);
  1731. IPW_CMD(PORT_TYPE);
  1732. IPW_CMD(RTS_THRESHOLD);
  1733. IPW_CMD(FRAG_THRESHOLD);
  1734. IPW_CMD(POWER_MODE);
  1735. IPW_CMD(WEP_KEY);
  1736. IPW_CMD(TGI_TX_KEY);
  1737. IPW_CMD(SCAN_REQUEST);
  1738. IPW_CMD(SCAN_REQUEST_EXT);
  1739. IPW_CMD(ASSOCIATE);
  1740. IPW_CMD(SUPPORTED_RATES);
  1741. IPW_CMD(SCAN_ABORT);
  1742. IPW_CMD(TX_FLUSH);
  1743. IPW_CMD(QOS_PARAMETERS);
  1744. IPW_CMD(DINO_CONFIG);
  1745. IPW_CMD(RSN_CAPABILITIES);
  1746. IPW_CMD(RX_KEY);
  1747. IPW_CMD(CARD_DISABLE);
  1748. IPW_CMD(SEED_NUMBER);
  1749. IPW_CMD(TX_POWER);
  1750. IPW_CMD(COUNTRY_INFO);
  1751. IPW_CMD(AIRONET_INFO);
  1752. IPW_CMD(AP_TX_POWER);
  1753. IPW_CMD(CCKM_INFO);
  1754. IPW_CMD(CCX_VER_INFO);
  1755. IPW_CMD(SET_CALIBRATION);
  1756. IPW_CMD(SENSITIVITY_CALIB);
  1757. IPW_CMD(RETRY_LIMIT);
  1758. IPW_CMD(IPW_PRE_POWER_DOWN);
  1759. IPW_CMD(VAP_BEACON_TEMPLATE);
  1760. IPW_CMD(VAP_DTIM_PERIOD);
  1761. IPW_CMD(EXT_SUPPORTED_RATES);
  1762. IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
  1763. IPW_CMD(VAP_QUIET_INTERVALS);
  1764. IPW_CMD(VAP_CHANNEL_SWITCH);
  1765. IPW_CMD(VAP_MANDATORY_CHANNELS);
  1766. IPW_CMD(VAP_CELL_PWR_LIMIT);
  1767. IPW_CMD(VAP_CF_PARAM_SET);
  1768. IPW_CMD(VAP_SET_BEACONING_STATE);
  1769. IPW_CMD(MEASUREMENT);
  1770. IPW_CMD(POWER_CAPABILITY);
  1771. IPW_CMD(SUPPORTED_CHANNELS);
  1772. IPW_CMD(TPC_REPORT);
  1773. IPW_CMD(WME_INFO);
  1774. IPW_CMD(PRODUCTION_COMMAND);
  1775. default:
  1776. return "UNKNOWN";
  1777. }
  1778. }
  1779. #define HOST_COMPLETE_TIMEOUT HZ
  1780. static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
  1781. {
  1782. int rc = 0;
  1783. unsigned long flags;
  1784. spin_lock_irqsave(&priv->lock, flags);
  1785. if (priv->status & STATUS_HCMD_ACTIVE) {
  1786. IPW_ERROR("Failed to send %s: Already sending a command.\n",
  1787. get_cmd_string(cmd->cmd));
  1788. spin_unlock_irqrestore(&priv->lock, flags);
  1789. return -EAGAIN;
  1790. }
  1791. priv->status |= STATUS_HCMD_ACTIVE;
  1792. if (priv->cmdlog) {
  1793. priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
  1794. priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
  1795. priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
  1796. memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
  1797. cmd->len);
  1798. priv->cmdlog[priv->cmdlog_pos].retcode = -1;
  1799. }
  1800. IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
  1801. get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
  1802. priv->status);
  1803. #ifndef DEBUG_CMD_WEP_KEY
  1804. if (cmd->cmd == IPW_CMD_WEP_KEY)
1805. IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
  1806. else
  1807. #endif
  1808. printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
  1809. rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
  1810. if (rc) {
  1811. priv->status &= ~STATUS_HCMD_ACTIVE;
  1812. IPW_ERROR("Failed to send %s: Reason %d\n",
  1813. get_cmd_string(cmd->cmd), rc);
  1814. spin_unlock_irqrestore(&priv->lock, flags);
  1815. goto exit;
  1816. }
  1817. spin_unlock_irqrestore(&priv->lock, flags);
  1818. rc = wait_event_interruptible_timeout(priv->wait_command_queue,
1819. !(priv->status & STATUS_HCMD_ACTIVE),
  1821. HOST_COMPLETE_TIMEOUT);
  1822. if (rc == 0) {
  1823. spin_lock_irqsave(&priv->lock, flags);
  1824. if (priv->status & STATUS_HCMD_ACTIVE) {
  1825. IPW_ERROR("Failed to send %s: Command timed out.\n",
  1826. get_cmd_string(cmd->cmd));
  1827. priv->status &= ~STATUS_HCMD_ACTIVE;
  1828. spin_unlock_irqrestore(&priv->lock, flags);
  1829. rc = -EIO;
  1830. goto exit;
  1831. }
  1832. spin_unlock_irqrestore(&priv->lock, flags);
  1833. } else
  1834. rc = 0;
  1835. if (priv->status & STATUS_RF_KILL_HW) {
  1836. IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
  1837. get_cmd_string(cmd->cmd));
  1838. rc = -EIO;
  1839. goto exit;
  1840. }
  1841. exit:
  1842. if (priv->cmdlog) {
  1843. priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
  1844. priv->cmdlog_pos %= priv->cmdlog_len;
  1845. }
  1846. return rc;
  1847. }
  1848. static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
  1849. {
  1850. struct host_cmd cmd = {
  1851. .cmd = command,
  1852. };
  1853. return __ipw_send_cmd(priv, &cmd);
  1854. }
  1855. static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
  1856. void *data)
  1857. {
  1858. struct host_cmd cmd = {
  1859. .cmd = command,
  1860. .len = len,
  1861. .param = data,
  1862. };
  1863. return __ipw_send_cmd(priv, &cmd);
  1864. }
  1865. static int ipw_send_host_complete(struct ipw_priv *priv)
  1866. {
  1867. if (!priv) {
  1868. IPW_ERROR("Invalid args\n");
  1869. return -1;
  1870. }
  1871. return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
  1872. }
  1873. static int ipw_send_system_config(struct ipw_priv *priv)
  1874. {
  1875. return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
  1876. sizeof(priv->sys_config),
  1877. &priv->sys_config);
  1878. }
  1879. static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
  1880. {
  1881. if (!priv || !ssid) {
  1882. IPW_ERROR("Invalid args\n");
  1883. return -1;
  1884. }
  1885. return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
  1886. ssid);
  1887. }
  1888. static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
  1889. {
  1890. if (!priv || !mac) {
  1891. IPW_ERROR("Invalid args\n");
  1892. return -1;
  1893. }
  1894. IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
  1895. priv->net_dev->name, mac);
  1896. return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
  1897. }
  1898. /*
  1899. * NOTE: This must be executed from our workqueue as it results in udelay
  1900. * being called which may corrupt the keyboard if executed on default
  1901. * workqueue
  1902. */
  1903. static void ipw_adapter_restart(void *adapter)
  1904. {
  1905. struct ipw_priv *priv = adapter;
  1906. if (priv->status & STATUS_RF_KILL_MASK)
  1907. return;
  1908. ipw_down(priv);
  1909. if (priv->assoc_network &&
  1910. (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
  1911. ipw_remove_current_network(priv);
  1912. if (ipw_up(priv)) {
  1913. IPW_ERROR("Failed to up device\n");
  1914. return;
  1915. }
  1916. }
  1917. static void ipw_bg_adapter_restart(struct work_struct *work)
  1918. {
  1919. struct ipw_priv *priv =
  1920. container_of(work, struct ipw_priv, adapter_restart);
  1921. mutex_lock(&priv->mutex);
  1922. ipw_adapter_restart(priv);
  1923. mutex_unlock(&priv->mutex);
  1924. }
  1925. #define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
  1926. static void ipw_scan_check(void *data)
  1927. {
  1928. struct ipw_priv *priv = data;
  1929. if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
  1930. IPW_DEBUG_SCAN("Scan completion watchdog resetting "
  1931. "adapter after (%dms).\n",
  1932. jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
  1933. queue_work(priv->workqueue, &priv->adapter_restart);
  1934. }
  1935. }
  1936. static void ipw_bg_scan_check(struct work_struct *work)
  1937. {
  1938. struct ipw_priv *priv =
  1939. container_of(work, struct ipw_priv, scan_check.work);
  1940. mutex_lock(&priv->mutex);
  1941. ipw_scan_check(priv);
  1942. mutex_unlock(&priv->mutex);
  1943. }
  1944. static int ipw_send_scan_request_ext(struct ipw_priv *priv,
  1945. struct ipw_scan_request_ext *request)
  1946. {
  1947. return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
  1948. sizeof(*request), request);
  1949. }
  1950. static int ipw_send_scan_abort(struct ipw_priv *priv)
  1951. {
  1952. if (!priv) {
  1953. IPW_ERROR("Invalid args\n");
  1954. return -1;
  1955. }
  1956. return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
  1957. }
  1958. static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
  1959. {
  1960. struct ipw_sensitivity_calib calib = {
  1961. .beacon_rssi_raw = cpu_to_le16(sens),
  1962. };
  1963. return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
  1964. &calib);
  1965. }
  1966. static int ipw_send_associate(struct ipw_priv *priv,
  1967. struct ipw_associate *associate)
  1968. {
  1969. if (!priv || !associate) {
  1970. IPW_ERROR("Invalid args\n");
  1971. return -1;
  1972. }
  1973. return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
  1974. associate);
  1975. }
  1976. static int ipw_send_supported_rates(struct ipw_priv *priv,
  1977. struct ipw_supported_rates *rates)
  1978. {
  1979. if (!priv || !rates) {
  1980. IPW_ERROR("Invalid args\n");
  1981. return -1;
  1982. }
  1983. return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
  1984. rates);
  1985. }
  1986. static int ipw_set_random_seed(struct ipw_priv *priv)
  1987. {
  1988. u32 val;
  1989. if (!priv) {
  1990. IPW_ERROR("Invalid args\n");
  1991. return -1;
  1992. }
  1993. get_random_bytes(&val, sizeof(val));
  1994. return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
  1995. }
  1996. static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
  1997. {
  1998. __le32 v = cpu_to_le32(phy_off);
  1999. if (!priv) {
  2000. IPW_ERROR("Invalid args\n");
  2001. return -1;
  2002. }
  2003. return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
  2004. }
  2005. static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
  2006. {
  2007. if (!priv || !power) {
  2008. IPW_ERROR("Invalid args\n");
  2009. return -1;
  2010. }
  2011. return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
  2012. }
  2013. static int ipw_set_tx_power(struct ipw_priv *priv)
  2014. {
  2015. const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
  2016. struct ipw_tx_power tx_power;
  2017. s8 max_power;
  2018. int i;
  2019. memset(&tx_power, 0, sizeof(tx_power));
  2020. /* configure device for 'G' band */
  2021. tx_power.ieee_mode = IPW_G_MODE;
  2022. tx_power.num_channels = geo->bg_channels;
  2023. for (i = 0; i < geo->bg_channels; i++) {
  2024. max_power = geo->bg[i].max_power;
  2025. tx_power.channels_tx_power[i].channel_number =
  2026. geo->bg[i].channel;
  2027. tx_power.channels_tx_power[i].tx_power = max_power ?
  2028. min(max_power, priv->tx_power) : priv->tx_power;
  2029. }
  2030. if (ipw_send_tx_power(priv, &tx_power))
  2031. return -EIO;
  2032. /* configure device to also handle 'B' band */
  2033. tx_power.ieee_mode = IPW_B_MODE;
  2034. if (ipw_send_tx_power(priv, &tx_power))
  2035. return -EIO;
  2036. /* configure device to also handle 'A' band */
  2037. if (priv->ieee->abg_true) {
  2038. tx_power.ieee_mode = IPW_A_MODE;
  2039. tx_power.num_channels = geo->a_channels;
  2040. for (i = 0; i < tx_power.num_channels; i++) {
  2041. max_power = geo->a[i].max_power;
  2042. tx_power.channels_tx_power[i].channel_number =
  2043. geo->a[i].channel;
  2044. tx_power.channels_tx_power[i].tx_power = max_power ?
  2045. min(max_power, priv->tx_power) : priv->tx_power;
  2046. }
  2047. if (ipw_send_tx_power(priv, &tx_power))
  2048. return -EIO;
  2049. }
  2050. return 0;
  2051. }
  2052. static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
  2053. {
  2054. struct ipw_rts_threshold rts_threshold = {
  2055. .rts_threshold = cpu_to_le16(rts),
  2056. };
  2057. if (!priv) {
  2058. IPW_ERROR("Invalid args\n");
  2059. return -1;
  2060. }
  2061. return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
  2062. sizeof(rts_threshold), &rts_threshold);
  2063. }
  2064. static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
  2065. {
  2066. struct ipw_frag_threshold frag_threshold = {
  2067. .frag_threshold = cpu_to_le16(frag),
  2068. };
  2069. if (!priv) {
  2070. IPW_ERROR("Invalid args\n");
  2071. return -1;
  2072. }
  2073. return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
  2074. sizeof(frag_threshold), &frag_threshold);
  2075. }
  2076. static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
  2077. {
  2078. __le32 param;
  2079. if (!priv) {
  2080. IPW_ERROR("Invalid args\n");
  2081. return -1;
  2082. }
2083. /* If on battery, set to power index 3; if on AC, set to CAM;
2084. * otherwise pass the user-requested level through */
  2085. switch (mode) {
  2086. case IPW_POWER_BATTERY:
  2087. param = cpu_to_le32(IPW_POWER_INDEX_3);
  2088. break;
  2089. case IPW_POWER_AC:
  2090. param = cpu_to_le32(IPW_POWER_MODE_CAM);
  2091. break;
  2092. default:
  2093. param = cpu_to_le32(mode);
  2094. break;
  2095. }
  2096. return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
  2097. &param);
  2098. }
  2099. static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
  2100. {
  2101. struct ipw_retry_limit retry_limit = {
  2102. .short_retry_limit = slimit,
  2103. .long_retry_limit = llimit
  2104. };
  2105. if (!priv) {
  2106. IPW_ERROR("Invalid args\n");
  2107. return -1;
  2108. }
  2109. return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
  2110. &retry_limit);
  2111. }
  2112. /*
  2113. * The IPW device contains a Microwire compatible EEPROM that stores
  2114. * various data like the MAC address. Usually the firmware has exclusive
  2115. * access to the eeprom, but during device initialization (before the
  2116. * device driver has sent the HostComplete command to the firmware) the
  2117. * device driver has read access to the EEPROM by way of indirect addressing
  2118. * through a couple of memory mapped registers.
  2119. *
2120. * The following is a simplified implementation for pulling data out of
2121. * the eeprom, along with some helper functions to find information in
2122. * the per device private data's copy of the eeprom.
2123. *
2124. * NOTE: To better understand how these functions work (i.e. what is a chip
2125. * select and why do we have to keep driving the eeprom clock?), read
2126. * just about any data sheet for a Microwire compatible EEPROM.
  2127. */
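/*
 * Illustrative usage of the helpers below (editor's sketch, not part of
 * the driver): dumping the first four 16-bit EEPROM words during early
 * init, before the HostComplete command has been sent.
 *
 *	int i;
 *	for (i = 0; i < 4; i++)
 *		IPW_DEBUG_INFO("eeprom word %d = 0x%04x\n", i,
 *			       eeprom_read_u16(priv, (u8) i));
 */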
  2128. /* write a 32 bit value into the indirect accessor register */
  2129. static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
  2130. {
  2131. ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
  2132. /* the eeprom requires some time to complete the operation */
  2133. udelay(p->eeprom_delay);
  2134. return;
  2135. }
  2136. /* perform a chip select operation */
  2137. static void eeprom_cs(struct ipw_priv *priv)
  2138. {
  2139. eeprom_write_reg(priv, 0);
  2140. eeprom_write_reg(priv, EEPROM_BIT_CS);
  2141. eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
  2142. eeprom_write_reg(priv, EEPROM_BIT_CS);
  2143. }
2144. /* release the chip select */
  2145. static void eeprom_disable_cs(struct ipw_priv *priv)
  2146. {
  2147. eeprom_write_reg(priv, EEPROM_BIT_CS);
  2148. eeprom_write_reg(priv, 0);
  2149. eeprom_write_reg(priv, EEPROM_BIT_SK);
  2150. }
  2151. /* push a single bit down to the eeprom */
  2152. static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
  2153. {
  2154. int d = (bit ? EEPROM_BIT_DI : 0);
  2155. eeprom_write_reg(p, EEPROM_BIT_CS | d);
  2156. eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
  2157. }
  2158. /* push an opcode followed by an address down to the eeprom */
  2159. static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
  2160. {
  2161. int i;
  2162. eeprom_cs(priv);
  2163. eeprom_write_bit(priv, 1);
  2164. eeprom_write_bit(priv, op & 2);
  2165. eeprom_write_bit(priv, op & 1);
  2166. for (i = 7; i >= 0; i--) {
  2167. eeprom_write_bit(priv, addr & (1 << i));
  2168. }
  2169. }
  2170. /* pull 16 bits off the eeprom, one bit at a time */
  2171. static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
  2172. {
  2173. int i;
  2174. u16 r = 0;
  2175. /* Send READ Opcode */
  2176. eeprom_op(priv, EEPROM_CMD_READ, addr);
  2177. /* Send dummy bit */
  2178. eeprom_write_reg(priv, EEPROM_BIT_CS);
2179. /* Read the 16-bit word off the eeprom one bit at a time */
  2180. for (i = 0; i < 16; i++) {
  2181. u32 data = 0;
  2182. eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
  2183. eeprom_write_reg(priv, EEPROM_BIT_CS);
  2184. data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
  2185. r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
  2186. }
  2187. /* Send another dummy bit */
  2188. eeprom_write_reg(priv, 0);
  2189. eeprom_disable_cs(priv);
  2190. return r;
  2191. }
  2192. /* helper function for pulling the mac address out of the private */
  2193. /* data's copy of the eeprom data */
  2194. static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
  2195. {
  2196. memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
  2197. }
  2198. /*
  2199. * Either the device driver (i.e. the host) or the firmware can
  2200. * load eeprom data into the designated region in SRAM. If neither
  2201. * happens then the FW will shutdown with a fatal error.
  2202. *
2203. * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2204. * bit in the designated region of shared SRAM needs to be non-zero.
  2205. */
  2206. static void ipw_eeprom_init_sram(struct ipw_priv *priv)
  2207. {
  2208. int i;
  2209. __le16 *eeprom = (__le16 *) priv->eeprom;
  2210. IPW_DEBUG_TRACE(">>\n");
  2211. /* read entire contents of eeprom into private buffer */
  2212. for (i = 0; i < 128; i++)
  2213. eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
  2214. /*
  2215. If the data looks correct, then copy it to our private
  2216. copy. Otherwise let the firmware know to perform the operation
  2217. on its own.
  2218. */
  2219. if (priv->eeprom[EEPROM_VERSION] != 0) {
  2220. IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
  2221. /* write the eeprom data to sram */
  2222. for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
  2223. ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
  2224. /* Do not load eeprom data on fatal error or suspend */
  2225. ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
  2226. } else {
2227. IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
  2228. /* Load eeprom data on fatal error or suspend */
  2229. ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
  2230. }
  2231. IPW_DEBUG_TRACE("<<\n");
  2232. }
  2233. static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
  2234. {
  2235. count >>= 2;
  2236. if (!count)
  2237. return;
  2238. _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
  2239. while (count--)
  2240. _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
  2241. }
  2242. static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
  2243. {
  2244. ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
  2245. CB_NUMBER_OF_ELEMENTS_SMALL *
  2246. sizeof(struct command_block));
  2247. }
  2248. static int ipw_fw_dma_enable(struct ipw_priv *priv)
  2249. { /* start dma engine but no transfers yet */
  2250. IPW_DEBUG_FW(">> : \n");
  2251. /* Start the dma */
  2252. ipw_fw_dma_reset_command_blocks(priv);
  2253. /* Write CB base address */
  2254. ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
  2255. IPW_DEBUG_FW("<< : \n");
  2256. return 0;
  2257. }
  2258. static void ipw_fw_dma_abort(struct ipw_priv *priv)
  2259. {
  2260. u32 control = 0;
  2261. IPW_DEBUG_FW(">> :\n");
  2262. /* set the Stop and Abort bit */
  2263. control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
  2264. ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
  2265. priv->sram_desc.last_cb_index = 0;
  2266. IPW_DEBUG_FW("<< \n");
  2267. }
  2268. static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
  2269. struct command_block *cb)
  2270. {
  2271. u32 address =
  2272. IPW_SHARED_SRAM_DMA_CONTROL +
  2273. (sizeof(struct command_block) * index);
  2274. IPW_DEBUG_FW(">> :\n");
  2275. ipw_write_indirect(priv, address, (u8 *) cb,
  2276. (int)sizeof(struct command_block));
  2277. IPW_DEBUG_FW("<< :\n");
  2278. return 0;
  2279. }
  2280. static int ipw_fw_dma_kick(struct ipw_priv *priv)
  2281. {
  2282. u32 control = 0;
  2283. u32 index = 0;
  2284. IPW_DEBUG_FW(">> :\n");
  2285. for (index = 0; index < priv->sram_desc.last_cb_index; index++)
  2286. ipw_fw_dma_write_command_block(priv, index,
  2287. &priv->sram_desc.cb_list[index]);
  2288. /* Enable the DMA in the CSR register */
  2289. ipw_clear_bit(priv, IPW_RESET_REG,
  2290. IPW_RESET_REG_MASTER_DISABLED |
  2291. IPW_RESET_REG_STOP_MASTER);
  2292. /* Set the Start bit. */
  2293. control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
  2294. ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
  2295. IPW_DEBUG_FW("<< :\n");
  2296. return 0;
  2297. }
  2298. static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
  2299. {
  2300. u32 address;
  2301. u32 register_value = 0;
  2302. u32 cb_fields_address = 0;
  2303. IPW_DEBUG_FW(">> :\n");
  2304. address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
  2305. IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2306. /* Read the DMA Control register */
  2307. register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
  2308. IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
  2309. /* Print the CB values */
  2310. cb_fields_address = address;
  2311. register_value = ipw_read_reg32(priv, cb_fields_address);
  2312. IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
  2313. cb_fields_address += sizeof(u32);
  2314. register_value = ipw_read_reg32(priv, cb_fields_address);
  2315. IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
  2316. cb_fields_address += sizeof(u32);
  2317. register_value = ipw_read_reg32(priv, cb_fields_address);
  2318. IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
  2319. register_value);
  2320. cb_fields_address += sizeof(u32);
  2321. register_value = ipw_read_reg32(priv, cb_fields_address);
  2322. IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2323. IPW_DEBUG_FW("<< :\n");
  2324. }
  2325. static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
  2326. {
  2327. u32 current_cb_address = 0;
  2328. u32 current_cb_index = 0;
2329. IPW_DEBUG_FW(">> :\n");
  2330. current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
  2331. current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
  2332. sizeof(struct command_block);
  2333. IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
  2334. current_cb_index, current_cb_address);
2335. IPW_DEBUG_FW("<< :\n");
  2336. return current_cb_index;
  2337. }
  2338. static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
  2339. u32 src_address,
  2340. u32 dest_address,
  2341. u32 length,
  2342. int interrupt_enabled, int is_last)
  2343. {
  2344. u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
  2345. CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
  2346. CB_DEST_SIZE_LONG;
  2347. struct command_block *cb;
  2348. u32 last_cb_element = 0;
  2349. IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
  2350. src_address, dest_address, length);
  2351. if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
  2352. return -1;
  2353. last_cb_element = priv->sram_desc.last_cb_index;
  2354. cb = &priv->sram_desc.cb_list[last_cb_element];
  2355. priv->sram_desc.last_cb_index++;
  2356. /* Calculate the new CB control word */
  2357. if (interrupt_enabled)
  2358. control |= CB_INT_ENABLED;
  2359. if (is_last)
  2360. control |= CB_LAST_VALID;
  2361. control |= length;
  2362. /* Calculate the CB Element's checksum value */
  2363. cb->status = control ^ src_address ^ dest_address;
  2364. /* Copy the Source and Destination addresses */
  2365. cb->dest_addr = dest_address;
  2366. cb->source_addr = src_address;
  2367. /* Copy the Control Word last */
  2368. cb->control = control;
  2369. return 0;
  2370. }
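/*
 * Worked example for the status seed above (made-up values, editor's
 * note): with control = 0x21000200, src_address = 0x00100000 and
 * dest_address = 0x00000c00, the element is written with
 * status = 0x21000200 ^ 0x00100000 ^ 0x00000c00 = 0x21100e00.
 */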
  2371. static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
  2372. u32 src_phys, u32 dest_address, u32 length)
  2373. {
  2374. u32 bytes_left = length;
  2375. u32 src_offset = 0;
  2376. u32 dest_offset = 0;
  2377. int status = 0;
  2378. IPW_DEBUG_FW(">> \n");
  2379. IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
  2380. src_phys, dest_address, length);
  2381. while (bytes_left > CB_MAX_LENGTH) {
  2382. status = ipw_fw_dma_add_command_block(priv,
  2383. src_phys + src_offset,
  2384. dest_address +
  2385. dest_offset,
  2386. CB_MAX_LENGTH, 0, 0);
  2387. if (status) {
  2388. IPW_DEBUG_FW_INFO(": Failed\n");
  2389. return -1;
  2390. } else
  2391. IPW_DEBUG_FW_INFO(": Added new cb\n");
  2392. src_offset += CB_MAX_LENGTH;
  2393. dest_offset += CB_MAX_LENGTH;
  2394. bytes_left -= CB_MAX_LENGTH;
  2395. }
  2396. /* add the buffer tail */
  2397. if (bytes_left > 0) {
  2398. status =
  2399. ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
  2400. dest_address + dest_offset,
  2401. bytes_left, 0, 0);
  2402. if (status) {
  2403. IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
  2404. return -1;
  2405. } else
  2406. IPW_DEBUG_FW_INFO
  2407. (": Adding new cb - the buffer tail\n");
  2408. }
  2409. IPW_DEBUG_FW("<< \n");
  2410. return 0;
  2411. }
  2412. static int ipw_fw_dma_wait(struct ipw_priv *priv)
  2413. {
  2414. u32 current_index = 0, previous_index;
  2415. u32 watchdog = 0;
  2416. IPW_DEBUG_FW(">> : \n");
  2417. current_index = ipw_fw_dma_command_block_index(priv);
  2418. IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
  2419. (int)priv->sram_desc.last_cb_index);
  2420. while (current_index < priv->sram_desc.last_cb_index) {
  2421. udelay(50);
  2422. previous_index = current_index;
  2423. current_index = ipw_fw_dma_command_block_index(priv);
  2424. if (previous_index < current_index) {
  2425. watchdog = 0;
  2426. continue;
  2427. }
  2428. if (++watchdog > 400) {
  2429. IPW_DEBUG_FW_INFO("Timeout\n");
  2430. ipw_fw_dma_dump_command_block(priv);
  2431. ipw_fw_dma_abort(priv);
  2432. return -1;
  2433. }
  2434. }
  2435. ipw_fw_dma_abort(priv);
  2436. /*Disable the DMA in the CSR register */
  2437. ipw_set_bit(priv, IPW_RESET_REG,
  2438. IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
  2439. IPW_DEBUG_FW("<< dmaWaitSync \n");
  2440. return 0;
  2441. }
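/*
 * Editor's sketch (not part of the driver): the DMA helpers above are
 * normally used in the sequence below, which is what ipw_load_firmware()
 * does further down; chunk_phys, chunk_dest and chunk_len are placeholder
 * names for a single firmware chunk.
 *
 *	ipw_fw_dma_enable(priv);
 *	ipw_fw_dma_add_buffer(priv, chunk_phys, chunk_dest, chunk_len);
 *	ipw_fw_dma_kick(priv);
 *	if (ipw_fw_dma_wait(priv))
 *		goto error;
 */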
  2442. static void ipw_remove_current_network(struct ipw_priv *priv)
  2443. {
  2444. struct list_head *element, *safe;
  2445. struct ieee80211_network *network = NULL;
  2446. unsigned long flags;
  2447. spin_lock_irqsave(&priv->ieee->lock, flags);
  2448. list_for_each_safe(element, safe, &priv->ieee->network_list) {
  2449. network = list_entry(element, struct ieee80211_network, list);
  2450. if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
  2451. list_del(element);
  2452. list_add_tail(&network->list,
  2453. &priv->ieee->network_free_list);
  2454. }
  2455. }
  2456. spin_unlock_irqrestore(&priv->ieee->lock, flags);
  2457. }
  2458. /**
2459. * Check that the card is still alive.
2460. * Reads a debug register from domain 0.
2461. * If the card is present, a pre-defined value should
2462. * be found there.
  2463. *
  2464. * @param priv
  2465. * @return 1 if card is present, 0 otherwise
  2466. */
  2467. static inline int ipw_alive(struct ipw_priv *priv)
  2468. {
  2469. return ipw_read32(priv, 0x90) == 0xd55555d5;
  2470. }
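/*
 * Editor's sketch (not part of the driver): a caller could wait for the
 * card to come alive with a loop like the one below; the 50 x 10 ms
 * budget is an arbitrary example, not a value taken from this driver.
 *
 *	int i;
 *	for (i = 0; i < 50 && !ipw_alive(priv); i++)
 *		mdelay(10);
 */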
  2471. /* timeout in msec, attempted in 10-msec quanta */
  2472. static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
  2473. int timeout)
  2474. {
  2475. int i = 0;
  2476. do {
  2477. if ((ipw_read32(priv, addr) & mask) == mask)
  2478. return i;
  2479. mdelay(10);
  2480. i += 10;
  2481. } while (i < timeout);
  2482. return -ETIME;
  2483. }
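/*
 * Illustrative usage (mirrors ipw_stop_master() below): a negative return
 * means the bit never rose within the timeout, otherwise the return value
 * is roughly how many milliseconds the wait took.
 *
 *	int rc = ipw_poll_bit(priv, IPW_RESET_REG,
 *			      IPW_RESET_REG_MASTER_DISABLED, 100);
 *	if (rc < 0)
 *		return -1;
 *	IPW_DEBUG_INFO("stop master %dms\n", rc);
 */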
  2484. /* These functions load the firmware and micro code for the operation of
2485. * the ipw hardware. They assume the buffer has all the bits for the
2486. * image and that the caller is handling the memory allocation and clean up.
  2487. */
  2488. static int ipw_stop_master(struct ipw_priv *priv)
  2489. {
  2490. int rc;
  2491. IPW_DEBUG_TRACE(">> \n");
  2492. /* stop master. typical delay - 0 */
  2493. ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
  2494. /* timeout is in msec, polled in 10-msec quanta */
  2495. rc = ipw_poll_bit(priv, IPW_RESET_REG,
  2496. IPW_RESET_REG_MASTER_DISABLED, 100);
  2497. if (rc < 0) {
  2498. IPW_ERROR("wait for stop master failed after 100ms\n");
  2499. return -1;
  2500. }
  2501. IPW_DEBUG_INFO("stop master %dms\n", rc);
  2502. return rc;
  2503. }
  2504. static void ipw_arc_release(struct ipw_priv *priv)
  2505. {
  2506. IPW_DEBUG_TRACE(">> \n");
  2507. mdelay(5);
  2508. ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
  2509. /* no one knows timing, for safety add some delay */
  2510. mdelay(5);
  2511. }
  2512. struct fw_chunk {
  2513. __le32 address;
  2514. __le32 length;
  2515. };
  2516. static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
  2517. {
  2518. int rc = 0, i, addr;
  2519. u8 cr = 0;
  2520. __le16 *image;
  2521. image = (__le16 *) data;
  2522. IPW_DEBUG_TRACE(">> \n");
  2523. rc = ipw_stop_master(priv);
  2524. if (rc < 0)
  2525. return rc;
  2526. for (addr = IPW_SHARED_LOWER_BOUND;
  2527. addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
  2528. ipw_write32(priv, addr, 0);
  2529. }
  2530. /* no ucode (yet) */
  2531. memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
  2532. /* destroy DMA queues */
  2533. /* reset sequence */
  2534. ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
  2535. ipw_arc_release(priv);
  2536. ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
  2537. mdelay(1);
  2538. /* reset PHY */
  2539. ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
  2540. mdelay(1);
  2541. ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
  2542. mdelay(1);
  2543. /* enable ucode store */
  2544. ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
  2545. ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
  2546. mdelay(1);
  2547. /* write ucode */
  2548. /**
  2549. * @bug
  2550. * Do NOT set indirect address register once and then
  2551. * store data to indirect data register in the loop.
2552. * It seems very reasonable, but in this case DINO does not
2553. * accept the ucode. It is essential to set the address each time.
  2554. */
  2555. /* load new ipw uCode */
  2556. for (i = 0; i < len / 2; i++)
  2557. ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
  2558. le16_to_cpu(image[i]));
  2559. /* enable DINO */
  2560. ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
  2561. ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
2562. /* this is where the igx / win driver deviates from the VAP driver. */
  2563. /* wait for alive response */
  2564. for (i = 0; i < 100; i++) {
  2565. /* poll for incoming data */
  2566. cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
  2567. if (cr & DINO_RXFIFO_DATA)
  2568. break;
  2569. mdelay(1);
  2570. }
  2571. if (cr & DINO_RXFIFO_DATA) {
2572. /* alive_command_response size is NOT a multiple of 4 */
  2573. __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
  2574. for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
  2575. response_buffer[i] =
  2576. cpu_to_le32(ipw_read_reg32(priv,
  2577. IPW_BASEBAND_RX_FIFO_READ));
  2578. memcpy(&priv->dino_alive, response_buffer,
  2579. sizeof(priv->dino_alive));
  2580. if (priv->dino_alive.alive_command == 1
  2581. && priv->dino_alive.ucode_valid == 1) {
  2582. rc = 0;
  2583. IPW_DEBUG_INFO
  2584. ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
  2585. "of %02d/%02d/%02d %02d:%02d\n",
  2586. priv->dino_alive.software_revision,
  2587. priv->dino_alive.software_revision,
  2588. priv->dino_alive.device_identifier,
  2589. priv->dino_alive.device_identifier,
  2590. priv->dino_alive.time_stamp[0],
  2591. priv->dino_alive.time_stamp[1],
  2592. priv->dino_alive.time_stamp[2],
  2593. priv->dino_alive.time_stamp[3],
  2594. priv->dino_alive.time_stamp[4]);
  2595. } else {
  2596. IPW_DEBUG_INFO("Microcode is not alive\n");
  2597. rc = -EINVAL;
  2598. }
  2599. } else {
  2600. IPW_DEBUG_INFO("No alive response from DINO\n");
  2601. rc = -ETIME;
  2602. }
2603. /* disable DINO, otherwise for some reason
2604. the firmware has problems getting the alive response */
  2605. ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
  2606. return rc;
  2607. }
  2608. static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
  2609. {
  2610. int rc = -1;
  2611. int offset = 0;
  2612. struct fw_chunk *chunk;
  2613. dma_addr_t shared_phys;
  2614. u8 *shared_virt;
2615. IPW_DEBUG_TRACE(">> : \n");
  2616. shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
  2617. if (!shared_virt)
  2618. return -ENOMEM;
  2619. memmove(shared_virt, data, len);
  2620. /* Start the Dma */
  2621. rc = ipw_fw_dma_enable(priv);
  2622. if (priv->sram_desc.last_cb_index > 0) {
2623. /* the DMA is already ready; this would be a bug */
  2624. BUG();
  2625. goto out;
  2626. }
  2627. do {
  2628. chunk = (struct fw_chunk *)(data + offset);
  2629. offset += sizeof(struct fw_chunk);
  2630. /* build DMA packet and queue up for sending */
2631. /* DMA to chunk->address, the chunk->length bytes from data +
2632. * offset */
  2633. /* Dma loading */
  2634. rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
  2635. le32_to_cpu(chunk->address),
  2636. le32_to_cpu(chunk->length));
  2637. if (rc) {
  2638. IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
  2639. goto out;
  2640. }
  2641. offset += le32_to_cpu(chunk->length);
  2642. } while (offset < len);
  2643. /* Run the DMA and wait for the answer */
  2644. rc = ipw_fw_dma_kick(priv);
  2645. if (rc) {
  2646. IPW_ERROR("dmaKick Failed\n");
  2647. goto out;
  2648. }
  2649. rc = ipw_fw_dma_wait(priv);
  2650. if (rc) {
  2651. IPW_ERROR("dmaWaitSync Failed\n");
  2652. goto out;
  2653. }
  2654. out:
  2655. pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
  2656. return rc;
  2657. }
  2658. /* stop nic */
  2659. static int ipw_stop_nic(struct ipw_priv *priv)
  2660. {
  2661. int rc = 0;
  2662. /* stop */
  2663. ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
  2664. rc = ipw_poll_bit(priv, IPW_RESET_REG,
  2665. IPW_RESET_REG_MASTER_DISABLED, 500);
  2666. if (rc < 0) {
  2667. IPW_ERROR("wait for reg master disabled failed after 500ms\n");
  2668. return rc;
  2669. }
  2670. ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
  2671. return rc;
  2672. }
  2673. static void ipw_start_nic(struct ipw_priv *priv)
  2674. {
  2675. IPW_DEBUG_TRACE(">>\n");
  2676. /* prvHwStartNic release ARC */
  2677. ipw_clear_bit(priv, IPW_RESET_REG,
  2678. IPW_RESET_REG_MASTER_DISABLED |
  2679. IPW_RESET_REG_STOP_MASTER |
  2680. CBD_RESET_REG_PRINCETON_RESET);
  2681. /* enable power management */
  2682. ipw_set_bit(priv, IPW_GP_CNTRL_RW,
  2683. IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
  2684. IPW_DEBUG_TRACE("<<\n");
  2685. }
  2686. static int ipw_init_nic(struct ipw_priv *priv)
  2687. {
  2688. int rc;
  2689. IPW_DEBUG_TRACE(">>\n");
  2690. /* reset */
  2691. /*prvHwInitNic */
  2692. /* set "initialization complete" bit to move adapter to D0 state */
  2693. ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
  2694. /* low-level PLL activation */
  2695. ipw_write32(priv, IPW_READ_INT_REGISTER,
  2696. IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
  2697. /* wait for clock stabilization */
  2698. rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
  2699. IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
  2700. if (rc < 0)
2701. IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
  2702. /* assert SW reset */
  2703. ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
  2704. udelay(10);
  2705. /* set "initialization complete" bit to move adapter to D0 state */
  2706. ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
2707. IPW_DEBUG_TRACE("<<\n");
  2708. return 0;
  2709. }
2710. /* Call this function from process context; it will sleep in request_firmware.
2711. * Probe is an OK place to call this from.
  2712. */
  2713. static int ipw_reset_nic(struct ipw_priv *priv)
  2714. {
  2715. int rc = 0;
  2716. unsigned long flags;
  2717. IPW_DEBUG_TRACE(">>\n");
  2718. rc = ipw_init_nic(priv);
  2719. spin_lock_irqsave(&priv->lock, flags);
  2720. /* Clear the 'host command active' bit... */
  2721. priv->status &= ~STATUS_HCMD_ACTIVE;
  2722. wake_up_interruptible(&priv->wait_command_queue);
  2723. priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
  2724. wake_up_interruptible(&priv->wait_state);
  2725. spin_unlock_irqrestore(&priv->lock, flags);
  2726. IPW_DEBUG_TRACE("<<\n");
  2727. return rc;
  2728. }
  2729. struct ipw_fw {
  2730. __le32 ver;
  2731. __le32 boot_size;
  2732. __le32 ucode_size;
  2733. __le32 fw_size;
  2734. u8 data[0];
  2735. };
  2736. static int ipw_get_fw(struct ipw_priv *priv,
  2737. const struct firmware **raw, const char *name)
  2738. {
  2739. struct ipw_fw *fw;
  2740. int rc;
  2741. /* ask firmware_class module to get the boot firmware off disk */
  2742. rc = request_firmware(raw, name, &priv->pci_dev->dev);
  2743. if (rc < 0) {
  2744. IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
  2745. return rc;
  2746. }
  2747. if ((*raw)->size < sizeof(*fw)) {
  2748. IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
  2749. return -EINVAL;
  2750. }
  2751. fw = (void *)(*raw)->data;
  2752. if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
  2753. le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
  2754. IPW_ERROR("%s is too small or corrupt (%zd)\n",
  2755. name, (*raw)->size);
  2756. return -EINVAL;
  2757. }
  2758. IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
  2759. name,
  2760. le32_to_cpu(fw->ver) >> 16,
  2761. le32_to_cpu(fw->ver) & 0xff,
  2762. (*raw)->size - sizeof(*fw));
  2763. return 0;
  2764. }
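/*
 * Layout recap (illustrative sizes, editor's note): with
 * boot_size = 0x1000 and ucode_size = 0x2000, the boot image starts at
 * fw->data[0], the ucode at fw->data[0x1000] and the main firmware at
 * fw->data[0x3000]; ipw_load() below slices the blob exactly this way.
 */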
  2765. #define IPW_RX_BUF_SIZE (3000)
  2766. static void ipw_rx_queue_reset(struct ipw_priv *priv,
  2767. struct ipw_rx_queue *rxq)
  2768. {
  2769. unsigned long flags;
  2770. int i;
  2771. spin_lock_irqsave(&rxq->lock, flags);
  2772. INIT_LIST_HEAD(&rxq->rx_free);
  2773. INIT_LIST_HEAD(&rxq->rx_used);
  2774. /* Fill the rx_used queue with _all_ of the Rx buffers */
  2775. for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
  2776. /* In the reset function, these buffers may have been allocated
  2777. * to an SKB, so we need to unmap and free potential storage */
  2778. if (rxq->pool[i].skb != NULL) {
  2779. pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
  2780. IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
  2781. dev_kfree_skb(rxq->pool[i].skb);
  2782. rxq->pool[i].skb = NULL;
  2783. }
  2784. list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
  2785. }
  2786. /* Set us so that we have processed and used all buffers, but have
  2787. * not restocked the Rx queue with fresh buffers */
  2788. rxq->read = rxq->write = 0;
  2789. rxq->free_count = 0;
  2790. spin_unlock_irqrestore(&rxq->lock, flags);
  2791. }
  2792. #ifdef CONFIG_PM
  2793. static int fw_loaded = 0;
  2794. static const struct firmware *raw = NULL;
  2795. static void free_firmware(void)
  2796. {
  2797. if (fw_loaded) {
  2798. release_firmware(raw);
  2799. raw = NULL;
  2800. fw_loaded = 0;
  2801. }
  2802. }
  2803. #else
  2804. #define free_firmware() do {} while (0)
  2805. #endif
  2806. static int ipw_load(struct ipw_priv *priv)
  2807. {
  2808. #ifndef CONFIG_PM
  2809. const struct firmware *raw = NULL;
  2810. #endif
  2811. struct ipw_fw *fw;
  2812. u8 *boot_img, *ucode_img, *fw_img;
  2813. u8 *name = NULL;
  2814. int rc = 0, retries = 3;
  2815. switch (priv->ieee->iw_mode) {
  2816. case IW_MODE_ADHOC:
  2817. name = "ipw2200-ibss.fw";
  2818. break;
  2819. #ifdef CONFIG_IPW2200_MONITOR
  2820. case IW_MODE_MONITOR:
  2821. name = "ipw2200-sniffer.fw";
  2822. break;
  2823. #endif
  2824. case IW_MODE_INFRA:
  2825. name = "ipw2200-bss.fw";
  2826. break;
  2827. }
  2828. if (!name) {
  2829. rc = -EINVAL;
  2830. goto error;
  2831. }
  2832. #ifdef CONFIG_PM
  2833. if (!fw_loaded) {
  2834. #endif
  2835. rc = ipw_get_fw(priv, &raw, name);
  2836. if (rc < 0)
  2837. goto error;
  2838. #ifdef CONFIG_PM
  2839. }
  2840. #endif
  2841. fw = (void *)raw->data;
  2842. boot_img = &fw->data[0];
  2843. ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
  2844. fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
  2845. le32_to_cpu(fw->ucode_size)];
  2846. if (rc < 0)
  2847. goto error;
  2848. if (!priv->rxq)
  2849. priv->rxq = ipw_rx_queue_alloc(priv);
  2850. else
  2851. ipw_rx_queue_reset(priv, priv->rxq);
  2852. if (!priv->rxq) {
  2853. IPW_ERROR("Unable to initialize Rx queue\n");
  2854. goto error;
  2855. }
  2856. retry:
  2857. /* Ensure interrupts are disabled */
  2858. ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
  2859. priv->status &= ~STATUS_INT_ENABLED;
  2860. /* ack pending interrupts */
  2861. ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
  2862. ipw_stop_nic(priv);
  2863. rc = ipw_reset_nic(priv);
  2864. if (rc < 0) {
  2865. IPW_ERROR("Unable to reset NIC\n");
  2866. goto error;
  2867. }
  2868. ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
  2869. IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
  2870. /* DMA the initial boot firmware into the device */
  2871. rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
  2872. if (rc < 0) {
  2873. IPW_ERROR("Unable to load boot firmware: %d\n", rc);
  2874. goto error;
  2875. }
  2876. /* kick start the device */
  2877. ipw_start_nic(priv);
  2878. /* wait for the device to finish its initial startup sequence */
  2879. rc = ipw_poll_bit(priv, IPW_INTA_RW,
  2880. IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
  2881. if (rc < 0) {
  2882. IPW_ERROR("device failed to boot initial fw image\n");
  2883. goto error;
  2884. }
  2885. IPW_DEBUG_INFO("initial device response after %dms\n", rc);
  2886. /* ack fw init done interrupt */
  2887. ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
  2888. /* DMA the ucode into the device */
  2889. rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
  2890. if (rc < 0) {
  2891. IPW_ERROR("Unable to load ucode: %d\n", rc);
  2892. goto error;
  2893. }
  2894. /* stop nic */
  2895. ipw_stop_nic(priv);
  2896. /* DMA bss firmware into the device */
  2897. rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
  2898. if (rc < 0) {
  2899. IPW_ERROR("Unable to load firmware: %d\n", rc);
  2900. goto error;
  2901. }
  2902. #ifdef CONFIG_PM
  2903. fw_loaded = 1;
  2904. #endif
  2905. ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
  2906. rc = ipw_queue_reset(priv);
  2907. if (rc < 0) {
  2908. IPW_ERROR("Unable to initialize queues\n");
  2909. goto error;
  2910. }
  2911. /* Ensure interrupts are disabled */
  2912. ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
  2913. /* ack pending interrupts */
  2914. ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
  2915. /* kick start the device */
  2916. ipw_start_nic(priv);
  2917. if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
  2918. if (retries > 0) {
  2919. IPW_WARNING("Parity error. Retrying init.\n");
  2920. retries--;
  2921. goto retry;
  2922. }
  2923. IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
  2924. rc = -EIO;
  2925. goto error;
  2926. }
  2927. /* wait for the device */
  2928. rc = ipw_poll_bit(priv, IPW_INTA_RW,
  2929. IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
  2930. if (rc < 0) {
  2931. IPW_ERROR("device failed to start within 500ms\n");
  2932. goto error;
  2933. }
  2934. IPW_DEBUG_INFO("device response after %dms\n", rc);
  2935. /* ack fw init done interrupt */
  2936. ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
  2937. /* read eeprom data and initialize the eeprom region of sram */
  2938. priv->eeprom_delay = 1;
  2939. ipw_eeprom_init_sram(priv);
  2940. /* enable interrupts */
  2941. ipw_enable_interrupts(priv);
  2942. /* Ensure our queue has valid packets */
  2943. ipw_rx_queue_replenish(priv);
  2944. ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
  2945. /* ack pending interrupts */
  2946. ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
  2947. #ifndef CONFIG_PM
  2948. release_firmware(raw);
  2949. #endif
  2950. return 0;
  2951. error:
  2952. if (priv->rxq) {
  2953. ipw_rx_queue_free(priv, priv->rxq);
  2954. priv->rxq = NULL;
  2955. }
  2956. ipw_tx_queue_free(priv);
  2957. if (raw)
  2958. release_firmware(raw);
  2959. #ifdef CONFIG_PM
  2960. fw_loaded = 0;
  2961. raw = NULL;
  2962. #endif
  2963. return rc;
  2964. }
  2965. /**
  2966. * DMA services
  2967. *
  2968. * Theory of operation
  2969. *
2970. * A queue is a circular buffer with 'Read' and 'Write' pointers.
2971. * Two empty entries are always kept in the buffer to protect from overflow.
2972. *
2973. * For the Tx queues, there are low mark and high mark limits. If, after
2974. * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
2975. * is stopped. When reclaiming packets (on the 'tx done' IRQ), if the free
2976. * space becomes > high mark, the Tx queue is resumed.
  2977. *
  2978. * The IPW operates with six queues, one receive queue in the device's
  2979. * sram, one transmit queue for sending commands to the device firmware,
  2980. * and four transmit queues for data.
  2981. *
  2982. * The four transmit queues allow for performing quality of service (qos)
  2983. * transmissions as per the 802.11 protocol. Currently Linux does not
  2984. * provide a mechanism to the user for utilizing prioritized queues, so
  2985. * we only utilize the first data transmit queue (queue1).
  2986. */
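/*
 * Worked example for the space computation used below (editor's note,
 * with an illustrative ring size of 64): with read = 10 and write = 60,
 * 10 - 60 = -50, adding the ring size gives 14 slots, and reserving the
 * two guard entries leaves 12 usable slots.
 */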
  2987. /**
  2988. * Driver allocates buffers of this size for Rx
  2989. */
  2990. /**
  2991. * ipw_rx_queue_space - Return number of free slots available in queue.
  2992. */
  2993. static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
  2994. {
  2995. int s = q->read - q->write;
  2996. if (s <= 0)
  2997. s += RX_QUEUE_SIZE;
  2998. /* keep some buffer to not confuse full and empty queue */
  2999. s -= 2;
  3000. if (s < 0)
  3001. s = 0;
  3002. return s;
  3003. }
  3004. static inline int ipw_tx_queue_space(const struct clx2_queue *q)
  3005. {
  3006. int s = q->last_used - q->first_empty;
  3007. if (s <= 0)
  3008. s += q->n_bd;
  3009. s -= 2; /* keep some reserve to not confuse empty and full situations */
  3010. if (s < 0)
  3011. s = 0;
  3012. return s;
  3013. }
  3014. static inline int ipw_queue_inc_wrap(int index, int n_bd)
  3015. {
  3016. return (++index == n_bd) ? 0 : index;
  3017. }
  3018. /**
  3019. * Initialize common DMA queue structure
  3020. *
  3021. * @param q queue to init
  3022. * @param count Number of BD's to allocate. Should be power of 2
  3023. * @param read_register Address for 'read' register
  3024. * (not offset within BAR, full address)
  3025. * @param write_register Address for 'write' register
  3026. * (not offset within BAR, full address)
  3027. * @param base_register Address for 'base' register
  3028. * (not offset within BAR, full address)
  3029. * @param size Address for 'size' register
  3030. * (not offset within BAR, full address)
  3031. */
  3032. static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
  3033. int count, u32 read, u32 write, u32 base, u32 size)
  3034. {
  3035. q->n_bd = count;
  3036. q->low_mark = q->n_bd / 4;
  3037. if (q->low_mark < 4)
  3038. q->low_mark = 4;
  3039. q->high_mark = q->n_bd / 8;
  3040. if (q->high_mark < 2)
  3041. q->high_mark = 2;
  3042. q->first_empty = q->last_used = 0;
  3043. q->reg_r = read;
  3044. q->reg_w = write;
  3045. ipw_write32(priv, base, q->dma_addr);
  3046. ipw_write32(priv, size, count);
  3047. ipw_write32(priv, read, 0);
  3048. ipw_write32(priv, write, 0);
  3049. _ipw_read32(priv, 0x90);
  3050. }
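/*
 * Editor's sketch (not part of the driver): a 64-entry queue would be
 * wired up roughly as below; the CSR_* names are placeholders, not the
 * real register offsets used by this hardware.
 *
 *	ipw_queue_init(priv, &txq->q, 64, CSR_READ_IDX, CSR_WRITE_IDX,
 *		       CSR_BASE, CSR_SIZE);
 */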
  3051. static int ipw_queue_tx_init(struct ipw_priv *priv,
  3052. struct clx2_tx_queue *q,
  3053. int count, u32 read, u32 write, u32 base, u32 size)
  3054. {
  3055. struct pci_dev *dev = priv->pci_dev;
  3056. q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
  3057. if (!q->txb) {
3058. IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
  3059. return -ENOMEM;
  3060. }
  3061. q->bd =
  3062. pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
  3063. if (!q->bd) {
  3064. IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
  3065. sizeof(q->bd[0]) * count);
  3066. kfree(q->txb);
  3067. q->txb = NULL;
  3068. return -ENOMEM;
  3069. }
  3070. ipw_queue_init(priv, &q->q, count, read, write, base, size);
  3071. return 0;
  3072. }
  3073. /**
3074. * Free one TFD, the one at index [txq->q.last_used].
  3075. * Do NOT advance any indexes
  3076. *
  3077. * @param dev
  3078. * @param txq
  3079. */
  3080. static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
  3081. struct clx2_tx_queue *txq)
  3082. {
  3083. struct tfd_frame *bd = &txq->bd[txq->q.last_used];
  3084. struct pci_dev *dev = priv->pci_dev;
  3085. int i;
  3086. /* classify bd */
  3087. if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
  3088. /* nothing to cleanup after for host commands */
  3089. return;
  3090. /* sanity check */
  3091. if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
  3092. IPW_ERROR("Too many chunks: %i\n",
  3093. le32_to_cpu(bd->u.data.num_chunks));
3094. /** @todo issue a fatal error, it is quite a serious situation */
  3095. return;
  3096. }
  3097. /* unmap chunks if any */
  3098. for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
  3099. pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
  3100. le16_to_cpu(bd->u.data.chunk_len[i]),
  3101. PCI_DMA_TODEVICE);
  3102. if (txq->txb[txq->q.last_used]) {
  3103. ieee80211_txb_free(txq->txb[txq->q.last_used]);
  3104. txq->txb[txq->q.last_used] = NULL;
  3105. }
  3106. }
  3107. }
  3108. /**
  3109. * Deallocate DMA queue.
  3110. *
  3111. * Empty queue by removing and destroying all BD's.
  3112. * Free all buffers.
  3113. *
  3114. * @param dev
  3115. * @param q
  3116. */
  3117. static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
  3118. {
  3119. struct clx2_queue *q = &txq->q;
  3120. struct pci_dev *dev = priv->pci_dev;
  3121. if (q->n_bd == 0)
  3122. return;
  3123. /* first, empty all BD's */
  3124. for (; q->first_empty != q->last_used;
  3125. q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
  3126. ipw_queue_tx_free_tfd(priv, txq);
  3127. }
  3128. /* free buffers belonging to queue itself */
  3129. pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
  3130. q->dma_addr);
  3131. kfree(txq->txb);
  3132. /* 0 fill whole structure */
  3133. memset(txq, 0, sizeof(*txq));
  3134. }
  3135. /**
  3136. * Destroy all DMA queues and structures
  3137. *
  3138. * @param priv
  3139. */
  3140. static void ipw_tx_queue_free(struct ipw_priv *priv)
  3141. {
  3142. /* Tx CMD queue */
  3143. ipw_queue_tx_free(priv, &priv->txq_cmd);
  3144. /* Tx queues */
  3145. ipw_queue_tx_free(priv, &priv->txq[0]);
  3146. ipw_queue_tx_free(priv, &priv->txq[1]);
  3147. ipw_queue_tx_free(priv, &priv->txq[2]);
  3148. ipw_queue_tx_free(priv, &priv->txq[3]);
  3149. }
  3150. static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
  3151. {
  3152. /* First 3 bytes are manufacturer */
  3153. bssid[0] = priv->mac_addr[0];
  3154. bssid[1] = priv->mac_addr[1];
  3155. bssid[2] = priv->mac_addr[2];
  3156. /* Last bytes are random */
  3157. get_random_bytes(&bssid[3], ETH_ALEN - 3);
  3158. bssid[0] &= 0xfe; /* clear multicast bit */
  3159. bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
  3160. }
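/*
 * Worked example (editor's note): with an adapter MAC beginning 00:11:22,
 * "&= 0xfe" clears the multicast bit (still 0x00) and "|= 0x02" sets the
 * locally administered bit, so a generated IBSS BSSID looks like
 * 02:11:22:rr:rr:rr with the last three bytes random.
 */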
  3161. static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
  3162. {
  3163. struct ipw_station_entry entry;
  3164. int i;
  3165. for (i = 0; i < priv->num_stations; i++) {
  3166. if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
  3167. /* Another node is active in network */
  3168. priv->missed_adhoc_beacons = 0;
  3169. if (!(priv->config & CFG_STATIC_CHANNEL))
  3170. /* when other nodes drop out, we drop out */
  3171. priv->config &= ~CFG_ADHOC_PERSIST;
  3172. return i;
  3173. }
  3174. }
  3175. if (i == MAX_STATIONS)
  3176. return IPW_INVALID_STATION;
  3177. IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
  3178. entry.reserved = 0;
  3179. entry.support_mode = 0;
  3180. memcpy(entry.mac_addr, bssid, ETH_ALEN);
  3181. memcpy(priv->stations[i], bssid, ETH_ALEN);
  3182. ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
  3183. &entry, sizeof(entry));
  3184. priv->num_stations++;
  3185. return i;
  3186. }
  3187. static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
  3188. {
  3189. int i;
  3190. for (i = 0; i < priv->num_stations; i++)
  3191. if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
  3192. return i;
  3193. return IPW_INVALID_STATION;
  3194. }
  3195. static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
  3196. {
  3197. int err;
  3198. if (priv->status & STATUS_ASSOCIATING) {
  3199. IPW_DEBUG_ASSOC("Disassociating while associating.\n");
  3200. queue_work(priv->workqueue, &priv->disassociate);
  3201. return;
  3202. }
  3203. if (!(priv->status & STATUS_ASSOCIATED)) {
  3204. IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
  3205. return;
  3206. }
3207. IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
  3208. "on channel %d.\n",
  3209. priv->assoc_request.bssid,
  3210. priv->assoc_request.channel);
  3211. priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
  3212. priv->status |= STATUS_DISASSOCIATING;
  3213. if (quiet)
  3214. priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
  3215. else
  3216. priv->assoc_request.assoc_type = HC_DISASSOCIATE;
  3217. err = ipw_send_associate(priv, &priv->assoc_request);
  3218. if (err) {
  3219. IPW_DEBUG_HC("Attempt to send [dis]associate command "
  3220. "failed.\n");
  3221. return;
  3222. }
  3223. }
  3224. static int ipw_disassociate(void *data)
  3225. {
  3226. struct ipw_priv *priv = data;
  3227. if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
  3228. return 0;
  3229. ipw_send_disassociate(data, 0);
  3230. netif_carrier_off(priv->net_dev);
  3231. return 1;
  3232. }
  3233. static void ipw_bg_disassociate(struct work_struct *work)
  3234. {
  3235. struct ipw_priv *priv =
  3236. container_of(work, struct ipw_priv, disassociate);
  3237. mutex_lock(&priv->mutex);
  3238. ipw_disassociate(priv);
  3239. mutex_unlock(&priv->mutex);
  3240. }
  3241. static void ipw_system_config(struct work_struct *work)
  3242. {
  3243. struct ipw_priv *priv =
  3244. container_of(work, struct ipw_priv, system_config);
  3245. #ifdef CONFIG_IPW2200_PROMISCUOUS
  3246. if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
  3247. priv->sys_config.accept_all_data_frames = 1;
  3248. priv->sys_config.accept_non_directed_frames = 1;
  3249. priv->sys_config.accept_all_mgmt_bcpr = 1;
  3250. priv->sys_config.accept_all_mgmt_frames = 1;
  3251. }
  3252. #endif
  3253. ipw_send_system_config(priv);
  3254. }
  3255. struct ipw_status_code {
  3256. u16 status;
  3257. const char *reason;
  3258. };
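/* IEEE 802.11 status codes carried in authentication/association responses,
 * with human-readable descriptions for debug output. */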
  3259. static const struct ipw_status_code ipw_status_codes[] = {
  3260. {0x00, "Successful"},
  3261. {0x01, "Unspecified failure"},
  3262. {0x0A, "Cannot support all requested capabilities in the "
  3263. "Capability information field"},
  3264. {0x0B, "Reassociation denied due to inability to confirm that "
  3265. "association exists"},
  3266. {0x0C, "Association denied due to reason outside the scope of this "
  3267. "standard"},
  3268. {0x0D,
  3269. "Responding station does not support the specified authentication "
  3270. "algorithm"},
  3271. {0x0E,
  3272. "Received an Authentication frame with authentication sequence "
  3273. "transaction sequence number out of expected sequence"},
  3274. {0x0F, "Authentication rejected because of challenge failure"},
  3275. {0x10, "Authentication rejected due to timeout waiting for next "
  3276. "frame in sequence"},
  3277. {0x11, "Association denied because AP is unable to handle additional "
  3278. "associated stations"},
  3279. {0x12,
  3280. "Association denied due to requesting station not supporting all "
  3281. "of the datarates in the BSSBasicServiceSet Parameter"},
  3282. {0x13,
  3283. "Association denied due to requesting station not supporting "
  3284. "short preamble operation"},
  3285. {0x14,
  3286. "Association denied due to requesting station not supporting "
  3287. "PBCC encoding"},
  3288. {0x15,
  3289. "Association denied due to requesting station not supporting "
  3290. "channel agility"},
  3291. {0x19,
  3292. "Association denied due to requesting station not supporting "
  3293. "short slot operation"},
  3294. {0x1A,
  3295. "Association denied due to requesting station not supporting "
  3296. "DSSS-OFDM operation"},
  3297. {0x28, "Invalid Information Element"},
  3298. {0x29, "Group Cipher is not valid"},
  3299. {0x2A, "Pairwise Cipher is not valid"},
  3300. {0x2B, "AKMP is not valid"},
  3301. {0x2C, "Unsupported RSN IE version"},
  3302. {0x2D, "Invalid RSN IE Capabilities"},
  3303. {0x2E, "Cipher suite is rejected per security policy"},
  3304. };
  3305. static const char *ipw_get_status_code(u16 status)
  3306. {
  3307. int i;
  3308. for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
  3309. if (ipw_status_codes[i].status == (status & 0xff))
  3310. return ipw_status_codes[i].reason;
  3311. return "Unknown status value.";
  3312. }
3313. static inline void average_init(struct average *avg)
  3314. {
  3315. memset(avg, 0, sizeof(*avg));
  3316. }
  3317. #define DEPTH_RSSI 8
  3318. #define DEPTH_NOISE 16
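/* One-pole IIR smoothing: each new sample contributes 1/depth of its value,
 * so RSSI (depth 8) tracks changes faster than noise (depth 16). */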
  3319. static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
  3320. {
3321. return ((depth - 1) * prev_avg + val) / depth;
  3322. }
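/* Windowed average over AVG_ENTRIES samples: average_add() overwrites the
 * oldest slot and maintains a running sum, so average_value() is O(1).
 * Before the ring has wrapped once (avg->init), only the filled entries
 * are averaged. */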
  3323. static void average_add(struct average *avg, s16 val)
  3324. {
  3325. avg->sum -= avg->entries[avg->pos];
  3326. avg->sum += val;
  3327. avg->entries[avg->pos++] = val;
  3328. if (unlikely(avg->pos == AVG_ENTRIES)) {
  3329. avg->init = 1;
  3330. avg->pos = 0;
  3331. }
  3332. }
  3333. static s16 average_value(struct average *avg)
  3334. {
  3335. if (!unlikely(avg->init)) {
  3336. if (avg->pos)
  3337. return avg->sum / avg->pos;
  3338. return 0;
  3339. }
  3340. return avg->sum / AVG_ENTRIES;
  3341. }
  3342. static void ipw_reset_stats(struct ipw_priv *priv)
  3343. {
  3344. u32 len = sizeof(u32);
  3345. priv->quality = 0;
  3346. average_init(&priv->average_missed_beacons);
  3347. priv->exp_avg_rssi = -60;
  3348. priv->exp_avg_noise = -85 + 0x100;
  3349. priv->last_rate = 0;
  3350. priv->last_missed_beacons = 0;
  3351. priv->last_rx_packets = 0;
  3352. priv->last_tx_packets = 0;
  3353. priv->last_tx_failures = 0;
  3354. /* Firmware managed, reset only when NIC is restarted, so we have to
  3355. * normalize on the current value */
  3356. ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
  3357. &priv->last_rx_err, &len);
  3358. ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
  3359. &priv->last_tx_failures, &len);
  3360. /* Driver managed, reset with each association */
  3361. priv->missed_adhoc_beacons = 0;
  3362. priv->missed_beacons = 0;
  3363. priv->tx_packets = 0;
  3364. priv->rx_packets = 0;
  3365. }
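/* Return the highest rate (in bit/s) enabled in rates_mask, restricted to
 * CCK rates when associated in B mode; fall back to the mode's nominal
 * maximum if no enabled rate matches. */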
  3366. static u32 ipw_get_max_rate(struct ipw_priv *priv)
  3367. {
  3368. u32 i = 0x80000000;
  3369. u32 mask = priv->rates_mask;
  3370. /* If currently associated in B mode, restrict the maximum
  3371. * rate match to B rates */
  3372. if (priv->assoc_request.ieee_mode == IPW_B_MODE)
  3373. mask &= IEEE80211_CCK_RATES_MASK;
  3374. /* TODO: Verify that the rate is supported by the current rates
  3375. * list. */
  3376. while (i && !(mask & i))
  3377. i >>= 1;
  3378. switch (i) {
  3379. case IEEE80211_CCK_RATE_1MB_MASK:
  3380. return 1000000;
  3381. case IEEE80211_CCK_RATE_2MB_MASK:
  3382. return 2000000;
  3383. case IEEE80211_CCK_RATE_5MB_MASK:
  3384. return 5500000;
  3385. case IEEE80211_OFDM_RATE_6MB_MASK:
  3386. return 6000000;
  3387. case IEEE80211_OFDM_RATE_9MB_MASK:
  3388. return 9000000;
  3389. case IEEE80211_CCK_RATE_11MB_MASK:
  3390. return 11000000;
  3391. case IEEE80211_OFDM_RATE_12MB_MASK:
  3392. return 12000000;
  3393. case IEEE80211_OFDM_RATE_18MB_MASK:
  3394. return 18000000;
  3395. case IEEE80211_OFDM_RATE_24MB_MASK:
  3396. return 24000000;
  3397. case IEEE80211_OFDM_RATE_36MB_MASK:
  3398. return 36000000;
  3399. case IEEE80211_OFDM_RATE_48MB_MASK:
  3400. return 48000000;
  3401. case IEEE80211_OFDM_RATE_54MB_MASK:
  3402. return 54000000;
  3403. }
  3404. if (priv->ieee->mode == IEEE_B)
  3405. return 11000000;
  3406. else
  3407. return 54000000;
  3408. }
  3409. static u32 ipw_get_current_rate(struct ipw_priv *priv)
  3410. {
  3411. u32 rate, len = sizeof(rate);
  3412. int err;
  3413. if (!(priv->status & STATUS_ASSOCIATED))
  3414. return 0;
  3415. if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
  3416. err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
  3417. &len);
  3418. if (err) {
  3419. IPW_DEBUG_INFO("failed querying ordinals.\n");
  3420. return 0;
  3421. }
  3422. } else
  3423. return ipw_get_max_rate(priv);
  3424. switch (rate) {
  3425. case IPW_TX_RATE_1MB:
  3426. return 1000000;
  3427. case IPW_TX_RATE_2MB:
  3428. return 2000000;
  3429. case IPW_TX_RATE_5MB:
  3430. return 5500000;
  3431. case IPW_TX_RATE_6MB:
  3432. return 6000000;
  3433. case IPW_TX_RATE_9MB:
  3434. return 9000000;
  3435. case IPW_TX_RATE_11MB:
  3436. return 11000000;
  3437. case IPW_TX_RATE_12MB:
  3438. return 12000000;
  3439. case IPW_TX_RATE_18MB:
  3440. return 18000000;
  3441. case IPW_TX_RATE_24MB:
  3442. return 24000000;
  3443. case IPW_TX_RATE_36MB:
  3444. return 36000000;
  3445. case IPW_TX_RATE_48MB:
  3446. return 48000000;
  3447. case IPW_TX_RATE_54MB:
  3448. return 54000000;
  3449. }
  3450. return 0;
  3451. }
  3452. #define IPW_STATS_INTERVAL (2 * HZ)
  3453. static void ipw_gather_stats(struct ipw_priv *priv)
  3454. {
  3455. u32 rx_err, rx_err_delta, rx_packets_delta;
  3456. u32 tx_failures, tx_failures_delta, tx_packets_delta;
  3457. u32 missed_beacons_percent, missed_beacons_delta;
  3458. u32 quality = 0;
  3459. u32 len = sizeof(u32);
  3460. s16 rssi;
  3461. u32 beacon_quality, signal_quality, tx_quality, rx_quality,
  3462. rate_quality;
  3463. u32 max_rate;
  3464. if (!(priv->status & STATUS_ASSOCIATED)) {
  3465. priv->quality = 0;
  3466. return;
  3467. }
  3468. /* Update the statistics */
  3469. ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
  3470. &priv->missed_beacons, &len);
  3471. missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
  3472. priv->last_missed_beacons = priv->missed_beacons;
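/* Approximate the percentage of expected beacons missed during the 2 second
 * stats interval; beacon_interval is in TU (roughly 1 ms each), so this
 * works out to delta * beacon_interval / 20. */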
  3473. if (priv->assoc_request.beacon_interval) {
  3474. missed_beacons_percent = missed_beacons_delta *
  3475. (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
  3476. (IPW_STATS_INTERVAL * 10);
  3477. } else {
  3478. missed_beacons_percent = 0;
  3479. }
  3480. average_add(&priv->average_missed_beacons, missed_beacons_percent);
  3481. ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
  3482. rx_err_delta = rx_err - priv->last_rx_err;
  3483. priv->last_rx_err = rx_err;
  3484. ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
  3485. tx_failures_delta = tx_failures - priv->last_tx_failures;
  3486. priv->last_tx_failures = tx_failures;
  3487. rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
  3488. priv->last_rx_packets = priv->rx_packets;
  3489. tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
  3490. priv->last_tx_packets = priv->tx_packets;
  3491. /* Calculate quality based on the following:
  3492. *
  3493. * Missed beacon: 100% = 0, 0% = 70% missed
  3494. * Rate: 60% = 1Mbs, 100% = Max
  3495. * Rx and Tx errors represent a straight % of total Rx/Tx
  3496. * RSSI: 100% = > -50, 0% = < -80
  3497. * Rx errors: 100% = 0, 0% = 50% missed
  3498. *
  3499. * The lowest computed quality is used.
  3500. *
  3501. */
  3502. #define BEACON_THRESHOLD 5
  3503. beacon_quality = 100 - missed_beacons_percent;
  3504. if (beacon_quality < BEACON_THRESHOLD)
  3505. beacon_quality = 0;
  3506. else
  3507. beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
  3508. (100 - BEACON_THRESHOLD);
  3509. IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
  3510. beacon_quality, missed_beacons_percent);
  3511. priv->last_rate = ipw_get_current_rate(priv);
  3512. max_rate = ipw_get_max_rate(priv);
  3513. rate_quality = priv->last_rate * 40 / max_rate + 60;
  3514. IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
  3515. rate_quality, priv->last_rate / 1000000);
  3516. if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
  3517. rx_quality = 100 - (rx_err_delta * 100) /
  3518. (rx_packets_delta + rx_err_delta);
  3519. else
  3520. rx_quality = 100;
  3521. IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
  3522. rx_quality, rx_err_delta, rx_packets_delta);
  3523. if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
  3524. tx_quality = 100 - (tx_failures_delta * 100) /
  3525. (tx_packets_delta + tx_failures_delta);
  3526. else
  3527. tx_quality = 100;
  3528. IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
  3529. tx_quality, tx_failures_delta, tx_packets_delta);
  3530. rssi = priv->exp_avg_rssi;
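/* Map the smoothed RSSI onto a 0-100 scale between worst_rssi and
 * perfect_rssi; the quadratic term makes quality fall off faster as the
 * signal approaches worst_rssi. */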
  3531. signal_quality =
  3532. (100 *
  3533. (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
  3534. (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
  3535. (priv->ieee->perfect_rssi - rssi) *
  3536. (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
  3537. 62 * (priv->ieee->perfect_rssi - rssi))) /
  3538. ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
  3539. (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
  3540. if (signal_quality > 100)
  3541. signal_quality = 100;
  3542. else if (signal_quality < 1)
  3543. signal_quality = 0;
  3544. IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
  3545. signal_quality, rssi);
  3546. quality = min(beacon_quality,
  3547. min(rate_quality,
  3548. min(tx_quality, min(rx_quality, signal_quality))));
  3549. if (quality == beacon_quality)
  3550. IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
  3551. quality);
  3552. if (quality == rate_quality)
  3553. IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
  3554. quality);
  3555. if (quality == tx_quality)
  3556. IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
  3557. quality);
  3558. if (quality == rx_quality)
  3559. IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
  3560. quality);
  3561. if (quality == signal_quality)
  3562. IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
  3563. quality);
  3564. priv->quality = quality;
  3565. queue_delayed_work(priv->workqueue, &priv->gather_stats,
  3566. IPW_STATS_INTERVAL);
  3567. }
  3568. static void ipw_bg_gather_stats(struct work_struct *work)
  3569. {
  3570. struct ipw_priv *priv =
  3571. container_of(work, struct ipw_priv, gather_stats.work);
  3572. mutex_lock(&priv->mutex);
  3573. ipw_gather_stats(priv);
  3574. mutex_unlock(&priv->mutex);
  3575. }
  3576. /* Missed beacon behavior:
  3577. * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
  3578. * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
  3579. * Above disassociate threshold, give up and stop scanning.
  3580. * Roaming is disabled if disassociate_threshold <= roaming_threshold */
  3581. static void ipw_handle_missed_beacon(struct ipw_priv *priv,
  3582. int missed_count)
  3583. {
  3584. priv->notif_missed_beacons = missed_count;
  3585. if (missed_count > priv->disassociate_threshold &&
  3586. priv->status & STATUS_ASSOCIATED) {
  3587. /* If associated and we've hit the missed
  3588. * beacon threshold, disassociate, turn
  3589. * off roaming, and abort any active scans */
  3590. IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
  3591. IPW_DL_STATE | IPW_DL_ASSOC,
  3592. "Missed beacon: %d - disassociate\n", missed_count);
  3593. priv->status &= ~STATUS_ROAMING;
  3594. if (priv->status & STATUS_SCANNING) {
  3595. IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
  3596. IPW_DL_STATE,
  3597. "Aborting scan with missed beacon.\n");
  3598. queue_work(priv->workqueue, &priv->abort_scan);
  3599. }
  3600. queue_work(priv->workqueue, &priv->disassociate);
  3601. return;
  3602. }
  3603. if (priv->status & STATUS_ROAMING) {
  3604. /* If we are currently roaming, then just
  3605. * print a debug statement... */
  3606. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
  3607. "Missed beacon: %d - roam in progress\n",
  3608. missed_count);
  3609. return;
  3610. }
  3611. if (roaming &&
  3612. (missed_count > priv->roaming_threshold &&
  3613. missed_count <= priv->disassociate_threshold)) {
  3614. /* If we are not already roaming, set the ROAM
  3615. * bit in the status and kick off a scan.
  3616. * This can happen several times before we reach
  3617. * disassociate_threshold. */
  3618. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
  3619. "Missed beacon: %d - initiate "
  3620. "roaming\n", missed_count);
  3621. if (!(priv->status & STATUS_ROAMING)) {
  3622. priv->status |= STATUS_ROAMING;
  3623. if (!(priv->status & STATUS_SCANNING))
  3624. queue_delayed_work(priv->workqueue,
  3625. &priv->request_scan, 0);
  3626. }
  3627. return;
  3628. }
  3629. if (priv->status & STATUS_SCANNING &&
  3630. missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
  3631. /* Stop scan to keep fw from getting
  3632. * stuck (only if we aren't roaming --
  3633. * otherwise we'll never scan more than 2 or 3
  3634. * channels..) */
  3635. IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
  3636. "Aborting scan with missed beacon.\n");
  3637. queue_work(priv->workqueue, &priv->abort_scan);
  3638. }
  3639. IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
  3640. }
  3641. static void ipw_scan_event(struct work_struct *work)
  3642. {
  3643. union iwreq_data wrqu;
  3644. struct ipw_priv *priv =
  3645. container_of(work, struct ipw_priv, scan_event.work);
  3646. wrqu.data.length = 0;
  3647. wrqu.data.flags = 0;
  3648. wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
  3649. }
  3650. static void handle_scan_event(struct ipw_priv *priv)
  3651. {
  3652. /* Only userspace-requested scan completion events go out immediately */
  3653. if (!priv->user_requested_scan) {
  3654. if (!delayed_work_pending(&priv->scan_event))
  3655. queue_delayed_work(priv->workqueue, &priv->scan_event,
  3656. round_jiffies_relative(msecs_to_jiffies(4000)));
  3657. } else {
  3658. union iwreq_data wrqu;
  3659. priv->user_requested_scan = 0;
  3660. cancel_delayed_work(&priv->scan_event);
  3661. wrqu.data.length = 0;
  3662. wrqu.data.flags = 0;
  3663. wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
  3664. }
  3665. }
  3666. /**
  3667. * Handle host notification packet.
  3668. * Called from interrupt routine
  3669. */
  3670. static void ipw_rx_notification(struct ipw_priv *priv,
  3671. struct ipw_rx_notification *notif)
  3672. {
  3673. DECLARE_SSID_BUF(ssid);
  3674. u16 size = le16_to_cpu(notif->size);
  3675. notif->size = le16_to_cpu(notif->size);
  3676. IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
  3677. switch (notif->subtype) {
  3678. case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
  3679. struct notif_association *assoc = &notif->u.assoc;
  3680. switch (assoc->state) {
  3681. case CMAS_ASSOCIATED:{
  3682. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3683. IPW_DL_ASSOC,
  3684. "associated: '%s' %pM \n",
  3685. print_ssid(ssid, priv->essid,
  3686. priv->essid_len),
  3687. priv->bssid);
  3688. switch (priv->ieee->iw_mode) {
  3689. case IW_MODE_INFRA:
  3690. memcpy(priv->ieee->bssid,
  3691. priv->bssid, ETH_ALEN);
  3692. break;
  3693. case IW_MODE_ADHOC:
  3694. memcpy(priv->ieee->bssid,
  3695. priv->bssid, ETH_ALEN);
  3696. /* clear out the station table */
  3697. priv->num_stations = 0;
  3698. IPW_DEBUG_ASSOC
  3699. ("queueing adhoc check\n");
  3700. queue_delayed_work(priv->
  3701. workqueue,
  3702. &priv->
  3703. adhoc_check,
  3704. le16_to_cpu(priv->
  3705. assoc_request.
  3706. beacon_interval));
  3707. break;
  3708. }
  3709. priv->status &= ~STATUS_ASSOCIATING;
  3710. priv->status |= STATUS_ASSOCIATED;
  3711. queue_work(priv->workqueue,
  3712. &priv->system_config);
  3713. #ifdef CONFIG_IPW2200_QOS
  3714. #define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
  3715. le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
  3716. if ((priv->status & STATUS_AUTH) &&
  3717. (IPW_GET_PACKET_STYPE(&notif->u.raw)
  3718. == IEEE80211_STYPE_ASSOC_RESP)) {
  3719. if ((sizeof
  3720. (struct
  3721. ieee80211_assoc_response)
  3722. <= size)
  3723. && (size <= 2314)) {
  3724. struct
  3725. ieee80211_rx_stats
  3726. stats = {
  3727. .len = size - 1,
  3728. };
  3729. IPW_DEBUG_QOS
  3730. ("QoS Associate "
  3731. "size %d\n", size);
  3732. ieee80211_rx_mgt(priv->
  3733. ieee,
  3734. (struct
  3735. ieee80211_hdr_4addr
  3736. *)
  3737. &notif->u.raw, &stats);
  3738. }
  3739. }
  3740. #endif
  3741. schedule_work(&priv->link_up);
  3742. break;
  3743. }
  3744. case CMAS_AUTHENTICATED:{
  3745. if (priv->
  3746. status & (STATUS_ASSOCIATED |
  3747. STATUS_AUTH)) {
  3748. struct notif_authenticate *auth
  3749. = &notif->u.auth;
  3750. IPW_DEBUG(IPW_DL_NOTIF |
  3751. IPW_DL_STATE |
  3752. IPW_DL_ASSOC,
  3753. "deauthenticated: '%s' "
  3754. "%pM"
  3755. ": (0x%04X) - %s \n",
  3756. print_ssid(ssid,
  3757. priv->
  3758. essid,
  3759. priv->
  3760. essid_len),
  3761. priv->bssid,
  3762. le16_to_cpu(auth->status),
  3763. ipw_get_status_code
  3764. (le16_to_cpu
  3765. (auth->status)));
  3766. priv->status &=
  3767. ~(STATUS_ASSOCIATING |
  3768. STATUS_AUTH |
  3769. STATUS_ASSOCIATED);
  3770. schedule_work(&priv->link_down);
  3771. break;
  3772. }
  3773. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3774. IPW_DL_ASSOC,
  3775. "authenticated: '%s' %pM\n",
  3776. print_ssid(ssid, priv->essid,
  3777. priv->essid_len),
  3778. priv->bssid);
  3779. break;
  3780. }
  3781. case CMAS_INIT:{
  3782. if (priv->status & STATUS_AUTH) {
  3783. struct
  3784. ieee80211_assoc_response
  3785. *resp;
  3786. resp =
  3787. (struct
  3788. ieee80211_assoc_response
  3789. *)&notif->u.raw;
  3790. IPW_DEBUG(IPW_DL_NOTIF |
  3791. IPW_DL_STATE |
  3792. IPW_DL_ASSOC,
  3793. "association failed (0x%04X): %s\n",
  3794. le16_to_cpu(resp->status),
  3795. ipw_get_status_code
  3796. (le16_to_cpu
  3797. (resp->status)));
  3798. }
  3799. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3800. IPW_DL_ASSOC,
  3801. "disassociated: '%s' %pM \n",
  3802. print_ssid(ssid, priv->essid,
  3803. priv->essid_len),
  3804. priv->bssid);
  3805. priv->status &=
  3806. ~(STATUS_DISASSOCIATING |
  3807. STATUS_ASSOCIATING |
  3808. STATUS_ASSOCIATED | STATUS_AUTH);
  3809. if (priv->assoc_network
  3810. && (priv->assoc_network->
  3811. capability &
  3812. WLAN_CAPABILITY_IBSS))
  3813. ipw_remove_current_network
  3814. (priv);
  3815. schedule_work(&priv->link_down);
  3816. break;
  3817. }
  3818. case CMAS_RX_ASSOC_RESP:
  3819. break;
  3820. default:
  3821. IPW_ERROR("assoc: unknown (%d)\n",
  3822. assoc->state);
  3823. break;
  3824. }
  3825. break;
  3826. }
  3827. case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
  3828. struct notif_authenticate *auth = &notif->u.auth;
  3829. switch (auth->state) {
  3830. case CMAS_AUTHENTICATED:
  3831. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
  3832. "authenticated: '%s' %pM \n",
  3833. print_ssid(ssid, priv->essid,
  3834. priv->essid_len),
  3835. priv->bssid);
  3836. priv->status |= STATUS_AUTH;
  3837. break;
  3838. case CMAS_INIT:
  3839. if (priv->status & STATUS_AUTH) {
  3840. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3841. IPW_DL_ASSOC,
  3842. "authentication failed (0x%04X): %s\n",
  3843. le16_to_cpu(auth->status),
  3844. ipw_get_status_code(le16_to_cpu
  3845. (auth->
  3846. status)));
  3847. }
  3848. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3849. IPW_DL_ASSOC,
  3850. "deauthenticated: '%s' %pM\n",
  3851. print_ssid(ssid, priv->essid,
  3852. priv->essid_len),
  3853. priv->bssid);
  3854. priv->status &= ~(STATUS_ASSOCIATING |
  3855. STATUS_AUTH |
  3856. STATUS_ASSOCIATED);
  3857. schedule_work(&priv->link_down);
  3858. break;
  3859. case CMAS_TX_AUTH_SEQ_1:
  3860. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3861. IPW_DL_ASSOC, "AUTH_SEQ_1\n");
  3862. break;
  3863. case CMAS_RX_AUTH_SEQ_2:
  3864. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3865. IPW_DL_ASSOC, "AUTH_SEQ_2\n");
  3866. break;
  3867. case CMAS_AUTH_SEQ_1_PASS:
  3868. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3869. IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
  3870. break;
  3871. case CMAS_AUTH_SEQ_1_FAIL:
  3872. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3873. IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
  3874. break;
  3875. case CMAS_TX_AUTH_SEQ_3:
  3876. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3877. IPW_DL_ASSOC, "AUTH_SEQ_3\n");
  3878. break;
  3879. case CMAS_RX_AUTH_SEQ_4:
  3880. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3881. IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
  3882. break;
  3883. case CMAS_AUTH_SEQ_2_PASS:
  3884. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3885. IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
  3886. break;
  3887. case CMAS_AUTH_SEQ_2_FAIL:
  3888. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3889. IPW_DL_ASSOC, "AUT_SEQ_2_FAIL\n");
  3890. break;
  3891. case CMAS_TX_ASSOC:
  3892. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3893. IPW_DL_ASSOC, "TX_ASSOC\n");
  3894. break;
  3895. case CMAS_RX_ASSOC_RESP:
  3896. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3897. IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
  3898. break;
  3899. case CMAS_ASSOCIATED:
  3900. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
  3901. IPW_DL_ASSOC, "ASSOCIATED\n");
  3902. break;
  3903. default:
  3904. IPW_DEBUG_NOTIF("auth: failure - %d\n",
  3905. auth->state);
  3906. break;
  3907. }
  3908. break;
  3909. }
  3910. case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
  3911. struct notif_channel_result *x =
  3912. &notif->u.channel_result;
  3913. if (size == sizeof(*x)) {
  3914. IPW_DEBUG_SCAN("Scan result for channel %d\n",
  3915. x->channel_num);
  3916. } else {
  3917. IPW_DEBUG_SCAN("Scan result of wrong size %d "
  3918. "(should be %zd)\n",
  3919. size, sizeof(*x));
  3920. }
  3921. break;
  3922. }
  3923. case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
  3924. struct notif_scan_complete *x = &notif->u.scan_complete;
  3925. if (size == sizeof(*x)) {
  3926. IPW_DEBUG_SCAN
  3927. ("Scan completed: type %d, %d channels, "
  3928. "%d status\n", x->scan_type,
  3929. x->num_channels, x->status);
  3930. } else {
  3931. IPW_ERROR("Scan completed of wrong size %d "
  3932. "(should be %zd)\n",
  3933. size, sizeof(*x));
  3934. }
  3935. priv->status &=
  3936. ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
  3937. wake_up_interruptible(&priv->wait_state);
  3938. cancel_delayed_work(&priv->scan_check);
  3939. if (priv->status & STATUS_EXIT_PENDING)
  3940. break;
  3941. priv->ieee->scans++;
  3942. #ifdef CONFIG_IPW2200_MONITOR
  3943. if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
  3944. priv->status |= STATUS_SCAN_FORCED;
  3945. queue_delayed_work(priv->workqueue,
  3946. &priv->request_scan, 0);
  3947. break;
  3948. }
  3949. priv->status &= ~STATUS_SCAN_FORCED;
  3950. #endif /* CONFIG_IPW2200_MONITOR */
  3951. /* Do queued direct scans first */
  3952. if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
  3953. queue_delayed_work(priv->workqueue,
  3954. &priv->request_direct_scan, 0);
  3955. }
  3956. if (!(priv->status & (STATUS_ASSOCIATED |
  3957. STATUS_ASSOCIATING |
  3958. STATUS_ROAMING |
  3959. STATUS_DISASSOCIATING)))
  3960. queue_work(priv->workqueue, &priv->associate);
  3961. else if (priv->status & STATUS_ROAMING) {
  3962. if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
  3963. /* If a scan completed and we are in roam mode, then
  3964. * the scan that completed was the one requested as a
  3965. * result of entering roam... so, schedule the
  3966. * roam work */
  3967. queue_work(priv->workqueue,
  3968. &priv->roam);
  3969. else
  3970. /* Don't schedule if we aborted the scan */
  3971. priv->status &= ~STATUS_ROAMING;
  3972. } else if (priv->status & STATUS_SCAN_PENDING)
  3973. queue_delayed_work(priv->workqueue,
  3974. &priv->request_scan, 0);
  3975. else if (priv->config & CFG_BACKGROUND_SCAN
  3976. && priv->status & STATUS_ASSOCIATED)
  3977. queue_delayed_work(priv->workqueue,
  3978. &priv->request_scan,
  3979. round_jiffies_relative(HZ));
  3980. /* Send an empty event to user space.
  3981. * We don't send the received data on the event because
  3982. * it would require us to do complex transcoding, and
3983. * we want to minimise the work done in the irq handler.
3984. * Use a request to extract the data.
3985. * Also, we generate this event for any scan, regardless
3986. * of how the scan was initiated. User space can just
  3987. * sync on periodic scan to get fresh data...
  3988. * Jean II */
  3989. if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
  3990. handle_scan_event(priv);
  3991. break;
  3992. }
  3993. case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
  3994. struct notif_frag_length *x = &notif->u.frag_len;
  3995. if (size == sizeof(*x))
  3996. IPW_ERROR("Frag length: %d\n",
  3997. le16_to_cpu(x->frag_length));
  3998. else
  3999. IPW_ERROR("Frag length of wrong size %d "
  4000. "(should be %zd)\n",
  4001. size, sizeof(*x));
  4002. break;
  4003. }
  4004. case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
  4005. struct notif_link_deterioration *x =
  4006. &notif->u.link_deterioration;
  4007. if (size == sizeof(*x)) {
  4008. IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
  4009. "link deterioration: type %d, cnt %d\n",
  4010. x->silence_notification_type,
  4011. x->silence_count);
  4012. memcpy(&priv->last_link_deterioration, x,
  4013. sizeof(*x));
  4014. } else {
  4015. IPW_ERROR("Link Deterioration of wrong size %d "
  4016. "(should be %zd)\n",
  4017. size, sizeof(*x));
  4018. }
  4019. break;
  4020. }
  4021. case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
  4022. IPW_ERROR("Dino config\n");
  4023. if (priv->hcmd
  4024. && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
  4025. IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
  4026. break;
  4027. }
  4028. case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
  4029. struct notif_beacon_state *x = &notif->u.beacon_state;
  4030. if (size != sizeof(*x)) {
  4031. IPW_ERROR
  4032. ("Beacon state of wrong size %d (should "
  4033. "be %zd)\n", size, sizeof(*x));
  4034. break;
  4035. }
  4036. if (le32_to_cpu(x->state) ==
  4037. HOST_NOTIFICATION_STATUS_BEACON_MISSING)
  4038. ipw_handle_missed_beacon(priv,
  4039. le32_to_cpu(x->
  4040. number));
  4041. break;
  4042. }
  4043. case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
  4044. struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
  4045. if (size == sizeof(*x)) {
  4046. IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
  4047. "0x%02x station %d\n",
  4048. x->key_state, x->security_type,
  4049. x->station_index);
  4050. break;
  4051. }
  4052. IPW_ERROR
  4053. ("TGi Tx Key of wrong size %d (should be %zd)\n",
  4054. size, sizeof(*x));
  4055. break;
  4056. }
  4057. case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
  4058. struct notif_calibration *x = &notif->u.calibration;
  4059. if (size == sizeof(*x)) {
  4060. memcpy(&priv->calib, x, sizeof(*x));
  4061. IPW_DEBUG_INFO("TODO: Calibration\n");
  4062. break;
  4063. }
  4064. IPW_ERROR
  4065. ("Calibration of wrong size %d (should be %zd)\n",
  4066. size, sizeof(*x));
  4067. break;
  4068. }
  4069. case HOST_NOTIFICATION_NOISE_STATS:{
  4070. if (size == sizeof(u32)) {
  4071. priv->exp_avg_noise =
  4072. exponential_average(priv->exp_avg_noise,
  4073. (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
  4074. DEPTH_NOISE);
  4075. break;
  4076. }
  4077. IPW_ERROR
  4078. ("Noise stat is wrong size %d (should be %zd)\n",
  4079. size, sizeof(u32));
  4080. break;
  4081. }
  4082. default:
  4083. IPW_DEBUG_NOTIF("Unknown notification: "
  4084. "subtype=%d,flags=0x%2x,size=%d\n",
  4085. notif->subtype, notif->flags, size);
  4086. }
  4087. }
  4088. /**
4089. * Destroys all DMA structures and initialises them again
  4090. *
  4091. * @param priv
  4092. * @return error code
  4093. */
  4094. static int ipw_queue_reset(struct ipw_priv *priv)
  4095. {
  4096. int rc = 0;
  4097. /** @todo customize queue sizes */
  4098. int nTx = 64, nTxCmd = 8;
  4099. ipw_tx_queue_free(priv);
  4100. /* Tx CMD queue */
  4101. rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
  4102. IPW_TX_CMD_QUEUE_READ_INDEX,
  4103. IPW_TX_CMD_QUEUE_WRITE_INDEX,
  4104. IPW_TX_CMD_QUEUE_BD_BASE,
  4105. IPW_TX_CMD_QUEUE_BD_SIZE);
  4106. if (rc) {
  4107. IPW_ERROR("Tx Cmd queue init failed\n");
  4108. goto error;
  4109. }
  4110. /* Tx queue(s) */
  4111. rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
  4112. IPW_TX_QUEUE_0_READ_INDEX,
  4113. IPW_TX_QUEUE_0_WRITE_INDEX,
  4114. IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
  4115. if (rc) {
  4116. IPW_ERROR("Tx 0 queue init failed\n");
  4117. goto error;
  4118. }
  4119. rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
  4120. IPW_TX_QUEUE_1_READ_INDEX,
  4121. IPW_TX_QUEUE_1_WRITE_INDEX,
  4122. IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
  4123. if (rc) {
  4124. IPW_ERROR("Tx 1 queue init failed\n");
  4125. goto error;
  4126. }
  4127. rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
  4128. IPW_TX_QUEUE_2_READ_INDEX,
  4129. IPW_TX_QUEUE_2_WRITE_INDEX,
  4130. IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
  4131. if (rc) {
  4132. IPW_ERROR("Tx 2 queue init failed\n");
  4133. goto error;
  4134. }
  4135. rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
  4136. IPW_TX_QUEUE_3_READ_INDEX,
  4137. IPW_TX_QUEUE_3_WRITE_INDEX,
  4138. IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
  4139. if (rc) {
  4140. IPW_ERROR("Tx 3 queue init failed\n");
  4141. goto error;
  4142. }
  4143. /* statistics */
  4144. priv->rx_bufs_min = 0;
  4145. priv->rx_pend_max = 0;
  4146. return rc;
  4147. error:
  4148. ipw_tx_queue_free(priv);
  4149. return rc;
  4150. }
  4151. /**
  4152. * Reclaim Tx queue entries no more used by NIC.
  4153. *
  4154. * When FW advances 'R' index, all entries between old and
4155. * new 'R' index need to be reclaimed. As a result, some free space
  4156. * forms. If there is enough free space (> low mark), wake Tx queue.
  4157. *
  4158. * @note Need to protect against garbage in 'R' index
  4159. * @param priv
  4160. * @param txq
  4161. * @param qindex
4162. * @return Number of used entries remaining in the queue
  4163. */
  4164. static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
  4165. struct clx2_tx_queue *txq, int qindex)
  4166. {
  4167. u32 hw_tail;
  4168. int used;
  4169. struct clx2_queue *q = &txq->q;
  4170. hw_tail = ipw_read32(priv, q->reg_r);
  4171. if (hw_tail >= q->n_bd) {
  4172. IPW_ERROR
  4173. ("Read index for DMA queue (%d) is out of range [0-%d)\n",
  4174. hw_tail, q->n_bd);
  4175. goto done;
  4176. }
  4177. for (; q->last_used != hw_tail;
  4178. q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
  4179. ipw_queue_tx_free_tfd(priv, txq);
  4180. priv->tx_packets++;
  4181. }
  4182. done:
  4183. if ((ipw_tx_queue_space(q) > q->low_mark) &&
  4184. (qindex >= 0))
  4185. netif_wake_queue(priv->net_dev);
  4186. used = q->first_empty - q->last_used;
  4187. if (used < 0)
  4188. used += q->n_bd;
  4189. return used;
  4190. }
  4191. static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
  4192. int len, int sync)
  4193. {
  4194. struct clx2_tx_queue *txq = &priv->txq_cmd;
  4195. struct clx2_queue *q = &txq->q;
  4196. struct tfd_frame *tfd;
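/* A synchronous command only needs one free descriptor; asynchronous
 * commands require two so that at least one slot stays free behind them. */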
  4197. if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
  4198. IPW_ERROR("No space for Tx\n");
  4199. return -EBUSY;
  4200. }
  4201. tfd = &txq->bd[q->first_empty];
  4202. txq->txb[q->first_empty] = NULL;
  4203. memset(tfd, 0, sizeof(*tfd));
  4204. tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
  4205. tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
  4206. priv->hcmd_seq++;
  4207. tfd->u.cmd.index = hcmd;
  4208. tfd->u.cmd.length = len;
  4209. memcpy(tfd->u.cmd.payload, buf, len);
  4210. q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
  4211. ipw_write32(priv, q->reg_w, q->first_empty);
  4212. _ipw_read32(priv, 0x90);
  4213. return 0;
  4214. }
  4215. /*
  4216. * Rx theory of operation
  4217. *
  4218. * The host allocates 32 DMA target addresses and passes the host address
  4219. * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
  4220. * 0 to 31
  4221. *
  4222. * Rx Queue Indexes
  4223. * The host/firmware share two index registers for managing the Rx buffers.
  4224. *
  4225. * The READ index maps to the first position that the firmware may be writing
  4226. * to -- the driver can read up to (but not including) this position and get
  4227. * good data.
  4228. * The READ index is managed by the firmware once the card is enabled.
  4229. *
  4230. * The WRITE index maps to the last position the driver has read from -- the
4231. * position preceding WRITE is the last slot in which the firmware can place a packet.
  4232. *
  4233. * The queue is empty (no good data) if WRITE = READ - 1, and is full if
  4234. * WRITE = READ.
  4235. *
  4236. * During initialization the host sets up the READ queue position to the first
  4237. * INDEX position, and WRITE to the last (READ - 1 wrapped)
  4238. *
  4239. * When the firmware places a packet in a buffer it will advance the READ index
  4240. * and fire the RX interrupt. The driver can then query the READ index and
  4241. * process as many packets as possible, moving the WRITE index forward as it
  4242. * resets the Rx queue buffers with new memory.
  4243. *
  4244. * The management in the driver is as follows:
  4245. * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
  4246. * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
4247. * to replenish the ipw->rxq->rx_free.
  4248. * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
  4249. * ipw->rxq is replenished and the READ INDEX is updated (updating the
  4250. * 'processed' and 'read' driver indexes as well)
  4251. * + A received packet is processed and handed to the kernel network stack,
  4252. * detached from the ipw->rxq. The driver 'processed' index is updated.
  4253. * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
  4254. * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
  4255. * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
  4256. * were enough free buffers and RX_STALLED is set it is cleared.
  4257. *
  4258. *
  4259. * Driver sequence:
  4260. *
  4261. * ipw_rx_queue_alloc() Allocates rx_free
  4262. * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
  4263. * ipw_rx_queue_restock
  4264. * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
  4265. * queue, updates firmware pointers, and updates
  4266. * the WRITE index. If insufficient rx_free buffers
  4267. * are available, schedules ipw_rx_queue_replenish
  4268. *
  4269. * -- enable interrupts --
  4270. * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
  4271. * READ INDEX, detaching the SKB from the pool.
  4272. * Moves the packet buffer from queue to rx_used.
  4273. * Calls ipw_rx_queue_restock to refill any empty
  4274. * slots.
  4275. * ...
  4276. *
  4277. */
  4278. /*
  4279. * If there are slots in the RX queue that need to be restocked,
  4280. * and we have free pre-allocated buffers, fill the ranks as much
  4281. * as we can pulling from rx_free.
  4282. *
  4283. * This moves the 'write' index forward to catch up with 'processed', and
  4284. * also updates the memory address in the firmware to reference the new
  4285. * target buffer.
  4286. */
  4287. static void ipw_rx_queue_restock(struct ipw_priv *priv)
  4288. {
  4289. struct ipw_rx_queue *rxq = priv->rxq;
  4290. struct list_head *element;
  4291. struct ipw_rx_mem_buffer *rxb;
  4292. unsigned long flags;
  4293. int write;
  4294. spin_lock_irqsave(&rxq->lock, flags);
  4295. write = rxq->write;
  4296. while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
  4297. element = rxq->rx_free.next;
  4298. rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
  4299. list_del(element);
  4300. ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
  4301. rxb->dma_addr);
  4302. rxq->queue[rxq->write] = rxb;
  4303. rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
  4304. rxq->free_count--;
  4305. }
  4306. spin_unlock_irqrestore(&rxq->lock, flags);
  4307. /* If the pre-allocated buffer pool is dropping low, schedule to
  4308. * refill it */
  4309. if (rxq->free_count <= RX_LOW_WATERMARK)
  4310. queue_work(priv->workqueue, &priv->rx_replenish);
  4311. /* If we've added more space for the firmware to place data, tell it */
  4312. if (write != rxq->write)
  4313. ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
  4314. }
  4315. /*
  4316. * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
  4317. * Also restock the Rx queue via ipw_rx_queue_restock.
  4318. *
4319. * This is called as a scheduled work item (except during initialization)
  4320. */
  4321. static void ipw_rx_queue_replenish(void *data)
  4322. {
  4323. struct ipw_priv *priv = data;
  4324. struct ipw_rx_queue *rxq = priv->rxq;
  4325. struct list_head *element;
  4326. struct ipw_rx_mem_buffer *rxb;
  4327. unsigned long flags;
  4328. spin_lock_irqsave(&rxq->lock, flags);
  4329. while (!list_empty(&rxq->rx_used)) {
  4330. element = rxq->rx_used.next;
  4331. rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
  4332. rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
  4333. if (!rxb->skb) {
  4334. printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
  4335. priv->net_dev->name);
  4336. /* We don't reschedule replenish work here -- we will
  4337. * call the restock method and if it still needs
  4338. * more buffers it will schedule replenish */
  4339. break;
  4340. }
  4341. list_del(element);
  4342. rxb->dma_addr =
  4343. pci_map_single(priv->pci_dev, rxb->skb->data,
  4344. IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
  4345. list_add_tail(&rxb->list, &rxq->rx_free);
  4346. rxq->free_count++;
  4347. }
  4348. spin_unlock_irqrestore(&rxq->lock, flags);
  4349. ipw_rx_queue_restock(priv);
  4350. }
  4351. static void ipw_bg_rx_queue_replenish(struct work_struct *work)
  4352. {
  4353. struct ipw_priv *priv =
  4354. container_of(work, struct ipw_priv, rx_replenish);
  4355. mutex_lock(&priv->mutex);
  4356. ipw_rx_queue_replenish(priv);
  4357. mutex_unlock(&priv->mutex);
  4358. }
  4359. /* Assumes that the skb field of the buffers in 'pool' is kept accurate.
  4360. * If an SKB has been detached, the POOL needs to have its SKB set to NULL
  4361. * This free routine walks the list of POOL entries and if SKB is set to
  4362. * non NULL it is unmapped and freed
  4363. */
  4364. static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
  4365. {
  4366. int i;
  4367. if (!rxq)
  4368. return;
  4369. for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
  4370. if (rxq->pool[i].skb != NULL) {
  4371. pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
  4372. IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
  4373. dev_kfree_skb(rxq->pool[i].skb);
  4374. }
  4375. }
  4376. kfree(rxq);
  4377. }
  4378. static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
  4379. {
  4380. struct ipw_rx_queue *rxq;
  4381. int i;
  4382. rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
  4383. if (unlikely(!rxq)) {
  4384. IPW_ERROR("memory allocation failed\n");
  4385. return NULL;
  4386. }
  4387. spin_lock_init(&rxq->lock);
  4388. INIT_LIST_HEAD(&rxq->rx_free);
  4389. INIT_LIST_HEAD(&rxq->rx_used);
  4390. /* Fill the rx_used queue with _all_ of the Rx buffers */
  4391. for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
  4392. list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
  4393. /* Set us so that we have processed and used all buffers, but have
  4394. * not restocked the Rx queue with fresh buffers */
  4395. rxq->read = rxq->write = 0;
  4396. rxq->free_count = 0;
  4397. return rxq;
  4398. }
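/* Test whether a single advertised rate (basic-rate flag stripped) is
 * enabled in the user-configured rates_mask for the given 802.11 mode. */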
  4399. static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
  4400. {
  4401. rate &= ~IEEE80211_BASIC_RATE_MASK;
  4402. if (ieee_mode == IEEE_A) {
  4403. switch (rate) {
  4404. case IEEE80211_OFDM_RATE_6MB:
  4405. return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
  4406. 1 : 0;
  4407. case IEEE80211_OFDM_RATE_9MB:
  4408. return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
  4409. 1 : 0;
  4410. case IEEE80211_OFDM_RATE_12MB:
  4411. return priv->
  4412. rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
  4413. case IEEE80211_OFDM_RATE_18MB:
  4414. return priv->
  4415. rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
  4416. case IEEE80211_OFDM_RATE_24MB:
  4417. return priv->
  4418. rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
  4419. case IEEE80211_OFDM_RATE_36MB:
  4420. return priv->
  4421. rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
  4422. case IEEE80211_OFDM_RATE_48MB:
  4423. return priv->
  4424. rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
  4425. case IEEE80211_OFDM_RATE_54MB:
  4426. return priv->
  4427. rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
  4428. default:
  4429. return 0;
  4430. }
  4431. }
  4432. /* B and G mixed */
  4433. switch (rate) {
  4434. case IEEE80211_CCK_RATE_1MB:
  4435. return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
  4436. case IEEE80211_CCK_RATE_2MB:
  4437. return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
  4438. case IEEE80211_CCK_RATE_5MB:
  4439. return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
  4440. case IEEE80211_CCK_RATE_11MB:
  4441. return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
  4442. }
  4443. /* If we are limited to B modulations, bail at this point */
  4444. if (ieee_mode == IEEE_B)
  4445. return 0;
  4446. /* G */
  4447. switch (rate) {
  4448. case IEEE80211_OFDM_RATE_6MB:
  4449. return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
  4450. case IEEE80211_OFDM_RATE_9MB:
  4451. return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
  4452. case IEEE80211_OFDM_RATE_12MB:
  4453. return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
  4454. case IEEE80211_OFDM_RATE_18MB:
  4455. return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
  4456. case IEEE80211_OFDM_RATE_24MB:
  4457. return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
  4458. case IEEE80211_OFDM_RATE_36MB:
  4459. return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
  4460. case IEEE80211_OFDM_RATE_48MB:
  4461. return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
  4462. case IEEE80211_OFDM_RATE_54MB:
  4463. return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
  4464. }
  4465. return 0;
  4466. }
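/* Build the set of rates shared by the network and our rates_mask; rates
 * flagged as basic (mandatory) are kept even when masked out locally. */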
  4467. static int ipw_compatible_rates(struct ipw_priv *priv,
  4468. const struct ieee80211_network *network,
  4469. struct ipw_supported_rates *rates)
  4470. {
  4471. int num_rates, i;
  4472. memset(rates, 0, sizeof(*rates));
  4473. num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
  4474. rates->num_rates = 0;
  4475. for (i = 0; i < num_rates; i++) {
  4476. if (!ipw_is_rate_in_mask(priv, network->mode,
  4477. network->rates[i])) {
  4478. if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
  4479. IPW_DEBUG_SCAN("Adding masked mandatory "
  4480. "rate %02X\n",
  4481. network->rates[i]);
  4482. rates->supported_rates[rates->num_rates++] =
  4483. network->rates[i];
  4484. continue;
  4485. }
  4486. IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
  4487. network->rates[i], priv->rates_mask);
  4488. continue;
  4489. }
  4490. rates->supported_rates[rates->num_rates++] = network->rates[i];
  4491. }
  4492. num_rates = min(network->rates_ex_len,
  4493. (u8) (IPW_MAX_RATES - num_rates));
  4494. for (i = 0; i < num_rates; i++) {
  4495. if (!ipw_is_rate_in_mask(priv, network->mode,
  4496. network->rates_ex[i])) {
  4497. if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
  4498. IPW_DEBUG_SCAN("Adding masked mandatory "
  4499. "rate %02X\n",
  4500. network->rates_ex[i]);
  4501. rates->supported_rates[rates->num_rates++] =
4502. network->rates_ex[i];
  4503. continue;
  4504. }
  4505. IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
  4506. network->rates_ex[i], priv->rates_mask);
  4507. continue;
  4508. }
  4509. rates->supported_rates[rates->num_rates++] =
  4510. network->rates_ex[i];
  4511. }
  4512. return 1;
  4513. }
  4514. static void ipw_copy_rates(struct ipw_supported_rates *dest,
  4515. const struct ipw_supported_rates *src)
  4516. {
  4517. u8 i;
  4518. for (i = 0; i < src->num_rates; i++)
  4519. dest->supported_rates[i] = src->supported_rates[i];
  4520. dest->num_rates = src->num_rates;
  4521. }
  4522. /* TODO: Look at sniffed packets in the air to determine if the basic rate
  4523. * mask should ever be used -- right now all callers to add the scan rates are
  4524. * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
  4525. static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
  4526. u8 modulation, u32 rate_mask)
  4527. {
  4528. u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
  4529. IEEE80211_BASIC_RATE_MASK : 0;
  4530. if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
  4531. rates->supported_rates[rates->num_rates++] =
  4532. IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
  4533. if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
  4534. rates->supported_rates[rates->num_rates++] =
  4535. IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
  4536. if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
  4537. rates->supported_rates[rates->num_rates++] = basic_mask |
  4538. IEEE80211_CCK_RATE_5MB;
  4539. if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
  4540. rates->supported_rates[rates->num_rates++] = basic_mask |
  4541. IEEE80211_CCK_RATE_11MB;
  4542. }
  4543. static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
  4544. u8 modulation, u32 rate_mask)
  4545. {
  4546. u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
  4547. IEEE80211_BASIC_RATE_MASK : 0;
  4548. if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
  4549. rates->supported_rates[rates->num_rates++] = basic_mask |
  4550. IEEE80211_OFDM_RATE_6MB;
  4551. if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
  4552. rates->supported_rates[rates->num_rates++] =
  4553. IEEE80211_OFDM_RATE_9MB;
  4554. if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
  4555. rates->supported_rates[rates->num_rates++] = basic_mask |
  4556. IEEE80211_OFDM_RATE_12MB;
  4557. if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
  4558. rates->supported_rates[rates->num_rates++] =
  4559. IEEE80211_OFDM_RATE_18MB;
  4560. if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
  4561. rates->supported_rates[rates->num_rates++] = basic_mask |
  4562. IEEE80211_OFDM_RATE_24MB;
  4563. if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
  4564. rates->supported_rates[rates->num_rates++] =
  4565. IEEE80211_OFDM_RATE_36MB;
  4566. if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
  4567. rates->supported_rates[rates->num_rates++] =
  4568. IEEE80211_OFDM_RATE_48MB;
  4569. if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
  4570. rates->supported_rates[rates->num_rates++] =
  4571. IEEE80211_OFDM_RATE_54MB;
  4572. }
  4573. struct ipw_network_match {
  4574. struct ieee80211_network *network;
  4575. struct ipw_supported_rates rates;
  4576. };
  4577. static int ipw_find_adhoc_network(struct ipw_priv *priv,
  4578. struct ipw_network_match *match,
  4579. struct ieee80211_network *network,
  4580. int roaming)
  4581. {
  4582. struct ipw_supported_rates rates;
  4583. DECLARE_SSID_BUF(ssid);
  4584. /* Verify that this network's capability is compatible with the
  4585. * current mode (AdHoc or Infrastructure) */
  4586. if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
  4587. !(network->capability & WLAN_CAPABILITY_IBSS))) {
  4588. IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
  4589. "capability mismatch.\n",
  4590. print_ssid(ssid, network->ssid,
  4591. network->ssid_len),
  4592. network->bssid);
  4593. return 0;
  4594. }
  4595. if (unlikely(roaming)) {
4596. /* If we are roaming, check whether this is a valid
4597. * network to try to roam to */
  4598. if ((network->ssid_len != match->network->ssid_len) ||
  4599. memcmp(network->ssid, match->network->ssid,
  4600. network->ssid_len)) {
  4601. IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
  4602. "because of non-network ESSID.\n",
  4603. print_ssid(ssid, network->ssid,
  4604. network->ssid_len),
  4605. network->bssid);
  4606. return 0;
  4607. }
  4608. } else {
  4609. /* If an ESSID has been configured then compare the broadcast
  4610. * ESSID to ours */
  4611. if ((priv->config & CFG_STATIC_ESSID) &&
  4612. ((network->ssid_len != priv->essid_len) ||
  4613. memcmp(network->ssid, priv->essid,
  4614. min(network->ssid_len, priv->essid_len)))) {
  4615. char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
  4616. strncpy(escaped,
  4617. print_ssid(ssid, network->ssid,
  4618. network->ssid_len),
  4619. sizeof(escaped));
  4620. IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
  4621. "because of ESSID mismatch: '%s'.\n",
  4622. escaped, network->bssid,
  4623. print_ssid(ssid, priv->essid,
  4624. priv->essid_len));
  4625. return 0;
  4626. }
  4627. }
4628. /* If this network's TSF timestamp is lower than the current match's,
4629. * don't bother testing everything else. */
  4630. if (network->time_stamp[0] < match->network->time_stamp[0]) {
  4631. IPW_DEBUG_MERGE("Network '%s excluded because newer than "
  4632. "current network.\n",
  4633. print_ssid(ssid, match->network->ssid,
  4634. match->network->ssid_len));
  4635. return 0;
  4636. } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
  4637. IPW_DEBUG_MERGE("Network '%s excluded because newer than "
  4638. "current network.\n",
  4639. print_ssid(ssid, match->network->ssid,
  4640. match->network->ssid_len));
  4641. return 0;
  4642. }
  4643. /* Now go through and see if the requested network is valid... */
  4644. if (priv->ieee->scan_age != 0 &&
  4645. time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
  4646. IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
  4647. "because of age: %ums.\n",
  4648. print_ssid(ssid, network->ssid,
  4649. network->ssid_len),
  4650. network->bssid,
  4651. jiffies_to_msecs(jiffies -
  4652. network->last_scanned));
  4653. return 0;
  4654. }
  4655. if ((priv->config & CFG_STATIC_CHANNEL) &&
  4656. (network->channel != priv->channel)) {
  4657. IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
  4658. "because of channel mismatch: %d != %d.\n",
  4659. print_ssid(ssid, network->ssid,
  4660. network->ssid_len),
  4661. network->bssid,
  4662. network->channel, priv->channel);
  4663. return 0;
  4664. }
4665. /* Verify privacy compatibility */
  4666. if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
  4667. ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
  4668. IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
  4669. "because of privacy mismatch: %s != %s.\n",
  4670. print_ssid(ssid, network->ssid,
  4671. network->ssid_len),
  4672. network->bssid,
  4673. priv->
  4674. capability & CAP_PRIVACY_ON ? "on" : "off",
  4675. network->
  4676. capability & WLAN_CAPABILITY_PRIVACY ? "on" :
  4677. "off");
  4678. return 0;
  4679. }
  4680. if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
  4681. IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
  4682. "because of the same BSSID match: %pM"
  4683. ".\n", print_ssid(ssid, network->ssid,
  4684. network->ssid_len),
  4685. network->bssid,
  4686. priv->bssid);
  4687. return 0;
  4688. }
  4689. /* Filter out any incompatible freq / mode combinations */
  4690. if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
  4691. IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
  4692. "because of invalid frequency/mode "
  4693. "combination.\n",
  4694. print_ssid(ssid, network->ssid,
  4695. network->ssid_len),
  4696. network->bssid);
  4697. return 0;
  4698. }
  4699. /* Ensure that the rates supported by the driver are compatible with
  4700. * this AP, including verification of basic rates (mandatory) */
  4701. if (!ipw_compatible_rates(priv, network, &rates)) {
  4702. IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
  4703. "because configured rate mask excludes "
  4704. "AP mandatory rate.\n",
  4705. print_ssid(ssid, network->ssid,
  4706. network->ssid_len),
  4707. network->bssid);
  4708. return 0;
  4709. }
  4710. if (rates.num_rates == 0) {
  4711. IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
  4712. "because of no compatible rates.\n",
  4713. print_ssid(ssid, network->ssid,
  4714. network->ssid_len),
  4715. network->bssid);
  4716. return 0;
  4717. }
4718. /* TODO: Perform any further minimal comparative tests. We do not
  4719. * want to put too much policy logic here; intelligent scan selection
  4720. * should occur within a generic IEEE 802.11 user space tool. */
  4721. /* Set up 'new' AP to this network */
  4722. ipw_copy_rates(&match->rates, &rates);
  4723. match->network = network;
  4724. IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
  4725. print_ssid(ssid, network->ssid, network->ssid_len),
  4726. network->bssid);
  4727. return 1;
  4728. }
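/* Ad-hoc merge worker: when associated in IBSS mode, look for a better
 * network carrying the same ESSID and, if one is found, drop the current
 * association so the normal association path can join (merge with) it. */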
  4729. static void ipw_merge_adhoc_network(struct work_struct *work)
  4730. {
  4731. DECLARE_SSID_BUF(ssid);
  4732. struct ipw_priv *priv =
  4733. container_of(work, struct ipw_priv, merge_networks);
  4734. struct ieee80211_network *network = NULL;
  4735. struct ipw_network_match match = {
  4736. .network = priv->assoc_network
  4737. };
  4738. if ((priv->status & STATUS_ASSOCIATED) &&
  4739. (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
  4740. /* First pass through ROAM process -- look for a better
  4741. * network */
  4742. unsigned long flags;
  4743. spin_lock_irqsave(&priv->ieee->lock, flags);
  4744. list_for_each_entry(network, &priv->ieee->network_list, list) {
  4745. if (network != priv->assoc_network)
  4746. ipw_find_adhoc_network(priv, &match, network,
  4747. 1);
  4748. }
  4749. spin_unlock_irqrestore(&priv->ieee->lock, flags);
  4750. if (match.network == priv->assoc_network) {
  4751. IPW_DEBUG_MERGE("No better ADHOC in this network to "
  4752. "merge to.\n");
  4753. return;
  4754. }
  4755. mutex_lock(&priv->mutex);
  4756. if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
  4757. IPW_DEBUG_MERGE("remove network %s\n",
  4758. print_ssid(ssid, priv->essid,
  4759. priv->essid_len));
  4760. ipw_remove_current_network(priv);
  4761. }
  4762. ipw_disassociate(priv);
  4763. priv->assoc_network = match.network;
  4764. mutex_unlock(&priv->mutex);
  4765. return;
  4766. }
  4767. }
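/*
 * Compare @network against the current best candidate in @match.
 * A network is rejected if its capabilities, ESSID, signal strength,
 * scan age, channel, privacy setting, BSSID, mode, or rates are
 * incompatible with our configuration or worse than the current match.
 * Returns 1 and updates @match if @network is a viable, better match;
 * returns 0 otherwise.
 */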
  4768. static int ipw_best_network(struct ipw_priv *priv,
  4769. struct ipw_network_match *match,
  4770. struct ieee80211_network *network, int roaming)
  4771. {
  4772. struct ipw_supported_rates rates;
  4773. DECLARE_SSID_BUF(ssid);
  4774. /* Verify that this network's capability is compatible with the
  4775. * current mode (AdHoc or Infrastructure) */
  4776. if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
  4777. !(network->capability & WLAN_CAPABILITY_ESS)) ||
  4778. (priv->ieee->iw_mode == IW_MODE_ADHOC &&
  4779. !(network->capability & WLAN_CAPABILITY_IBSS))) {
  4780. IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
  4781. "capability mismatch.\n",
  4782. print_ssid(ssid, network->ssid,
  4783. network->ssid_len),
  4784. network->bssid);
  4785. return 0;
  4786. }
  4787. if (unlikely(roaming)) {
4788. /* If we are roaming, then check that this is a valid
4789. * network to try to roam to */
  4790. if ((network->ssid_len != match->network->ssid_len) ||
  4791. memcmp(network->ssid, match->network->ssid,
  4792. network->ssid_len)) {
  4793. IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
  4794. "because of non-network ESSID.\n",
  4795. print_ssid(ssid, network->ssid,
  4796. network->ssid_len),
  4797. network->bssid);
  4798. return 0;
  4799. }
  4800. } else {
  4801. /* If an ESSID has been configured then compare the broadcast
  4802. * ESSID to ours */
  4803. if ((priv->config & CFG_STATIC_ESSID) &&
  4804. ((network->ssid_len != priv->essid_len) ||
  4805. memcmp(network->ssid, priv->essid,
  4806. min(network->ssid_len, priv->essid_len)))) {
  4807. char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
  4808. strncpy(escaped,
  4809. print_ssid(ssid, network->ssid,
  4810. network->ssid_len),
  4811. sizeof(escaped));
  4812. IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
  4813. "because of ESSID mismatch: '%s'.\n",
  4814. escaped, network->bssid,
  4815. print_ssid(ssid, priv->essid,
  4816. priv->essid_len));
  4817. return 0;
  4818. }
  4819. }
4820. /* If the old network has a stronger signal than this one, don't
4821. * bother testing everything else. */
  4822. if (match->network && match->network->stats.rssi > network->stats.rssi) {
  4823. char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
  4824. strncpy(escaped,
  4825. print_ssid(ssid, network->ssid, network->ssid_len),
  4826. sizeof(escaped));
  4827. IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
  4828. "'%s (%pM)' has a stronger signal.\n",
  4829. escaped, network->bssid,
  4830. print_ssid(ssid, match->network->ssid,
  4831. match->network->ssid_len),
  4832. match->network->bssid);
  4833. return 0;
  4834. }
  4835. /* If this network has already had an association attempt within the
4836. * last 3 seconds, do not try to associate again... */
  4837. if (network->last_associate &&
  4838. time_after(network->last_associate + (HZ * 3UL), jiffies)) {
  4839. IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
  4840. "because of storming (%ums since last "
  4841. "assoc attempt).\n",
  4842. print_ssid(ssid, network->ssid,
  4843. network->ssid_len),
  4844. network->bssid,
  4845. jiffies_to_msecs(jiffies -
  4846. network->last_associate));
  4847. return 0;
  4848. }
  4849. /* Now go through and see if the requested network is valid... */
  4850. if (priv->ieee->scan_age != 0 &&
  4851. time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
  4852. IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
  4853. "because of age: %ums.\n",
  4854. print_ssid(ssid, network->ssid,
  4855. network->ssid_len),
  4856. network->bssid,
  4857. jiffies_to_msecs(jiffies -
  4858. network->last_scanned));
  4859. return 0;
  4860. }
  4861. if ((priv->config & CFG_STATIC_CHANNEL) &&
  4862. (network->channel != priv->channel)) {
  4863. IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
  4864. "because of channel mismatch: %d != %d.\n",
  4865. print_ssid(ssid, network->ssid,
  4866. network->ssid_len),
  4867. network->bssid,
  4868. network->channel, priv->channel);
  4869. return 0;
  4870. }
4871. /* Verify privacy compatibility */
  4872. if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
  4873. ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
  4874. IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
  4875. "because of privacy mismatch: %s != %s.\n",
  4876. print_ssid(ssid, network->ssid,
  4877. network->ssid_len),
  4878. network->bssid,
  4879. priv->capability & CAP_PRIVACY_ON ? "on" :
  4880. "off",
  4881. network->capability &
  4882. WLAN_CAPABILITY_PRIVACY ? "on" : "off");
  4883. return 0;
  4884. }
  4885. if ((priv->config & CFG_STATIC_BSSID) &&
  4886. memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
  4887. IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
  4888. "because of BSSID mismatch: %pM.\n",
  4889. print_ssid(ssid, network->ssid,
  4890. network->ssid_len),
  4891. network->bssid, priv->bssid);
  4892. return 0;
  4893. }
  4894. /* Filter out any incompatible freq / mode combinations */
  4895. if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
  4896. IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
  4897. "because of invalid frequency/mode "
  4898. "combination.\n",
  4899. print_ssid(ssid, network->ssid,
  4900. network->ssid_len),
  4901. network->bssid);
  4902. return 0;
  4903. }
  4904. /* Filter out invalid channel in current GEO */
  4905. if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
  4906. IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
  4907. "because of invalid channel in current GEO\n",
  4908. print_ssid(ssid, network->ssid,
  4909. network->ssid_len),
  4910. network->bssid);
  4911. return 0;
  4912. }
  4913. /* Ensure that the rates supported by the driver are compatible with
  4914. * this AP, including verification of basic rates (mandatory) */
  4915. if (!ipw_compatible_rates(priv, network, &rates)) {
  4916. IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
  4917. "because configured rate mask excludes "
  4918. "AP mandatory rate.\n",
  4919. print_ssid(ssid, network->ssid,
  4920. network->ssid_len),
  4921. network->bssid);
  4922. return 0;
  4923. }
  4924. if (rates.num_rates == 0) {
  4925. IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
  4926. "because of no compatible rates.\n",
  4927. print_ssid(ssid, network->ssid,
  4928. network->ssid_len),
  4929. network->bssid);
  4930. return 0;
  4931. }
4932. /* TODO: Perform any further minimal comparative tests. We do not
  4933. * want to put too much policy logic here; intelligent scan selection
  4934. * should occur within a generic IEEE 802.11 user space tool. */
  4935. /* Set up 'new' AP to this network */
  4936. ipw_copy_rates(&match->rates, &rates);
  4937. match->network = network;
  4938. IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
  4939. print_ssid(ssid, network->ssid, network->ssid_len),
  4940. network->bssid);
  4941. return 1;
  4942. }
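/*
 * Build the description of a new, locally created IBSS in @network:
 * pick a band and channel valid for the current geography (falling
 * back to the first channel of the band table if needed), generate a
 * BSSID, copy the configured ESSID and supported rates, and set the
 * IBSS capability bits (preamble, privacy) from the current config.
 */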
  4943. static void ipw_adhoc_create(struct ipw_priv *priv,
  4944. struct ieee80211_network *network)
  4945. {
  4946. const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
  4947. int i;
  4948. /*
  4949. * For the purposes of scanning, we can set our wireless mode
  4950. * to trigger scans across combinations of bands, but when it
4951. * comes to creating a new ad-hoc network, we have to tell the FW
  4952. * exactly which band to use.
  4953. *
  4954. * We also have the possibility of an invalid channel for the
4955. * chosen band. Attempting to create a new ad-hoc network
  4956. * with an invalid channel for wireless mode will trigger a
  4957. * FW fatal error.
  4958. *
  4959. */
  4960. switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
  4961. case IEEE80211_52GHZ_BAND:
  4962. network->mode = IEEE_A;
  4963. i = ieee80211_channel_to_index(priv->ieee, priv->channel);
  4964. BUG_ON(i == -1);
  4965. if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
  4966. IPW_WARNING("Overriding invalid channel\n");
  4967. priv->channel = geo->a[0].channel;
  4968. }
  4969. break;
  4970. case IEEE80211_24GHZ_BAND:
  4971. if (priv->ieee->mode & IEEE_G)
  4972. network->mode = IEEE_G;
  4973. else
  4974. network->mode = IEEE_B;
  4975. i = ieee80211_channel_to_index(priv->ieee, priv->channel);
  4976. BUG_ON(i == -1);
  4977. if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
  4978. IPW_WARNING("Overriding invalid channel\n");
  4979. priv->channel = geo->bg[0].channel;
  4980. }
  4981. break;
  4982. default:
  4983. IPW_WARNING("Overriding invalid channel\n");
  4984. if (priv->ieee->mode & IEEE_A) {
  4985. network->mode = IEEE_A;
  4986. priv->channel = geo->a[0].channel;
  4987. } else if (priv->ieee->mode & IEEE_G) {
  4988. network->mode = IEEE_G;
  4989. priv->channel = geo->bg[0].channel;
  4990. } else {
  4991. network->mode = IEEE_B;
  4992. priv->channel = geo->bg[0].channel;
  4993. }
  4994. break;
  4995. }
  4996. network->channel = priv->channel;
  4997. priv->config |= CFG_ADHOC_PERSIST;
  4998. ipw_create_bssid(priv, network->bssid);
  4999. network->ssid_len = priv->essid_len;
  5000. memcpy(network->ssid, priv->essid, priv->essid_len);
  5001. memset(&network->stats, 0, sizeof(network->stats));
  5002. network->capability = WLAN_CAPABILITY_IBSS;
  5003. if (!(priv->config & CFG_PREAMBLE_LONG))
  5004. network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
  5005. if (priv->capability & CAP_PRIVACY_ON)
  5006. network->capability |= WLAN_CAPABILITY_PRIVACY;
  5007. network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
  5008. memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
  5009. network->rates_ex_len = priv->rates.num_rates - network->rates_len;
  5010. memcpy(network->rates_ex,
  5011. &priv->rates.supported_rates[network->rates_len],
  5012. network->rates_ex_len);
  5013. network->last_scanned = 0;
  5014. network->flags = 0;
  5015. network->last_associate = 0;
  5016. network->time_stamp[0] = 0;
  5017. network->time_stamp[1] = 0;
  5018. network->beacon_interval = 100; /* Default */
  5019. network->listen_interval = 10; /* Default */
  5020. network->atim_window = 0; /* Default */
  5021. network->wpa_ie_len = 0;
  5022. network->rsn_ie_len = 0;
  5023. }
  5024. static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
  5025. {
  5026. struct ipw_tgi_tx_key key;
  5027. if (!(priv->ieee->sec.flags & (1 << index)))
  5028. return;
  5029. key.key_id = index;
  5030. memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
  5031. key.security_type = type;
  5032. key.station_index = 0; /* always 0 for BSS */
  5033. key.flags = 0;
  5034. /* 0 for new key; previous value of counter (after fatal error) */
  5035. key.tx_counter[0] = cpu_to_le32(0);
  5036. key.tx_counter[1] = cpu_to_le32(0);
  5037. ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
  5038. }
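/*
 * Send the configured static keys (WEP, or group CCMP depending on
 * @type) to the firmware using the DINO WEP key command. Key slots
 * with no key configured are skipped.
 */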
  5039. static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
  5040. {
  5041. struct ipw_wep_key key;
  5042. int i;
  5043. key.cmd_id = DINO_CMD_WEP_KEY;
  5044. key.seq_num = 0;
5045. /* Note: AES keys cannot be set multiple times.
5046. * Only set them the first time. */
  5047. for (i = 0; i < 4; i++) {
  5048. key.key_index = i | type;
  5049. if (!(priv->ieee->sec.flags & (1 << i))) {
  5050. key.key_size = 0;
  5051. continue;
  5052. }
  5053. key.key_size = priv->ieee->sec.key_sizes[i];
  5054. memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
  5055. ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
  5056. }
  5057. }
  5058. static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
  5059. {
  5060. if (priv->ieee->host_encrypt)
  5061. return;
  5062. switch (level) {
  5063. case SEC_LEVEL_3:
  5064. priv->sys_config.disable_unicast_decryption = 0;
  5065. priv->ieee->host_decrypt = 0;
  5066. break;
  5067. case SEC_LEVEL_2:
  5068. priv->sys_config.disable_unicast_decryption = 1;
  5069. priv->ieee->host_decrypt = 1;
  5070. break;
  5071. case SEC_LEVEL_1:
  5072. priv->sys_config.disable_unicast_decryption = 0;
  5073. priv->ieee->host_decrypt = 0;
  5074. break;
  5075. case SEC_LEVEL_0:
  5076. priv->sys_config.disable_unicast_decryption = 1;
  5077. break;
  5078. default:
  5079. break;
  5080. }
  5081. }
  5082. static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
  5083. {
  5084. if (priv->ieee->host_encrypt)
  5085. return;
  5086. switch (level) {
  5087. case SEC_LEVEL_3:
  5088. priv->sys_config.disable_multicast_decryption = 0;
  5089. break;
  5090. case SEC_LEVEL_2:
  5091. priv->sys_config.disable_multicast_decryption = 1;
  5092. break;
  5093. case SEC_LEVEL_1:
  5094. priv->sys_config.disable_multicast_decryption = 0;
  5095. break;
  5096. case SEC_LEVEL_0:
  5097. priv->sys_config.disable_multicast_decryption = 1;
  5098. break;
  5099. default:
  5100. break;
  5101. }
  5102. }
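/*
 * Program the firmware crypto engine for the current security level:
 * CCMP (level 3) and TKIP (level 2) transmit keys are sent with the
 * TGi TX key command, while WEP (level 1) keys are sent with the DINO
 * WEP key command and the hardware decrypt settings are updated to
 * match.
 */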
  5103. static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
  5104. {
  5105. switch (priv->ieee->sec.level) {
  5106. case SEC_LEVEL_3:
  5107. if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
  5108. ipw_send_tgi_tx_key(priv,
  5109. DCT_FLAG_EXT_SECURITY_CCM,
  5110. priv->ieee->sec.active_key);
  5111. if (!priv->ieee->host_mc_decrypt)
  5112. ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
  5113. break;
  5114. case SEC_LEVEL_2:
  5115. if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
  5116. ipw_send_tgi_tx_key(priv,
  5117. DCT_FLAG_EXT_SECURITY_TKIP,
  5118. priv->ieee->sec.active_key);
  5119. break;
  5120. case SEC_LEVEL_1:
  5121. ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
  5122. ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
  5123. ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
  5124. break;
  5125. case SEC_LEVEL_0:
  5126. default:
  5127. break;
  5128. }
  5129. }
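/*
 * Ad-hoc beacon watchdog. If more beacons have been missed than the
 * disassociation threshold allows (and CFG_ADHOC_PERSIST is not set),
 * drop the current network and disassociate; otherwise re-arm the
 * check one beacon interval from now.
 */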
  5130. static void ipw_adhoc_check(void *data)
  5131. {
  5132. struct ipw_priv *priv = data;
  5133. if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
  5134. !(priv->config & CFG_ADHOC_PERSIST)) {
  5135. IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
  5136. IPW_DL_STATE | IPW_DL_ASSOC,
  5137. "Missed beacon: %d - disassociate\n",
  5138. priv->missed_adhoc_beacons);
  5139. ipw_remove_current_network(priv);
  5140. ipw_disassociate(priv);
  5141. return;
  5142. }
  5143. queue_delayed_work(priv->workqueue, &priv->adhoc_check,
  5144. le16_to_cpu(priv->assoc_request.beacon_interval));
  5145. }
  5146. static void ipw_bg_adhoc_check(struct work_struct *work)
  5147. {
  5148. struct ipw_priv *priv =
  5149. container_of(work, struct ipw_priv, adhoc_check.work);
  5150. mutex_lock(&priv->mutex);
  5151. ipw_adhoc_check(priv);
  5152. mutex_unlock(&priv->mutex);
  5153. }
  5154. static void ipw_debug_config(struct ipw_priv *priv)
  5155. {
  5156. DECLARE_SSID_BUF(ssid);
  5157. IPW_DEBUG_INFO("Scan completed, no valid APs matched "
  5158. "[CFG 0x%08X]\n", priv->config);
  5159. if (priv->config & CFG_STATIC_CHANNEL)
  5160. IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
  5161. else
  5162. IPW_DEBUG_INFO("Channel unlocked.\n");
  5163. if (priv->config & CFG_STATIC_ESSID)
  5164. IPW_DEBUG_INFO("ESSID locked to '%s'\n",
  5165. print_ssid(ssid, priv->essid, priv->essid_len));
  5166. else
  5167. IPW_DEBUG_INFO("ESSID unlocked.\n");
  5168. if (priv->config & CFG_STATIC_BSSID)
  5169. IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
  5170. else
  5171. IPW_DEBUG_INFO("BSSID unlocked.\n");
  5172. if (priv->capability & CAP_PRIVACY_ON)
  5173. IPW_DEBUG_INFO("PRIVACY on\n");
  5174. else
  5175. IPW_DEBUG_INFO("PRIVACY off\n");
  5176. IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
  5177. }
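/*
 * Translate the user supplied rate mask (priv->rates_mask) into the
 * firmware's fixed-rate format for the current band/mode and write it
 * to the address read from IPW_MEM_FIXED_OVERRIDE. For 802.11a the
 * OFDM rates are shifted into the A-band positions; for 802.11g the
 * 6/9/12 Mb OFDM bits are remapped; an invalid mask clears the fixed
 * rates entirely (tx_rates = 0).
 */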
  5178. static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
  5179. {
  5180. /* TODO: Verify that this works... */
  5181. struct ipw_fixed_rate fr = {
  5182. .tx_rates = priv->rates_mask
  5183. };
  5184. u32 reg;
  5185. u16 mask = 0;
  5186. /* Identify 'current FW band' and match it with the fixed
  5187. * Tx rates */
  5188. switch (priv->ieee->freq_band) {
  5189. case IEEE80211_52GHZ_BAND: /* A only */
  5190. /* IEEE_A */
  5191. if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
  5192. /* Invalid fixed rate mask */
  5193. IPW_DEBUG_WX
  5194. ("invalid fixed rate mask in ipw_set_fixed_rate\n");
  5195. fr.tx_rates = 0;
  5196. break;
  5197. }
  5198. fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
  5199. break;
  5200. default: /* 2.4Ghz or Mixed */
  5201. /* IEEE_B */
  5202. if (mode == IEEE_B) {
  5203. if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
  5204. /* Invalid fixed rate mask */
  5205. IPW_DEBUG_WX
  5206. ("invalid fixed rate mask in ipw_set_fixed_rate\n");
  5207. fr.tx_rates = 0;
  5208. }
  5209. break;
  5210. }
  5211. /* IEEE_G */
  5212. if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
  5213. IEEE80211_OFDM_RATES_MASK)) {
  5214. /* Invalid fixed rate mask */
  5215. IPW_DEBUG_WX
  5216. ("invalid fixed rate mask in ipw_set_fixed_rate\n");
  5217. fr.tx_rates = 0;
  5218. break;
  5219. }
  5220. if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
  5221. mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
  5222. fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
  5223. }
  5224. if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
  5225. mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
  5226. fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
  5227. }
  5228. if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
  5229. mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
  5230. fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
  5231. }
  5232. fr.tx_rates |= mask;
  5233. break;
  5234. }
  5235. reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
  5236. ipw_write_reg32(priv, reg, *(u32 *) & fr);
  5237. }
  5238. static void ipw_abort_scan(struct ipw_priv *priv)
  5239. {
  5240. int err;
  5241. if (priv->status & STATUS_SCAN_ABORTING) {
  5242. IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
  5243. return;
  5244. }
  5245. priv->status |= STATUS_SCAN_ABORTING;
  5246. err = ipw_send_scan_abort(priv);
  5247. if (err)
  5248. IPW_DEBUG_HC("Request to abort scan failed.\n");
  5249. }
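/*
 * Fill in the channel list of a scan request. Each band is emitted as
 * a header byte, (band << 6) | channel_count, followed by that many
 * channel numbers -- e.g. three 802.11a channels are encoded as
 * (IPW_A_MODE << 6) | 3 and then the three channels. Channels flagged
 * passive-only get a passive full-dwell scan type, and when
 * CFG_SPEED_SCAN is set the 2.4 GHz list is taken from the rotating
 * speed_scan table instead of the full geography.
 */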
  5250. static void ipw_add_scan_channels(struct ipw_priv *priv,
  5251. struct ipw_scan_request_ext *scan,
  5252. int scan_type)
  5253. {
  5254. int channel_index = 0;
  5255. const struct ieee80211_geo *geo;
  5256. int i;
  5257. geo = ieee80211_get_geo(priv->ieee);
  5258. if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
  5259. int start = channel_index;
  5260. for (i = 0; i < geo->a_channels; i++) {
  5261. if ((priv->status & STATUS_ASSOCIATED) &&
  5262. geo->a[i].channel == priv->channel)
  5263. continue;
  5264. channel_index++;
  5265. scan->channels_list[channel_index] = geo->a[i].channel;
  5266. ipw_set_scan_type(scan, channel_index,
  5267. geo->a[i].
  5268. flags & IEEE80211_CH_PASSIVE_ONLY ?
  5269. IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
  5270. scan_type);
  5271. }
  5272. if (start != channel_index) {
  5273. scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
  5274. (channel_index - start);
  5275. channel_index++;
  5276. }
  5277. }
  5278. if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
  5279. int start = channel_index;
  5280. if (priv->config & CFG_SPEED_SCAN) {
  5281. int index;
  5282. u8 channels[IEEE80211_24GHZ_CHANNELS] = {
  5283. /* nop out the list */
  5284. [0] = 0
  5285. };
  5286. u8 channel;
  5287. while (channel_index < IPW_SCAN_CHANNELS) {
  5288. channel =
  5289. priv->speed_scan[priv->speed_scan_pos];
  5290. if (channel == 0) {
  5291. priv->speed_scan_pos = 0;
  5292. channel = priv->speed_scan[0];
  5293. }
  5294. if ((priv->status & STATUS_ASSOCIATED) &&
  5295. channel == priv->channel) {
  5296. priv->speed_scan_pos++;
  5297. continue;
  5298. }
  5299. /* If this channel has already been
5300. * added to the scan, break from the loop
  5301. * and this will be the first channel
  5302. * in the next scan.
  5303. */
  5304. if (channels[channel - 1] != 0)
  5305. break;
  5306. channels[channel - 1] = 1;
  5307. priv->speed_scan_pos++;
  5308. channel_index++;
  5309. scan->channels_list[channel_index] = channel;
  5310. index =
  5311. ieee80211_channel_to_index(priv->ieee, channel);
  5312. ipw_set_scan_type(scan, channel_index,
  5313. geo->bg[index].
  5314. flags &
  5315. IEEE80211_CH_PASSIVE_ONLY ?
  5316. IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
  5317. : scan_type);
  5318. }
  5319. } else {
  5320. for (i = 0; i < geo->bg_channels; i++) {
  5321. if ((priv->status & STATUS_ASSOCIATED) &&
  5322. geo->bg[i].channel == priv->channel)
  5323. continue;
  5324. channel_index++;
  5325. scan->channels_list[channel_index] =
  5326. geo->bg[i].channel;
  5327. ipw_set_scan_type(scan, channel_index,
  5328. geo->bg[i].
  5329. flags &
  5330. IEEE80211_CH_PASSIVE_ONLY ?
  5331. IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
  5332. : scan_type);
  5333. }
  5334. }
  5335. if (start != channel_index) {
  5336. scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
  5337. (channel_index - start);
  5338. }
  5339. }
  5340. }
  5341. static int ipw_passive_dwell_time(struct ipw_priv *priv)
  5342. {
  5343. /* staying on passive channels longer than the DTIM interval during a
  5344. * scan, while associated, causes the firmware to cancel the scan
  5345. * without notification. Hence, don't stay on passive channels longer
  5346. * than the beacon interval.
  5347. */
  5348. if (priv->status & STATUS_ASSOCIATED
  5349. && priv->assoc_network->beacon_interval > 10)
  5350. return priv->assoc_network->beacon_interval - 10;
  5351. else
  5352. return 120;
  5353. }
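/*
 * Build and send a scan request. The scan is deferred (marked
 * pending) if a scan is already in progress, an abort is pending, or
 * RF kill is active. Passive scans dwell for ipw_passive_dwell_time();
 * active scans use short dwell times and, for direct scans or while
 * roaming, send the target SSID first so the scan probes for it
 * directly. In monitor mode the request is restricted to a single
 * channel with a long passive dwell.
 */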
  5354. static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
  5355. {
  5356. struct ipw_scan_request_ext scan;
  5357. int err = 0, scan_type;
  5358. if (!(priv->status & STATUS_INIT) ||
  5359. (priv->status & STATUS_EXIT_PENDING))
  5360. return 0;
  5361. mutex_lock(&priv->mutex);
  5362. if (direct && (priv->direct_scan_ssid_len == 0)) {
  5363. IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
  5364. priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
  5365. goto done;
  5366. }
  5367. if (priv->status & STATUS_SCANNING) {
  5368. IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
  5369. priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
  5370. STATUS_SCAN_PENDING;
  5371. goto done;
  5372. }
  5373. if (!(priv->status & STATUS_SCAN_FORCED) &&
  5374. priv->status & STATUS_SCAN_ABORTING) {
  5375. IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
  5376. priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
  5377. STATUS_SCAN_PENDING;
  5378. goto done;
  5379. }
  5380. if (priv->status & STATUS_RF_KILL_MASK) {
  5381. IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
  5382. priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
  5383. STATUS_SCAN_PENDING;
  5384. goto done;
  5385. }
  5386. memset(&scan, 0, sizeof(scan));
  5387. scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
  5388. if (type == IW_SCAN_TYPE_PASSIVE) {
  5389. IPW_DEBUG_WX("use passive scanning\n");
  5390. scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
  5391. scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
  5392. cpu_to_le16(ipw_passive_dwell_time(priv));
  5393. ipw_add_scan_channels(priv, &scan, scan_type);
  5394. goto send_request;
  5395. }
  5396. /* Use active scan by default. */
  5397. if (priv->config & CFG_SPEED_SCAN)
  5398. scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
  5399. cpu_to_le16(30);
  5400. else
  5401. scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
  5402. cpu_to_le16(20);
  5403. scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
  5404. cpu_to_le16(20);
  5405. scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
  5406. cpu_to_le16(ipw_passive_dwell_time(priv));
  5407. scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
  5408. #ifdef CONFIG_IPW2200_MONITOR
  5409. if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
  5410. u8 channel;
  5411. u8 band = 0;
  5412. switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
  5413. case IEEE80211_52GHZ_BAND:
  5414. band = (u8) (IPW_A_MODE << 6) | 1;
  5415. channel = priv->channel;
  5416. break;
  5417. case IEEE80211_24GHZ_BAND:
  5418. band = (u8) (IPW_B_MODE << 6) | 1;
  5419. channel = priv->channel;
  5420. break;
  5421. default:
  5422. band = (u8) (IPW_B_MODE << 6) | 1;
  5423. channel = 9;
  5424. break;
  5425. }
  5426. scan.channels_list[0] = band;
  5427. scan.channels_list[1] = channel;
  5428. ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
  5429. /* NOTE: The card will sit on this channel for this time
  5430. * period. Scan aborts are timing sensitive and frequently
  5431. * result in firmware restarts. As such, it is best to
  5432. * set a small dwell_time here and just keep re-issuing
  5433. * scans. Otherwise fast channel hopping will not actually
  5434. * hop channels.
  5435. *
  5436. * TODO: Move SPEED SCAN support to all modes and bands */
  5437. scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
  5438. cpu_to_le16(2000);
  5439. } else {
  5440. #endif /* CONFIG_IPW2200_MONITOR */
  5441. /* Honor direct scans first, otherwise if we are roaming make
  5442. * this a direct scan for the current network. Finally,
  5443. * ensure that every other scan is a fast channel hop scan */
  5444. if (direct) {
  5445. err = ipw_send_ssid(priv, priv->direct_scan_ssid,
  5446. priv->direct_scan_ssid_len);
  5447. if (err) {
  5448. IPW_DEBUG_HC("Attempt to send SSID command "
  5449. "failed\n");
  5450. goto done;
  5451. }
  5452. scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
  5453. } else if ((priv->status & STATUS_ROAMING)
  5454. || (!(priv->status & STATUS_ASSOCIATED)
  5455. && (priv->config & CFG_STATIC_ESSID)
  5456. && (le32_to_cpu(scan.full_scan_index) % 2))) {
  5457. err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
  5458. if (err) {
  5459. IPW_DEBUG_HC("Attempt to send SSID command "
  5460. "failed.\n");
  5461. goto done;
  5462. }
  5463. scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
  5464. } else
  5465. scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
  5466. ipw_add_scan_channels(priv, &scan, scan_type);
  5467. #ifdef CONFIG_IPW2200_MONITOR
  5468. }
  5469. #endif
  5470. send_request:
  5471. err = ipw_send_scan_request_ext(priv, &scan);
  5472. if (err) {
  5473. IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
  5474. goto done;
  5475. }
  5476. priv->status |= STATUS_SCANNING;
  5477. if (direct) {
  5478. priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
  5479. priv->direct_scan_ssid_len = 0;
  5480. } else
  5481. priv->status &= ~STATUS_SCAN_PENDING;
  5482. queue_delayed_work(priv->workqueue, &priv->scan_check,
  5483. IPW_SCAN_CHECK_WATCHDOG);
  5484. done:
  5485. mutex_unlock(&priv->mutex);
  5486. return err;
  5487. }
  5488. static void ipw_request_passive_scan(struct work_struct *work)
  5489. {
  5490. struct ipw_priv *priv =
  5491. container_of(work, struct ipw_priv, request_passive_scan.work);
  5492. ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
  5493. }
  5494. static void ipw_request_scan(struct work_struct *work)
  5495. {
  5496. struct ipw_priv *priv =
  5497. container_of(work, struct ipw_priv, request_scan.work);
  5498. ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
  5499. }
  5500. static void ipw_request_direct_scan(struct work_struct *work)
  5501. {
  5502. struct ipw_priv *priv =
  5503. container_of(work, struct ipw_priv, request_direct_scan.work);
  5504. ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
  5505. }
  5506. static void ipw_bg_abort_scan(struct work_struct *work)
  5507. {
  5508. struct ipw_priv *priv =
  5509. container_of(work, struct ipw_priv, abort_scan);
  5510. mutex_lock(&priv->mutex);
  5511. ipw_abort_scan(priv);
  5512. mutex_unlock(&priv->mutex);
  5513. }
  5514. static int ipw_wpa_enable(struct ipw_priv *priv, int value)
  5515. {
  5516. /* This is called when wpa_supplicant loads and closes the driver
  5517. * interface. */
  5518. priv->ieee->wpa_enabled = value;
  5519. return 0;
  5520. }
  5521. static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
  5522. {
  5523. struct ieee80211_device *ieee = priv->ieee;
  5524. struct ieee80211_security sec = {
  5525. .flags = SEC_AUTH_MODE,
  5526. };
  5527. int ret = 0;
  5528. if (value & IW_AUTH_ALG_SHARED_KEY) {
  5529. sec.auth_mode = WLAN_AUTH_SHARED_KEY;
  5530. ieee->open_wep = 0;
  5531. } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
  5532. sec.auth_mode = WLAN_AUTH_OPEN;
  5533. ieee->open_wep = 1;
  5534. } else if (value & IW_AUTH_ALG_LEAP) {
  5535. sec.auth_mode = WLAN_AUTH_LEAP;
  5536. ieee->open_wep = 1;
  5537. } else
  5538. return -EINVAL;
  5539. if (ieee->set_security)
  5540. ieee->set_security(ieee->dev, &sec);
  5541. else
  5542. ret = -EOPNOTSUPP;
  5543. return ret;
  5544. }
  5545. static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
  5546. int wpa_ie_len)
  5547. {
  5548. /* make sure WPA is enabled */
  5549. ipw_wpa_enable(priv, 1);
  5550. }
  5551. static int ipw_set_rsn_capa(struct ipw_priv *priv,
  5552. char *capabilities, int length)
  5553. {
  5554. IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
  5555. return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
  5556. capabilities);
  5557. }
  5558. /*
  5559. * WE-18 support
  5560. */
  5561. /* SIOCSIWGENIE */
  5562. static int ipw_wx_set_genie(struct net_device *dev,
  5563. struct iw_request_info *info,
  5564. union iwreq_data *wrqu, char *extra)
  5565. {
  5566. struct ipw_priv *priv = ieee80211_priv(dev);
  5567. struct ieee80211_device *ieee = priv->ieee;
  5568. u8 *buf;
  5569. int err = 0;
  5570. if (wrqu->data.length > MAX_WPA_IE_LEN ||
  5571. (wrqu->data.length && extra == NULL))
  5572. return -EINVAL;
  5573. if (wrqu->data.length) {
  5574. buf = kmalloc(wrqu->data.length, GFP_KERNEL);
  5575. if (buf == NULL) {
  5576. err = -ENOMEM;
  5577. goto out;
  5578. }
  5579. memcpy(buf, extra, wrqu->data.length);
  5580. kfree(ieee->wpa_ie);
  5581. ieee->wpa_ie = buf;
  5582. ieee->wpa_ie_len = wrqu->data.length;
  5583. } else {
  5584. kfree(ieee->wpa_ie);
  5585. ieee->wpa_ie = NULL;
  5586. ieee->wpa_ie_len = 0;
  5587. }
  5588. ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
  5589. out:
  5590. return err;
  5591. }
  5592. /* SIOCGIWGENIE */
  5593. static int ipw_wx_get_genie(struct net_device *dev,
  5594. struct iw_request_info *info,
  5595. union iwreq_data *wrqu, char *extra)
  5596. {
  5597. struct ipw_priv *priv = ieee80211_priv(dev);
  5598. struct ieee80211_device *ieee = priv->ieee;
  5599. int err = 0;
  5600. if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
  5601. wrqu->data.length = 0;
  5602. goto out;
  5603. }
  5604. if (wrqu->data.length < ieee->wpa_ie_len) {
  5605. err = -E2BIG;
  5606. goto out;
  5607. }
  5608. wrqu->data.length = ieee->wpa_ie_len;
  5609. memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
  5610. out:
  5611. return err;
  5612. }
  5613. static int wext_cipher2level(int cipher)
  5614. {
  5615. switch (cipher) {
  5616. case IW_AUTH_CIPHER_NONE:
  5617. return SEC_LEVEL_0;
  5618. case IW_AUTH_CIPHER_WEP40:
  5619. case IW_AUTH_CIPHER_WEP104:
  5620. return SEC_LEVEL_1;
  5621. case IW_AUTH_CIPHER_TKIP:
  5622. return SEC_LEVEL_2;
  5623. case IW_AUTH_CIPHER_CCMP:
  5624. return SEC_LEVEL_3;
  5625. default:
  5626. return -1;
  5627. }
  5628. }
  5629. /* SIOCSIWAUTH */
  5630. static int ipw_wx_set_auth(struct net_device *dev,
  5631. struct iw_request_info *info,
  5632. union iwreq_data *wrqu, char *extra)
  5633. {
  5634. struct ipw_priv *priv = ieee80211_priv(dev);
  5635. struct ieee80211_device *ieee = priv->ieee;
  5636. struct iw_param *param = &wrqu->param;
  5637. struct lib80211_crypt_data *crypt;
  5638. unsigned long flags;
  5639. int ret = 0;
  5640. switch (param->flags & IW_AUTH_INDEX) {
  5641. case IW_AUTH_WPA_VERSION:
  5642. break;
  5643. case IW_AUTH_CIPHER_PAIRWISE:
  5644. ipw_set_hw_decrypt_unicast(priv,
  5645. wext_cipher2level(param->value));
  5646. break;
  5647. case IW_AUTH_CIPHER_GROUP:
  5648. ipw_set_hw_decrypt_multicast(priv,
  5649. wext_cipher2level(param->value));
  5650. break;
  5651. case IW_AUTH_KEY_MGMT:
  5652. /*
  5653. * ipw2200 does not use these parameters
  5654. */
  5655. break;
  5656. case IW_AUTH_TKIP_COUNTERMEASURES:
  5657. crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
  5658. if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
  5659. break;
  5660. flags = crypt->ops->get_flags(crypt->priv);
  5661. if (param->value)
  5662. flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
  5663. else
  5664. flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
  5665. crypt->ops->set_flags(flags, crypt->priv);
  5666. break;
  5667. case IW_AUTH_DROP_UNENCRYPTED:{
  5668. /* HACK:
  5669. *
  5670. * wpa_supplicant calls set_wpa_enabled when the driver
5671. * is loaded and unloaded, regardless of whether WPA is being
5672. * used. No other calls are made which can be used to
5673. * determine whether encryption will be used before an
5674. * association is expected. If encryption is not being
  5675. * used, drop_unencrypted is set to false, else true -- we
  5676. * can use this to determine if the CAP_PRIVACY_ON bit should
  5677. * be set.
  5678. */
  5679. struct ieee80211_security sec = {
  5680. .flags = SEC_ENABLED,
  5681. .enabled = param->value,
  5682. };
  5683. priv->ieee->drop_unencrypted = param->value;
  5684. /* We only change SEC_LEVEL for open mode. Others
  5685. * are set by ipw_wpa_set_encryption.
  5686. */
  5687. if (!param->value) {
  5688. sec.flags |= SEC_LEVEL;
  5689. sec.level = SEC_LEVEL_0;
  5690. } else {
  5691. sec.flags |= SEC_LEVEL;
  5692. sec.level = SEC_LEVEL_1;
  5693. }
  5694. if (priv->ieee->set_security)
  5695. priv->ieee->set_security(priv->ieee->dev, &sec);
  5696. break;
  5697. }
  5698. case IW_AUTH_80211_AUTH_ALG:
  5699. ret = ipw_wpa_set_auth_algs(priv, param->value);
  5700. break;
  5701. case IW_AUTH_WPA_ENABLED:
  5702. ret = ipw_wpa_enable(priv, param->value);
  5703. ipw_disassociate(priv);
  5704. break;
  5705. case IW_AUTH_RX_UNENCRYPTED_EAPOL:
  5706. ieee->ieee802_1x = param->value;
  5707. break;
  5708. case IW_AUTH_PRIVACY_INVOKED:
  5709. ieee->privacy_invoked = param->value;
  5710. break;
  5711. default:
  5712. return -EOPNOTSUPP;
  5713. }
  5714. return ret;
  5715. }
  5716. /* SIOCGIWAUTH */
  5717. static int ipw_wx_get_auth(struct net_device *dev,
  5718. struct iw_request_info *info,
  5719. union iwreq_data *wrqu, char *extra)
  5720. {
  5721. struct ipw_priv *priv = ieee80211_priv(dev);
  5722. struct ieee80211_device *ieee = priv->ieee;
  5723. struct lib80211_crypt_data *crypt;
  5724. struct iw_param *param = &wrqu->param;
  5725. int ret = 0;
  5726. switch (param->flags & IW_AUTH_INDEX) {
  5727. case IW_AUTH_WPA_VERSION:
  5728. case IW_AUTH_CIPHER_PAIRWISE:
  5729. case IW_AUTH_CIPHER_GROUP:
  5730. case IW_AUTH_KEY_MGMT:
  5731. /*
  5732. * wpa_supplicant will control these internally
  5733. */
  5734. ret = -EOPNOTSUPP;
  5735. break;
  5736. case IW_AUTH_TKIP_COUNTERMEASURES:
  5737. crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
  5738. if (!crypt || !crypt->ops->get_flags)
  5739. break;
  5740. param->value = (crypt->ops->get_flags(crypt->priv) &
  5741. IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
  5742. break;
  5743. case IW_AUTH_DROP_UNENCRYPTED:
  5744. param->value = ieee->drop_unencrypted;
  5745. break;
  5746. case IW_AUTH_80211_AUTH_ALG:
  5747. param->value = ieee->sec.auth_mode;
  5748. break;
  5749. case IW_AUTH_WPA_ENABLED:
  5750. param->value = ieee->wpa_enabled;
  5751. break;
  5752. case IW_AUTH_RX_UNENCRYPTED_EAPOL:
  5753. param->value = ieee->ieee802_1x;
  5754. break;
  5755. case IW_AUTH_ROAMING_CONTROL:
  5756. case IW_AUTH_PRIVACY_INVOKED:
  5757. param->value = ieee->privacy_invoked;
  5758. break;
  5759. default:
  5760. return -EOPNOTSUPP;
  5761. }
  5762. return 0;
  5763. }
  5764. /* SIOCSIWENCODEEXT */
  5765. static int ipw_wx_set_encodeext(struct net_device *dev,
  5766. struct iw_request_info *info,
  5767. union iwreq_data *wrqu, char *extra)
  5768. {
  5769. struct ipw_priv *priv = ieee80211_priv(dev);
  5770. struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
  5771. if (hwcrypto) {
  5772. if (ext->alg == IW_ENCODE_ALG_TKIP) {
  5773. /* IPW HW can't build TKIP MIC,
  5774. host decryption still needed */
  5775. if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
  5776. priv->ieee->host_mc_decrypt = 1;
  5777. else {
  5778. priv->ieee->host_encrypt = 0;
  5779. priv->ieee->host_encrypt_msdu = 1;
  5780. priv->ieee->host_decrypt = 1;
  5781. }
  5782. } else {
  5783. priv->ieee->host_encrypt = 0;
  5784. priv->ieee->host_encrypt_msdu = 0;
  5785. priv->ieee->host_decrypt = 0;
  5786. priv->ieee->host_mc_decrypt = 0;
  5787. }
  5788. }
  5789. return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
  5790. }
  5791. /* SIOCGIWENCODEEXT */
  5792. static int ipw_wx_get_encodeext(struct net_device *dev,
  5793. struct iw_request_info *info,
  5794. union iwreq_data *wrqu, char *extra)
  5795. {
  5796. struct ipw_priv *priv = ieee80211_priv(dev);
  5797. return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
  5798. }
  5799. /* SIOCSIWMLME */
  5800. static int ipw_wx_set_mlme(struct net_device *dev,
  5801. struct iw_request_info *info,
  5802. union iwreq_data *wrqu, char *extra)
  5803. {
  5804. struct ipw_priv *priv = ieee80211_priv(dev);
  5805. struct iw_mlme *mlme = (struct iw_mlme *)extra;
  5806. __le16 reason;
  5807. reason = cpu_to_le16(mlme->reason_code);
  5808. switch (mlme->cmd) {
  5809. case IW_MLME_DEAUTH:
  5810. /* silently ignore */
  5811. break;
  5812. case IW_MLME_DISASSOC:
  5813. ipw_disassociate(priv);
  5814. break;
  5815. default:
  5816. return -EOPNOTSUPP;
  5817. }
  5818. return 0;
  5819. }
  5820. #ifdef CONFIG_IPW2200_QOS
  5821. /* QoS */
  5822. /*
  5823. * get the modulation type of the current network or
5824. * the card's current mode
  5825. */
  5826. static u8 ipw_qos_current_mode(struct ipw_priv * priv)
  5827. {
  5828. u8 mode = 0;
  5829. if (priv->status & STATUS_ASSOCIATED) {
  5830. unsigned long flags;
  5831. spin_lock_irqsave(&priv->ieee->lock, flags);
  5832. mode = priv->assoc_network->mode;
  5833. spin_unlock_irqrestore(&priv->ieee->lock, flags);
  5834. } else {
  5835. mode = priv->ieee->mode;
  5836. }
  5837. IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
  5838. return mode;
  5839. }
  5840. /*
5841. * Handle beacon and probe response management frames
  5842. */
  5843. static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
  5844. int active_network,
  5845. struct ieee80211_network *network)
  5846. {
  5847. u32 size = sizeof(struct ieee80211_qos_parameters);
  5848. if (network->capability & WLAN_CAPABILITY_IBSS)
  5849. network->qos_data.active = network->qos_data.supported;
  5850. if (network->flags & NETWORK_HAS_QOS_MASK) {
  5851. if (active_network &&
  5852. (network->flags & NETWORK_HAS_QOS_PARAMETERS))
  5853. network->qos_data.active = network->qos_data.supported;
  5854. if ((network->qos_data.active == 1) && (active_network == 1) &&
  5855. (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
  5856. (network->qos_data.old_param_count !=
  5857. network->qos_data.param_count)) {
  5858. network->qos_data.old_param_count =
  5859. network->qos_data.param_count;
  5860. schedule_work(&priv->qos_activate);
  5861. IPW_DEBUG_QOS("QoS parameters change call "
  5862. "qos_activate\n");
  5863. }
  5864. } else {
  5865. if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
  5866. memcpy(&network->qos_data.parameters,
  5867. &def_parameters_CCK, size);
  5868. else
  5869. memcpy(&network->qos_data.parameters,
  5870. &def_parameters_OFDM, size);
  5871. if ((network->qos_data.active == 1) && (active_network == 1)) {
  5872. IPW_DEBUG_QOS("QoS was disabled call qos_activate \n");
  5873. schedule_work(&priv->qos_activate);
  5874. }
  5875. network->qos_data.active = 0;
  5876. network->qos_data.supported = 0;
  5877. }
  5878. if ((priv->status & STATUS_ASSOCIATED) &&
  5879. (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
  5880. if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
  5881. if (network->capability & WLAN_CAPABILITY_IBSS)
  5882. if ((network->ssid_len ==
  5883. priv->assoc_network->ssid_len) &&
  5884. !memcmp(network->ssid,
  5885. priv->assoc_network->ssid,
  5886. network->ssid_len)) {
  5887. queue_work(priv->workqueue,
  5888. &priv->merge_networks);
  5889. }
  5890. }
  5891. return 0;
  5892. }
  5893. /*
5894. * This function sets up the firmware to support QoS. It sends
5895. * IPW_CMD_QOS_PARAMETERS with the currently active parameter set.
  5896. */
  5897. static int ipw_qos_activate(struct ipw_priv *priv,
  5898. struct ieee80211_qos_data *qos_network_data)
  5899. {
  5900. int err;
  5901. struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
  5902. struct ieee80211_qos_parameters *active_one = NULL;
  5903. u32 size = sizeof(struct ieee80211_qos_parameters);
  5904. u32 burst_duration;
  5905. int i;
  5906. u8 type;
  5907. type = ipw_qos_current_mode(priv);
  5908. active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
  5909. memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
  5910. active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
  5911. memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
  5912. if (qos_network_data == NULL) {
  5913. if (type == IEEE_B) {
  5914. IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
  5915. active_one = &def_parameters_CCK;
  5916. } else
  5917. active_one = &def_parameters_OFDM;
  5918. memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
  5919. burst_duration = ipw_qos_get_burst_duration(priv);
  5920. for (i = 0; i < QOS_QUEUE_NUM; i++)
  5921. qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
  5922. cpu_to_le16(burst_duration);
  5923. } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
  5924. if (type == IEEE_B) {
5925. IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
  5926. type);
  5927. if (priv->qos_data.qos_enable == 0)
  5928. active_one = &def_parameters_CCK;
  5929. else
  5930. active_one = priv->qos_data.def_qos_parm_CCK;
  5931. } else {
  5932. if (priv->qos_data.qos_enable == 0)
  5933. active_one = &def_parameters_OFDM;
  5934. else
  5935. active_one = priv->qos_data.def_qos_parm_OFDM;
  5936. }
  5937. memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
  5938. } else {
  5939. unsigned long flags;
  5940. int active;
  5941. spin_lock_irqsave(&priv->ieee->lock, flags);
  5942. active_one = &(qos_network_data->parameters);
  5943. qos_network_data->old_param_count =
  5944. qos_network_data->param_count;
  5945. memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
  5946. active = qos_network_data->supported;
  5947. spin_unlock_irqrestore(&priv->ieee->lock, flags);
  5948. if (active == 0) {
  5949. burst_duration = ipw_qos_get_burst_duration(priv);
  5950. for (i = 0; i < QOS_QUEUE_NUM; i++)
  5951. qos_parameters[QOS_PARAM_SET_ACTIVE].
  5952. tx_op_limit[i] = cpu_to_le16(burst_duration);
  5953. }
  5954. }
  5955. IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
  5956. err = ipw_send_qos_params_command(priv,
  5957. (struct ieee80211_qos_parameters *)
  5958. &(qos_parameters[0]));
  5959. if (err)
  5960. IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
  5961. return err;
  5962. }
  5963. /*
  5964. * send IPW_CMD_WME_INFO to the firmware
  5965. */
  5966. static int ipw_qos_set_info_element(struct ipw_priv *priv)
  5967. {
  5968. int ret = 0;
  5969. struct ieee80211_qos_information_element qos_info;
  5970. if (priv == NULL)
  5971. return -1;
  5972. qos_info.elementID = QOS_ELEMENT_ID;
  5973. qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
  5974. qos_info.version = QOS_VERSION_1;
  5975. qos_info.ac_info = 0;
  5976. memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
  5977. qos_info.qui_type = QOS_OUI_TYPE;
  5978. qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
  5979. ret = ipw_send_qos_info_command(priv, &qos_info);
  5980. if (ret != 0) {
  5981. IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
  5982. }
  5983. return ret;
  5984. }
  5985. /*
  5986. * Set the QoS parameter with the association request structure
  5987. */
  5988. static int ipw_qos_association(struct ipw_priv *priv,
  5989. struct ieee80211_network *network)
  5990. {
  5991. int err = 0;
  5992. struct ieee80211_qos_data *qos_data = NULL;
  5993. struct ieee80211_qos_data ibss_data = {
  5994. .supported = 1,
  5995. .active = 1,
  5996. };
  5997. switch (priv->ieee->iw_mode) {
  5998. case IW_MODE_ADHOC:
  5999. BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
  6000. qos_data = &ibss_data;
  6001. break;
  6002. case IW_MODE_INFRA:
  6003. qos_data = &network->qos_data;
  6004. break;
  6005. default:
  6006. BUG();
  6007. break;
  6008. }
  6009. err = ipw_qos_activate(priv, qos_data);
  6010. if (err) {
  6011. priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
  6012. return err;
  6013. }
  6014. if (priv->qos_data.qos_enable && qos_data->supported) {
  6015. IPW_DEBUG_QOS("QoS will be enabled for this association\n");
  6016. priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
  6017. return ipw_qos_set_info_element(priv);
  6018. }
  6019. return 0;
  6020. }
  6021. /*
6022. * Handle the beacon responses. If we get a QoS setting from the
6023. * network that differs from the associated setting, adjust the QoS
6024. * setting.
  6025. */
  6026. static int ipw_qos_association_resp(struct ipw_priv *priv,
  6027. struct ieee80211_network *network)
  6028. {
  6029. int ret = 0;
  6030. unsigned long flags;
  6031. u32 size = sizeof(struct ieee80211_qos_parameters);
  6032. int set_qos_param = 0;
  6033. if ((priv == NULL) || (network == NULL) ||
  6034. (priv->assoc_network == NULL))
  6035. return ret;
  6036. if (!(priv->status & STATUS_ASSOCIATED))
  6037. return ret;
  6038. if ((priv->ieee->iw_mode != IW_MODE_INFRA))
  6039. return ret;
  6040. spin_lock_irqsave(&priv->ieee->lock, flags);
  6041. if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
  6042. memcpy(&priv->assoc_network->qos_data, &network->qos_data,
  6043. sizeof(struct ieee80211_qos_data));
  6044. priv->assoc_network->qos_data.active = 1;
  6045. if ((network->qos_data.old_param_count !=
  6046. network->qos_data.param_count)) {
  6047. set_qos_param = 1;
  6048. network->qos_data.old_param_count =
  6049. network->qos_data.param_count;
  6050. }
  6051. } else {
  6052. if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
  6053. memcpy(&priv->assoc_network->qos_data.parameters,
  6054. &def_parameters_CCK, size);
  6055. else
  6056. memcpy(&priv->assoc_network->qos_data.parameters,
  6057. &def_parameters_OFDM, size);
  6058. priv->assoc_network->qos_data.active = 0;
  6059. priv->assoc_network->qos_data.supported = 0;
  6060. set_qos_param = 1;
  6061. }
  6062. spin_unlock_irqrestore(&priv->ieee->lock, flags);
  6063. if (set_qos_param == 1)
  6064. schedule_work(&priv->qos_activate);
  6065. return ret;
  6066. }
  6067. static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
  6068. {
  6069. u32 ret = 0;
  6070. if ((priv == NULL))
  6071. return 0;
  6072. if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
  6073. ret = priv->qos_data.burst_duration_CCK;
  6074. else
  6075. ret = priv->qos_data.burst_duration_OFDM;
  6076. return ret;
  6077. }
  6078. /*
6079. * Initialize the global QoS settings
  6080. */
  6081. static void ipw_qos_init(struct ipw_priv *priv, int enable,
  6082. int burst_enable, u32 burst_duration_CCK,
  6083. u32 burst_duration_OFDM)
  6084. {
  6085. priv->qos_data.qos_enable = enable;
  6086. if (priv->qos_data.qos_enable) {
  6087. priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
  6088. priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
  6089. IPW_DEBUG_QOS("QoS is enabled\n");
  6090. } else {
  6091. priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
  6092. priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
  6093. IPW_DEBUG_QOS("QoS is not enabled\n");
  6094. }
  6095. priv->qos_data.burst_enable = burst_enable;
  6096. if (burst_enable) {
  6097. priv->qos_data.burst_duration_CCK = burst_duration_CCK;
  6098. priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
  6099. } else {
  6100. priv->qos_data.burst_duration_CCK = 0;
  6101. priv->qos_data.burst_duration_OFDM = 0;
  6102. }
  6103. }
  6104. /*
  6105. * map the packet priority to the right TX Queue
  6106. */
  6107. static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
  6108. {
  6109. if (priority > 7 || !priv->qos_data.qos_enable)
  6110. priority = 0;
  6111. return from_priority_to_tx_queue[priority] - 1;
  6112. }
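/*
 * Decide whether QoS should be applied to this frame: requires an
 * active association, a QoS-active network, and QoS enabled in the
 * driver. In ad-hoc mode QoS is only considered active for unicast
 * destinations.
 */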
  6113. static int ipw_is_qos_active(struct net_device *dev,
  6114. struct sk_buff *skb)
  6115. {
  6116. struct ipw_priv *priv = ieee80211_priv(dev);
  6117. struct ieee80211_qos_data *qos_data = NULL;
  6118. int active, supported;
  6119. u8 *daddr = skb->data + ETH_ALEN;
  6120. int unicast = !is_multicast_ether_addr(daddr);
  6121. if (!(priv->status & STATUS_ASSOCIATED))
  6122. return 0;
  6123. qos_data = &priv->assoc_network->qos_data;
  6124. if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
  6125. if (unicast == 0)
  6126. qos_data->active = 0;
  6127. else
  6128. qos_data->active = qos_data->supported;
  6129. }
  6130. active = qos_data->active;
  6131. supported = qos_data->supported;
  6132. IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
  6133. "unicast %d\n",
  6134. priv->qos_data.qos_enable, active, supported, unicast);
  6135. if (active && priv->qos_data.qos_enable)
  6136. return 1;
  6137. return 0;
  6138. }
  6139. /*
6140. * add QoS parameters to the TX command
  6141. */
  6142. static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
  6143. u16 priority,
  6144. struct tfd_data *tfd)
  6145. {
  6146. int tx_queue_id = 0;
  6147. tx_queue_id = from_priority_to_tx_queue[priority] - 1;
  6148. tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
  6149. if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
  6150. tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
  6151. tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
  6152. }
  6153. return 0;
  6154. }
  6155. /*
6156. * background work to run the QoS activate functionality
  6157. */
  6158. static void ipw_bg_qos_activate(struct work_struct *work)
  6159. {
  6160. struct ipw_priv *priv =
  6161. container_of(work, struct ipw_priv, qos_activate);
  6162. if (priv == NULL)
  6163. return;
  6164. mutex_lock(&priv->mutex);
  6165. if (priv->status & STATUS_ASSOCIATED)
  6166. ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
  6167. mutex_unlock(&priv->mutex);
  6168. }
  6169. static int ipw_handle_probe_response(struct net_device *dev,
  6170. struct ieee80211_probe_response *resp,
  6171. struct ieee80211_network *network)
  6172. {
  6173. struct ipw_priv *priv = ieee80211_priv(dev);
  6174. int active_network = ((priv->status & STATUS_ASSOCIATED) &&
  6175. (network == priv->assoc_network));
  6176. ipw_qos_handle_probe_response(priv, active_network, network);
  6177. return 0;
  6178. }
  6179. static int ipw_handle_beacon(struct net_device *dev,
  6180. struct ieee80211_beacon *resp,
  6181. struct ieee80211_network *network)
  6182. {
  6183. struct ipw_priv *priv = ieee80211_priv(dev);
  6184. int active_network = ((priv->status & STATUS_ASSOCIATED) &&
  6185. (network == priv->assoc_network));
  6186. ipw_qos_handle_probe_response(priv, active_network, network);
  6187. return 0;
  6188. }
  6189. static int ipw_handle_assoc_response(struct net_device *dev,
  6190. struct ieee80211_assoc_response *resp,
  6191. struct ieee80211_network *network)
  6192. {
  6193. struct ipw_priv *priv = ieee80211_priv(dev);
  6194. ipw_qos_association_resp(priv, network);
  6195. return 0;
  6196. }
  6197. static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
  6198. *qos_param)
  6199. {
  6200. return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
  6201. sizeof(*qos_param) * 3, qos_param);
  6202. }
  6203. static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
  6204. *qos_param)
  6205. {
  6206. return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
  6207. qos_param);
  6208. }
  6209. #endif /* CONFIG_IPW2200_QOS */
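/*
 * Build the association request for @network and drive the full
 * association command sequence: fixed rates (if configured), SSID,
 * supported rates, system configuration, receive sensitivity, and
 * finally the associate command itself. The driver's channel, BSSID
 * and status flags are updated before the associate command is sent,
 * since the association may complete before ipw_send_associate
 * returns.
 */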
  6210. static int ipw_associate_network(struct ipw_priv *priv,
  6211. struct ieee80211_network *network,
  6212. struct ipw_supported_rates *rates, int roaming)
  6213. {
  6214. int err;
  6215. DECLARE_SSID_BUF(ssid);
  6216. if (priv->config & CFG_FIXED_RATE)
  6217. ipw_set_fixed_rate(priv, network->mode);
  6218. if (!(priv->config & CFG_STATIC_ESSID)) {
  6219. priv->essid_len = min(network->ssid_len,
  6220. (u8) IW_ESSID_MAX_SIZE);
  6221. memcpy(priv->essid, network->ssid, priv->essid_len);
  6222. }
  6223. network->last_associate = jiffies;
  6224. memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
  6225. priv->assoc_request.channel = network->channel;
  6226. priv->assoc_request.auth_key = 0;
  6227. if ((priv->capability & CAP_PRIVACY_ON) &&
  6228. (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
  6229. priv->assoc_request.auth_type = AUTH_SHARED_KEY;
  6230. priv->assoc_request.auth_key = priv->ieee->sec.active_key;
  6231. if (priv->ieee->sec.level == SEC_LEVEL_1)
  6232. ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
  6233. } else if ((priv->capability & CAP_PRIVACY_ON) &&
  6234. (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
  6235. priv->assoc_request.auth_type = AUTH_LEAP;
  6236. else
  6237. priv->assoc_request.auth_type = AUTH_OPEN;
  6238. if (priv->ieee->wpa_ie_len) {
  6239. priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
  6240. ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
  6241. priv->ieee->wpa_ie_len);
  6242. }
  6243. /*
  6244. * It is valid for our ieee device to support multiple modes, but
  6245. * when it comes to associating to a given network we have to choose
  6246. * just one mode.
  6247. */
  6248. if (network->mode & priv->ieee->mode & IEEE_A)
  6249. priv->assoc_request.ieee_mode = IPW_A_MODE;
  6250. else if (network->mode & priv->ieee->mode & IEEE_G)
  6251. priv->assoc_request.ieee_mode = IPW_G_MODE;
  6252. else if (network->mode & priv->ieee->mode & IEEE_B)
  6253. priv->assoc_request.ieee_mode = IPW_B_MODE;
  6254. priv->assoc_request.capability = cpu_to_le16(network->capability);
  6255. if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
  6256. && !(priv->config & CFG_PREAMBLE_LONG)) {
  6257. priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
  6258. } else {
  6259. priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
  6260. /* Clear the short preamble if we won't be supporting it */
  6261. priv->assoc_request.capability &=
  6262. ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
  6263. }
  6264. /* Clear capability bits that aren't used in Ad Hoc */
  6265. if (priv->ieee->iw_mode == IW_MODE_ADHOC)
  6266. priv->assoc_request.capability &=
  6267. ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
6268. IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
  6269. "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
  6270. roaming ? "Rea" : "A",
  6271. print_ssid(ssid, priv->essid, priv->essid_len),
  6272. network->channel,
  6273. ipw_modes[priv->assoc_request.ieee_mode],
  6274. rates->num_rates,
  6275. (priv->assoc_request.preamble_length ==
  6276. DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
  6277. network->capability &
  6278. WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
  6279. priv->capability & CAP_PRIVACY_ON ? "on " : "off",
  6280. priv->capability & CAP_PRIVACY_ON ?
  6281. (priv->capability & CAP_SHARED_KEY ? "(shared)" :
  6282. "(open)") : "",
  6283. priv->capability & CAP_PRIVACY_ON ? " key=" : "",
  6284. priv->capability & CAP_PRIVACY_ON ?
  6285. '1' + priv->ieee->sec.active_key : '.',
  6286. priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
  6287. priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
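/* In Ad-Hoc mode a zero TSF means no existing IBSS was found, so start
 * one; otherwise (re)associate using the network's TSF. */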
  6288. if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
  6289. (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
  6290. priv->assoc_request.assoc_type = HC_IBSS_START;
  6291. priv->assoc_request.assoc_tsf_msw = 0;
  6292. priv->assoc_request.assoc_tsf_lsw = 0;
  6293. } else {
  6294. if (unlikely(roaming))
  6295. priv->assoc_request.assoc_type = HC_REASSOCIATE;
  6296. else
  6297. priv->assoc_request.assoc_type = HC_ASSOCIATE;
  6298. priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
  6299. priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
  6300. }
  6301. memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
  6302. if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
  6303. memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
  6304. priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
  6305. } else {
  6306. memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
  6307. priv->assoc_request.atim_window = 0;
  6308. }
  6309. priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
  6310. err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
  6311. if (err) {
  6312. IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
  6313. return err;
  6314. }
  6315. rates->ieee_mode = priv->assoc_request.ieee_mode;
  6316. rates->purpose = IPW_RATE_CONNECT;
  6317. ipw_send_supported_rates(priv, rates);
  6318. if (priv->assoc_request.ieee_mode == IPW_G_MODE)
  6319. priv->sys_config.dot11g_auto_detection = 1;
  6320. else
  6321. priv->sys_config.dot11g_auto_detection = 0;
  6322. if (priv->ieee->iw_mode == IW_MODE_ADHOC)
  6323. priv->sys_config.answer_broadcast_ssid_probe = 1;
  6324. else
  6325. priv->sys_config.answer_broadcast_ssid_probe = 0;
  6326. err = ipw_send_system_config(priv);
  6327. if (err) {
  6328. IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
  6329. return err;
  6330. }
  6331. IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
  6332. err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
  6333. if (err) {
  6334. IPW_DEBUG_HC("Attempt to send associate command failed.\n");
  6335. return err;
  6336. }
  6337. /*
  6338. * If preemption is enabled, it is possible for the association
  6339. * to complete before we return from ipw_send_associate. Therefore
* we have to be sure to update our private data first.
  6341. */
  6342. priv->channel = network->channel;
  6343. memcpy(priv->bssid, network->bssid, ETH_ALEN);
  6344. priv->status |= STATUS_ASSOCIATING;
  6345. priv->status &= ~STATUS_SECURITY_UPDATED;
  6346. priv->assoc_network = network;
  6347. #ifdef CONFIG_IPW2200_QOS
  6348. ipw_qos_association(priv, network);
  6349. #endif
  6350. err = ipw_send_associate(priv, &priv->assoc_request);
  6351. if (err) {
  6352. IPW_DEBUG_HC("Attempt to send associate command failed.\n");
  6353. return err;
  6354. }
  6355. IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n",
  6356. print_ssid(ssid, priv->essid, priv->essid_len),
  6357. priv->bssid);
  6358. return 0;
  6359. }
  6360. static void ipw_roam(void *data)
  6361. {
  6362. struct ipw_priv *priv = data;
  6363. struct ieee80211_network *network = NULL;
  6364. struct ipw_network_match match = {
  6365. .network = priv->assoc_network
  6366. };
  6367. /* The roaming process is as follows:
  6368. *
  6369. * 1. Missed beacon threshold triggers the roaming process by
  6370. * setting the status ROAM bit and requesting a scan.
  6371. * 2. When the scan completes, it schedules the ROAM work
  6372. * 3. The ROAM work looks at all of the known networks for one that
  6373. * is a better network than the currently associated. If none
  6374. * found, the ROAM process is over (ROAM bit cleared)
  6375. * 4. If a better network is found, a disassociation request is
  6376. * sent.
  6377. * 5. When the disassociation completes, the roam work is again
  6378. * scheduled. The second time through, the driver is no longer
  6379. * associated, and the newly selected network is sent an
  6380. * association request.
* 6. At this point, the roaming process is complete and the ROAM
  6382. * status bit is cleared.
  6383. */
  6384. /* If we are no longer associated, and the roaming bit is no longer
  6385. * set, then we are not actively roaming, so just return */
  6386. if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
  6387. return;
  6388. if (priv->status & STATUS_ASSOCIATED) {
  6389. /* First pass through ROAM process -- look for a better
  6390. * network */
  6391. unsigned long flags;
  6392. u8 rssi = priv->assoc_network->stats.rssi;
  6393. priv->assoc_network->stats.rssi = -128;
  6394. spin_lock_irqsave(&priv->ieee->lock, flags);
  6395. list_for_each_entry(network, &priv->ieee->network_list, list) {
  6396. if (network != priv->assoc_network)
  6397. ipw_best_network(priv, &match, network, 1);
  6398. }
  6399. spin_unlock_irqrestore(&priv->ieee->lock, flags);
  6400. priv->assoc_network->stats.rssi = rssi;
  6401. if (match.network == priv->assoc_network) {
  6402. IPW_DEBUG_ASSOC("No better APs in this network to "
  6403. "roam to.\n");
  6404. priv->status &= ~STATUS_ROAMING;
  6405. ipw_debug_config(priv);
  6406. return;
  6407. }
  6408. ipw_send_disassociate(priv, 1);
  6409. priv->assoc_network = match.network;
  6410. return;
  6411. }
  6412. /* Second pass through ROAM process -- request association */
  6413. ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
  6414. ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
  6415. priv->status &= ~STATUS_ROAMING;
  6416. }
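/* Work queue wrapper: run the roam logic with the driver mutex held. */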
  6417. static void ipw_bg_roam(struct work_struct *work)
  6418. {
  6419. struct ipw_priv *priv =
  6420. container_of(work, struct ipw_priv, roam);
  6421. mutex_lock(&priv->mutex);
  6422. ipw_roam(priv);
  6423. mutex_unlock(&priv->mutex);
  6424. }
  6425. static int ipw_associate(void *data)
  6426. {
  6427. struct ipw_priv *priv = data;
  6428. struct ieee80211_network *network = NULL;
  6429. struct ipw_network_match match = {
  6430. .network = NULL
  6431. };
  6432. struct ipw_supported_rates *rates;
  6433. struct list_head *element;
  6434. unsigned long flags;
  6435. DECLARE_SSID_BUF(ssid);
  6436. if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
  6437. IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
  6438. return 0;
  6439. }
  6440. if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
  6441. IPW_DEBUG_ASSOC("Not attempting association (already in "
  6442. "progress)\n");
  6443. return 0;
  6444. }
  6445. if (priv->status & STATUS_DISASSOCIATING) {
  6446. IPW_DEBUG_ASSOC("Not attempting association (in "
  6447. "disassociating)\n ");
  6448. queue_work(priv->workqueue, &priv->associate);
  6449. return 0;
  6450. }
  6451. if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
  6452. IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
  6453. "initialized)\n");
  6454. return 0;
  6455. }
  6456. if (!(priv->config & CFG_ASSOCIATE) &&
  6457. !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
  6458. IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
  6459. return 0;
  6460. }
  6461. /* Protect our use of the network_list */
  6462. spin_lock_irqsave(&priv->ieee->lock, flags);
  6463. list_for_each_entry(network, &priv->ieee->network_list, list)
  6464. ipw_best_network(priv, &match, network, 0);
  6465. network = match.network;
  6466. rates = &match.rates;
  6467. if (network == NULL &&
  6468. priv->ieee->iw_mode == IW_MODE_ADHOC &&
  6469. priv->config & CFG_ADHOC_CREATE &&
  6470. priv->config & CFG_STATIC_ESSID &&
  6471. priv->config & CFG_STATIC_CHANNEL) {
  6472. /* Use oldest network if the free list is empty */
  6473. if (list_empty(&priv->ieee->network_free_list)) {
  6474. struct ieee80211_network *oldest = NULL;
  6475. struct ieee80211_network *target;
  6476. list_for_each_entry(target, &priv->ieee->network_list, list) {
  6477. if ((oldest == NULL) ||
  6478. (target->last_scanned < oldest->last_scanned))
  6479. oldest = target;
  6480. }
  6481. /* If there are no more slots, expire the oldest */
  6482. list_del(&oldest->list);
  6483. target = oldest;
  6484. IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
  6485. "network list.\n",
  6486. print_ssid(ssid, target->ssid,
  6487. target->ssid_len),
  6488. target->bssid);
  6489. list_add_tail(&target->list,
  6490. &priv->ieee->network_free_list);
  6491. }
  6492. element = priv->ieee->network_free_list.next;
  6493. network = list_entry(element, struct ieee80211_network, list);
  6494. ipw_adhoc_create(priv, network);
  6495. rates = &priv->rates;
  6496. list_del(element);
  6497. list_add_tail(&network->list, &priv->ieee->network_list);
  6498. }
  6499. spin_unlock_irqrestore(&priv->ieee->lock, flags);
  6500. /* If we reached the end of the list, then we don't have any valid
  6501. * matching APs */
  6502. if (!network) {
  6503. ipw_debug_config(priv);
  6504. if (!(priv->status & STATUS_SCANNING)) {
  6505. if (!(priv->config & CFG_SPEED_SCAN))
  6506. queue_delayed_work(priv->workqueue,
  6507. &priv->request_scan,
  6508. SCAN_INTERVAL);
  6509. else
  6510. queue_delayed_work(priv->workqueue,
  6511. &priv->request_scan, 0);
  6512. }
  6513. return 0;
  6514. }
  6515. ipw_associate_network(priv, network, rates, 0);
  6516. return 1;
  6517. }
  6518. static void ipw_bg_associate(struct work_struct *work)
  6519. {
  6520. struct ipw_priv *priv =
  6521. container_of(work, struct ipw_priv, associate);
  6522. mutex_lock(&priv->mutex);
  6523. ipw_associate(priv);
  6524. mutex_unlock(&priv->mutex);
  6525. }
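/* Hardware decryption leaves the protected bit and the security header and
 * trailer (CCMP header/MIC or WEP IV/ICV) in place; strip them so the
 * 802.11 stack sees a plain frame. */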
  6526. static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
  6527. struct sk_buff *skb)
  6528. {
  6529. struct ieee80211_hdr *hdr;
  6530. u16 fc;
  6531. hdr = (struct ieee80211_hdr *)skb->data;
  6532. fc = le16_to_cpu(hdr->frame_control);
  6533. if (!(fc & IEEE80211_FCTL_PROTECTED))
  6534. return;
  6535. fc &= ~IEEE80211_FCTL_PROTECTED;
  6536. hdr->frame_control = cpu_to_le16(fc);
  6537. switch (priv->ieee->sec.level) {
  6538. case SEC_LEVEL_3:
  6539. /* Remove CCMP HDR */
  6540. memmove(skb->data + IEEE80211_3ADDR_LEN,
  6541. skb->data + IEEE80211_3ADDR_LEN + 8,
  6542. skb->len - IEEE80211_3ADDR_LEN - 8);
  6543. skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
  6544. break;
  6545. case SEC_LEVEL_2:
  6546. break;
  6547. case SEC_LEVEL_1:
  6548. /* Remove IV */
  6549. memmove(skb->data + IEEE80211_3ADDR_LEN,
  6550. skb->data + IEEE80211_3ADDR_LEN + 4,
  6551. skb->len - IEEE80211_3ADDR_LEN - 4);
  6552. skb_trim(skb, skb->len - 8); /* IV + ICV */
  6553. break;
  6554. case SEC_LEVEL_0:
  6555. break;
  6556. default:
  6557. printk(KERN_ERR "Unknow security level %d\n",
  6558. priv->ieee->sec.level);
  6559. break;
  6560. }
  6561. }
  6562. static void ipw_handle_data_packet(struct ipw_priv *priv,
  6563. struct ipw_rx_mem_buffer *rxb,
  6564. struct ieee80211_rx_stats *stats)
  6565. {
  6566. struct net_device *dev = priv->net_dev;
  6567. struct ieee80211_hdr_4addr *hdr;
  6568. struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
  6569. /* We received data from the HW, so stop the watchdog */
  6570. dev->trans_start = jiffies;
  6571. /* We only process data packets if the
  6572. * interface is open */
  6573. if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
  6574. skb_tailroom(rxb->skb))) {
  6575. dev->stats.rx_errors++;
  6576. priv->wstats.discard.misc++;
  6577. IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
  6578. return;
  6579. } else if (unlikely(!netif_running(priv->net_dev))) {
  6580. dev->stats.rx_dropped++;
  6581. priv->wstats.discard.misc++;
  6582. IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
  6583. return;
  6584. }
  6585. /* Advance skb->data to the start of the actual payload */
  6586. skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
  6587. /* Set the size of the skb to the size of the frame */
  6588. skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
  6589. IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
  6590. /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
  6591. hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
  6592. if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
  6593. (is_multicast_ether_addr(hdr->addr1) ?
  6594. !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
  6595. ipw_rebuild_decrypted_skb(priv, rxb->skb);
  6596. if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
  6597. dev->stats.rx_errors++;
  6598. else { /* ieee80211_rx succeeded, so it now owns the SKB */
  6599. rxb->skb = NULL;
  6600. __ipw_led_activity_on(priv);
  6601. }
  6602. }
  6603. #ifdef CONFIG_IPW2200_RADIOTAP
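/* Monitor-mode Rx path: build a radiotap header in front of the frame from
 * the per-frame metadata reported by the firmware, then hand it to the
 * 802.11 stack. */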
  6604. static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
  6605. struct ipw_rx_mem_buffer *rxb,
  6606. struct ieee80211_rx_stats *stats)
  6607. {
  6608. struct net_device *dev = priv->net_dev;
  6609. struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
  6610. struct ipw_rx_frame *frame = &pkt->u.frame;
  6611. /* initial pull of some data */
  6612. u16 received_channel = frame->received_channel;
  6613. u8 antennaAndPhy = frame->antennaAndPhy;
  6614. s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
  6615. u16 pktrate = frame->rate;
  6616. /* Magic struct that slots into the radiotap header -- no reason
  6617. * to build this manually element by element, we can write it much
  6618. * more efficiently than we can parse it. ORDER MATTERS HERE */
  6619. struct ipw_rt_hdr *ipw_rt;
  6620. short len = le16_to_cpu(pkt->u.frame.length);
  6621. /* We received data from the HW, so stop the watchdog */
  6622. dev->trans_start = jiffies;
  6623. /* We only process data packets if the
  6624. * interface is open */
  6625. if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
  6626. skb_tailroom(rxb->skb))) {
  6627. dev->stats.rx_errors++;
  6628. priv->wstats.discard.misc++;
  6629. IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
  6630. return;
  6631. } else if (unlikely(!netif_running(priv->net_dev))) {
  6632. dev->stats.rx_dropped++;
  6633. priv->wstats.discard.misc++;
  6634. IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
  6635. return;
  6636. }
  6637. /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
  6638. * that now */
  6639. if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
  6640. /* FIXME: Should alloc bigger skb instead */
  6641. dev->stats.rx_dropped++;
  6642. priv->wstats.discard.misc++;
  6643. IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
  6644. return;
  6645. }
  6646. /* copy the frame itself */
  6647. memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
  6648. rxb->skb->data + IPW_RX_FRAME_SIZE, len);
  6649. ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
  6650. ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
  6651. ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
  6652. ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total header+data */
  6653. /* Big bitfield of all the fields we provide in radiotap */
  6654. ipw_rt->rt_hdr.it_present = cpu_to_le32(
  6655. (1 << IEEE80211_RADIOTAP_TSFT) |
  6656. (1 << IEEE80211_RADIOTAP_FLAGS) |
  6657. (1 << IEEE80211_RADIOTAP_RATE) |
  6658. (1 << IEEE80211_RADIOTAP_CHANNEL) |
  6659. (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
  6660. (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
  6661. (1 << IEEE80211_RADIOTAP_ANTENNA));
  6662. /* Zero the flags, we'll add to them as we go */
  6663. ipw_rt->rt_flags = 0;
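/* parent_tsf is a little-endian byte array; only the low 32 bits of the
 * 64-bit TSF are assembled here, the upper bytes of the u64 stay zero. */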
  6664. ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
  6665. frame->parent_tsf[2] << 16 |
  6666. frame->parent_tsf[1] << 8 |
  6667. frame->parent_tsf[0]);
  6668. /* Convert signal to DBM */
  6669. ipw_rt->rt_dbmsignal = antsignal;
  6670. ipw_rt->rt_dbmnoise = frame->noise;
  6671. /* Convert the channel data and set the flags */
  6672. ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
  6673. if (received_channel > 14) { /* 802.11a */
  6674. ipw_rt->rt_chbitmask =
  6675. cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
  6676. } else if (antennaAndPhy & 32) { /* 802.11b */
  6677. ipw_rt->rt_chbitmask =
  6678. cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
  6679. } else { /* 802.11g */
  6680. ipw_rt->rt_chbitmask =
  6681. cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
  6682. }
  6683. /* set the rate in multiples of 500k/s */
  6684. switch (pktrate) {
  6685. case IPW_TX_RATE_1MB:
  6686. ipw_rt->rt_rate = 2;
  6687. break;
  6688. case IPW_TX_RATE_2MB:
  6689. ipw_rt->rt_rate = 4;
  6690. break;
  6691. case IPW_TX_RATE_5MB:
  6692. ipw_rt->rt_rate = 10;
  6693. break;
  6694. case IPW_TX_RATE_6MB:
  6695. ipw_rt->rt_rate = 12;
  6696. break;
  6697. case IPW_TX_RATE_9MB:
  6698. ipw_rt->rt_rate = 18;
  6699. break;
  6700. case IPW_TX_RATE_11MB:
  6701. ipw_rt->rt_rate = 22;
  6702. break;
  6703. case IPW_TX_RATE_12MB:
  6704. ipw_rt->rt_rate = 24;
  6705. break;
  6706. case IPW_TX_RATE_18MB:
  6707. ipw_rt->rt_rate = 36;
  6708. break;
  6709. case IPW_TX_RATE_24MB:
  6710. ipw_rt->rt_rate = 48;
  6711. break;
  6712. case IPW_TX_RATE_36MB:
  6713. ipw_rt->rt_rate = 72;
  6714. break;
  6715. case IPW_TX_RATE_48MB:
  6716. ipw_rt->rt_rate = 96;
  6717. break;
  6718. case IPW_TX_RATE_54MB:
  6719. ipw_rt->rt_rate = 108;
  6720. break;
  6721. default:
  6722. ipw_rt->rt_rate = 0;
  6723. break;
  6724. }
  6725. /* antenna number */
  6726. ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
  6727. /* set the preamble flag if we have it */
  6728. if ((antennaAndPhy & 64))
  6729. ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
  6730. /* Set the size of the skb to the size of the frame */
  6731. skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
  6732. IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
  6733. if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
  6734. dev->stats.rx_errors++;
  6735. else { /* ieee80211_rx succeeded, so it now owns the SKB */
  6736. rxb->skb = NULL;
  6737. /* no LED during capture */
  6738. }
  6739. }
  6740. #endif
  6741. #ifdef CONFIG_IPW2200_PROMISCUOUS
  6742. #define ieee80211_is_probe_response(fc) \
  6743. ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
  6744. (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
  6745. #define ieee80211_is_management(fc) \
  6746. ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
  6747. #define ieee80211_is_control(fc) \
  6748. ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
  6749. #define ieee80211_is_data(fc) \
  6750. ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
  6751. #define ieee80211_is_assoc_request(fc) \
  6752. ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
  6753. #define ieee80211_is_reassoc_request(fc) \
  6754. ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
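/* Frame-control classification helpers for the promiscuous Rx code. */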
  6755. static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
  6756. struct ipw_rx_mem_buffer *rxb,
  6757. struct ieee80211_rx_stats *stats)
  6758. {
  6759. struct net_device *dev = priv->prom_net_dev;
  6760. struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
  6761. struct ipw_rx_frame *frame = &pkt->u.frame;
  6762. struct ipw_rt_hdr *ipw_rt;
  6763. /* First cache any information we need before we overwrite
  6764. * the information provided in the skb from the hardware */
  6765. struct ieee80211_hdr *hdr;
  6766. u16 channel = frame->received_channel;
  6767. u8 phy_flags = frame->antennaAndPhy;
  6768. s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
  6769. s8 noise = frame->noise;
  6770. u8 rate = frame->rate;
  6771. short len = le16_to_cpu(pkt->u.frame.length);
  6772. struct sk_buff *skb;
  6773. int hdr_only = 0;
  6774. u16 filter = priv->prom_priv->filter;
  6775. /* If the filter is set to not include Rx frames then return */
  6776. if (filter & IPW_PROM_NO_RX)
  6777. return;
  6778. /* We received data from the HW, so stop the watchdog */
  6779. dev->trans_start = jiffies;
  6780. if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
  6781. dev->stats.rx_errors++;
  6782. IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
  6783. return;
  6784. }
  6785. /* We only process data packets if the interface is open */
  6786. if (unlikely(!netif_running(dev))) {
  6787. dev->stats.rx_dropped++;
  6788. IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
  6789. return;
  6790. }
  6791. /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
  6792. * that now */
  6793. if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
  6794. /* FIXME: Should alloc bigger skb instead */
  6795. dev->stats.rx_dropped++;
  6796. IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
  6797. return;
  6798. }
  6799. hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
  6800. if (ieee80211_is_management(le16_to_cpu(hdr->frame_control))) {
  6801. if (filter & IPW_PROM_NO_MGMT)
  6802. return;
  6803. if (filter & IPW_PROM_MGMT_HEADER_ONLY)
  6804. hdr_only = 1;
  6805. } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_control))) {
  6806. if (filter & IPW_PROM_NO_CTL)
  6807. return;
  6808. if (filter & IPW_PROM_CTL_HEADER_ONLY)
  6809. hdr_only = 1;
  6810. } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_control))) {
  6811. if (filter & IPW_PROM_NO_DATA)
  6812. return;
  6813. if (filter & IPW_PROM_DATA_HEADER_ONLY)
  6814. hdr_only = 1;
  6815. }
  6816. /* Copy the SKB since this is for the promiscuous side */
  6817. skb = skb_copy(rxb->skb, GFP_ATOMIC);
  6818. if (skb == NULL) {
IPW_ERROR("skb_copy failed for promiscuous copy.\n");
  6820. return;
  6821. }
  6822. /* copy the frame data to write after where the radiotap header goes */
  6823. ipw_rt = (void *)skb->data;
  6824. if (hdr_only)
  6825. len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
  6826. memcpy(ipw_rt->payload, hdr, len);
  6827. ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
  6828. ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
  6829. ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total header+data */
  6830. /* Set the size of the skb to the size of the frame */
  6831. skb_put(skb, sizeof(*ipw_rt) + len);
  6832. /* Big bitfield of all the fields we provide in radiotap */
  6833. ipw_rt->rt_hdr.it_present = cpu_to_le32(
  6834. (1 << IEEE80211_RADIOTAP_TSFT) |
  6835. (1 << IEEE80211_RADIOTAP_FLAGS) |
  6836. (1 << IEEE80211_RADIOTAP_RATE) |
  6837. (1 << IEEE80211_RADIOTAP_CHANNEL) |
  6838. (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
  6839. (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
  6840. (1 << IEEE80211_RADIOTAP_ANTENNA));
  6841. /* Zero the flags, we'll add to them as we go */
  6842. ipw_rt->rt_flags = 0;
  6843. ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
  6844. frame->parent_tsf[2] << 16 |
  6845. frame->parent_tsf[1] << 8 |
  6846. frame->parent_tsf[0]);
  6847. /* Convert to DBM */
  6848. ipw_rt->rt_dbmsignal = signal;
  6849. ipw_rt->rt_dbmnoise = noise;
  6850. /* Convert the channel data and set the flags */
  6851. ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
  6852. if (channel > 14) { /* 802.11a */
  6853. ipw_rt->rt_chbitmask =
  6854. cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
  6855. } else if (phy_flags & (1 << 5)) { /* 802.11b */
  6856. ipw_rt->rt_chbitmask =
  6857. cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
  6858. } else { /* 802.11g */
  6859. ipw_rt->rt_chbitmask =
  6860. cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
  6861. }
  6862. /* set the rate in multiples of 500k/s */
  6863. switch (rate) {
  6864. case IPW_TX_RATE_1MB:
  6865. ipw_rt->rt_rate = 2;
  6866. break;
  6867. case IPW_TX_RATE_2MB:
  6868. ipw_rt->rt_rate = 4;
  6869. break;
  6870. case IPW_TX_RATE_5MB:
  6871. ipw_rt->rt_rate = 10;
  6872. break;
  6873. case IPW_TX_RATE_6MB:
  6874. ipw_rt->rt_rate = 12;
  6875. break;
  6876. case IPW_TX_RATE_9MB:
  6877. ipw_rt->rt_rate = 18;
  6878. break;
  6879. case IPW_TX_RATE_11MB:
  6880. ipw_rt->rt_rate = 22;
  6881. break;
  6882. case IPW_TX_RATE_12MB:
  6883. ipw_rt->rt_rate = 24;
  6884. break;
  6885. case IPW_TX_RATE_18MB:
  6886. ipw_rt->rt_rate = 36;
  6887. break;
  6888. case IPW_TX_RATE_24MB:
  6889. ipw_rt->rt_rate = 48;
  6890. break;
  6891. case IPW_TX_RATE_36MB:
  6892. ipw_rt->rt_rate = 72;
  6893. break;
  6894. case IPW_TX_RATE_48MB:
  6895. ipw_rt->rt_rate = 96;
  6896. break;
  6897. case IPW_TX_RATE_54MB:
  6898. ipw_rt->rt_rate = 108;
  6899. break;
  6900. default:
  6901. ipw_rt->rt_rate = 0;
  6902. break;
  6903. }
  6904. /* antenna number */
  6905. ipw_rt->rt_antenna = (phy_flags & 3);
  6906. /* set the preamble flag if we have it */
  6907. if (phy_flags & (1 << 6))
  6908. ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
  6909. IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
  6910. if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
  6911. dev->stats.rx_errors++;
  6912. dev_kfree_skb_any(skb);
  6913. }
  6914. }
  6915. #endif
  6916. static int is_network_packet(struct ipw_priv *priv,
  6917. struct ieee80211_hdr_4addr *header)
  6918. {
/* Filter incoming packets to determine if they are targeted toward
  6920. * this network, discarding packets coming from ourselves */
  6921. switch (priv->ieee->iw_mode) {
  6922. case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
  6923. /* packets from our adapter are dropped (echo) */
  6924. if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
  6925. return 0;
  6926. /* {broad,multi}cast packets to our BSSID go through */
  6927. if (is_multicast_ether_addr(header->addr1))
  6928. return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
  6929. /* packets to our adapter go through */
  6930. return !memcmp(header->addr1, priv->net_dev->dev_addr,
  6931. ETH_ALEN);
  6932. case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
  6933. /* packets from our adapter are dropped (echo) */
  6934. if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
  6935. return 0;
  6936. /* {broad,multi}cast packets to our BSS go through */
  6937. if (is_multicast_ether_addr(header->addr1))
  6938. return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
  6939. /* packets to our adapter go through */
  6940. return !memcmp(header->addr1, priv->net_dev->dev_addr,
  6941. ETH_ALEN);
  6942. }
  6943. return 1;
  6944. }
  6945. #define IPW_PACKET_RETRY_TIME HZ
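/* Detect retransmissions we have already handled by tracking the last
 * sequence and fragment numbers per sender (hashed by MAC in IBSS, a single
 * entry in infrastructure mode) for up to IPW_PACKET_RETRY_TIME. */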
  6946. static int is_duplicate_packet(struct ipw_priv *priv,
  6947. struct ieee80211_hdr_4addr *header)
  6948. {
  6949. u16 sc = le16_to_cpu(header->seq_ctl);
  6950. u16 seq = WLAN_GET_SEQ_SEQ(sc);
  6951. u16 frag = WLAN_GET_SEQ_FRAG(sc);
  6952. u16 *last_seq, *last_frag;
  6953. unsigned long *last_time;
  6954. switch (priv->ieee->iw_mode) {
  6955. case IW_MODE_ADHOC:
  6956. {
  6957. struct list_head *p;
  6958. struct ipw_ibss_seq *entry = NULL;
  6959. u8 *mac = header->addr2;
  6960. int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
  6961. __list_for_each(p, &priv->ibss_mac_hash[index]) {
  6962. entry =
  6963. list_entry(p, struct ipw_ibss_seq, list);
  6964. if (!memcmp(entry->mac, mac, ETH_ALEN))
  6965. break;
  6966. }
  6967. if (p == &priv->ibss_mac_hash[index]) {
  6968. entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
  6969. if (!entry) {
  6970. IPW_ERROR
  6971. ("Cannot malloc new mac entry\n");
  6972. return 0;
  6973. }
  6974. memcpy(entry->mac, mac, ETH_ALEN);
  6975. entry->seq_num = seq;
  6976. entry->frag_num = frag;
  6977. entry->packet_time = jiffies;
  6978. list_add(&entry->list,
  6979. &priv->ibss_mac_hash[index]);
  6980. return 0;
  6981. }
  6982. last_seq = &entry->seq_num;
  6983. last_frag = &entry->frag_num;
  6984. last_time = &entry->packet_time;
  6985. break;
  6986. }
  6987. case IW_MODE_INFRA:
  6988. last_seq = &priv->last_seq_num;
  6989. last_frag = &priv->last_frag_num;
  6990. last_time = &priv->last_packet_time;
  6991. break;
  6992. default:
  6993. return 0;
  6994. }
  6995. if ((*last_seq == seq) &&
  6996. time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
  6997. if (*last_frag == frag)
  6998. goto drop;
  6999. if (*last_frag + 1 != frag)
  7000. /* out-of-order fragment */
  7001. goto drop;
  7002. } else
  7003. *last_seq = seq;
  7004. *last_frag = frag;
  7005. *last_time = jiffies;
  7006. return 0;
  7007. drop:
/* This check is commented out because the card has been observed to
* receive duplicate packets without the FCTL_RETRY bit set in IBSS
* mode with fragmentation enabled.
BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
  7012. return 1;
  7013. }
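/* Pass management frames to the 802.11 stack; in Ad-Hoc mode, add stations
 * seen in beacons or probe responses for our BSSID, and if CFG_NET_STATS is
 * set also deliver the raw frame (with rx stats prepended) via netif_rx. */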
  7014. static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
  7015. struct ipw_rx_mem_buffer *rxb,
  7016. struct ieee80211_rx_stats *stats)
  7017. {
  7018. struct sk_buff *skb = rxb->skb;
  7019. struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
  7020. struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
  7021. (skb->data + IPW_RX_FRAME_SIZE);
  7022. ieee80211_rx_mgt(priv->ieee, header, stats);
  7023. if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
  7024. ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
  7025. IEEE80211_STYPE_PROBE_RESP) ||
  7026. (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
  7027. IEEE80211_STYPE_BEACON))) {
  7028. if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
  7029. ipw_add_station(priv, header->addr2);
  7030. }
  7031. if (priv->config & CFG_NET_STATS) {
  7032. IPW_DEBUG_HC("sending stat packet\n");
  7033. /* Set the size of the skb to the size of the full
  7034. * ipw header and 802.11 frame */
  7035. skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
  7036. IPW_RX_FRAME_SIZE);
  7037. /* Advance past the ipw packet header to the 802.11 frame */
  7038. skb_pull(skb, IPW_RX_FRAME_SIZE);
  7039. /* Push the ieee80211_rx_stats before the 802.11 frame */
  7040. memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
  7041. skb->dev = priv->ieee->dev;
  7042. /* Point raw at the ieee80211_stats */
  7043. skb_reset_mac_header(skb);
  7044. skb->pkt_type = PACKET_OTHERHOST;
  7045. skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
  7046. memset(skb->cb, 0, sizeof(rxb->skb->cb));
  7047. netif_rx(skb);
  7048. rxb->skb = NULL;
  7049. }
  7050. }
  7051. /*
* Main entry function for receiving a packet with 802.11 headers. This
* should be called whenever the FW has notified us that there is a new
* skb in the receive queue.
  7055. */
  7056. static void ipw_rx(struct ipw_priv *priv)
  7057. {
  7058. struct ipw_rx_mem_buffer *rxb;
  7059. struct ipw_rx_packet *pkt;
  7060. struct ieee80211_hdr_4addr *header;
  7061. u32 r, w, i;
  7062. u8 network_packet;
  7063. u8 fill_rx = 0;
  7064. r = ipw_read32(priv, IPW_RX_READ_INDEX);
  7065. w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
  7066. i = priv->rxq->read;
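/* Once enough of the Rx queue has been consumed, replenish buffers while
 * draining this batch so the firmware does not run out of Rx buffers. */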
  7067. if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
  7068. fill_rx = 1;
  7069. while (i != r) {
  7070. rxb = priv->rxq->queue[i];
  7071. if (unlikely(rxb == NULL)) {
  7072. printk(KERN_CRIT "Queue not allocated!\n");
  7073. break;
  7074. }
  7075. priv->rxq->queue[i] = NULL;
  7076. pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
  7077. IPW_RX_BUF_SIZE,
  7078. PCI_DMA_FROMDEVICE);
  7079. pkt = (struct ipw_rx_packet *)rxb->skb->data;
  7080. IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
  7081. pkt->header.message_type,
  7082. pkt->header.rx_seq_num, pkt->header.control_bits);
  7083. switch (pkt->header.message_type) {
  7084. case RX_FRAME_TYPE: /* 802.11 frame */ {
  7085. struct ieee80211_rx_stats stats = {
  7086. .rssi = pkt->u.frame.rssi_dbm -
  7087. IPW_RSSI_TO_DBM,
  7088. .signal =
  7089. le16_to_cpu(pkt->u.frame.rssi_dbm) -
  7090. IPW_RSSI_TO_DBM + 0x100,
  7091. .noise =
  7092. le16_to_cpu(pkt->u.frame.noise),
  7093. .rate = pkt->u.frame.rate,
  7094. .mac_time = jiffies,
  7095. .received_channel =
  7096. pkt->u.frame.received_channel,
  7097. .freq =
  7098. (pkt->u.frame.
  7099. control & (1 << 0)) ?
  7100. IEEE80211_24GHZ_BAND :
  7101. IEEE80211_52GHZ_BAND,
  7102. .len = le16_to_cpu(pkt->u.frame.length),
  7103. };
  7104. if (stats.rssi != 0)
  7105. stats.mask |= IEEE80211_STATMASK_RSSI;
  7106. if (stats.signal != 0)
  7107. stats.mask |= IEEE80211_STATMASK_SIGNAL;
  7108. if (stats.noise != 0)
  7109. stats.mask |= IEEE80211_STATMASK_NOISE;
  7110. if (stats.rate != 0)
  7111. stats.mask |= IEEE80211_STATMASK_RATE;
  7112. priv->rx_packets++;
  7113. #ifdef CONFIG_IPW2200_PROMISCUOUS
  7114. if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
  7115. ipw_handle_promiscuous_rx(priv, rxb, &stats);
  7116. #endif
  7117. #ifdef CONFIG_IPW2200_MONITOR
  7118. if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
  7119. #ifdef CONFIG_IPW2200_RADIOTAP
  7120. ipw_handle_data_packet_monitor(priv,
  7121. rxb,
  7122. &stats);
  7123. #else
  7124. ipw_handle_data_packet(priv, rxb,
  7125. &stats);
  7126. #endif
  7127. break;
  7128. }
  7129. #endif
  7130. header =
  7131. (struct ieee80211_hdr_4addr *)(rxb->skb->
  7132. data +
  7133. IPW_RX_FRAME_SIZE);
  7134. /* TODO: Check Ad-Hoc dest/source and make sure
  7135. * that we are actually parsing these packets
  7136. * correctly -- we should probably use the
  7137. * frame control of the packet and disregard
  7138. * the current iw_mode */
  7139. network_packet =
  7140. is_network_packet(priv, header);
  7141. if (network_packet && priv->assoc_network) {
  7142. priv->assoc_network->stats.rssi =
  7143. stats.rssi;
  7144. priv->exp_avg_rssi =
  7145. exponential_average(priv->exp_avg_rssi,
  7146. stats.rssi, DEPTH_RSSI);
  7147. }
  7148. IPW_DEBUG_RX("Frame: len=%u\n",
  7149. le16_to_cpu(pkt->u.frame.length));
  7150. if (le16_to_cpu(pkt->u.frame.length) <
  7151. ieee80211_get_hdrlen(le16_to_cpu(
  7152. header->frame_ctl))) {
  7153. IPW_DEBUG_DROP
  7154. ("Received packet is too small. "
  7155. "Dropping.\n");
  7156. priv->net_dev->stats.rx_errors++;
  7157. priv->wstats.discard.misc++;
  7158. break;
  7159. }
  7160. switch (WLAN_FC_GET_TYPE
  7161. (le16_to_cpu(header->frame_ctl))) {
  7162. case IEEE80211_FTYPE_MGMT:
  7163. ipw_handle_mgmt_packet(priv, rxb,
  7164. &stats);
  7165. break;
  7166. case IEEE80211_FTYPE_CTL:
  7167. break;
  7168. case IEEE80211_FTYPE_DATA:
  7169. if (unlikely(!network_packet ||
  7170. is_duplicate_packet(priv,
  7171. header)))
  7172. {
  7173. IPW_DEBUG_DROP("Dropping: "
  7174. "%pM, "
  7175. "%pM, "
  7176. "%pM\n",
  7177. header->addr1,
  7178. header->addr2,
  7179. header->addr3);
  7180. break;
  7181. }
  7182. ipw_handle_data_packet(priv, rxb,
  7183. &stats);
  7184. break;
  7185. }
  7186. break;
  7187. }
  7188. case RX_HOST_NOTIFICATION_TYPE:{
  7189. IPW_DEBUG_RX
  7190. ("Notification: subtype=%02X flags=%02X size=%d\n",
  7191. pkt->u.notification.subtype,
  7192. pkt->u.notification.flags,
  7193. le16_to_cpu(pkt->u.notification.size));
  7194. ipw_rx_notification(priv, &pkt->u.notification);
  7195. break;
  7196. }
  7197. default:
  7198. IPW_DEBUG_RX("Bad Rx packet of type %d\n",
  7199. pkt->header.message_type);
  7200. break;
  7201. }
  7202. /* For now we just don't re-use anything. We can tweak this
  7203. * later to try and re-use notification packets and SKBs that
  7204. * fail to Rx correctly */
  7205. if (rxb->skb != NULL) {
  7206. dev_kfree_skb_any(rxb->skb);
  7207. rxb->skb = NULL;
  7208. }
  7209. pci_unmap_single(priv->pci_dev, rxb->dma_addr,
  7210. IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
  7211. list_add_tail(&rxb->list, &priv->rxq->rx_used);
  7212. i = (i + 1) % RX_QUEUE_SIZE;
/* If there are a lot of unused frames, restock the Rx queue
  7214. * so the ucode won't assert */
  7215. if (fill_rx) {
  7216. priv->rxq->read = i;
  7217. ipw_rx_queue_replenish(priv);
  7218. }
  7219. }
  7220. /* Backtrack one entry */
  7221. priv->rxq->read = i;
  7222. ipw_rx_queue_restock(priv);
  7223. }
  7224. #define DEFAULT_RTS_THRESHOLD 2304U
  7225. #define MIN_RTS_THRESHOLD 1U
  7226. #define MAX_RTS_THRESHOLD 2304U
  7227. #define DEFAULT_BEACON_INTERVAL 100U
  7228. #define DEFAULT_SHORT_RETRY_LIMIT 7U
  7229. #define DEFAULT_LONG_RETRY_LIMIT 4U
  7230. /**
  7231. * ipw_sw_reset
  7232. * @option: options to control different reset behaviour
  7233. * 0 = reset everything except the 'disable' module_param
  7234. * 1 = reset everything and print out driver info (for probe only)
  7235. * 2 = reset everything
  7236. */
  7237. static int ipw_sw_reset(struct ipw_priv *priv, int option)
  7238. {
  7239. int band, modulation;
  7240. int old_mode = priv->ieee->iw_mode;
  7241. /* Initialize module parameter values here */
  7242. priv->config = 0;
  7243. /* We default to disabling the LED code as right now it causes
  7244. * too many systems to lock up... */
  7245. if (!led)
  7246. priv->config |= CFG_NO_LED;
  7247. if (associate)
  7248. priv->config |= CFG_ASSOCIATE;
  7249. else
  7250. IPW_DEBUG_INFO("Auto associate disabled.\n");
  7251. if (auto_create)
  7252. priv->config |= CFG_ADHOC_CREATE;
  7253. else
  7254. IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
  7255. priv->config &= ~CFG_STATIC_ESSID;
  7256. priv->essid_len = 0;
  7257. memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
  7258. if (disable && option) {
  7259. priv->status |= STATUS_RF_KILL_SW;
  7260. IPW_DEBUG_INFO("Radio disabled.\n");
  7261. }
  7262. if (channel != 0) {
  7263. priv->config |= CFG_STATIC_CHANNEL;
  7264. priv->channel = channel;
  7265. IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
  7266. /* TODO: Validate that provided channel is in range */
  7267. }
  7268. #ifdef CONFIG_IPW2200_QOS
  7269. ipw_qos_init(priv, qos_enable, qos_burst_enable,
  7270. burst_duration_CCK, burst_duration_OFDM);
  7271. #endif /* CONFIG_IPW2200_QOS */
  7272. switch (mode) {
  7273. case 1:
  7274. priv->ieee->iw_mode = IW_MODE_ADHOC;
  7275. priv->net_dev->type = ARPHRD_ETHER;
  7276. break;
  7277. #ifdef CONFIG_IPW2200_MONITOR
  7278. case 2:
  7279. priv->ieee->iw_mode = IW_MODE_MONITOR;
  7280. #ifdef CONFIG_IPW2200_RADIOTAP
  7281. priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
  7282. #else
  7283. priv->net_dev->type = ARPHRD_IEEE80211;
  7284. #endif
  7285. break;
  7286. #endif
  7287. default:
  7288. case 0:
  7289. priv->net_dev->type = ARPHRD_ETHER;
  7290. priv->ieee->iw_mode = IW_MODE_INFRA;
  7291. break;
  7292. }
  7293. if (hwcrypto) {
  7294. priv->ieee->host_encrypt = 0;
  7295. priv->ieee->host_encrypt_msdu = 0;
  7296. priv->ieee->host_decrypt = 0;
  7297. priv->ieee->host_mc_decrypt = 0;
  7298. }
  7299. IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
/* The IPW2200/2915 is able to do hardware fragmentation. */
  7301. priv->ieee->host_open_frag = 0;
  7302. if ((priv->pci_dev->device == 0x4223) ||
  7303. (priv->pci_dev->device == 0x4224)) {
  7304. if (option == 1)
  7305. printk(KERN_INFO DRV_NAME
  7306. ": Detected Intel PRO/Wireless 2915ABG Network "
  7307. "Connection\n");
  7308. priv->ieee->abg_true = 1;
  7309. band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
  7310. modulation = IEEE80211_OFDM_MODULATION |
  7311. IEEE80211_CCK_MODULATION;
  7312. priv->adapter = IPW_2915ABG;
  7313. priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
  7314. } else {
  7315. if (option == 1)
  7316. printk(KERN_INFO DRV_NAME
  7317. ": Detected Intel PRO/Wireless 2200BG Network "
  7318. "Connection\n");
  7319. priv->ieee->abg_true = 0;
  7320. band = IEEE80211_24GHZ_BAND;
  7321. modulation = IEEE80211_OFDM_MODULATION |
  7322. IEEE80211_CCK_MODULATION;
  7323. priv->adapter = IPW_2200BG;
  7324. priv->ieee->mode = IEEE_G | IEEE_B;
  7325. }
  7326. priv->ieee->freq_band = band;
  7327. priv->ieee->modulation = modulation;
  7328. priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
  7329. priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
  7330. priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
  7331. priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
  7332. priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
  7333. priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
  7334. /* If power management is turned on, default to AC mode */
  7335. priv->power_mode = IPW_POWER_AC;
  7336. priv->tx_power = IPW_TX_POWER_DEFAULT;
  7337. return old_mode == priv->ieee->iw_mode;
  7338. }
  7339. /*
  7340. * This file defines the Wireless Extension handlers. It does not
  7341. * define any methods of hardware manipulation and relies on the
  7342. * functions defined in ipw_main to provide the HW interaction.
  7343. *
  7344. * The exception to this is the use of the ipw_get_ordinal()
* function used to poll the hardware vs. making unnecessary calls.
  7346. *
  7347. */
  7348. static int ipw_wx_get_name(struct net_device *dev,
  7349. struct iw_request_info *info,
  7350. union iwreq_data *wrqu, char *extra)
  7351. {
  7352. struct ipw_priv *priv = ieee80211_priv(dev);
  7353. mutex_lock(&priv->mutex);
  7354. if (priv->status & STATUS_RF_KILL_MASK)
  7355. strcpy(wrqu->name, "radio off");
  7356. else if (!(priv->status & STATUS_ASSOCIATED))
  7357. strcpy(wrqu->name, "unassociated");
  7358. else
  7359. snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
  7360. ipw_modes[priv->assoc_request.ieee_mode]);
  7361. IPW_DEBUG_WX("Name: %s\n", wrqu->name);
  7362. mutex_unlock(&priv->mutex);
  7363. return 0;
  7364. }
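/* Set a fixed channel (channel == 0 restores automatic selection) and
 * trigger a [re]association; in monitor mode, abort any scan in progress
 * instead of forcing a reassociation. */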
  7365. static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
  7366. {
  7367. if (channel == 0) {
  7368. IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
  7369. priv->config &= ~CFG_STATIC_CHANNEL;
  7370. IPW_DEBUG_ASSOC("Attempting to associate with new "
  7371. "parameters.\n");
  7372. ipw_associate(priv);
  7373. return 0;
  7374. }
  7375. priv->config |= CFG_STATIC_CHANNEL;
  7376. if (priv->channel == channel) {
  7377. IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
  7378. channel);
  7379. return 0;
  7380. }
  7381. IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
  7382. priv->channel = channel;
  7383. #ifdef CONFIG_IPW2200_MONITOR
  7384. if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
  7385. int i;
  7386. if (priv->status & STATUS_SCANNING) {
  7387. IPW_DEBUG_SCAN("Scan abort triggered due to "
  7388. "channel change.\n");
  7389. ipw_abort_scan(priv);
  7390. }
  7391. for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
  7392. udelay(10);
  7393. if (priv->status & STATUS_SCANNING)
  7394. IPW_DEBUG_SCAN("Still scanning...\n");
  7395. else
  7396. IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
  7397. 1000 - i);
  7398. return 0;
  7399. }
  7400. #endif /* CONFIG_IPW2200_MONITOR */
  7401. /* Network configuration changed -- force [re]association */
  7402. IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
  7403. if (!ipw_disassociate(priv))
  7404. ipw_associate(priv);
  7405. return 0;
  7406. }
  7407. static int ipw_wx_set_freq(struct net_device *dev,
  7408. struct iw_request_info *info,
  7409. union iwreq_data *wrqu, char *extra)
  7410. {
  7411. struct ipw_priv *priv = ieee80211_priv(dev);
  7412. const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
  7413. struct iw_freq *fwrq = &wrqu->freq;
  7414. int ret = 0, i;
  7415. u8 channel, flags;
  7416. int band;
  7417. if (fwrq->m == 0) {
  7418. IPW_DEBUG_WX("SET Freq/Channel -> any\n");
  7419. mutex_lock(&priv->mutex);
  7420. ret = ipw_set_channel(priv, 0);
  7421. mutex_unlock(&priv->mutex);
  7422. return ret;
  7423. }
  7424. /* if setting by freq convert to channel */
  7425. if (fwrq->e == 1) {
  7426. channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
  7427. if (channel == 0)
  7428. return -EINVAL;
  7429. } else
  7430. channel = fwrq->m;
  7431. if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
  7432. return -EINVAL;
  7433. if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
  7434. i = ieee80211_channel_to_index(priv->ieee, channel);
  7435. if (i == -1)
  7436. return -EINVAL;
  7437. flags = (band == IEEE80211_24GHZ_BAND) ?
  7438. geo->bg[i].flags : geo->a[i].flags;
  7439. if (flags & IEEE80211_CH_PASSIVE_ONLY) {
  7440. IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
  7441. return -EINVAL;
  7442. }
  7443. }
  7444. IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
  7445. mutex_lock(&priv->mutex);
  7446. ret = ipw_set_channel(priv, channel);
  7447. mutex_unlock(&priv->mutex);
  7448. return ret;
  7449. }
  7450. static int ipw_wx_get_freq(struct net_device *dev,
  7451. struct iw_request_info *info,
  7452. union iwreq_data *wrqu, char *extra)
  7453. {
  7454. struct ipw_priv *priv = ieee80211_priv(dev);
  7455. wrqu->freq.e = 0;
  7456. /* If we are associated, trying to associate, or have a statically
  7457. * configured CHANNEL then return that; otherwise return ANY */
  7458. mutex_lock(&priv->mutex);
  7459. if (priv->config & CFG_STATIC_CHANNEL ||
  7460. priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
  7461. int i;
  7462. i = ieee80211_channel_to_index(priv->ieee, priv->channel);
  7463. BUG_ON(i == -1);
  7464. wrqu->freq.e = 1;
  7465. switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
  7466. case IEEE80211_52GHZ_BAND:
  7467. wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
  7468. break;
  7469. case IEEE80211_24GHZ_BAND:
  7470. wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
  7471. break;
  7472. default:
  7473. BUG();
  7474. }
  7475. } else
  7476. wrqu->freq.m = 0;
  7477. mutex_unlock(&priv->mutex);
  7478. IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
  7479. return 0;
  7480. }
  7481. static int ipw_wx_set_mode(struct net_device *dev,
  7482. struct iw_request_info *info,
  7483. union iwreq_data *wrqu, char *extra)
  7484. {
  7485. struct ipw_priv *priv = ieee80211_priv(dev);
  7486. int err = 0;
  7487. IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
  7488. switch (wrqu->mode) {
  7489. #ifdef CONFIG_IPW2200_MONITOR
  7490. case IW_MODE_MONITOR:
  7491. #endif
  7492. case IW_MODE_ADHOC:
  7493. case IW_MODE_INFRA:
  7494. break;
  7495. case IW_MODE_AUTO:
  7496. wrqu->mode = IW_MODE_INFRA;
  7497. break;
  7498. default:
  7499. return -EINVAL;
  7500. }
  7501. if (wrqu->mode == priv->ieee->iw_mode)
  7502. return 0;
  7503. mutex_lock(&priv->mutex);
  7504. ipw_sw_reset(priv, 0);
  7505. #ifdef CONFIG_IPW2200_MONITOR
  7506. if (priv->ieee->iw_mode == IW_MODE_MONITOR)
  7507. priv->net_dev->type = ARPHRD_ETHER;
  7508. if (wrqu->mode == IW_MODE_MONITOR)
  7509. #ifdef CONFIG_IPW2200_RADIOTAP
  7510. priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
  7511. #else
  7512. priv->net_dev->type = ARPHRD_IEEE80211;
  7513. #endif
  7514. #endif /* CONFIG_IPW2200_MONITOR */
  7515. /* Free the existing firmware and reset the fw_loaded
  7516. * flag so ipw_load() will bring in the new firmware */
  7517. free_firmware();
  7518. priv->ieee->iw_mode = wrqu->mode;
  7519. queue_work(priv->workqueue, &priv->adapter_restart);
  7520. mutex_unlock(&priv->mutex);
  7521. return err;
  7522. }
  7523. static int ipw_wx_get_mode(struct net_device *dev,
  7524. struct iw_request_info *info,
  7525. union iwreq_data *wrqu, char *extra)
  7526. {
  7527. struct ipw_priv *priv = ieee80211_priv(dev);
  7528. mutex_lock(&priv->mutex);
  7529. wrqu->mode = priv->ieee->iw_mode;
  7530. IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
  7531. mutex_unlock(&priv->mutex);
  7532. return 0;
  7533. }
/* Values are in microseconds */
  7535. static const s32 timeout_duration[] = {
  7536. 350000,
  7537. 250000,
  7538. 75000,
  7539. 37000,
  7540. 25000,
  7541. };
  7542. static const s32 period_duration[] = {
  7543. 400000,
  7544. 700000,
  7545. 1000000,
  7546. 1000000,
  7547. 1000000
  7548. };
  7549. static int ipw_wx_get_range(struct net_device *dev,
  7550. struct iw_request_info *info,
  7551. union iwreq_data *wrqu, char *extra)
  7552. {
  7553. struct ipw_priv *priv = ieee80211_priv(dev);
  7554. struct iw_range *range = (struct iw_range *)extra;
  7555. const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
  7556. int i = 0, j;
  7557. wrqu->data.length = sizeof(*range);
  7558. memset(range, 0, sizeof(*range));
  7559. /* 54Mbs == ~27 Mb/s real (802.11g) */
  7560. range->throughput = 27 * 1000 * 1000;
  7561. range->max_qual.qual = 100;
  7562. /* TODO: Find real max RSSI and stick here */
  7563. range->max_qual.level = 0;
  7564. range->max_qual.noise = 0;
  7565. range->max_qual.updated = 7; /* Updated all three */
  7566. range->avg_qual.qual = 70;
/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
  7568. range->avg_qual.level = 0; /* FIXME to real average level */
  7569. range->avg_qual.noise = 0;
  7570. range->avg_qual.updated = 7; /* Updated all three */
  7571. mutex_lock(&priv->mutex);
  7572. range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
  7573. for (i = 0; i < range->num_bitrates; i++)
  7574. range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
  7575. 500000;
  7576. range->max_rts = DEFAULT_RTS_THRESHOLD;
  7577. range->min_frag = MIN_FRAG_THRESHOLD;
  7578. range->max_frag = MAX_FRAG_THRESHOLD;
  7579. range->encoding_size[0] = 5;
  7580. range->encoding_size[1] = 13;
  7581. range->num_encoding_sizes = 2;
  7582. range->max_encoding_tokens = WEP_KEYS;
  7583. /* Set the Wireless Extension versions */
  7584. range->we_version_compiled = WIRELESS_EXT;
  7585. range->we_version_source = 18;
  7586. i = 0;
  7587. if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
  7588. for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
  7589. if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
  7590. (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
  7591. continue;
  7592. range->freq[i].i = geo->bg[j].channel;
  7593. range->freq[i].m = geo->bg[j].freq * 100000;
  7594. range->freq[i].e = 1;
  7595. i++;
  7596. }
  7597. }
  7598. if (priv->ieee->mode & IEEE_A) {
  7599. for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
  7600. if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
  7601. (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
  7602. continue;
  7603. range->freq[i].i = geo->a[j].channel;
  7604. range->freq[i].m = geo->a[j].freq * 100000;
  7605. range->freq[i].e = 1;
  7606. i++;
  7607. }
  7608. }
  7609. range->num_channels = i;
  7610. range->num_frequency = i;
  7611. mutex_unlock(&priv->mutex);
  7612. /* Event capability (kernel + driver) */
  7613. range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
  7614. IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
  7615. IW_EVENT_CAPA_MASK(SIOCGIWAP) |
  7616. IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
  7617. range->event_capa[1] = IW_EVENT_CAPA_K_1;
  7618. range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
  7619. IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
  7620. range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
  7621. IPW_DEBUG_WX("GET Range\n");
  7622. return 0;
  7623. }
  7624. static int ipw_wx_set_wap(struct net_device *dev,
  7625. struct iw_request_info *info,
  7626. union iwreq_data *wrqu, char *extra)
  7627. {
  7628. struct ipw_priv *priv = ieee80211_priv(dev);
  7629. static const unsigned char any[] = {
  7630. 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
  7631. };
  7632. static const unsigned char off[] = {
  7633. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
  7634. };
  7635. if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
  7636. return -EINVAL;
  7637. mutex_lock(&priv->mutex);
  7638. if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
  7639. !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
  7640. /* we disable mandatory BSSID association */
  7641. IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
  7642. priv->config &= ~CFG_STATIC_BSSID;
  7643. IPW_DEBUG_ASSOC("Attempting to associate with new "
  7644. "parameters.\n");
  7645. ipw_associate(priv);
  7646. mutex_unlock(&priv->mutex);
  7647. return 0;
  7648. }
  7649. priv->config |= CFG_STATIC_BSSID;
  7650. if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
  7651. IPW_DEBUG_WX("BSSID set to current BSSID.\n");
  7652. mutex_unlock(&priv->mutex);
  7653. return 0;
  7654. }
  7655. IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
  7656. wrqu->ap_addr.sa_data);
  7657. memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
  7658. /* Network configuration changed -- force [re]association */
  7659. IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
  7660. if (!ipw_disassociate(priv))
  7661. ipw_associate(priv);
  7662. mutex_unlock(&priv->mutex);
  7663. return 0;
  7664. }
  7665. static int ipw_wx_get_wap(struct net_device *dev,
  7666. struct iw_request_info *info,
  7667. union iwreq_data *wrqu, char *extra)
  7668. {
  7669. struct ipw_priv *priv = ieee80211_priv(dev);
  7670. /* If we are associated, trying to associate, or have a statically
  7671. * configured BSSID then return that; otherwise return ANY */
  7672. mutex_lock(&priv->mutex);
  7673. if (priv->config & CFG_STATIC_BSSID ||
  7674. priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
  7675. wrqu->ap_addr.sa_family = ARPHRD_ETHER;
  7676. memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
  7677. } else
  7678. memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
  7679. IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
  7680. wrqu->ap_addr.sa_data);
  7681. mutex_unlock(&priv->mutex);
  7682. return 0;
  7683. }
  7684. static int ipw_wx_set_essid(struct net_device *dev,
  7685. struct iw_request_info *info,
  7686. union iwreq_data *wrqu, char *extra)
  7687. {
  7688. struct ipw_priv *priv = ieee80211_priv(dev);
  7689. int length;
  7690. DECLARE_SSID_BUF(ssid);
  7691. mutex_lock(&priv->mutex);
  7692. if (!wrqu->essid.flags)
  7693. {
  7694. IPW_DEBUG_WX("Setting ESSID to ANY\n");
  7695. ipw_disassociate(priv);
  7696. priv->config &= ~CFG_STATIC_ESSID;
  7697. ipw_associate(priv);
  7698. mutex_unlock(&priv->mutex);
  7699. return 0;
  7700. }
  7701. length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
  7702. priv->config |= CFG_STATIC_ESSID;
  7703. if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
  7704. && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
  7705. IPW_DEBUG_WX("ESSID set to current ESSID.\n");
  7706. mutex_unlock(&priv->mutex);
  7707. return 0;
  7708. }
  7709. IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
  7710. print_ssid(ssid, extra, length), length);
  7711. priv->essid_len = length;
  7712. memcpy(priv->essid, extra, priv->essid_len);
  7713. /* Network configuration changed -- force [re]association */
  7714. IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
  7715. if (!ipw_disassociate(priv))
  7716. ipw_associate(priv);
  7717. mutex_unlock(&priv->mutex);
  7718. return 0;
  7719. }
  7720. static int ipw_wx_get_essid(struct net_device *dev,
  7721. struct iw_request_info *info,
  7722. union iwreq_data *wrqu, char *extra)
  7723. {
  7724. struct ipw_priv *priv = ieee80211_priv(dev);
  7725. DECLARE_SSID_BUF(ssid);
  7726. /* If we are associated, trying to associate, or have a statically
  7727. * configured ESSID then return that; otherwise return ANY */
  7728. mutex_lock(&priv->mutex);
  7729. if (priv->config & CFG_STATIC_ESSID ||
  7730. priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
  7731. IPW_DEBUG_WX("Getting essid: '%s'\n",
  7732. print_ssid(ssid, priv->essid, priv->essid_len));
  7733. memcpy(extra, priv->essid, priv->essid_len);
  7734. wrqu->essid.length = priv->essid_len;
  7735. wrqu->essid.flags = 1; /* active */
  7736. } else {
  7737. IPW_DEBUG_WX("Getting essid: ANY\n");
  7738. wrqu->essid.length = 0;
  7739. wrqu->essid.flags = 0; /* active */
  7740. }
  7741. mutex_unlock(&priv->mutex);
  7742. return 0;
  7743. }
  7744. static int ipw_wx_set_nick(struct net_device *dev,
  7745. struct iw_request_info *info,
  7746. union iwreq_data *wrqu, char *extra)
  7747. {
  7748. struct ipw_priv *priv = ieee80211_priv(dev);
  7749. IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
  7750. if (wrqu->data.length > IW_ESSID_MAX_SIZE)
  7751. return -E2BIG;
  7752. mutex_lock(&priv->mutex);
  7753. wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
  7754. memset(priv->nick, 0, sizeof(priv->nick));
  7755. memcpy(priv->nick, extra, wrqu->data.length);
  7756. IPW_DEBUG_TRACE("<<\n");
  7757. mutex_unlock(&priv->mutex);
  7758. return 0;
  7759. }
  7760. static int ipw_wx_get_nick(struct net_device *dev,
  7761. struct iw_request_info *info,
  7762. union iwreq_data *wrqu, char *extra)
  7763. {
  7764. struct ipw_priv *priv = ieee80211_priv(dev);
  7765. IPW_DEBUG_WX("Getting nick\n");
  7766. mutex_lock(&priv->mutex);
  7767. wrqu->data.length = strlen(priv->nick);
  7768. memcpy(extra, priv->nick, wrqu->data.length);
  7769. wrqu->data.flags = 1; /* active */
  7770. mutex_unlock(&priv->mutex);
  7771. return 0;
  7772. }
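/* WX "sens" handler: wrqu->sens.value is taken as the roaming threshold and
 * the disassociate threshold is derived as three times that value;
 * sens.fixed == 0 restores the driver defaults. */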
  7773. static int ipw_wx_set_sens(struct net_device *dev,
  7774. struct iw_request_info *info,
  7775. union iwreq_data *wrqu, char *extra)
  7776. {
  7777. struct ipw_priv *priv = ieee80211_priv(dev);
  7778. int err = 0;
  7779. IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
  7780. IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
  7781. mutex_lock(&priv->mutex);
  7782. if (wrqu->sens.fixed == 0)
  7783. {
  7784. priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
  7785. priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
  7786. goto out;
  7787. }
  7788. if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
  7789. (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
  7790. err = -EINVAL;
  7791. goto out;
  7792. }
  7793. priv->roaming_threshold = wrqu->sens.value;
  7794. priv->disassociate_threshold = 3*wrqu->sens.value;
  7795. out:
  7796. mutex_unlock(&priv->mutex);
  7797. return err;
  7798. }
  7799. static int ipw_wx_get_sens(struct net_device *dev,
  7800. struct iw_request_info *info,
  7801. union iwreq_data *wrqu, char *extra)
  7802. {
  7803. struct ipw_priv *priv = ieee80211_priv(dev);
  7804. mutex_lock(&priv->mutex);
  7805. wrqu->sens.fixed = 1;
  7806. wrqu->sens.value = priv->roaming_threshold;
  7807. mutex_unlock(&priv->mutex);
  7808. IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
  7809. wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
  7810. return 0;
  7811. }
  7812. static int ipw_wx_set_rate(struct net_device *dev,
  7813. struct iw_request_info *info,
  7814. union iwreq_data *wrqu, char *extra)
  7815. {
  7816. /* TODO: We should use semaphores or locks for access to priv */
  7817. struct ipw_priv *priv = ieee80211_priv(dev);
  7818. u32 target_rate = wrqu->bitrate.value;
  7819. u32 fixed, mask;
  7820. /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
  7821. /* value = X, fixed = 1 means only rate X */
  7822. /* value = X, fixed = 0 means all rates lower equal X */
  7823. if (target_rate == -1) {
  7824. fixed = 0;
  7825. mask = IEEE80211_DEFAULT_RATES_MASK;
  7826. /* Now we should reassociate */
  7827. goto apply;
  7828. }
  7829. mask = 0;
  7830. fixed = wrqu->bitrate.fixed;
  7831. if (target_rate == 1000000 || !fixed)
  7832. mask |= IEEE80211_CCK_RATE_1MB_MASK;
  7833. if (target_rate == 1000000)
  7834. goto apply;
  7835. if (target_rate == 2000000 || !fixed)
  7836. mask |= IEEE80211_CCK_RATE_2MB_MASK;
  7837. if (target_rate == 2000000)
  7838. goto apply;
  7839. if (target_rate == 5500000 || !fixed)
  7840. mask |= IEEE80211_CCK_RATE_5MB_MASK;
  7841. if (target_rate == 5500000)
  7842. goto apply;
  7843. if (target_rate == 6000000 || !fixed)
  7844. mask |= IEEE80211_OFDM_RATE_6MB_MASK;
  7845. if (target_rate == 6000000)
  7846. goto apply;
  7847. if (target_rate == 9000000 || !fixed)
  7848. mask |= IEEE80211_OFDM_RATE_9MB_MASK;
  7849. if (target_rate == 9000000)
  7850. goto apply;
  7851. if (target_rate == 11000000 || !fixed)
  7852. mask |= IEEE80211_CCK_RATE_11MB_MASK;
  7853. if (target_rate == 11000000)
  7854. goto apply;
  7855. if (target_rate == 12000000 || !fixed)
  7856. mask |= IEEE80211_OFDM_RATE_12MB_MASK;
  7857. if (target_rate == 12000000)
  7858. goto apply;
  7859. if (target_rate == 18000000 || !fixed)
  7860. mask |= IEEE80211_OFDM_RATE_18MB_MASK;
  7861. if (target_rate == 18000000)
  7862. goto apply;
  7863. if (target_rate == 24000000 || !fixed)
  7864. mask |= IEEE80211_OFDM_RATE_24MB_MASK;
  7865. if (target_rate == 24000000)
  7866. goto apply;
  7867. if (target_rate == 36000000 || !fixed)
  7868. mask |= IEEE80211_OFDM_RATE_36MB_MASK;
  7869. if (target_rate == 36000000)
  7870. goto apply;
  7871. if (target_rate == 48000000 || !fixed)
  7872. mask |= IEEE80211_OFDM_RATE_48MB_MASK;
  7873. if (target_rate == 48000000)
  7874. goto apply;
  7875. if (target_rate == 54000000 || !fixed)
  7876. mask |= IEEE80211_OFDM_RATE_54MB_MASK;
  7877. if (target_rate == 54000000)
  7878. goto apply;
  7879. IPW_DEBUG_WX("invalid rate specified, returning error\n");
  7880. return -EINVAL;
  7881. apply:
  7882. IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
  7883. mask, fixed ? "fixed" : "sub-rates");
  7884. mutex_lock(&priv->mutex);
  7885. if (mask == IEEE80211_DEFAULT_RATES_MASK) {
  7886. priv->config &= ~CFG_FIXED_RATE;
  7887. ipw_set_fixed_rate(priv, priv->ieee->mode);
  7888. } else
  7889. priv->config |= CFG_FIXED_RATE;
  7890. if (priv->rates_mask == mask) {
  7891. IPW_DEBUG_WX("Mask set to current mask.\n");
  7892. mutex_unlock(&priv->mutex);
  7893. return 0;
  7894. }
  7895. priv->rates_mask = mask;
  7896. /* Network configuration changed -- force [re]association */
  7897. IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
  7898. if (!ipw_disassociate(priv))
  7899. ipw_associate(priv);
  7900. mutex_unlock(&priv->mutex);
  7901. return 0;
  7902. }
  7903. static int ipw_wx_get_rate(struct net_device *dev,
  7904. struct iw_request_info *info,
  7905. union iwreq_data *wrqu, char *extra)
  7906. {
  7907. struct ipw_priv *priv = ieee80211_priv(dev);
  7908. mutex_lock(&priv->mutex);
  7909. wrqu->bitrate.value = priv->last_rate;
  7910. wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
  7911. mutex_unlock(&priv->mutex);
  7912. IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
  7913. return 0;
  7914. }
  7915. static int ipw_wx_set_rts(struct net_device *dev,
  7916. struct iw_request_info *info,
  7917. union iwreq_data *wrqu, char *extra)
  7918. {
  7919. struct ipw_priv *priv = ieee80211_priv(dev);
  7920. mutex_lock(&priv->mutex);
  7921. if (wrqu->rts.disabled || !wrqu->rts.fixed)
  7922. priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
  7923. else {
  7924. if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
  7925. wrqu->rts.value > MAX_RTS_THRESHOLD) {
  7926. mutex_unlock(&priv->mutex);
  7927. return -EINVAL;
  7928. }
  7929. priv->rts_threshold = wrqu->rts.value;
  7930. }
  7931. ipw_send_rts_threshold(priv, priv->rts_threshold);
  7932. mutex_unlock(&priv->mutex);
  7933. IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
  7934. return 0;
  7935. }
  7936. static int ipw_wx_get_rts(struct net_device *dev,
  7937. struct iw_request_info *info,
  7938. union iwreq_data *wrqu, char *extra)
  7939. {
  7940. struct ipw_priv *priv = ieee80211_priv(dev);
  7941. mutex_lock(&priv->mutex);
  7942. wrqu->rts.value = priv->rts_threshold;
  7943. wrqu->rts.fixed = 0; /* no auto select */
  7944. wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
  7945. mutex_unlock(&priv->mutex);
  7946. IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
  7947. return 0;
  7948. }
  7949. static int ipw_wx_set_txpow(struct net_device *dev,
  7950. struct iw_request_info *info,
  7951. union iwreq_data *wrqu, char *extra)
  7952. {
  7953. struct ipw_priv *priv = ieee80211_priv(dev);
  7954. int err = 0;
  7955. mutex_lock(&priv->mutex);
  7956. if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
  7957. err = -EINPROGRESS;
  7958. goto out;
  7959. }
  7960. if (!wrqu->power.fixed)
  7961. wrqu->power.value = IPW_TX_POWER_DEFAULT;
  7962. if (wrqu->power.flags != IW_TXPOW_DBM) {
  7963. err = -EINVAL;
  7964. goto out;
  7965. }
  7966. if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
  7967. (wrqu->power.value < IPW_TX_POWER_MIN)) {
  7968. err = -EINVAL;
  7969. goto out;
  7970. }
  7971. priv->tx_power = wrqu->power.value;
  7972. err = ipw_set_tx_power(priv);
  7973. out:
  7974. mutex_unlock(&priv->mutex);
  7975. return err;
  7976. }
  7977. static int ipw_wx_get_txpow(struct net_device *dev,
  7978. struct iw_request_info *info,
  7979. union iwreq_data *wrqu, char *extra)
  7980. {
  7981. struct ipw_priv *priv = ieee80211_priv(dev);
  7982. mutex_lock(&priv->mutex);
  7983. wrqu->power.value = priv->tx_power;
  7984. wrqu->power.fixed = 1;
  7985. wrqu->power.flags = IW_TXPOW_DBM;
  7986. wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
  7987. mutex_unlock(&priv->mutex);
  7988. IPW_DEBUG_WX("GET TX Power -> %s %d \n",
  7989. wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
  7990. return 0;
  7991. }
  7992. static int ipw_wx_set_frag(struct net_device *dev,
  7993. struct iw_request_info *info,
  7994. union iwreq_data *wrqu, char *extra)
  7995. {
  7996. struct ipw_priv *priv = ieee80211_priv(dev);
  7997. mutex_lock(&priv->mutex);
  7998. if (wrqu->frag.disabled || !wrqu->frag.fixed)
  7999. priv->ieee->fts = DEFAULT_FTS;
  8000. else {
  8001. if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
  8002. wrqu->frag.value > MAX_FRAG_THRESHOLD) {
  8003. mutex_unlock(&priv->mutex);
  8004. return -EINVAL;
  8005. }
  8006. priv->ieee->fts = wrqu->frag.value & ~0x1;
  8007. }
  8008. ipw_send_frag_threshold(priv, wrqu->frag.value);
  8009. mutex_unlock(&priv->mutex);
  8010. IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
  8011. return 0;
  8012. }
  8013. static int ipw_wx_get_frag(struct net_device *dev,
  8014. struct iw_request_info *info,
  8015. union iwreq_data *wrqu, char *extra)
  8016. {
  8017. struct ipw_priv *priv = ieee80211_priv(dev);
  8018. mutex_lock(&priv->mutex);
  8019. wrqu->frag.value = priv->ieee->fts;
  8020. wrqu->frag.fixed = 0; /* no auto select */
  8021. wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
  8022. mutex_unlock(&priv->mutex);
  8023. IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
  8024. return 0;
  8025. }
  8026. static int ipw_wx_set_retry(struct net_device *dev,
  8027. struct iw_request_info *info,
  8028. union iwreq_data *wrqu, char *extra)
  8029. {
  8030. struct ipw_priv *priv = ieee80211_priv(dev);
  8031. if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
  8032. return -EINVAL;
  8033. if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
  8034. return 0;
  8035. if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
  8036. return -EINVAL;
  8037. mutex_lock(&priv->mutex);
  8038. if (wrqu->retry.flags & IW_RETRY_SHORT)
  8039. priv->short_retry_limit = (u8) wrqu->retry.value;
  8040. else if (wrqu->retry.flags & IW_RETRY_LONG)
  8041. priv->long_retry_limit = (u8) wrqu->retry.value;
  8042. else {
  8043. priv->short_retry_limit = (u8) wrqu->retry.value;
  8044. priv->long_retry_limit = (u8) wrqu->retry.value;
  8045. }
  8046. ipw_send_retry_limit(priv, priv->short_retry_limit,
  8047. priv->long_retry_limit);
  8048. mutex_unlock(&priv->mutex);
  8049. IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
  8050. priv->short_retry_limit, priv->long_retry_limit);
  8051. return 0;
  8052. }
  8053. static int ipw_wx_get_retry(struct net_device *dev,
  8054. struct iw_request_info *info,
  8055. union iwreq_data *wrqu, char *extra)
  8056. {
  8057. struct ipw_priv *priv = ieee80211_priv(dev);
  8058. mutex_lock(&priv->mutex);
  8059. wrqu->retry.disabled = 0;
  8060. if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
  8061. mutex_unlock(&priv->mutex);
  8062. return -EINVAL;
  8063. }
  8064. if (wrqu->retry.flags & IW_RETRY_LONG) {
  8065. wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
  8066. wrqu->retry.value = priv->long_retry_limit;
  8067. } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
  8068. wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
  8069. wrqu->retry.value = priv->short_retry_limit;
  8070. } else {
  8071. wrqu->retry.flags = IW_RETRY_LIMIT;
  8072. wrqu->retry.value = priv->short_retry_limit;
  8073. }
  8074. mutex_unlock(&priv->mutex);
  8075. IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
  8076. return 0;
  8077. }
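/* Dispatch a user-requested scan: a directed scan if an ESSID was supplied,
 * a passive scan if requested, otherwise a normal active broadcast scan. */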
  8078. static int ipw_wx_set_scan(struct net_device *dev,
  8079. struct iw_request_info *info,
  8080. union iwreq_data *wrqu, char *extra)
  8081. {
  8082. struct ipw_priv *priv = ieee80211_priv(dev);
  8083. struct iw_scan_req *req = (struct iw_scan_req *)extra;
  8084. struct delayed_work *work = NULL;
  8085. mutex_lock(&priv->mutex);
  8086. priv->user_requested_scan = 1;
  8087. if (wrqu->data.length == sizeof(struct iw_scan_req)) {
  8088. if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
  8089. int len = min((int)req->essid_len,
  8090. (int)sizeof(priv->direct_scan_ssid));
  8091. memcpy(priv->direct_scan_ssid, req->essid, len);
  8092. priv->direct_scan_ssid_len = len;
  8093. work = &priv->request_direct_scan;
  8094. } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
  8095. work = &priv->request_passive_scan;
  8096. }
  8097. } else {
  8098. /* Normal active broadcast scan */
  8099. work = &priv->request_scan;
  8100. }
  8101. mutex_unlock(&priv->mutex);
  8102. IPW_DEBUG_WX("Start scan\n");
  8103. queue_delayed_work(priv->workqueue, work, 0);
  8104. return 0;
  8105. }
  8106. static int ipw_wx_get_scan(struct net_device *dev,
  8107. struct iw_request_info *info,
  8108. union iwreq_data *wrqu, char *extra)
  8109. {
  8110. struct ipw_priv *priv = ieee80211_priv(dev);
  8111. return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
  8112. }
  8113. static int ipw_wx_set_encode(struct net_device *dev,
  8114. struct iw_request_info *info,
  8115. union iwreq_data *wrqu, char *key)
  8116. {
  8117. struct ipw_priv *priv = ieee80211_priv(dev);
  8118. int ret;
  8119. u32 cap = priv->capability;
  8120. mutex_lock(&priv->mutex);
  8121. ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
  8122. /* In IBSS mode, we need to notify the firmware to update
  8123. * the beacon info after we changed the capability. */
  8124. if (cap != priv->capability &&
  8125. priv->ieee->iw_mode == IW_MODE_ADHOC &&
  8126. priv->status & STATUS_ASSOCIATED)
  8127. ipw_disassociate(priv);
  8128. mutex_unlock(&priv->mutex);
  8129. return ret;
  8130. }
  8131. static int ipw_wx_get_encode(struct net_device *dev,
  8132. struct iw_request_info *info,
  8133. union iwreq_data *wrqu, char *key)
  8134. {
  8135. struct ipw_priv *priv = ieee80211_priv(dev);
  8136. return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
  8137. }
  8138. static int ipw_wx_set_power(struct net_device *dev,
  8139. struct iw_request_info *info,
  8140. union iwreq_data *wrqu, char *extra)
  8141. {
  8142. struct ipw_priv *priv = ieee80211_priv(dev);
  8143. int err;
  8144. mutex_lock(&priv->mutex);
  8145. if (wrqu->power.disabled) {
  8146. priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
  8147. err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
  8148. if (err) {
  8149. IPW_DEBUG_WX("failed setting power mode.\n");
  8150. mutex_unlock(&priv->mutex);
  8151. return err;
  8152. }
  8153. IPW_DEBUG_WX("SET Power Management Mode -> off\n");
  8154. mutex_unlock(&priv->mutex);
  8155. return 0;
  8156. }
  8157. switch (wrqu->power.flags & IW_POWER_MODE) {
  8158. case IW_POWER_ON: /* If not specified */
  8159. case IW_POWER_MODE: /* If set all mask */
  8160. case IW_POWER_ALL_R: /* If explicitly state all */
  8161. break;
  8162. default: /* Otherwise we don't support it */
  8163. IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
  8164. wrqu->power.flags);
  8165. mutex_unlock(&priv->mutex);
  8166. return -EOPNOTSUPP;
  8167. }
  8168. /* If the user hasn't specified a power management mode yet, default
  8169. * to BATTERY */
  8170. if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
  8171. priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
  8172. else
  8173. priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
  8174. err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
  8175. if (err) {
  8176. IPW_DEBUG_WX("failed setting power mode.\n");
  8177. mutex_unlock(&priv->mutex);
  8178. return err;
  8179. }
  8180. IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
  8181. mutex_unlock(&priv->mutex);
  8182. return 0;
  8183. }
  8184. static int ipw_wx_get_power(struct net_device *dev,
  8185. struct iw_request_info *info,
  8186. union iwreq_data *wrqu, char *extra)
  8187. {
  8188. struct ipw_priv *priv = ieee80211_priv(dev);
  8189. mutex_lock(&priv->mutex);
  8190. if (!(priv->power_mode & IPW_POWER_ENABLED))
  8191. wrqu->power.disabled = 1;
  8192. else
  8193. wrqu->power.disabled = 0;
  8194. mutex_unlock(&priv->mutex);
  8195. IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
  8196. return 0;
  8197. }
  8198. static int ipw_wx_set_powermode(struct net_device *dev,
  8199. struct iw_request_info *info,
  8200. union iwreq_data *wrqu, char *extra)
  8201. {
  8202. struct ipw_priv *priv = ieee80211_priv(dev);
  8203. int mode = *(int *)extra;
  8204. int err;
  8205. mutex_lock(&priv->mutex);
  8206. if ((mode < 1) || (mode > IPW_POWER_LIMIT))
  8207. mode = IPW_POWER_AC;
  8208. if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
  8209. err = ipw_send_power_mode(priv, mode);
  8210. if (err) {
  8211. IPW_DEBUG_WX("failed setting power mode.\n");
  8212. mutex_unlock(&priv->mutex);
  8213. return err;
  8214. }
  8215. priv->power_mode = IPW_POWER_ENABLED | mode;
  8216. }
  8217. mutex_unlock(&priv->mutex);
  8218. return 0;
  8219. }
  8220. #define MAX_WX_STRING 80
  8221. static int ipw_wx_get_powermode(struct net_device *dev,
  8222. struct iw_request_info *info,
  8223. union iwreq_data *wrqu, char *extra)
  8224. {
  8225. struct ipw_priv *priv = ieee80211_priv(dev);
  8226. int level = IPW_POWER_LEVEL(priv->power_mode);
  8227. char *p = extra;
  8228. p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
  8229. switch (level) {
  8230. case IPW_POWER_AC:
  8231. p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
  8232. break;
  8233. case IPW_POWER_BATTERY:
  8234. p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
  8235. break;
  8236. default:
  8237. p += snprintf(p, MAX_WX_STRING - (p - extra),
  8238. "(Timeout %dms, Period %dms)",
  8239. timeout_duration[level - 1] / 1000,
  8240. period_duration[level - 1] / 1000);
  8241. }
  8242. if (!(priv->power_mode & IPW_POWER_ENABLED))
  8243. p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
  8244. wrqu->data.length = p - extra + 1;
  8245. return 0;
  8246. }
  8247. static int ipw_wx_set_wireless_mode(struct net_device *dev,
  8248. struct iw_request_info *info,
  8249. union iwreq_data *wrqu, char *extra)
  8250. {
  8251. struct ipw_priv *priv = ieee80211_priv(dev);
  8252. int mode = *(int *)extra;
  8253. u8 band = 0, modulation = 0;
  8254. if (mode == 0 || mode & ~IEEE_MODE_MASK) {
  8255. IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
  8256. return -EINVAL;
  8257. }
  8258. mutex_lock(&priv->mutex);
  8259. if (priv->adapter == IPW_2915ABG) {
  8260. priv->ieee->abg_true = 1;
  8261. if (mode & IEEE_A) {
  8262. band |= IEEE80211_52GHZ_BAND;
  8263. modulation |= IEEE80211_OFDM_MODULATION;
  8264. } else
  8265. priv->ieee->abg_true = 0;
  8266. } else {
  8267. if (mode & IEEE_A) {
  8268. IPW_WARNING("Attempt to set 2200BG into "
  8269. "802.11a mode\n");
  8270. mutex_unlock(&priv->mutex);
  8271. return -EINVAL;
  8272. }
  8273. priv->ieee->abg_true = 0;
  8274. }
  8275. if (mode & IEEE_B) {
  8276. band |= IEEE80211_24GHZ_BAND;
  8277. modulation |= IEEE80211_CCK_MODULATION;
  8278. } else
  8279. priv->ieee->abg_true = 0;
  8280. if (mode & IEEE_G) {
  8281. band |= IEEE80211_24GHZ_BAND;
  8282. modulation |= IEEE80211_OFDM_MODULATION;
  8283. } else
  8284. priv->ieee->abg_true = 0;
  8285. priv->ieee->mode = mode;
  8286. priv->ieee->freq_band = band;
  8287. priv->ieee->modulation = modulation;
  8288. init_supported_rates(priv, &priv->rates);
  8289. /* Network configuration changed -- force [re]association */
  8290. IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
  8291. if (!ipw_disassociate(priv)) {
  8292. ipw_send_supported_rates(priv, &priv->rates);
  8293. ipw_associate(priv);
  8294. }
  8295. /* Update the band LEDs */
  8296. ipw_led_band_on(priv);
  8297. IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
  8298. mode & IEEE_A ? 'a' : '.',
  8299. mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
  8300. mutex_unlock(&priv->mutex);
  8301. return 0;
  8302. }
  8303. static int ipw_wx_get_wireless_mode(struct net_device *dev,
  8304. struct iw_request_info *info,
  8305. union iwreq_data *wrqu, char *extra)
  8306. {
  8307. struct ipw_priv *priv = ieee80211_priv(dev);
  8308. mutex_lock(&priv->mutex);
  8309. switch (priv->ieee->mode) {
  8310. case IEEE_A:
  8311. strncpy(extra, "802.11a (1)", MAX_WX_STRING);
  8312. break;
  8313. case IEEE_B:
  8314. strncpy(extra, "802.11b (2)", MAX_WX_STRING);
  8315. break;
  8316. case IEEE_A | IEEE_B:
  8317. strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
  8318. break;
  8319. case IEEE_G:
  8320. strncpy(extra, "802.11g (4)", MAX_WX_STRING);
  8321. break;
  8322. case IEEE_A | IEEE_G:
  8323. strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
  8324. break;
  8325. case IEEE_B | IEEE_G:
  8326. strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
  8327. break;
  8328. case IEEE_A | IEEE_B | IEEE_G:
  8329. strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
  8330. break;
  8331. default:
  8332. strncpy(extra, "unknown", MAX_WX_STRING);
  8333. break;
  8334. }
  8335. IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
  8336. wrqu->data.length = strlen(extra) + 1;
  8337. mutex_unlock(&priv->mutex);
  8338. return 0;
  8339. }
  8340. static int ipw_wx_set_preamble(struct net_device *dev,
  8341. struct iw_request_info *info,
  8342. union iwreq_data *wrqu, char *extra)
  8343. {
  8344. struct ipw_priv *priv = ieee80211_priv(dev);
  8345. int mode = *(int *)extra;
  8346. mutex_lock(&priv->mutex);
  8347. /* Switching from SHORT -> LONG requires a disassociation */
  8348. if (mode == 1) {
  8349. if (!(priv->config & CFG_PREAMBLE_LONG)) {
  8350. priv->config |= CFG_PREAMBLE_LONG;
  8351. /* Network configuration changed -- force [re]association */
  8352. IPW_DEBUG_ASSOC
  8353. ("[re]association triggered due to preamble change.\n");
  8354. if (!ipw_disassociate(priv))
  8355. ipw_associate(priv);
  8356. }
  8357. goto done;
  8358. }
  8359. if (mode == 0) {
  8360. priv->config &= ~CFG_PREAMBLE_LONG;
  8361. goto done;
  8362. }
  8363. mutex_unlock(&priv->mutex);
  8364. return -EINVAL;
  8365. done:
  8366. mutex_unlock(&priv->mutex);
  8367. return 0;
  8368. }
  8369. static int ipw_wx_get_preamble(struct net_device *dev,
  8370. struct iw_request_info *info,
  8371. union iwreq_data *wrqu, char *extra)
  8372. {
  8373. struct ipw_priv *priv = ieee80211_priv(dev);
  8374. mutex_lock(&priv->mutex);
  8375. if (priv->config & CFG_PREAMBLE_LONG)
  8376. snprintf(wrqu->name, IFNAMSIZ, "long (1)");
  8377. else
  8378. snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
  8379. mutex_unlock(&priv->mutex);
  8380. return 0;
  8381. }
  8382. #ifdef CONFIG_IPW2200_MONITOR
  8383. static int ipw_wx_set_monitor(struct net_device *dev,
  8384. struct iw_request_info *info,
  8385. union iwreq_data *wrqu, char *extra)
  8386. {
  8387. struct ipw_priv *priv = ieee80211_priv(dev);
  8388. int *parms = (int *)extra;
  8389. int enable = (parms[0] > 0);
  8390. mutex_lock(&priv->mutex);
  8391. IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
  8392. if (enable) {
  8393. if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
  8394. #ifdef CONFIG_IPW2200_RADIOTAP
  8395. priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
  8396. #else
  8397. priv->net_dev->type = ARPHRD_IEEE80211;
  8398. #endif
  8399. queue_work(priv->workqueue, &priv->adapter_restart);
  8400. }
  8401. ipw_set_channel(priv, parms[1]);
  8402. } else {
  8403. if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
  8404. mutex_unlock(&priv->mutex);
  8405. return 0;
  8406. }
  8407. priv->net_dev->type = ARPHRD_ETHER;
  8408. queue_work(priv->workqueue, &priv->adapter_restart);
  8409. }
  8410. mutex_unlock(&priv->mutex);
  8411. return 0;
  8412. }
  8413. #endif /* CONFIG_IPW2200_MONITOR */
  8414. static int ipw_wx_reset(struct net_device *dev,
  8415. struct iw_request_info *info,
  8416. union iwreq_data *wrqu, char *extra)
  8417. {
  8418. struct ipw_priv *priv = ieee80211_priv(dev);
  8419. IPW_DEBUG_WX("RESET\n");
  8420. queue_work(priv->workqueue, &priv->adapter_restart);
  8421. return 0;
  8422. }
  8423. static int ipw_wx_sw_reset(struct net_device *dev,
  8424. struct iw_request_info *info,
  8425. union iwreq_data *wrqu, char *extra)
  8426. {
  8427. struct ipw_priv *priv = ieee80211_priv(dev);
  8428. union iwreq_data wrqu_sec = {
  8429. .encoding = {
  8430. .flags = IW_ENCODE_DISABLED,
  8431. },
  8432. };
  8433. int ret;
  8434. IPW_DEBUG_WX("SW_RESET\n");
  8435. mutex_lock(&priv->mutex);
  8436. ret = ipw_sw_reset(priv, 2);
  8437. if (!ret) {
  8438. free_firmware();
  8439. ipw_adapter_restart(priv);
  8440. }
  8441. /* The SW reset bit might have been toggled on by the 'disable'
  8442. * module parameter, so take appropriate action */
  8443. ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
  8444. mutex_unlock(&priv->mutex);
  8445. ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
  8446. mutex_lock(&priv->mutex);
  8447. if (!(priv->status & STATUS_RF_KILL_MASK)) {
  8448. /* Configuration likely changed -- force [re]association */
  8449. IPW_DEBUG_ASSOC("[re]association triggered due to sw "
  8450. "reset.\n");
  8451. if (!ipw_disassociate(priv))
  8452. ipw_associate(priv);
  8453. }
  8454. mutex_unlock(&priv->mutex);
  8455. return 0;
  8456. }
  8457. /* Rebase the WE IOCTLs to zero for the handler array */
  8458. #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
  8459. static iw_handler ipw_wx_handlers[] = {
  8460. IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
  8461. IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
  8462. IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
  8463. IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
  8464. IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
  8465. IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
  8466. IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
  8467. IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
  8468. IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
  8469. IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
  8470. IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
  8471. IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
  8472. IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
  8473. IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
  8474. IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
  8475. IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
  8476. IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
  8477. IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
  8478. IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
  8479. IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
  8480. IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
  8481. IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
  8482. IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
  8483. IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
  8484. IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
  8485. IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
  8486. IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
  8487. IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
  8488. IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
  8489. IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
  8490. IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
  8491. IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
  8492. IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
  8493. IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
  8494. IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
  8495. IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
  8496. IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
  8497. IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
  8498. IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
  8499. IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
  8500. IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
  8501. };
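/* Private WX ioctls, numbered from SIOCIWFIRSTPRIV. Their order must line up
 * with the ipw_priv_handler array below, which is indexed by
 * (cmd - SIOCIWFIRSTPRIV). */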
  8502. enum {
  8503. IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
  8504. IPW_PRIV_GET_POWER,
  8505. IPW_PRIV_SET_MODE,
  8506. IPW_PRIV_GET_MODE,
  8507. IPW_PRIV_SET_PREAMBLE,
  8508. IPW_PRIV_GET_PREAMBLE,
  8509. IPW_PRIV_RESET,
  8510. IPW_PRIV_SW_RESET,
  8511. #ifdef CONFIG_IPW2200_MONITOR
  8512. IPW_PRIV_SET_MONITOR,
  8513. #endif
  8514. };
  8515. static struct iw_priv_args ipw_priv_args[] = {
  8516. {
  8517. .cmd = IPW_PRIV_SET_POWER,
  8518. .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
  8519. .name = "set_power"},
  8520. {
  8521. .cmd = IPW_PRIV_GET_POWER,
  8522. .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
  8523. .name = "get_power"},
  8524. {
  8525. .cmd = IPW_PRIV_SET_MODE,
  8526. .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
  8527. .name = "set_mode"},
  8528. {
  8529. .cmd = IPW_PRIV_GET_MODE,
  8530. .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
  8531. .name = "get_mode"},
  8532. {
  8533. .cmd = IPW_PRIV_SET_PREAMBLE,
  8534. .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
  8535. .name = "set_preamble"},
  8536. {
  8537. .cmd = IPW_PRIV_GET_PREAMBLE,
  8538. .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
  8539. .name = "get_preamble"},
  8540. {
  8541. IPW_PRIV_RESET,
  8542. IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
  8543. {
  8544. IPW_PRIV_SW_RESET,
  8545. IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
  8546. #ifdef CONFIG_IPW2200_MONITOR
  8547. {
  8548. IPW_PRIV_SET_MONITOR,
  8549. IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
  8550. #endif /* CONFIG_IPW2200_MONITOR */
  8551. };
  8552. static iw_handler ipw_priv_handler[] = {
  8553. ipw_wx_set_powermode,
  8554. ipw_wx_get_powermode,
  8555. ipw_wx_set_wireless_mode,
  8556. ipw_wx_get_wireless_mode,
  8557. ipw_wx_set_preamble,
  8558. ipw_wx_get_preamble,
  8559. ipw_wx_reset,
  8560. ipw_wx_sw_reset,
  8561. #ifdef CONFIG_IPW2200_MONITOR
  8562. ipw_wx_set_monitor,
  8563. #endif
  8564. };
  8565. static struct iw_handler_def ipw_wx_handler_def = {
  8566. .standard = ipw_wx_handlers,
  8567. .num_standard = ARRAY_SIZE(ipw_wx_handlers),
  8568. .num_private = ARRAY_SIZE(ipw_priv_handler),
  8569. .num_private_args = ARRAY_SIZE(ipw_priv_args),
  8570. .private = ipw_priv_handler,
  8571. .private_args = ipw_priv_args,
  8572. .get_wireless_stats = ipw_get_wireless_stats,
  8573. };
  8574. /*
  8575. * Get wireless statistics.
  8576. * Called by /proc/net/wireless
  8577. * Also called by SIOCGIWSTATS
  8578. */
  8579. static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
  8580. {
  8581. struct ipw_priv *priv = ieee80211_priv(dev);
  8582. struct iw_statistics *wstats;
  8583. wstats = &priv->wstats;
  8584. /* if hw is disabled, then ipw_get_ordinal() can't be called.
  8585. * netdev->get_wireless_stats seems to be called before fw is
  8586. * initialized. STATUS_ASSOCIATED will only be set if the hw is up
8587. * and associated; if not associated, the values are all meaningless
  8588. * anyway, so set them all to NULL and INVALID */
  8589. if (!(priv->status & STATUS_ASSOCIATED)) {
  8590. wstats->miss.beacon = 0;
  8591. wstats->discard.retries = 0;
  8592. wstats->qual.qual = 0;
  8593. wstats->qual.level = 0;
  8594. wstats->qual.noise = 0;
  8595. wstats->qual.updated = 7;
  8596. wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
  8597. IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
  8598. return wstats;
  8599. }
  8600. wstats->qual.qual = priv->quality;
  8601. wstats->qual.level = priv->exp_avg_rssi;
  8602. wstats->qual.noise = priv->exp_avg_noise;
  8603. wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
  8604. IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
  8605. wstats->miss.beacon = average_value(&priv->average_missed_beacons);
  8606. wstats->discard.retries = priv->last_tx_failures;
  8607. wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
  8608. /* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
  8609. goto fail_get_ordinal;
  8610. wstats->discard.retries += tx_retry; */
  8611. return wstats;
  8612. }
  8613. /* net device stuff */
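/* Fill the host-to-firmware system configuration block with the driver
 * defaults, including the antenna diversity module parameter. */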
  8614. static void init_sys_config(struct ipw_sys_config *sys_config)
  8615. {
  8616. memset(sys_config, 0, sizeof(struct ipw_sys_config));
  8617. sys_config->bt_coexistence = 0;
  8618. sys_config->answer_broadcast_ssid_probe = 0;
  8619. sys_config->accept_all_data_frames = 0;
  8620. sys_config->accept_non_directed_frames = 1;
  8621. sys_config->exclude_unicast_unencrypted = 0;
  8622. sys_config->disable_unicast_decryption = 1;
  8623. sys_config->exclude_multicast_unencrypted = 0;
  8624. sys_config->disable_multicast_decryption = 1;
  8625. if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
  8626. antenna = CFG_SYS_ANTENNA_BOTH;
  8627. sys_config->antenna_diversity = antenna;
  8628. sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
  8629. sys_config->dot11g_auto_detection = 0;
  8630. sys_config->enable_cts_to_self = 0;
  8631. sys_config->bt_coexist_collision_thr = 0;
  8632. sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
  8633. sys_config->silence_threshold = 0x1e;
  8634. }
  8635. static int ipw_net_open(struct net_device *dev)
  8636. {
  8637. IPW_DEBUG_INFO("dev->open\n");
  8638. netif_start_queue(dev);
  8639. return 0;
  8640. }
  8641. static int ipw_net_stop(struct net_device *dev)
  8642. {
  8643. IPW_DEBUG_INFO("dev->close\n");
  8644. netif_stop_queue(dev);
  8645. return 0;
  8646. }
  8647. /*
  8648. todo:
  8649. modify to send one tfd per fragment instead of using chunking. otherwise
  8650. we need to heavily modify the ieee80211_skb_to_txb.
  8651. */
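/* Build and queue one TX frame descriptor (TFD) for the txb: the 802.11
 * header is copied into the TFD, up to NUM_TFD_CHUNKS - 2 fragments are
 * DMA-mapped as chunks, any remaining fragments are coalesced into a single
 * skb, and the queue write index is then advanced. */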
  8652. static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
  8653. int pri)
  8654. {
  8655. struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
  8656. txb->fragments[0]->data;
  8657. int i = 0;
  8658. struct tfd_frame *tfd;
  8659. #ifdef CONFIG_IPW2200_QOS
  8660. int tx_id = ipw_get_tx_queue_number(priv, pri);
  8661. struct clx2_tx_queue *txq = &priv->txq[tx_id];
  8662. #else
  8663. struct clx2_tx_queue *txq = &priv->txq[0];
  8664. #endif
  8665. struct clx2_queue *q = &txq->q;
  8666. u8 id, hdr_len, unicast;
  8667. u16 remaining_bytes;
  8668. int fc;
  8669. if (!(priv->status & STATUS_ASSOCIATED))
  8670. goto drop;
  8671. hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
  8672. switch (priv->ieee->iw_mode) {
  8673. case IW_MODE_ADHOC:
  8674. unicast = !is_multicast_ether_addr(hdr->addr1);
  8675. id = ipw_find_station(priv, hdr->addr1);
  8676. if (id == IPW_INVALID_STATION) {
  8677. id = ipw_add_station(priv, hdr->addr1);
  8678. if (id == IPW_INVALID_STATION) {
  8679. IPW_WARNING("Attempt to send data to "
  8680. "invalid cell: %pM\n",
  8681. hdr->addr1);
  8682. goto drop;
  8683. }
  8684. }
  8685. break;
  8686. case IW_MODE_INFRA:
  8687. default:
  8688. unicast = !is_multicast_ether_addr(hdr->addr3);
  8689. id = 0;
  8690. break;
  8691. }
  8692. tfd = &txq->bd[q->first_empty];
  8693. txq->txb[q->first_empty] = txb;
  8694. memset(tfd, 0, sizeof(*tfd));
  8695. tfd->u.data.station_number = id;
  8696. tfd->control_flags.message_type = TX_FRAME_TYPE;
  8697. tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
  8698. tfd->u.data.cmd_id = DINO_CMD_TX;
  8699. tfd->u.data.len = cpu_to_le16(txb->payload_size);
  8700. remaining_bytes = txb->payload_size;
  8701. if (priv->assoc_request.ieee_mode == IPW_B_MODE)
  8702. tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
  8703. else
  8704. tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
  8705. if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
  8706. tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
  8707. fc = le16_to_cpu(hdr->frame_ctl);
  8708. hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
  8709. memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
  8710. if (likely(unicast))
  8711. tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
  8712. if (txb->encrypted && !priv->ieee->host_encrypt) {
  8713. switch (priv->ieee->sec.level) {
  8714. case SEC_LEVEL_3:
  8715. tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
  8716. cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  8717. /* XXX: ACK flag must be set for CCMP even if it
  8718. * is a multicast/broadcast packet, because CCMP
  8719. * group communication encrypted by GTK is
  8720. * actually done by the AP. */
  8721. if (!unicast)
  8722. tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
  8723. tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
  8724. tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
  8725. tfd->u.data.key_index = 0;
  8726. tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
  8727. break;
  8728. case SEC_LEVEL_2:
  8729. tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
  8730. cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  8731. tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
  8732. tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
  8733. tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
  8734. break;
  8735. case SEC_LEVEL_1:
  8736. tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
  8737. cpu_to_le16(IEEE80211_FCTL_PROTECTED);
  8738. tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
  8739. if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
  8740. 40)
  8741. tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
  8742. else
  8743. tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
  8744. break;
  8745. case SEC_LEVEL_0:
  8746. break;
  8747. default:
8748. printk(KERN_ERR "Unknown security level %d\n",
  8749. priv->ieee->sec.level);
  8750. break;
  8751. }
  8752. } else
  8753. /* No hardware encryption */
  8754. tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
  8755. #ifdef CONFIG_IPW2200_QOS
  8756. if (fc & IEEE80211_STYPE_QOS_DATA)
  8757. ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
  8758. #endif /* CONFIG_IPW2200_QOS */
  8759. /* payload */
  8760. tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
  8761. txb->nr_frags));
  8762. IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
  8763. txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
  8764. for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
  8765. IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
  8766. i, le32_to_cpu(tfd->u.data.num_chunks),
  8767. txb->fragments[i]->len - hdr_len);
  8768. IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
  8769. i, tfd->u.data.num_chunks,
  8770. txb->fragments[i]->len - hdr_len);
  8771. printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
  8772. txb->fragments[i]->len - hdr_len);
  8773. tfd->u.data.chunk_ptr[i] =
  8774. cpu_to_le32(pci_map_single
  8775. (priv->pci_dev,
  8776. txb->fragments[i]->data + hdr_len,
  8777. txb->fragments[i]->len - hdr_len,
  8778. PCI_DMA_TODEVICE));
  8779. tfd->u.data.chunk_len[i] =
  8780. cpu_to_le16(txb->fragments[i]->len - hdr_len);
  8781. }
  8782. if (i != txb->nr_frags) {
  8783. struct sk_buff *skb;
  8784. u16 remaining_bytes = 0;
  8785. int j;
  8786. for (j = i; j < txb->nr_frags; j++)
  8787. remaining_bytes += txb->fragments[j]->len - hdr_len;
  8788. printk(KERN_INFO "Trying to reallocate for %d bytes\n",
  8789. remaining_bytes);
  8790. skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
  8791. if (skb != NULL) {
  8792. tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
  8793. for (j = i; j < txb->nr_frags; j++) {
  8794. int size = txb->fragments[j]->len - hdr_len;
  8795. printk(KERN_INFO "Adding frag %d %d...\n",
  8796. j, size);
  8797. memcpy(skb_put(skb, size),
  8798. txb->fragments[j]->data + hdr_len, size);
  8799. }
  8800. dev_kfree_skb_any(txb->fragments[i]);
  8801. txb->fragments[i] = skb;
  8802. tfd->u.data.chunk_ptr[i] =
  8803. cpu_to_le32(pci_map_single
  8804. (priv->pci_dev, skb->data,
  8805. remaining_bytes,
  8806. PCI_DMA_TODEVICE));
  8807. le32_add_cpu(&tfd->u.data.num_chunks, 1);
  8808. }
  8809. }
  8810. /* kick DMA */
  8811. q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
  8812. ipw_write32(priv, q->reg_w, q->first_empty);
  8813. if (ipw_tx_queue_space(q) < q->high_mark)
  8814. netif_stop_queue(priv->net_dev);
  8815. return NETDEV_TX_OK;
  8816. drop:
  8817. IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
  8818. ieee80211_txb_free(txb);
  8819. return NETDEV_TX_OK;
  8820. }
  8821. static int ipw_net_is_queue_full(struct net_device *dev, int pri)
  8822. {
  8823. struct ipw_priv *priv = ieee80211_priv(dev);
  8824. #ifdef CONFIG_IPW2200_QOS
  8825. int tx_id = ipw_get_tx_queue_number(priv, pri);
  8826. struct clx2_tx_queue *txq = &priv->txq[tx_id];
  8827. #else
  8828. struct clx2_tx_queue *txq = &priv->txq[0];
  8829. #endif /* CONFIG_IPW2200_QOS */
  8830. if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
  8831. return 1;
  8832. return 0;
  8833. }
  8834. #ifdef CONFIG_IPW2200_PROMISCUOUS
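/* Mirror outgoing frames to the promiscuous (radiotap) interface, honoring
 * the prom_priv->filter flags and optionally truncating to headers only. */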
  8835. static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
  8836. struct ieee80211_txb *txb)
  8837. {
  8838. struct ieee80211_rx_stats dummystats;
  8839. struct ieee80211_hdr *hdr;
  8840. u8 n;
  8841. u16 filter = priv->prom_priv->filter;
  8842. int hdr_only = 0;
  8843. if (filter & IPW_PROM_NO_TX)
  8844. return;
  8845. memset(&dummystats, 0, sizeof(dummystats));
8846. /* Filtering of fragment chains is done against the first fragment */
  8847. hdr = (void *)txb->fragments[0]->data;
  8848. if (ieee80211_is_management(le16_to_cpu(hdr->frame_control))) {
  8849. if (filter & IPW_PROM_NO_MGMT)
  8850. return;
  8851. if (filter & IPW_PROM_MGMT_HEADER_ONLY)
  8852. hdr_only = 1;
  8853. } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_control))) {
  8854. if (filter & IPW_PROM_NO_CTL)
  8855. return;
  8856. if (filter & IPW_PROM_CTL_HEADER_ONLY)
  8857. hdr_only = 1;
  8858. } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_control))) {
  8859. if (filter & IPW_PROM_NO_DATA)
  8860. return;
  8861. if (filter & IPW_PROM_DATA_HEADER_ONLY)
  8862. hdr_only = 1;
  8863. }
8864. for (n = 0; n < txb->nr_frags; ++n) {
  8865. struct sk_buff *src = txb->fragments[n];
  8866. struct sk_buff *dst;
  8867. struct ieee80211_radiotap_header *rt_hdr;
  8868. int len;
  8869. if (hdr_only) {
  8870. hdr = (void *)src->data;
  8871. len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
  8872. } else
  8873. len = src->len;
  8874. dst = alloc_skb(len + sizeof(*rt_hdr), GFP_ATOMIC);
  8875. if (!dst)
  8876. continue;
  8877. rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
  8878. rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
  8879. rt_hdr->it_pad = 0;
  8880. rt_hdr->it_present = 0; /* after all, it's just an idea */
  8881. rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
  8882. *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
  8883. ieee80211chan2mhz(priv->channel));
  8884. if (priv->channel > 14) /* 802.11a */
  8885. *(__le16*)skb_put(dst, sizeof(u16)) =
  8886. cpu_to_le16(IEEE80211_CHAN_OFDM |
  8887. IEEE80211_CHAN_5GHZ);
  8888. else if (priv->ieee->mode == IEEE_B) /* 802.11b */
  8889. *(__le16*)skb_put(dst, sizeof(u16)) =
  8890. cpu_to_le16(IEEE80211_CHAN_CCK |
  8891. IEEE80211_CHAN_2GHZ);
  8892. else /* 802.11g */
  8893. *(__le16*)skb_put(dst, sizeof(u16)) =
  8894. cpu_to_le16(IEEE80211_CHAN_OFDM |
  8895. IEEE80211_CHAN_2GHZ);
  8896. rt_hdr->it_len = cpu_to_le16(dst->len);
  8897. skb_copy_from_linear_data(src, skb_put(dst, len), len);
  8898. if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
  8899. dev_kfree_skb_any(dst);
  8900. }
  8901. }
  8902. #endif
  8903. static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
  8904. struct net_device *dev, int pri)
  8905. {
  8906. struct ipw_priv *priv = ieee80211_priv(dev);
  8907. unsigned long flags;
  8908. int ret;
  8909. IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
  8910. spin_lock_irqsave(&priv->lock, flags);
  8911. #ifdef CONFIG_IPW2200_PROMISCUOUS
  8912. if (rtap_iface && netif_running(priv->prom_net_dev))
  8913. ipw_handle_promiscuous_tx(priv, txb);
  8914. #endif
  8915. ret = ipw_tx_skb(priv, txb, pri);
  8916. if (ret == NETDEV_TX_OK)
  8917. __ipw_led_activity_on(priv);
  8918. spin_unlock_irqrestore(&priv->lock, flags);
  8919. return ret;
  8920. }
  8921. static void ipw_net_set_multicast_list(struct net_device *dev)
  8922. {
  8923. }
  8924. static int ipw_net_set_mac_address(struct net_device *dev, void *p)
  8925. {
  8926. struct ipw_priv *priv = ieee80211_priv(dev);
  8927. struct sockaddr *addr = p;
  8928. if (!is_valid_ether_addr(addr->sa_data))
  8929. return -EADDRNOTAVAIL;
  8930. mutex_lock(&priv->mutex);
  8931. priv->config |= CFG_CUSTOM_MAC;
  8932. memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
  8933. printk(KERN_INFO "%s: Setting MAC to %pM\n",
  8934. priv->net_dev->name, priv->mac_addr);
  8935. queue_work(priv->workqueue, &priv->adapter_restart);
  8936. mutex_unlock(&priv->mutex);
  8937. return 0;
  8938. }
  8939. static void ipw_ethtool_get_drvinfo(struct net_device *dev,
  8940. struct ethtool_drvinfo *info)
  8941. {
  8942. struct ipw_priv *p = ieee80211_priv(dev);
  8943. char vers[64];
  8944. char date[32];
  8945. u32 len;
  8946. strcpy(info->driver, DRV_NAME);
  8947. strcpy(info->version, DRV_VERSION);
  8948. len = sizeof(vers);
  8949. ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
  8950. len = sizeof(date);
  8951. ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
  8952. snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
  8953. vers, date);
  8954. strcpy(info->bus_info, pci_name(p->pci_dev));
  8955. info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
  8956. }
  8957. static u32 ipw_ethtool_get_link(struct net_device *dev)
  8958. {
  8959. struct ipw_priv *priv = ieee80211_priv(dev);
  8960. return (priv->status & STATUS_ASSOCIATED) != 0;
  8961. }
  8962. static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
  8963. {
  8964. return IPW_EEPROM_IMAGE_SIZE;
  8965. }
  8966. static int ipw_ethtool_get_eeprom(struct net_device *dev,
  8967. struct ethtool_eeprom *eeprom, u8 * bytes)
  8968. {
  8969. struct ipw_priv *p = ieee80211_priv(dev);
  8970. if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
  8971. return -EINVAL;
  8972. mutex_lock(&p->mutex);
  8973. memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
  8974. mutex_unlock(&p->mutex);
  8975. return 0;
  8976. }
  8977. static int ipw_ethtool_set_eeprom(struct net_device *dev,
  8978. struct ethtool_eeprom *eeprom, u8 * bytes)
  8979. {
  8980. struct ipw_priv *p = ieee80211_priv(dev);
  8981. int i;
  8982. if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
  8983. return -EINVAL;
  8984. mutex_lock(&p->mutex);
  8985. memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
  8986. for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
  8987. ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
  8988. mutex_unlock(&p->mutex);
  8989. return 0;
  8990. }
  8991. static const struct ethtool_ops ipw_ethtool_ops = {
  8992. .get_link = ipw_ethtool_get_link,
  8993. .get_drvinfo = ipw_ethtool_get_drvinfo,
  8994. .get_eeprom_len = ipw_ethtool_get_eeprom_len,
  8995. .get_eeprom = ipw_ethtool_get_eeprom,
  8996. .set_eeprom = ipw_ethtool_set_eeprom,
  8997. };
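/* Hard interrupt handler: read and ack INTA, disable further interrupts,
 * and defer the real work to the irq_tasklet. Returns IRQ_NONE for shared
 * or spurious interrupts and when the hardware has disappeared. */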
  8998. static irqreturn_t ipw_isr(int irq, void *data)
  8999. {
  9000. struct ipw_priv *priv = data;
  9001. u32 inta, inta_mask;
  9002. if (!priv)
  9003. return IRQ_NONE;
  9004. spin_lock(&priv->irq_lock);
  9005. if (!(priv->status & STATUS_INT_ENABLED)) {
  9006. /* IRQ is disabled */
  9007. goto none;
  9008. }
  9009. inta = ipw_read32(priv, IPW_INTA_RW);
  9010. inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
  9011. if (inta == 0xFFFFFFFF) {
  9012. /* Hardware disappeared */
  9013. IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
  9014. goto none;
  9015. }
  9016. if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
  9017. /* Shared interrupt */
  9018. goto none;
  9019. }
  9020. /* tell the device to stop sending interrupts */
  9021. __ipw_disable_interrupts(priv);
  9022. /* ack current interrupts */
  9023. inta &= (IPW_INTA_MASK_ALL & inta_mask);
  9024. ipw_write32(priv, IPW_INTA_RW, inta);
  9025. /* Cache INTA value for our tasklet */
  9026. priv->isr_inta = inta;
  9027. tasklet_schedule(&priv->irq_tasklet);
  9028. spin_unlock(&priv->irq_lock);
  9029. return IRQ_HANDLED;
  9030. none:
  9031. spin_unlock(&priv->irq_lock);
  9032. return IRQ_NONE;
  9033. }
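/* Poll the hardware RF-kill switch: while it is active, recheck every two
 * seconds; once it clears (and SW RF-kill is not set), restart the adapter. */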
  9034. static void ipw_rf_kill(void *adapter)
  9035. {
  9036. struct ipw_priv *priv = adapter;
  9037. unsigned long flags;
  9038. spin_lock_irqsave(&priv->lock, flags);
  9039. if (rf_kill_active(priv)) {
  9040. IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
  9041. if (priv->workqueue)
  9042. queue_delayed_work(priv->workqueue,
  9043. &priv->rf_kill, 2 * HZ);
  9044. goto exit_unlock;
  9045. }
  9046. /* RF Kill is now disabled, so bring the device back up */
  9047. if (!(priv->status & STATUS_RF_KILL_MASK)) {
  9048. IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
  9049. "device\n");
9050. /* we cannot do an adapter restart while inside an irq lock */
  9051. queue_work(priv->workqueue, &priv->adapter_restart);
  9052. } else
  9053. IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
  9054. "enabled\n");
  9055. exit_unlock:
  9056. spin_unlock_irqrestore(&priv->lock, flags);
  9057. }
  9058. static void ipw_bg_rf_kill(struct work_struct *work)
  9059. {
  9060. struct ipw_priv *priv =
  9061. container_of(work, struct ipw_priv, rf_kill.work);
  9062. mutex_lock(&priv->mutex);
  9063. ipw_rf_kill(priv);
  9064. mutex_unlock(&priv->mutex);
  9065. }
  9066. static void ipw_link_up(struct ipw_priv *priv)
  9067. {
  9068. priv->last_seq_num = -1;
  9069. priv->last_frag_num = -1;
  9070. priv->last_packet_time = 0;
  9071. netif_carrier_on(priv->net_dev);
  9072. cancel_delayed_work(&priv->request_scan);
  9073. cancel_delayed_work(&priv->request_direct_scan);
  9074. cancel_delayed_work(&priv->request_passive_scan);
  9075. cancel_delayed_work(&priv->scan_event);
  9076. ipw_reset_stats(priv);
  9077. /* Ensure the rate is updated immediately */
  9078. priv->last_rate = ipw_get_current_rate(priv);
  9079. ipw_gather_stats(priv);
  9080. ipw_led_link_up(priv);
  9081. notify_wx_assoc_event(priv);
  9082. if (priv->config & CFG_BACKGROUND_SCAN)
  9083. queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
  9084. }
  9085. static void ipw_bg_link_up(struct work_struct *work)
  9086. {
  9087. struct ipw_priv *priv =
  9088. container_of(work, struct ipw_priv, link_up);
  9089. mutex_lock(&priv->mutex);
  9090. ipw_link_up(priv);
  9091. mutex_unlock(&priv->mutex);
  9092. }
  9093. static void ipw_link_down(struct ipw_priv *priv)
  9094. {
  9095. ipw_led_link_down(priv);
  9096. netif_carrier_off(priv->net_dev);
  9097. notify_wx_assoc_event(priv);
  9098. /* Cancel any queued work ... */
  9099. cancel_delayed_work(&priv->request_scan);
  9100. cancel_delayed_work(&priv->request_direct_scan);
  9101. cancel_delayed_work(&priv->request_passive_scan);
  9102. cancel_delayed_work(&priv->adhoc_check);
  9103. cancel_delayed_work(&priv->gather_stats);
  9104. ipw_reset_stats(priv);
  9105. if (!(priv->status & STATUS_EXIT_PENDING)) {
  9106. /* Queue up another scan... */
  9107. queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
  9108. } else
  9109. cancel_delayed_work(&priv->scan_event);
  9110. }
  9111. static void ipw_bg_link_down(struct work_struct *work)
  9112. {
  9113. struct ipw_priv *priv =
  9114. container_of(work, struct ipw_priv, link_down);
  9115. mutex_lock(&priv->mutex);
  9116. ipw_link_down(priv);
  9117. mutex_unlock(&priv->mutex);
  9118. }
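/* Create the driver workqueue and wire up every work item, delayed work
 * item and the interrupt tasklet used by the driver. */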
  9119. static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
  9120. {
  9121. int ret = 0;
  9122. priv->workqueue = create_workqueue(DRV_NAME);
  9123. init_waitqueue_head(&priv->wait_command_queue);
  9124. init_waitqueue_head(&priv->wait_state);
  9125. INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
  9126. INIT_WORK(&priv->associate, ipw_bg_associate);
  9127. INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
  9128. INIT_WORK(&priv->system_config, ipw_system_config);
  9129. INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
  9130. INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
  9131. INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
  9132. INIT_WORK(&priv->up, ipw_bg_up);
  9133. INIT_WORK(&priv->down, ipw_bg_down);
  9134. INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
  9135. INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
  9136. INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
  9137. INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
  9138. INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
  9139. INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
  9140. INIT_WORK(&priv->roam, ipw_bg_roam);
  9141. INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
  9142. INIT_WORK(&priv->link_up, ipw_bg_link_up);
  9143. INIT_WORK(&priv->link_down, ipw_bg_link_down);
  9144. INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
  9145. INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
  9146. INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
  9147. INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
  9148. #ifdef CONFIG_IPW2200_QOS
  9149. INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
  9150. #endif /* CONFIG_IPW2200_QOS */
  9151. tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
  9152. ipw_irq_tasklet, (unsigned long)priv);
  9153. return ret;
  9154. }
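/* ieee80211 security callback: copy key material, auth mode, privacy and
 * security level settings into priv->ieee->sec, flag STATUS_SECURITY_UPDATED,
 * and program hardware crypto keys when host encryption is not in use. */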
static void shim__set_security(struct net_device *dev,
			       struct ieee80211_security *sec)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	int i;

	for (i = 0; i < 4; i++) {
		if (sec->flags & (1 << i)) {
			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
			if (sec->key_sizes[i] == 0)
				priv->ieee->sec.flags &= ~(1 << i);
			else {
				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
				       sec->key_sizes[i]);
				priv->ieee->sec.flags |= (1 << i);
			}
			priv->status |= STATUS_SECURITY_UPDATED;
		} else if (sec->level != SEC_LEVEL_1)
			priv->ieee->sec.flags &= ~(1 << i);
	}

	if (sec->flags & SEC_ACTIVE_KEY) {
		if (sec->active_key <= 3) {
			priv->ieee->sec.active_key = sec->active_key;
			priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
		} else
			priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	} else
		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;

	if ((sec->flags & SEC_AUTH_MODE) &&
	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
		priv->ieee->sec.auth_mode = sec->auth_mode;
		priv->ieee->sec.flags |= SEC_AUTH_MODE;
		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
			priv->capability |= CAP_SHARED_KEY;
		else
			priv->capability &= ~CAP_SHARED_KEY;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
		priv->ieee->sec.flags |= SEC_ENABLED;
		priv->ieee->sec.enabled = sec->enabled;
		priv->status |= STATUS_SECURITY_UPDATED;
		if (sec->enabled)
			priv->capability |= CAP_PRIVACY_ON;
		else
			priv->capability &= ~CAP_PRIVACY_ON;
	}

	if (sec->flags & SEC_ENCRYPT)
		priv->ieee->sec.encrypt = sec->encrypt;

	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
		priv->ieee->sec.level = sec->level;
		priv->ieee->sec.flags |= SEC_LEVEL;
		priv->status |= STATUS_SECURITY_UPDATED;
	}

	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
		ipw_set_hwcrypto_keys(priv);

	/* To match the current functionality of ipw2100 (which works well
	 * with various supplicants), we don't force a disassociation if the
	 * privacy capability changes ... */
#if 0
	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
	    (((priv->assoc_request.capability &
	       cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
	     (!(priv->assoc_request.capability &
		cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
		IPW_DEBUG_ASSOC("Disassociating due to capability "
				"change.\n");
		ipw_disassociate(priv);
	}
#endif
}
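/*
 * Illustrative only (added comment, not part of the original driver): a
 * caller in the ieee80211 layer would hand shim__set_security() a request
 * shaped roughly like the one below to install a 40-bit WEP key in slot 0
 * and make it the active key.  The field and flag names are the ones used
 * above; the values are made up for the example.
 *
 *	struct ieee80211_security sec = {
 *		.flags		= (1 << 0) | SEC_ACTIVE_KEY | SEC_LEVEL |
 *				  SEC_ENABLED | SEC_ENCRYPT,
 *		.active_key	= 0,
 *		.level		= SEC_LEVEL_1,
 *		.enabled	= 1,
 *		.encrypt	= 1,
 *		.key_sizes	= { 5 },
 *		.keys		= { "\x01\x02\x03\x04\x05" },
 *	};
 *	priv->ieee->set_security(dev, &sec);
 */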
static int init_supported_rates(struct ipw_priv *priv,
				struct ipw_supported_rates *rates)
{
	/* TODO: Mask out rates based on priv->rates_mask */

	memset(rates, 0, sizeof(*rates));
	/* configure supported rates */
	switch (priv->ieee->freq_band) {
	case IEEE80211_52GHZ_BAND:
		rates->ieee_mode = IPW_A_MODE;
		rates->purpose = IPW_RATE_CAPABILITIES;
		ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
					IEEE80211_OFDM_DEFAULT_RATES_MASK);
		break;

	default:		/* Mixed or 2.4GHz */
		rates->ieee_mode = IPW_G_MODE;
		rates->purpose = IPW_RATE_CAPABILITIES;
		ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
				       IEEE80211_CCK_DEFAULT_RATES_MASK);
		if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
			ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
						IEEE80211_OFDM_DEFAULT_RATES_MASK);
		}
		break;
	}

	return 0;
}
static int ipw_config(struct ipw_priv *priv)
{
	/* This is only called from ipw_up, which resets/reloads the firmware,
	   so we don't need to disable the card before configuring it */
	if (ipw_set_tx_power(priv))
		goto error;

	/* initialize adapter address */
	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
		goto error;

	/* set basic system config settings */
	init_sys_config(&priv->sys_config);

	/* Support Bluetooth if we have BT h/w on board, and user wants to.
	 * Does not support BT priority yet (don't abort or defer our Tx) */
	if (bt_coexist) {
		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];

		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
			priv->sys_config.bt_coexistence
			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
			priv->sys_config.bt_coexistence
			    |= CFG_BT_COEXISTENCE_OOB;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
		priv->sys_config.accept_all_data_frames = 1;
		priv->sys_config.accept_non_directed_frames = 1;
		priv->sys_config.accept_all_mgmt_bcpr = 1;
		priv->sys_config.accept_all_mgmt_frames = 1;
	}
#endif

	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
		priv->sys_config.answer_broadcast_ssid_probe = 1;
	else
		priv->sys_config.answer_broadcast_ssid_probe = 0;

	if (ipw_send_system_config(priv))
		goto error;

	init_supported_rates(priv, &priv->rates);
	if (ipw_send_supported_rates(priv, &priv->rates))
		goto error;

	/* Set request-to-send threshold */
	if (priv->rts_threshold) {
		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
			goto error;
	}
#ifdef CONFIG_IPW2200_QOS
	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
	ipw_qos_activate(priv, NULL);
#endif				/* CONFIG_IPW2200_QOS */

	if (ipw_set_random_seed(priv))
		goto error;

	/* final state transition to the RUN state */
	if (ipw_send_host_complete(priv))
		goto error;

	priv->status |= STATUS_INIT;

	ipw_led_init(priv);
	ipw_led_radio_on(priv);
	priv->notif_missed_beacons = 0;

	/* Set hardware WEP key if it is configured. */
	if ((priv->capability & CAP_PRIVACY_ON) &&
	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
		ipw_set_hwcrypto_keys(priv);

	return 0;

error:
	return -EIO;
}
/*
 * NOTE:
 *
 * These tables have been tested in conjunction with the
 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
 *
 * Altering these values, using them on other hardware, or using them in
 * geographies not intended for resale of the above mentioned Intel adapters
 * has not been tested.
 *
 * Remember to update the table in README.ipw2200 when changing this
 * table.
 *
 */
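/*
 * Layout note (added for clarity, not in the original source): each channel
 * initializer below is { frequency in MHz, channel number, optional
 * IEEE80211_CH_* flags }, e.g. {2412, 1} is 2.4 GHz channel 1 and
 * {5260, 52, IEEE80211_CH_PASSIVE_ONLY} is a 5 GHz channel restricted to
 * passive scanning.  The three-letter string is the SKU/country code read
 * from the EEPROM and matched against these entries in ipw_up() below.
 */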
static const struct ieee80211_geo ipw_geos[] = {
	{			/* Restricted */
		"---",
		.bg_channels = 11,
		.bg = {{2412, 1}, {2417, 2}, {2422, 3},
		       {2427, 4}, {2432, 5}, {2437, 6},
		       {2442, 7}, {2447, 8}, {2452, 9},
		       {2457, 10}, {2462, 11}},
	},

	{			/* Custom US/Canada */
		"ZZF",
		.bg_channels = 11,
		.bg = {{2412, 1}, {2417, 2}, {2422, 3},
		       {2427, 4}, {2432, 5}, {2437, 6},
		       {2442, 7}, {2447, 8}, {2452, 9},
		       {2457, 10}, {2462, 11}},
		.a_channels = 8,
		.a = {{5180, 36},
		      {5200, 40},
		      {5220, 44},
		      {5240, 48},
		      {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
		      {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
		      {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
		      {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
	},

	{			/* Rest of World */
		"ZZD",
		.bg_channels = 13,
		.bg = {{2412, 1}, {2417, 2}, {2422, 3},
		       {2427, 4}, {2432, 5}, {2437, 6},
		       {2442, 7}, {2447, 8}, {2452, 9},
		       {2457, 10}, {2462, 11}, {2467, 12},
		       {2472, 13}},
	},

	{			/* Custom USA & Europe & High */
		"ZZA",
		.bg_channels = 11,
		.bg = {{2412, 1}, {2417, 2}, {2422, 3},
		       {2427, 4}, {2432, 5}, {2437, 6},
		       {2442, 7}, {2447, 8}, {2452, 9},
		       {2457, 10}, {2462, 11}},
		.a_channels = 13,
		.a = {{5180, 36},
		      {5200, 40},
		      {5220, 44},
		      {5240, 48},
		      {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
		      {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
		      {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
		      {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
		      {5745, 149},
		      {5765, 153},
		      {5785, 157},
		      {5805, 161},
		      {5825, 165}},
	},

	{			/* Custom NA & Europe */
		"ZZB",
		.bg_channels = 11,
		.bg = {{2412, 1}, {2417, 2}, {2422, 3},
		       {2427, 4}, {2432, 5}, {2437, 6},
		       {2442, 7}, {2447, 8}, {2452, 9},
		       {2457, 10}, {2462, 11}},
		.a_channels = 13,
		.a = {{5180, 36},
		      {5200, 40},
		      {5220, 44},
		      {5240, 48},
		      {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
		      {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
		      {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
		      {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
		      {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
		      {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
		      {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
		      {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
		      {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
	},

	{			/* Custom Japan */
		"ZZC",
		.bg_channels = 11,
		.bg = {{2412, 1}, {2417, 2}, {2422, 3},
		       {2427, 4}, {2432, 5}, {2437, 6},
		       {2442, 7}, {2447, 8}, {2452, 9},
		       {2457, 10}, {2462, 11}},
		.a_channels = 4,
		.a = {{5170, 34}, {5190, 38},
		      {5210, 42}, {5230, 46}},
	},

	{			/* Custom */
		"ZZM",
		.bg_channels = 11,
		.bg = {{2412, 1}, {2417, 2}, {2422, 3},
		       {2427, 4}, {2432, 5}, {2437, 6},
		       {2442, 7}, {2447, 8}, {2452, 9},
		       {2457, 10}, {2462, 11}},
	},

	{			/* Europe */
		"ZZE",
		.bg_channels = 13,
		.bg = {{2412, 1}, {2417, 2}, {2422, 3},
		       {2427, 4}, {2432, 5}, {2437, 6},
		       {2442, 7}, {2447, 8}, {2452, 9},
		       {2457, 10}, {2462, 11}, {2467, 12},
		       {2472, 13}},
		.a_channels = 19,
		.a = {{5180, 36},
		      {5200, 40},
		      {5220, 44},
		      {5240, 48},
		      {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
		      {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
		      {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
		      {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
		      {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
		      {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
		      {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
		      {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
		      {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
		      {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
		      {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
		      {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
		      {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
		      {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
		      {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
	},

	{			/* Custom Japan */
		"ZZJ",
		.bg_channels = 14,
		.bg = {{2412, 1}, {2417, 2}, {2422, 3},
		       {2427, 4}, {2432, 5}, {2437, 6},
		       {2442, 7}, {2447, 8}, {2452, 9},
		       {2457, 10}, {2462, 11}, {2467, 12},
		       {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
		.a_channels = 4,
		.a = {{5170, 34}, {5190, 38},
		      {5210, 42}, {5230, 46}},
	},

	{			/* Rest of World */
		"ZZR",
		.bg_channels = 14,
		.bg = {{2412, 1}, {2417, 2}, {2422, 3},
		       {2427, 4}, {2432, 5}, {2437, 6},
		       {2442, 7}, {2447, 8}, {2452, 9},
		       {2457, 10}, {2462, 11}, {2467, 12},
		       {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
				    IEEE80211_CH_PASSIVE_ONLY}},
	},

	{			/* High Band */
		"ZZH",
		.bg_channels = 13,
		.bg = {{2412, 1}, {2417, 2}, {2422, 3},
		       {2427, 4}, {2432, 5}, {2437, 6},
		       {2442, 7}, {2447, 8}, {2452, 9},
		       {2457, 10}, {2462, 11},
		       {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
		       {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
		.a_channels = 4,
		.a = {{5745, 149}, {5765, 153},
		      {5785, 157}, {5805, 161}},
	},

	{			/* Custom Europe */
		"ZZG",
		.bg_channels = 13,
		.bg = {{2412, 1}, {2417, 2}, {2422, 3},
		       {2427, 4}, {2432, 5}, {2437, 6},
		       {2442, 7}, {2447, 8}, {2452, 9},
		       {2457, 10}, {2462, 11},
		       {2467, 12}, {2472, 13}},
		.a_channels = 4,
		.a = {{5180, 36}, {5200, 40},
		      {5220, 44}, {5240, 48}},
	},

	{			/* Europe */
		"ZZK",
		.bg_channels = 13,
		.bg = {{2412, 1}, {2417, 2}, {2422, 3},
		       {2427, 4}, {2432, 5}, {2437, 6},
		       {2442, 7}, {2447, 8}, {2452, 9},
		       {2457, 10}, {2462, 11},
		       {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
		       {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
		.a_channels = 24,
		.a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
		      {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
		      {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
		      {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
		      {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
		      {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
		      {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
		      {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
		      {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
		      {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
		      {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
		      {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
		      {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
		      {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
		      {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
		      {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
		      {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
		      {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
		      {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
		      {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
		      {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
		      {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
		      {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
		      {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
	},

	{			/* Europe */
		"ZZL",
		.bg_channels = 11,
		.bg = {{2412, 1}, {2417, 2}, {2422, 3},
		       {2427, 4}, {2432, 5}, {2437, 6},
		       {2442, 7}, {2447, 8}, {2452, 9},
		       {2457, 10}, {2462, 11}},
		.a_channels = 13,
		.a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
		      {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
		      {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
		      {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
		      {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
		      {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
		      {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
		      {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
		      {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
		      {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
		      {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
		      {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
		      {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
	}
};
#define MAX_HW_RESTARTS 5
static int ipw_up(struct ipw_priv *priv)
{
	int rc, i, j;

	/* Age scan list entries found before suspend */
	if (priv->suspend_time) {
		ieee80211_networks_age(priv->ieee, priv->suspend_time);
		priv->suspend_time = 0;
	}

	if (priv->status & STATUS_EXIT_PENDING)
		return -EIO;

	if (cmdlog && !priv->cmdlog) {
		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
				       GFP_KERNEL);
		if (priv->cmdlog == NULL) {
			IPW_ERROR("Error allocating %d command log entries.\n",
				  cmdlog);
			return -ENOMEM;
		} else {
			priv->cmdlog_len = cmdlog;
		}
	}

	for (i = 0; i < MAX_HW_RESTARTS; i++) {
		/* Load the microcode, firmware, and eeprom.
		 * Also start the clocks. */
		rc = ipw_load(priv);
		if (rc) {
			IPW_ERROR("Unable to load firmware: %d\n", rc);
			return rc;
		}

		ipw_init_ordinals(priv);
		if (!(priv->config & CFG_CUSTOM_MAC))
			eeprom_parse_mac(priv, priv->mac_addr);
		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);

		for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
			if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
				    ipw_geos[j].name, 3))
				break;
		}
		if (j == ARRAY_SIZE(ipw_geos)) {
			IPW_WARNING("SKU [%c%c%c] not recognized.\n",
				    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
				    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
				    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
			j = 0;
		}
		if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
			IPW_WARNING("Could not set geography.");
			return 0;
		}

		if (priv->status & STATUS_RF_KILL_SW) {
			IPW_WARNING("Radio disabled by module parameter.\n");
			return 0;
		} else if (rf_kill_active(priv)) {
			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
				    "Kill switch must be turned off for "
				    "wireless networking to work.\n");
			queue_delayed_work(priv->workqueue, &priv->rf_kill,
					   2 * HZ);
			return 0;
		}

		rc = ipw_config(priv);
		if (!rc) {
			IPW_DEBUG_INFO("Configured device on count %i\n", i);

			/* If configured to auto-associate, kick off a scan. */
			queue_delayed_work(priv->workqueue,
					   &priv->request_scan, 0);

			return 0;
		}

		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
			       i, MAX_HW_RESTARTS);

		/* We had an error bringing up the hardware, so take it
		 * all the way back down so we can try again */
		ipw_down(priv);
	}

	/* tried to restart and config the device for as long as our
	 * patience could withstand */
	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
	return -EIO;
}
static void ipw_bg_up(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, up);
	mutex_lock(&priv->mutex);
	ipw_up(priv);
	mutex_unlock(&priv->mutex);
}

static void ipw_deinit(struct ipw_priv *priv)
{
	int i;

	if (priv->status & STATUS_SCANNING) {
		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
		ipw_abort_scan(priv);
	}

	if (priv->status & STATUS_ASSOCIATED) {
		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
		ipw_disassociate(priv);
	}

	ipw_led_shutdown(priv);

	/* Wait up to 1s for status to change to not scanning and not
	 * associated (disassociation can take a while for a full 802.11
	 * exchange) */
	for (i = 1000; i && (priv->status &
			     (STATUS_DISASSOCIATING |
			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
		udelay(10);

	if (priv->status & (STATUS_DISASSOCIATING |
			    STATUS_ASSOCIATED | STATUS_SCANNING))
		IPW_DEBUG_INFO("Still associated or scanning...\n");
	else
		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);

	/* Attempt to disable the card */
	ipw_send_card_disable(priv, 0);

	priv->status &= ~STATUS_INIT;
}
static void ipw_down(struct ipw_priv *priv)
{
	int exit_pending = priv->status & STATUS_EXIT_PENDING;

	priv->status |= STATUS_EXIT_PENDING;

	if (ipw_is_init(priv))
		ipw_deinit(priv);

	/* Wipe out the EXIT_PENDING status bit if we are not actually
	 * exiting the module */
	if (!exit_pending)
		priv->status &= ~STATUS_EXIT_PENDING;

	/* tell the device to stop sending interrupts */
	ipw_disable_interrupts(priv);

	/* Clear all bits but the RF Kill */
	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
	netif_carrier_off(priv->net_dev);

	ipw_stop_nic(priv);

	ipw_led_radio_off(priv);
}

static void ipw_bg_down(struct work_struct *work)
{
	struct ipw_priv *priv =
		container_of(work, struct ipw_priv, down);
	mutex_lock(&priv->mutex);
	ipw_down(priv);
	mutex_unlock(&priv->mutex);
}

/* Called by register_netdev() */
static int ipw_net_init(struct net_device *dev)
{
	struct ipw_priv *priv = ieee80211_priv(dev);
	mutex_lock(&priv->mutex);

	if (ipw_up(priv)) {
		mutex_unlock(&priv->mutex);
		return -EIO;
	}

	mutex_unlock(&priv->mutex);
	return 0;
}
/* PCI driver stuff */
static struct pci_device_id card_ids[] = {
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
	{PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* BG */
	{PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */
	{PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},	/* ABG */

	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, card_ids);
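/*
 * Field-order reminder (added comment, not in the original source): each
 * positional initializer above is { vendor, device, subvendor, subdevice,
 * class, class_mask, driver_data }, so e.g. the 0x104f and 0x422x entries
 * match any subsystem IDs, while the 0x1043 entries only bind to the listed
 * subsystem vendor/device pairs.
 */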
static struct attribute *ipw_sysfs_entries[] = {
	&dev_attr_rf_kill.attr,
	&dev_attr_direct_dword.attr,
	&dev_attr_indirect_byte.attr,
	&dev_attr_indirect_dword.attr,
	&dev_attr_mem_gpio_reg.attr,
	&dev_attr_command_event_reg.attr,
	&dev_attr_nic_type.attr,
	&dev_attr_status.attr,
	&dev_attr_cfg.attr,
	&dev_attr_error.attr,
	&dev_attr_event_log.attr,
	&dev_attr_cmd_log.attr,
	&dev_attr_eeprom_delay.attr,
	&dev_attr_ucode_version.attr,
	&dev_attr_rtc.attr,
	&dev_attr_scan_age.attr,
	&dev_attr_led.attr,
	&dev_attr_speed_scan.attr,
	&dev_attr_net_stats.attr,
	&dev_attr_channels.attr,
#ifdef CONFIG_IPW2200_PROMISCUOUS
	&dev_attr_rtap_iface.attr,
	&dev_attr_rtap_filter.attr,
#endif
	NULL
};

static struct attribute_group ipw_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = ipw_sysfs_entries,
};
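/*
 * Added note (illustrative): because .name is NULL, these attributes appear
 * directly in the bound PCI device's sysfs directory, e.g. (hypothetical
 * path) /sys/bus/pci/devices/0000:02:00.0/rf_kill.  The group is created in
 * ipw_pci_probe() with sysfs_create_group() and torn down again in
 * ipw_pci_remove().
 */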
#ifdef CONFIG_IPW2200_PROMISCUOUS
static int ipw_prom_open(struct net_device *dev)
{
	struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
	struct ipw_priv *priv = prom_priv->priv;

	IPW_DEBUG_INFO("prom dev->open\n");
	netif_carrier_off(dev);

	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
		priv->sys_config.accept_all_data_frames = 1;
		priv->sys_config.accept_non_directed_frames = 1;
		priv->sys_config.accept_all_mgmt_bcpr = 1;
		priv->sys_config.accept_all_mgmt_frames = 1;

		ipw_send_system_config(priv);
	}

	return 0;
}

static int ipw_prom_stop(struct net_device *dev)
{
	struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
	struct ipw_priv *priv = prom_priv->priv;

	IPW_DEBUG_INFO("prom dev->stop\n");

	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
		priv->sys_config.accept_all_data_frames = 0;
		priv->sys_config.accept_non_directed_frames = 0;
		priv->sys_config.accept_all_mgmt_bcpr = 0;
		priv->sys_config.accept_all_mgmt_frames = 0;

		ipw_send_system_config(priv);
	}

	return 0;
}

static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	IPW_DEBUG_INFO("prom dev->xmit\n");
	return -EOPNOTSUPP;
}

static const struct net_device_ops ipw_prom_netdev_ops = {
	.ndo_open = ipw_prom_open,
	.ndo_stop = ipw_prom_stop,
	.ndo_start_xmit = ipw_prom_hard_start_xmit,
	.ndo_change_mtu = ieee80211_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static int ipw_prom_alloc(struct ipw_priv *priv)
{
	int rc = 0;

	if (priv->prom_net_dev)
		return -EPERM;

	priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
	if (priv->prom_net_dev == NULL)
		return -ENOMEM;

	priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
	priv->prom_priv->priv = priv;

	strcpy(priv->prom_net_dev->name, "rtap%d");
	memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);

	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
	priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;

	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);

	rc = register_netdev(priv->prom_net_dev);
	if (rc) {
		free_ieee80211(priv->prom_net_dev);
		priv->prom_net_dev = NULL;
		return rc;
	}

	return 0;
}

static void ipw_prom_free(struct ipw_priv *priv)
{
	if (!priv->prom_net_dev)
		return;

	unregister_netdev(priv->prom_net_dev);
	free_ieee80211(priv->prom_net_dev);

	priv->prom_net_dev = NULL;
}

#endif
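/*
 * Usage sketch (added comment, not part of the original source): when the
 * driver is built with CONFIG_IPW2200_PROMISCUOUS and loaded with
 * rtap_iface=1, the code above registers a second, transmit-less netdev
 * named "rtapN" that delivers frames with radiotap headers
 * (ARPHRD_IEEE80211_RADIOTAP).  A hypothetical capture session might look
 * like:
 *
 *	ip link set rtap0 up
 *	tcpdump -i rtap0
 *
 * Opening the rtap device switches the firmware filters to accept all data
 * and management frames (see ipw_prom_open() above); closing it restores
 * the normal filtering.
 */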
static const struct net_device_ops ipw_netdev_ops = {
	.ndo_init = ipw_net_init,
	.ndo_open = ipw_net_open,
	.ndo_stop = ipw_net_stop,
	.ndo_set_multicast_list = ipw_net_set_multicast_list,
	.ndo_set_mac_address = ipw_net_set_mac_address,
	.ndo_start_xmit = ieee80211_xmit,
	.ndo_change_mtu = ieee80211_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
};
static int __devinit ipw_pci_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err = 0;
	struct net_device *net_dev;
	void __iomem *base;
	u32 length, val;
	struct ipw_priv *priv;
	int i;

	net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
	if (net_dev == NULL) {
		err = -ENOMEM;
		goto out;
	}

	priv = ieee80211_priv(net_dev);
	priv->ieee = netdev_priv(net_dev);

	priv->net_dev = net_dev;
	priv->pci_dev = pdev;
	ipw_debug_level = debug;
	spin_lock_init(&priv->irq_lock);
	spin_lock_init(&priv->lock);
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);

	mutex_init(&priv->mutex);
	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_free_ieee80211;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
		goto out_pci_disable_device;
	}

	pci_set_drvdata(pdev, priv);

	err = pci_request_regions(pdev, DRV_NAME);
	if (err)
		goto out_pci_disable_device;

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	length = pci_resource_len(pdev, 0);
	priv->hw_len = length;

	base = pci_ioremap_bar(pdev, 0);
	if (!base) {
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	priv->hw_base = base;
	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);

	err = ipw_setup_deferred_work(priv);
	if (err) {
		IPW_ERROR("Unable to setup deferred work\n");
		goto out_iounmap;
	}

	ipw_sw_reset(priv, 1);

	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
	if (err) {
		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
		goto out_destroy_workqueue;
	}

	SET_NETDEV_DEV(net_dev, &pdev->dev);

	mutex_lock(&priv->mutex);

	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
	priv->ieee->set_security = shim__set_security;
	priv->ieee->is_queue_full = ipw_net_is_queue_full;

#ifdef CONFIG_IPW2200_QOS
	priv->ieee->is_qos_active = ipw_is_qos_active;
	priv->ieee->handle_probe_response = ipw_handle_beacon;
	priv->ieee->handle_beacon = ipw_handle_probe_response;
	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
#endif				/* CONFIG_IPW2200_QOS */

	priv->ieee->perfect_rssi = -20;
	priv->ieee->worst_rssi = -85;

	net_dev->netdev_ops = &ipw_netdev_ops;
	priv->wireless_data.spy_data = &priv->ieee->spy_data;
	net_dev->wireless_data = &priv->wireless_data;
	net_dev->wireless_handlers = &ipw_wx_handler_def;
	net_dev->ethtool_ops = &ipw_ethtool_ops;
	net_dev->irq = pdev->irq;
	net_dev->base_addr = (unsigned long)priv->hw_base;
	net_dev->mem_start = pci_resource_start(pdev, 0);
	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;

	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
	if (err) {
		IPW_ERROR("failed to create sysfs device attributes\n");
		mutex_unlock(&priv->mutex);
		goto out_release_irq;
	}

	mutex_unlock(&priv->mutex);
	err = register_netdev(net_dev);
	if (err) {
		IPW_ERROR("failed to register network device\n");
		goto out_remove_sysfs;
	}

#ifdef CONFIG_IPW2200_PROMISCUOUS
	if (rtap_iface) {
		err = ipw_prom_alloc(priv);
		if (err) {
			IPW_ERROR("Failed to register promiscuous network "
				  "device (error %d).\n", err);
			unregister_netdev(priv->net_dev);
			goto out_remove_sysfs;
		}
	}
#endif

	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
	       "channels, %d 802.11a channels)\n",
	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
	       priv->ieee->geo.a_channels);

	return 0;

out_remove_sysfs:
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
out_release_irq:
	free_irq(pdev->irq, priv);
out_destroy_workqueue:
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;
out_iounmap:
	iounmap(priv->hw_base);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
out_free_ieee80211:
	free_ieee80211(priv->net_dev);
out:
	return err;
}
static void __devexit ipw_pci_remove(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct list_head *p, *q;
	int i;

	if (!priv)
		return;

	mutex_lock(&priv->mutex);

	priv->status |= STATUS_EXIT_PENDING;
	ipw_down(priv);
	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);

	mutex_unlock(&priv->mutex);

	unregister_netdev(priv->net_dev);

	if (priv->rxq) {
		ipw_rx_queue_free(priv, priv->rxq);
		priv->rxq = NULL;
	}
	ipw_tx_queue_free(priv);

	if (priv->cmdlog) {
		kfree(priv->cmdlog);
		priv->cmdlog = NULL;
	}

	/* ipw_down will ensure that there is no more pending work in the
	 * workqueue, so we can safely cancel the work items and destroy
	 * it now. */
	cancel_delayed_work(&priv->adhoc_check);
	cancel_delayed_work(&priv->gather_stats);
	cancel_delayed_work(&priv->request_scan);
	cancel_delayed_work(&priv->request_direct_scan);
	cancel_delayed_work(&priv->request_passive_scan);
	cancel_delayed_work(&priv->scan_event);
	cancel_delayed_work(&priv->rf_kill);
	cancel_delayed_work(&priv->scan_check);
	destroy_workqueue(priv->workqueue);
	priv->workqueue = NULL;

	/* Free MAC hash list for ADHOC */
	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
			list_del(p);
			kfree(list_entry(p, struct ipw_ibss_seq, list));
		}
	}

	kfree(priv->error);
	priv->error = NULL;

#ifdef CONFIG_IPW2200_PROMISCUOUS
	ipw_prom_free(priv);
#endif

	free_irq(pdev->irq, priv);
	iounmap(priv->hw_base);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_ieee80211(priv->net_dev);
	free_firmware();
}
#ifdef CONFIG_PM
static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;

	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	/* Remove the PRESENT state of the device */
	netif_device_detach(dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	priv->suspend_at = get_seconds();

	return 0;
}

static int ipw_pci_resume(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);
	struct net_device *dev = priv->net_dev;
	int err;
	u32 val;

	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);

	pci_set_power_state(pdev, PCI_D0);
	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
		       dev->name);
		return err;
	}
	pci_restore_state(pdev);

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state.  pci_restore_state won't help
	 * here since it only restores the first 64 bytes of the PCI config
	 * header.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	/* Set the device back into the PRESENT state; this will also wake
	 * the queue if needed */
	netif_device_attach(dev);

	priv->suspend_time = get_seconds() - priv->suspend_at;

	/* Bring the device back up */
	queue_work(priv->workqueue, &priv->up);

	return 0;
}
#endif
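/*
 * Added note: suspend_at is recorded on the way down and suspend_time is
 * computed on resume; ipw_up() (scheduled above via the 'up' work item)
 * passes that interval to ieee80211_networks_age() so that scan results
 * collected before the suspend are aged out rather than presented as fresh.
 */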
static void ipw_pci_shutdown(struct pci_dev *pdev)
{
	struct ipw_priv *priv = pci_get_drvdata(pdev);

	/* Take down the device; powers it off, etc. */
	ipw_down(priv);

	pci_disable_device(pdev);
}

/* driver initialization stuff */
static struct pci_driver ipw_driver = {
	.name = DRV_NAME,
	.id_table = card_ids,
	.probe = ipw_pci_probe,
	.remove = __devexit_p(ipw_pci_remove),
#ifdef CONFIG_PM
	.suspend = ipw_pci_suspend,
	.resume = ipw_pci_resume,
#endif
	.shutdown = ipw_pci_shutdown,
};

static int __init ipw_init(void)
{
	int ret;

	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");

	ret = pci_register_driver(&ipw_driver);
	if (ret) {
		IPW_ERROR("Unable to initialize PCI module\n");
		return ret;
	}

	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
	if (ret) {
		IPW_ERROR("Unable to create driver sysfs file\n");
		pci_unregister_driver(&ipw_driver);
		return ret;
	}

	return ret;
}

static void __exit ipw_exit(void)
{
	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
	pci_unregister_driver(&ipw_driver);
}
module_param(disable, int, 0444);
MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");

module_param(associate, int, 0444);
MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");

module_param(auto_create, int, 0444);
MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");

module_param(led, int, 0444);
MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");

module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");

module_param(channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");

#ifdef CONFIG_IPW2200_PROMISCUOUS
module_param(rtap_iface, int, 0444);
MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
#endif

#ifdef CONFIG_IPW2200_QOS
module_param(qos_enable, int, 0444);
MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");

module_param(qos_burst_enable, int, 0444);
MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");

module_param(qos_no_ack_mask, int, 0444);
MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");

module_param(burst_duration_CCK, int, 0444);
MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");

module_param(burst_duration_OFDM, int, 0444);
MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
#endif				/* CONFIG_IPW2200_QOS */

#ifdef CONFIG_IPW2200_MONITOR
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
#else
module_param(mode, int, 0444);
MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
#endif

module_param(bt_coexist, int, 0444);
MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");

module_param(hwcrypto, int, 0444);
MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");

module_param(cmdlog, int, 0444);
MODULE_PARM_DESC(cmdlog,
		 "allocate a ring buffer for logging firmware commands");

module_param(roaming, int, 0444);
MODULE_PARM_DESC(roaming, "enable roaming support (default on)");

module_param(antenna, int, 0444);
MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");

module_exit(ipw_exit);
module_init(ipw_init);
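/*
 * Added usage sketch (hypothetical values, not part of the original file):
 * the module parameters declared above are set at load time, for example:
 *
 *	modprobe ipw2200 hwcrypto=1 led=1 mode=0
 *
 * or made persistent via a modprobe configuration file, e.g. a line such as
 * "options ipw2200 led=1" in /etc/modprobe.d/ipw2200.conf.  The debug
 * parameter is a bit mask whose meaning is defined by the IPW_DL_* debug
 * levels elsewhere in this driver.
 */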