be_cmds.c

/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

static struct be_cmd_priv_map cmd_priv_map[] = {
        {
                OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
                CMD_SUBSYSTEM_ETH,
                BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        },
        {
                OPCODE_COMMON_GET_FLOW_CONTROL,
                CMD_SUBSYSTEM_COMMON,
                BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        },
        {
                OPCODE_COMMON_SET_FLOW_CONTROL,
                CMD_SUBSYSTEM_COMMON,
                BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        },
        {
                OPCODE_ETH_GET_PPORT_STATS,
                CMD_SUBSYSTEM_ETH,
                BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        },
        {
                OPCODE_COMMON_GET_PHY_DETAILS,
                CMD_SUBSYSTEM_COMMON,
                BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
        }
};

static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
                           u8 subsystem)
{
        int i;
        int num_entries = ARRAY_SIZE(cmd_priv_map);
        u32 cmd_privileges = adapter->cmd_privileges;

        for (i = 0; i < num_entries; i++)
                if (opcode == cmd_priv_map[i].opcode &&
                    subsystem == cmd_priv_map[i].subsystem)
                        if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
                                return false;

        return true;
}
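
/* Illustrative use (not a call that appears verbatim in this file): for a
 * function whose privilege mask lacks BE_PRIV_LNKMGMT,
 *
 *      be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
 *                     CMD_SUBSYSTEM_COMMON)
 *
 * returns false, and callers such as lancer_cmd_get_pport_stats() below
 * then fail the request with -EPERM before touching the MCCQ.
 */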

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
        return wrb->payload.embedded_payload;
}

static void be_mcc_notify(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;

        if (be_error(adapter))
                return;

        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
        u32 flags;

        if (compl->flags != 0) {
                flags = le32_to_cpu(compl->flags);
                if (flags & CQE_FLAGS_VALID_MASK) {
                        compl->flags = flags;
                        return true;
                }
        }
        return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
        compl->flags = 0;
}

static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
        unsigned long addr;

        addr = tag1;
        addr = ((addr << 16) << 16) | tag0;
        return (void *)addr;
}
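
/* tag0/tag1 hold the two halves of the request's virtual address, stored
 * by be_wrb_cmd_hdr_prepare() below (tag0 = low 32 bits, tag1 = high 32
 * bits). The double shift '((addr << 16) << 16)' is deliberate: when
 * 'unsigned long' is 32 bits wide it evaluates to 0 instead of invoking
 * an undefined shift by 32, so tag1 is simply ignored there.
 */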

static int be_mcc_compl_process(struct be_adapter *adapter,
                                struct be_mcc_compl *compl)
{
        u16 compl_status, extd_status;
        struct be_cmd_resp_hdr *resp_hdr;
        u8 opcode = 0, subsystem = 0;

        /* Just swap the status to host endian; mcc tag is opaquely copied
         * from mcc_wrb */
        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                        CQE_STATUS_COMPL_MASK;

        resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);

        if (resp_hdr) {
                opcode = resp_hdr->opcode;
                subsystem = resp_hdr->subsystem;
        }

        if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
             (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
            (subsystem == CMD_SUBSYSTEM_COMMON)) {
                adapter->flash_status = compl_status;
                complete(&adapter->flash_compl);
        }

        if (compl_status == MCC_STATUS_SUCCESS) {
                if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
                     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
                    (subsystem == CMD_SUBSYSTEM_ETH)) {
                        be_parse_stats(adapter);
                        adapter->stats_cmd_sent = false;
                }
                if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
                    subsystem == CMD_SUBSYSTEM_COMMON) {
                        struct be_cmd_resp_get_cntl_addnl_attribs *resp =
                                (void *)resp_hdr;
                        adapter->drv_stats.be_on_die_temperature =
                                resp->on_die_temperature;
                }
        } else {
                if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
                        adapter->be_get_temp_freq = 0;

                if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
                    compl_status == MCC_STATUS_ILLEGAL_REQUEST)
                        goto done;

                if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
                        dev_warn(&adapter->pdev->dev,
                                 "VF is not privileged to issue opcode %d-%d\n",
                                 opcode, subsystem);
                } else {
                        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                        CQE_STATUS_EXTD_MASK;
                        dev_err(&adapter->pdev->dev,
                                "opcode %d-%d failed:status %d-%d\n",
                                opcode, subsystem, compl_status, extd_status);
                }
        }
done:
        return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
                                        struct be_async_event_link_state *evt)
{
        /* When link status changes, link speed must be re-queried from FW */
        adapter->phy.link_speed = -1;

        /* Ignore physical link event */
        if (lancer_chip(adapter) &&
            !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
                return;

        /* For the initial link status do not rely on the ASYNC event as
         * it may not be received in some cases.
         */
        if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
                be_link_status_update(adapter, evt->port_link_status);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
                struct be_async_event_grp5_cos_priority *evt)
{
        if (evt->valid) {
                adapter->vlan_prio_bmap = evt->available_priority_bmap;
                adapter->recommended_prio &= ~VLAN_PRIO_MASK;
                adapter->recommended_prio =
                        evt->reco_default_priority << VLAN_PRIO_SHIFT;
        }
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
                struct be_async_event_grp5_qos_link_speed *evt)
{
        if (adapter->phy.link_speed >= 0 &&
            evt->physical_port == adapter->port_num)
                adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
                struct be_async_event_grp5_pvid_state *evt)
{
        if (evt->enabled)
                adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
        else
                adapter->pvid = 0;
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
                                      u32 trailer, struct be_mcc_compl *evt)
{
        u8 event_type = 0;

        event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
                        ASYNC_TRAILER_EVENT_TYPE_MASK;

        switch (event_type) {
        case ASYNC_EVENT_COS_PRIORITY:
                be_async_grp5_cos_priority_process(adapter,
                        (struct be_async_event_grp5_cos_priority *)evt);
                break;
        case ASYNC_EVENT_QOS_SPEED:
                be_async_grp5_qos_speed_process(adapter,
                        (struct be_async_event_grp5_qos_link_speed *)evt);
                break;
        case ASYNC_EVENT_PVID_STATE:
                be_async_grp5_pvid_state_process(adapter,
                        (struct be_async_event_grp5_pvid_state *)evt);
                break;
        default:
                dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
                break;
        }
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
                                     u32 trailer, struct be_mcc_compl *cmp)
{
        u8 event_type = 0;
        struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

        event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
                        ASYNC_TRAILER_EVENT_TYPE_MASK;

        switch (event_type) {
        case ASYNC_DEBUG_EVENT_TYPE_QNQ:
                if (evt->valid)
                        adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
                adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
                break;
        default:
                dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
                break;
        }
}

static inline bool is_link_state_evt(u32 trailer)
{
        return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
                ASYNC_TRAILER_EVENT_CODE_MASK) ==
               ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
                ASYNC_EVENT_CODE_GRP_5);
}

static inline bool is_dbg_evt(u32 trailer)
{
        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
                ASYNC_EVENT_CODE_QNQ);
}
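
/* The three helpers above classify an async completion by the event_code
 * field of its trailer word (compl->flags); a rough sketch, with the exact
 * bit positions defined by the ASYNC_TRAILER_EVENT_* shifts/masks in
 * be_cmds.h:
 *
 *      trailer: [ ... | event_code | ... | event_type | ... ]
 *
 * be_process_mcc() below first matches on event_code; the grp5/dbg
 * handlers then dispatch on event_type.
 */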

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
        struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
        struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

        if (be_mcc_compl_is_new(compl)) {
                queue_tail_inc(mcc_cq);
                return compl;
        }
        return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
        spin_lock_bh(&adapter->mcc_cq_lock);

        be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
        adapter->mcc_obj.rearm_cq = true;

        spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
        spin_lock_bh(&adapter->mcc_cq_lock);

        adapter->mcc_obj.rearm_cq = false;
        be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

        spin_unlock_bh(&adapter->mcc_cq_lock);
}

int be_process_mcc(struct be_adapter *adapter)
{
        struct be_mcc_compl *compl;
        int num = 0, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

        spin_lock(&adapter->mcc_cq_lock);
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
                        if (is_link_state_evt(compl->flags))
                                be_async_link_state_process(adapter,
                                   (struct be_async_event_link_state *)compl);
                        else if (is_grp5_evt(compl->flags))
                                be_async_grp5_evt_process(adapter,
                                                          compl->flags, compl);
                        else if (is_dbg_evt(compl->flags))
                                be_async_dbg_evt_process(adapter,
                                                         compl->flags, compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                        status = be_mcc_compl_process(adapter, compl);
                        atomic_dec(&mcc_obj->q.used);
                }
                be_mcc_compl_use(compl);
                num++;
        }

        if (num)
                be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

        spin_unlock(&adapter->mcc_cq_lock);
        return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout 120000 /* 12s timeout */
        int i, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

        for (i = 0; i < mcc_timeout; i++) {
                if (be_error(adapter))
                        return -EIO;

                local_bh_disable();
                status = be_process_mcc(adapter);
                local_bh_enable();

                if (atomic_read(&mcc_obj->q.used) == 0)
                        break;
                udelay(100);
        }
        if (i == mcc_timeout) {
                dev_err(&adapter->pdev->dev, "FW not responding\n");
                adapter->fw_timeout = true;
                return -EIO;
        }
        return status;
}
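
/* Timeout math: mcc_timeout iterations of udelay(100) bound the wait at
 * 120000 * 100us = 12s, matching the comment on the #define above.
 */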

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
        int status;
        struct be_mcc_wrb *wrb;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
        u16 index = mcc_obj->q.head;
        struct be_cmd_resp_hdr *resp;

        index_dec(&index, mcc_obj->q.len);
        wrb = queue_index_node(&mcc_obj->q, index);

        resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

        be_mcc_notify(adapter);

        status = be_mcc_wait_compl(adapter);
        if (status == -EIO)
                goto out;

        status = resp->status;
out:
        return status;
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
        int msecs = 0;
        u32 ready;

        do {
                if (be_error(adapter))
                        return -EIO;

                ready = ioread32(db);
                if (ready == 0xffffffff)
                        return -1;

                ready &= MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;

                if (msecs > 4000) {
                        dev_err(&adapter->pdev->dev, "FW not responding\n");
                        adapter->fw_timeout = true;
                        be_detect_error(adapter);
                        return -1;
                }

                msleep(1);
                msecs++;
        } while (true);

        return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
        int status;
        u32 val = 0;
        void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        val = 0;
        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
        val |= (u32)(mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        /* A cq entry has been made now */
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(adapter, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status)
                        return status;
        } else {
                dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
                return -1;
        }
        return 0;
}
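
/* Doorbell encoding note (a sketch of the two-step protocol above): the
 * low write carries dma bits 4-33 at register bits 2-31, so the mailbox's
 * low 4 address bits are never transmitted, i.e. a 16-byte-aligned mailbox
 * is assumed; the high write carries bits 34-63 plus MPU_MAILBOX_DB_HI_MASK
 * to distinguish the two steps.
 */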

static u16 be_POST_stage_get(struct be_adapter *adapter)
{
        u32 sem;

        if (BEx_chip(adapter))
                sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
        else
                pci_read_config_dword(adapter->pdev,
                                      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

        return sem & POST_STAGE_MASK;
}

int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
        u32 sliport_status;
        int status = 0, i;

        for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_RDY_MASK)
                        break;

                msleep(1000);
        }

        if (i == SLIPORT_READY_TIMEOUT)
                status = -1;

        return status;
}

static bool lancer_provisioning_error(struct be_adapter *adapter)
{
        u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;

        sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
        if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
                sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
                sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);

                if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
                    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
                        return true;
        }
        return false;
}

int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
        int status;
        u32 sliport_status, err, reset_needed;
        bool resource_error;

        resource_error = lancer_provisioning_error(adapter);
        if (resource_error)
                return -1;

        status = lancer_wait_ready(adapter);
        if (!status) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                err = sliport_status & SLIPORT_STATUS_ERR_MASK;
                reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
                if (err && reset_needed) {
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                  adapter->db + SLIPORT_CONTROL_OFFSET);

                        /* check adapter has corrected the error */
                        status = lancer_wait_ready(adapter);
                        sliport_status = ioread32(adapter->db +
                                                  SLIPORT_STATUS_OFFSET);
                        sliport_status &= (SLIPORT_STATUS_ERR_MASK |
                                           SLIPORT_STATUS_RN_MASK);
                        if (status || sliport_status)
                                status = -1;
                } else if (err || reset_needed) {
                        status = -1;
                }
        }

        /* Stop error recovery if the error is not recoverable.
         * A no-resource error is temporary and will go away once the PF
         * provisions resources.
         */
        resource_error = lancer_provisioning_error(adapter);
        if (status == -1 && !resource_error)
                adapter->eeh_error = true;

        return status;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
        u16 stage;
        int status, timeout = 0;
        struct device *dev = &adapter->pdev->dev;

        if (lancer_chip(adapter)) {
                status = lancer_wait_ready(adapter);
                return status;
        }

        do {
                stage = be_POST_stage_get(adapter);
                if (stage == POST_STAGE_ARMFW_RDY)
                        return 0;

                dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
                if (msleep_interruptible(2000)) {
                        dev_err(dev, "Waiting for POST aborted\n");
                        return -EINTR;
                }
                timeout += 2;
        } while (timeout < 60);

        dev_err(dev, "POST timeout; stage=0x%x\n", stage);
        return -1;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
        return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                                   u8 subsystem, u8 opcode, int cmd_len,
                                   struct be_mcc_wrb *wrb,
                                   struct be_dma_mem *mem)
{
        struct be_sge *sge;
        unsigned long addr = (unsigned long)req_hdr;
        u64 req_addr = addr;

        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
        req_hdr->version = 0;

        wrb->tag0 = req_addr & 0xFFFFFFFF;
        wrb->tag1 = upper_32_bits(req_addr);

        wrb->payload_length = cmd_len;
        if (mem) {
                wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
                        MCC_WRB_SGE_CNT_SHIFT;
                sge = nonembedded_sgl(wrb);
                sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
                sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
                sge->len = cpu_to_le32(mem->size);
        } else
                wrb->embedded |= MCC_WRB_EMBEDDED_MASK;

        be_dws_cpu_to_le(wrb, 8);
}
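
/* Usage sketch (mirrors the callers below): an embedded command passes
 * mem == NULL so the request lives inside the WRB itself,
 *
 *      req = embedded_payload(wrb);
 *      be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 *                             OPCODE_COMMON_EQ_CREATE, sizeof(*req),
 *                             wrb, NULL);
 *
 * while non-embedded commands (e.g. be_cmd_get_stats()) pass the DMA
 * block, and the header is built at mem->va with an SGE pointing at it.
 */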

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
                                      struct be_dma_mem *mem)
{
        int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size),
                               max_pages);
        u64 dma = (u64)mem->dma;

        for (i = 0; i < buf_pages; i++) {
                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
                dma += PAGE_SIZE_4K;
        }
}
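
/* Example (illustrative addresses): a 16KB ring at DMA address 0x10000
 * yields four entries 0x10000, 0x11000, 0x12000, 0x13000, spaced
 * PAGE_SIZE_4K apart and split into le32 hi/lo halves.
 */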

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
        const u32 round = 10;
        u32 multiplier;

        if (usec_delay == 0)
                multiplier = 0;
        else {
                u32 interrupt_rate = 1000000 / usec_delay;
                /* Max delay, corresponding to the lowest interrupt rate */
                if (interrupt_rate == 0)
                        multiplier = 1023;
                else {
                        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
                        multiplier /= interrupt_rate;
                        /* Round the multiplier to the closest value. */
                        multiplier = (multiplier + round / 2) / round;
                        multiplier = min(multiplier, (u32)1023);
                }
        }
        return multiplier;
}
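
/* Worked example: usec_delay = 96 gives interrupt_rate = 1000000 / 96
 * = 10416; (651042 - 10416) * 10 / 10416 = 615, and rounding yields
 * (615 + 5) / 10 = 62, well under the 1023 clamp.
 */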

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_wrb *wrb
                = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;

        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        struct be_mcc_wrb *wrb;

        if (!mccq->created)
                return NULL;

        if (atomic_read(&mccq->used) >= mccq->len) {
                dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
                return NULL;
        }

        wrb = queue_head_node(mccq);
        queue_head_inc(mccq);
        atomic_inc(&mccq->used);
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
        u8 *wrb;
        int status;

        if (lancer_chip(adapter))
                return 0;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = (u8 *)wrb_from_mbox(adapter);
        *wrb++ = 0xFF;
        *wrb++ = 0x12;
        *wrb++ = 0x34;
        *wrb++ = 0xFF;
        *wrb++ = 0xFF;
        *wrb++ = 0x56;
        *wrb++ = 0x78;
        *wrb = 0xFF;

        status = be_mbox_notify_wait(adapter);

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
        u8 *wrb;
        int status;

        if (lancer_chip(adapter))
                return 0;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = (u8 *)wrb_from_mbox(adapter);
        *wrb++ = 0xFF;
        *wrb++ = 0xAA;
        *wrb++ = 0xBB;
        *wrb++ = 0xFF;
        *wrb++ = 0xFF;
        *wrb++ = 0xCC;
        *wrb++ = 0xDD;
        *wrb = 0xFF;

        status = be_mbox_notify_wait(adapter);

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

int be_cmd_eq_create(struct be_adapter *adapter,
                     struct be_queue_info *eq, int eq_delay)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eq_create *req;
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
                               NULL);

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
        /* 4byte eqe */
        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
                      __ilog2_u32(eq->len / 256));
        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
                      eq_delay_to_mult(eq_delay));
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
                          bool permanent, u32 if_handle, u32 pmac_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mac_query *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req),
                               wrb, NULL);
        req->type = MAC_ADDRESS_TYPE_NETWORK;
        if (permanent) {
                req->permanent = 1;
        } else {
                req->if_id = cpu_to_le16((u16)if_handle);
                req->pmac_id = cpu_to_le32(pmac_id);
                req->permanent = 0;
        }

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
                memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                    u32 if_id, u32 *pmac_id, u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_add *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req),
                               wrb, NULL);

        req->hdr.domain = domain;
        req->if_id = cpu_to_le32(if_id);
        memcpy(req->mac_address, mac_addr, ETH_ALEN);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
                *pmac_id = le32_to_cpu(resp->pmac_id);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);

        if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
                status = -EPERM;

        return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id,
                    u32 dom)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_del *req;
        int status;

        if (pmac_id == -1)
                return 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
                               wrb, NULL);

        req->hdr.domain = dom;
        req->if_id = cpu_to_le32(if_id);
        req->pmac_id = cpu_to_le32(pmac_id);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
                     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cq_create *req;
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
                               NULL);

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        if (lancer_chip(adapter)) {
                req->hdr.version = 2;
                req->page_size = 1; /* 1 for 4K */
                AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
                              no_delay);
                AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
                              __ilog2_u32(cq->len / 256));
                AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
                              ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
                              ctxt, eq->id);
        } else {
                AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
                              coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
                              ctxt, no_delay);
                AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
                              __ilog2_u32(cq->len / 256));
                AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
        }

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

static u32 be_encoded_q_len(int q_len)
{
        u32 len_encoded = fls(q_len); /* log2(len) + 1 */

        if (len_encoded == 16)
                len_encoded = 0;
        return len_encoded;
}
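
/* Example: q_len = 256 encodes as fls(256) = 9, i.e. log2(len) + 1;
 * the special case above maps a 32768-entry ring (fls = 16) to 0.
 */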

int be_cmd_mccq_ext_create(struct be_adapter *adapter,
                           struct be_queue_info *mccq,
                           struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_ext_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req),
                               wrb, NULL);

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
                req->cq_id = cpu_to_le16(cq->id);

                AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
                              be_encoded_q_len(mccq->len));
                AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
                              ctxt, cq->id);
                AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
                              ctxt, 1);
        } else {
                AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
                              be_encoded_q_len(mccq->len));
                AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
        }

        /* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
        req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
        req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
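
        /* 0x00000022 == (1 << ASYNC_EVENT_CODE_LINK_STATE) |
         * (1 << ASYNC_EVENT_CODE_GRP_5), assuming the event-code values
         * from be_cmds.h; the QNQ event bit is OR-ed in separately above.
         */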

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }
        mutex_unlock(&adapter->mbox_lock);

        return status;
}

int be_cmd_mccq_org_create(struct be_adapter *adapter,
                           struct be_queue_info *mccq,
                           struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
                               NULL);

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
                      be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
                       struct be_queue_info *mccq,
                       struct be_queue_info *cq)
{
        int status;

        status = be_cmd_mccq_ext_create(adapter, mccq, cq);
        if (status && !lancer_chip(adapter)) {
                dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
                         "or newer to avoid conflicting priorities between NIC "
                         "and FCoE traffic");
                status = be_cmd_mccq_org_create(adapter, mccq, cq);
        }
        return status;
}

int be_cmd_txq_create(struct be_adapter *adapter,
                      struct be_queue_info *txq,
                      struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_tx_create *req;
        struct be_dma_mem *q_mem = &txq->dma_mem;
        void *ctxt;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                               OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);

        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
                AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
                              adapter->if_handle);
        }

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;

        AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
                      be_encoded_q_len(txq->len));
        AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
                txq->id = le16_to_cpu(resp->cid);
                txq->created = true;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
                      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
                      u32 if_id, u32 rss, u8 *rss_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_rx_create *req;
        struct be_dma_mem *q_mem = &rxq->dma_mem;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                               OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

        req->cq_id = cpu_to_le16(cq_id);
        req->frag_size = fls(frag_size) - 1;
        req->num_pages = 2;
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
        req->interface_id = cpu_to_le32(if_id);
        req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
        req->rss_queue = cpu_to_le32(rss);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
                rxq->id = le16_to_cpu(resp->id);
                rxq->created = true;
                *rss_id = resp->rss_id;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
                     int queue_type)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
        u8 subsys = 0, opcode = 0;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_TXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_TX_DESTROY;
                break;
        case QTYPE_RXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_RX_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        default:
                BUG();
        }

        be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
                               NULL);
        req->id = cpu_to_le16(q->id);

        status = be_mbox_notify_wait(adapter);
        q->created = false;

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                               OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
        req->id = cpu_to_le16(q->id);

        status = be_mcc_notify_wait(adapter);
        q->created = false;

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses MCCQ
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
                     u32 *if_handle, u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_create *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_NTWK_INTERFACE_CREATE,
                               sizeof(*req), wrb, NULL);
        req->hdr.domain = domain;
        req->capability_flags = cpu_to_le32(cap_flags);
        req->enable_flags = cpu_to_le32(en_flags);

        req->pmac_invalid = true;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
                *if_handle = le32_to_cpu(resp->interface_id);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_destroy *req;
        int status;

        if (interface_id == -1)
                return 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
                               sizeof(*req), wrb, NULL);
        req->hdr.domain = domain;
        req->interface_id = cpu_to_le32(interface_id);

        status = be_mcc_notify_wait(adapter);
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_hdr *hdr;
        int status = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        hdr = nonemb_cmd->va;

        be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
                               OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size,
                               wrb, nonemb_cmd);

        /* version 1 of the cmd is supported on all chips except BE2 */
        if (!BE2_chip(adapter))
                hdr->version = 1;

        be_mcc_notify(adapter);
        adapter->stats_cmd_sent = true;

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
                               struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct lancer_cmd_req_pport_stats *req;
        int status = 0;

        if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
                            CMD_SUBSYSTEM_ETH))
                return -EPERM;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = nonemb_cmd->va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                               OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
                               wrb, nonemb_cmd);

        req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
        req->cmd_params.params.reset_stats = 0;

        be_mcc_notify(adapter);
        adapter->stats_cmd_sent = true;

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

static int be_mac_to_link_speed(int mac_speed)
{
        switch (mac_speed) {
        case PHY_LINK_SPEED_ZERO:
                return 0;
        case PHY_LINK_SPEED_10MBPS:
                return 10;
        case PHY_LINK_SPEED_100MBPS:
                return 100;
        case PHY_LINK_SPEED_1GBPS:
                return 1000;
        case PHY_LINK_SPEED_10GBPS:
                return 10000;
        }
        return 0;
}

/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
                             u8 *link_status, u32 dom)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_link_status *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        if (link_status)
                *link_status = LINK_DOWN;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
                               sizeof(*req), wrb, NULL);

        /* version 1 of the cmd is supported on all chips except BE2 */
        if (!BE2_chip(adapter))
                req->hdr.version = 1;

        req->hdr.domain = dom;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
                if (link_speed) {
                        *link_speed = resp->link_speed ?
                                      le16_to_cpu(resp->link_speed) * 10 :
                                      be_mac_to_link_speed(resp->mac_speed);

                        if (!resp->logical_link_status)
                                *link_speed = 0;
                }
                if (link_status)
                        *link_status = resp->logical_link_status;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
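
/* Units note: resp->link_speed (cmd version 1) is in 10 Mbps units,
 * hence the '* 10'; when it reads as zero, the PHY_LINK_SPEED_* enum in
 * resp->mac_speed is decoded via be_mac_to_link_speed() instead.
 */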

/* Uses synchronous mcc */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_cntl_addnl_attribs *req;
        int status = 0; /* initialized: the happy path returns it right after
                         * be_mcc_notify() without a synchronous wait */

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
                               sizeof(*req), wrb, NULL);

        be_mcc_notify(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
  1275. /* Uses synchronous mcc */
  1276. int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
  1277. {
  1278. struct be_mcc_wrb *wrb;
  1279. struct be_cmd_req_get_fat *req;
  1280. int status;
  1281. spin_lock_bh(&adapter->mcc_lock);
  1282. wrb = wrb_from_mccq(adapter);
  1283. if (!wrb) {
  1284. status = -EBUSY;
  1285. goto err;
  1286. }
  1287. req = embedded_payload(wrb);
  1288. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1289. OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
  1290. req->fat_operation = cpu_to_le32(QUERY_FAT);
  1291. status = be_mcc_notify_wait(adapter);
  1292. if (!status) {
  1293. struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
  1294. if (log_size && resp->log_size)
  1295. *log_size = le32_to_cpu(resp->log_size) -
  1296. sizeof(u32);
  1297. }
  1298. err:
  1299. spin_unlock_bh(&adapter->mcc_lock);
  1300. return status;
  1301. }
  1302. void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
  1303. {
  1304. struct be_dma_mem get_fat_cmd;
  1305. struct be_mcc_wrb *wrb;
  1306. struct be_cmd_req_get_fat *req;
  1307. u32 offset = 0, total_size, buf_size,
  1308. log_offset = sizeof(u32), payload_len;
  1309. int status;
  1310. if (buf_len == 0)
  1311. return;
  1312. total_size = buf_len;
  1313. get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
  1314. get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
  1315. get_fat_cmd.size,
  1316. &get_fat_cmd.dma);
  1317. if (!get_fat_cmd.va) {
  1318. status = -ENOMEM;
  1319. dev_err(&adapter->pdev->dev,
  1320. "Memory allocation failure while retrieving FAT data\n");
  1321. return;
  1322. }
  1323. spin_lock_bh(&adapter->mcc_lock);
  1324. while (total_size) {
  1325. buf_size = min(total_size, (u32)60*1024);
  1326. total_size -= buf_size;
  1327. wrb = wrb_from_mccq(adapter);
  1328. if (!wrb) {
  1329. status = -EBUSY;
  1330. goto err;
  1331. }
  1332. req = get_fat_cmd.va;
  1333. payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
  1334. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1335. OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
  1336. &get_fat_cmd);
  1337. req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
  1338. req->read_log_offset = cpu_to_le32(log_offset);
  1339. req->read_log_length = cpu_to_le32(buf_size);
  1340. req->data_buffer_size = cpu_to_le32(buf_size);
  1341. status = be_mcc_notify_wait(adapter);
  1342. if (!status) {
  1343. struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
  1344. memcpy(buf + offset,
  1345. resp->data_buffer,
  1346. le32_to_cpu(resp->read_log_length));
  1347. } else {
  1348. dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
  1349. goto err;
  1350. }
  1351. offset += buf_size;
  1352. log_offset += buf_size;
  1353. }
  1354. err:
  1355. pci_free_consistent(adapter->pdev, get_fat_cmd.size,
  1356. get_fat_cmd.va,
  1357. get_fat_cmd.dma);
  1358. spin_unlock_bh(&adapter->mcc_lock);
  1359. }
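
/*
 * Usage sketch (illustrative only): the FAT log is normally read by first
 * sizing the buffer with be_cmd_get_reg_len() and then fetching it in one
 * call; be_cmd_get_regs() internally splits the transfer into 60KB chunks.
 * Everything here except the two driver functions is hypothetical.
 *
 *	u32 log_size = 0;
 *	void *log_buf;
 *
 *	if (!be_cmd_get_reg_len(adapter, &log_size) && log_size) {
 *		log_buf = kzalloc(log_size, GFP_KERNEL);
 *		if (log_buf) {
 *			be_cmd_get_regs(adapter, log_size, log_buf);
 *			// ... consume the buffer, then kfree(log_buf);
 *		}
 *	}
 */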

/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
		      char *fw_on_flash)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);

		strcpy(fw_ver, resp->firmware_version_string);
		if (fw_on_flash)
			strcpy(fw_on_flash, resp->fw_on_flash_version_string);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Set the EQ delay interval of an EQ to the specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
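
/*
 * Usage sketch (illustrative only): adaptive interrupt moderation would
 * re-arm an EQ with a new delay multiplier as the load changes. "eqo" is a
 * hypothetical per-EQ object holding the queue id, and "new_eqd" the
 * multiplier the firmware turns into an interrupt delay.
 *
 *	be_cmd_modify_eqd(adapter, eqo->q.id, new_eqd);
 */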

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
		       u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
				wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	if (flags & IFF_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS);
		if (value == ON)
			req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
						BE_IF_FLAGS_VLAN_PROMISCUOUS);
	} else if (flags & IFF_ALLMULTI) {
		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else {
		struct netdev_hw_addr *ha;
		int i = 0;

		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MULTICAST);

		/* Reset mcast promisc mode if already set by setting the mask
		 * and not setting the flags field
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    adapter->if_cap_flags);

		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
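
/*
 * Usage sketch (illustrative only): the rx-mode path drives this command
 * from the netdev flags. ON is the enable value tested above; OFF is
 * assumed to be the complementary disable value.
 *
 *	if (adapter->netdev->flags & IFF_PROMISC)
 *		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
 *	else
 *		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
 */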

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
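
/*
 * Usage sketch (illustrative only): pause settings as a get/modify/set
 * round trip, the pattern an ethtool pauseparam handler would follow; here
 * RX pause is forced on while the TX setting is preserved. Error handling
 * omitted.
 *
 *	u32 tx_fc = 0, rx_fc = 0;
 *
 *	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
 *	be_cmd_set_flow_control(adapter, tx_fc, 1);
 */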

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
			u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);

		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter in non-recoverable error\n");
		}
		return status;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;
	u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
			0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
			0x3ea83c02, 0x4a110304};
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				      RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);

	if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
		req->hdr.version = 1;
		req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
					       RSS_ENABLE_UDP_IPV6);
	}

	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, myhash, sizeof(myhash));
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
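
/*
 * Usage sketch (illustrative only): building an indirection table that
 * spreads entries round-robin across the RSS queues before programming it.
 * The 128-entry, power-of-two table size is an assumption of this sketch
 * (be_cmd_rss_config() encodes table_size as log2 via fls()).
 *
 *	u8 rsstable[128];
 *	int i;
 *
 *	for (i = 0; i < sizeof(rsstable); i++)
 *		rsstable[i] = i % adapter->max_rss_queues;
 *
 *	status = be_cmd_rss_config(adapter, rsstable, sizeof(rsstable));
 */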

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			    u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			    u32 data_size, u32 data_offset,
			    const char *obj_name, u32 *data_written,
			    u8 *change_status, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_write_object *req;
	struct lancer_cmd_resp_write_object *resp;
	void *ctxt = NULL;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_WRITE_OBJECT,
				sizeof(struct lancer_cmd_req_write_object), wrb,
				NULL);

	ctxt = &req->context;
	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			write_length, ctxt, data_size);

	if (data_size == 0)
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
				eof, ctxt, 1);
	else
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
				eof, ctxt, 0);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));
	req->write_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma +
				sizeof(struct lancer_cmd_req_write_object))
				& 0xFFFFFFFF);
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
				sizeof(struct lancer_cmd_req_write_object)));

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
					 msecs_to_jiffies(30000)))
		status = -1;
	else
		status = adapter->flash_status;

	resp = embedded_payload(wrb);
	if (!status) {
		*data_written = le32_to_cpu(resp->actual_write_len);
		*change_status = resp->change_status;
	} else {
		*addn_status = resp->additional_status;
	}

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
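
/*
 * Usage sketch (illustrative only): firmware images are streamed to an
 * object in chunks, with a final zero-length write to signal EOF (the
 * function sets the context's eof bit when data_size == 0). The chunk
 * size, object name and "image_size" below are assumptions of this
 * sketch; "cmd" is a DMA buffer at least one chunk large, with each image
 * chunk copied in after the request header before the call.
 *
 *	u32 offset = 0, written = 0;
 *	u8 change_status, add_status;
 *
 *	while (offset < image_size) {
 *		u32 chunk = min(image_size - offset, (u32)(32 * 1024));
 *
 *		// copy "chunk" bytes of the image into the cmd buffer here
 *		status = lancer_cmd_write_object(adapter, cmd, chunk, offset,
 *						 "/prg", &written,
 *						 &change_status, &add_status);
 *		if (status)
 *			break;
 *		offset += written;
 *	}
 *	if (!status)	// zero-length write marks end-of-file
 *		status = lancer_cmd_write_object(adapter, cmd, 0, offset,
 *						 "/prg", &written,
 *						 &change_status, &add_status);
 */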

int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
		u32 data_size, u32 data_offset, const char *obj_name,
		u32 *data_read, u32 *eof, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_read_object *req;
	struct lancer_cmd_resp_read_object *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_READ_OBJECT,
			sizeof(struct lancer_cmd_req_read_object), wrb,
			NULL);

	req->desired_read_len = cpu_to_le32(data_size);
	req->read_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));

	status = be_mcc_notify_wait(adapter);

	resp = embedded_payload(wrb);
	if (!status) {
		*data_read = le32_to_cpu(resp->actual_read_len);
		*eof = le32_to_cpu(resp->eof);
	} else {
		*addn_status = resp->additional_status;
	}

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			  u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
					 msecs_to_jiffies(40000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_read_flash_crc *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
			       wrb, NULL);

	req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->crc, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
			    struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
		nonemb_cmd);

	memcpy(req->magic_mac, mac, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
			NULL);

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);

	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);

		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
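
/*
 * Usage sketch (illustrative only): a self-test enables a loopback mode,
 * fires a small burst of patterned frames and reads back the firmware's
 * verdict. The loopback-type value (1) is an assumption of this sketch;
 * the real self-test code chooses among MAC, PHY and external loopback.
 *
 *	status = be_cmd_set_loopback(adapter, adapter->hba_port_num, 1, 1);
 *	if (!status)
 *		status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
 *					      1, 1500, 2,
 *					      0xA5A5A5A5A5A5A5A5ULL);
 *	be_cmd_set_loopback(adapter, adapter->hba_port_num, 1, 0);
 */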

int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
			u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	/* fill the send buffer with the 64-bit pattern, repeated byte by byte */
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;

		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_seeprom_data(struct be_adapter *adapter,
			    struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
			nonemb_cmd);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_phy_info(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_phy_info *req;
	struct be_dma_mem cmd;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
					&cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		status = -ENOMEM;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
			       wrb, &cmd);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_phy_info *resp_phy_info =
				cmd.va + sizeof(struct be_cmd_req_hdr);

		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
		adapter->phy.interface_type =
			le16_to_cpu(resp_phy_info->interface_type);
		adapter->phy.auto_speeds_supported =
			le16_to_cpu(resp_phy_info->auto_speeds_supported);
		adapter->phy.fixed_speeds_supported =
			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
		adapter->phy.misc_params =
			le32_to_cpu(resp_phy_info->misc_params);
	}
	pci_free_consistent(adapter->pdev, cmd.size,
			    cmd.va, cmd.dma);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
	req->max_bps_nic = cpu_to_le32(bps);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cntl_attribs *req;
	struct be_cmd_resp_cntl_attribs *resp;
	int status;
	int payload_len = max(sizeof(*req), sizeof(*resp));
	struct mgmt_controller_attrib *attribs;
	struct be_dma_mem attribs_cmd;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
					      &attribs_cmd.dma);
	if (!attribs_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = attribs_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
			&attribs_cmd);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
		adapter->hba_port_num = attribs->hba_attribs.phy_port;
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	if (attribs_cmd.va)
		pci_free_consistent(adapter->pdev, attribs_cmd.size,
				    attribs_cmd.va, attribs_cmd.dma);
	return status;
}

/* Uses mbox */
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_func_cap *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);

	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
				CAPABILITY_BE3_NATIVE_ERX_API);
	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);

		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
					CAPABILITY_BE3_NATIVE_ERX_API;
		if (!adapter->be3_native)
			dev_warn(&adapter->pdev->dev,
				 "adapter not in advanced mode\n");
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Get privilege(s) for a function */
int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
			     u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fn_privileges *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fn_privileges *resp =
						embedded_payload(wrb);

		*privilege = le32_to_cpu(resp->privilege_mask);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
			     bool *pmac_id_active, u32 *pmac_id, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_mac_list *req;
	int status;
	int mac_count;
	struct be_dma_mem get_mac_list_cmd;
	int i;

	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
						   get_mac_list_cmd.size,
						   &get_mac_list_cmd.dma);
	if (!get_mac_list_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure during GET_MAC_LIST\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto out;
	}

	req = get_mac_list_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
				wrb, &get_mac_list_cmd);

	req->hdr.domain = domain;
	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
	req->perm_override = 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_mac_list *resp =
						get_mac_list_cmd.va;
		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
		/* The returned list can contain one or more active mac_ids
		 * and/or one or more true or pseudo permanent MAC addresses.
		 * If an active mac_id is present, return the first one found.
		 */
		for (i = 0; i < mac_count; i++) {
			struct get_list_macaddr *mac_entry;
			u16 mac_addr_size;
			u32 mac_id;

			mac_entry = &resp->macaddr_list[i];
			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
			/* mac_id is a 32-bit value and mac_addr size
			 * is 6 bytes
			 */
			if (mac_addr_size == sizeof(u32)) {
				*pmac_id_active = true;
				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
				*pmac_id = le32_to_cpu(mac_id);
				goto out;
			}
		}
		/* If no active mac_id was found, return the first MAC addr */
		*pmac_id_active = false;
		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
		       ETH_ALEN);
	}

out:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
			    get_mac_list_cmd.va, get_mac_list_cmd.dma);
	return status;
}
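
/*
 * Usage sketch (illustrative only): looking up a VF's currently active MAC
 * id, falling back to the first permanent address. "vf" is hypothetical;
 * domains are 1-based, hence vf + 1.
 *
 *	bool active = false;
 *	u32 pmac_id = 0;
 *	u8 mac[ETH_ALEN];
 *
 *	status = be_cmd_get_mac_from_list(adapter, mac, &active,
 *					  &pmac_id, vf + 1);
 */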

/* Uses synchronous MCCQ */
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
			u8 mac_count, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_mac_list *req;
	int status;
	struct be_dma_mem cmd;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
			&cmd.dma, GFP_KERNEL);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
				wrb, &cmd);

	req->hdr.domain = domain;
	req->mac_count = mac_count;
	if (mac_count)
		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);

	status = be_mcc_notify_wait(adapter);

err:
	dma_free_coherent(&adapter->pdev->dev, cmd.size,
			  cmd.va, cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
			  u32 domain, u16 intf_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_hsw_config *req;
	void *ctxt;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
	if (pvid) {
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
	}

	be_dws_cpu_to_le(req->context, sizeof(req->context));
	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
			  u32 domain, u16 intf_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_hsw_config *req;
	void *ctxt;
	int status;
	u16 vid;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
		      intf_id);
	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_hsw_config *resp =
						embedded_payload(wrb);
		be_dws_le_to_cpu(&resp->context,
				 sizeof(resp->context));
		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
				    pvid, &resp->context);
		*pvid = le16_to_cpu(vid);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
	int status;
	int payload_len = sizeof(*req);
	struct be_dma_mem cmd;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
				      &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			       payload_len, wrb, &cmd);

	req->hdr.version = 1;
	req->query_options = BE_GET_WOL_CAP;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;

		/* the command could succeed misleadingly on old f/w
		 * which is not aware of the V1 version. fake an error. */
		if (resp->hdr.response_length < payload_len) {
			status = -1;
			goto err;
		}
		adapter->wol_cap = resp->wol_settings;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_ext_fat_caps *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);
	req->parameter_type = cpu_to_le32(1);

	status = be_mbox_notify_wait(adapter);
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd,
				   struct be_fat_conf_params *configs)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ext_fat_caps *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_port_name *req;
	int status;

	if (!lancer_chip(adapter)) {
		*port_name = adapter->hba_port_num + '0';
		return 0;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
			       NULL);
	req->hdr.version = 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);

		*port_name = resp->port_name[adapter->hba_port_num];
	} else {
		*port_name = adapter->hba_port_num + '0';
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
						    u32 max_buf_size)
{
	struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
	int i;

	for (i = 0; i < desc_count; i++) {
		desc->desc_len = RESOURCE_DESC_SIZE;
		if (((void *)desc + desc->desc_len) >
		    (void *)(buf + max_buf_size)) {
			desc = NULL;
			break;
		}

		if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
			break;

		desc = (void *)desc + desc->desc_len;
	}

	if (!desc || i == MAX_RESOURCE_DESC)
		return NULL;

	return desc;
}
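
/*
 * Layout note (added for clarity): func_param in the GET_FUNC_CONFIG /
 * GET_PROFILE_CONFIG responses is a packed array of variable-length
 * resource descriptors. Each entry begins with desc_type and desc_len, and
 * be_get_nic_desc() above walks it entry by entry until it finds the NIC
 * descriptor (NIC_RESOURCE_DESC_TYPE_ID) or would step past the buffer.
 */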

/* Uses Mbox */
int be_cmd_get_func_config(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_func_config *req;
	int status;
	struct be_dma_mem cmd;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
				      &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FUNC_CONFIG,
			       cmd.size, wrb, &cmd);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_func_config *resp = cmd.va;
		u32 desc_count = le32_to_cpu(resp->desc_count);
		struct be_nic_resource_desc *desc;

		desc = be_get_nic_desc(resp->func_param, desc_count,
				       sizeof(resp->func_param));
		if (!desc) {
			status = -EINVAL;
			goto err;
		}

		adapter->pf_number = desc->pf_num;
		adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
		adapter->max_vlans = le16_to_cpu(desc->vlan_count);
		adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
		adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
		adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
		adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
		adapter->max_event_queues = le16_to_cpu(desc->eq_count);
		adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	if (cmd.va)
		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
			      u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_profile_config *req;
	int status;
	struct be_dma_mem cmd;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
				      &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PROFILE_CONFIG,
			       cmd.size, wrb, &cmd);

	req->type = ACTIVE_PROFILE_TYPE;
	req->hdr.domain = domain;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_profile_config *resp = cmd.va;
		u32 desc_count = le32_to_cpu(resp->desc_count);
		struct be_nic_resource_desc *desc;

		desc = be_get_nic_desc(resp->func_param, desc_count,
				       sizeof(resp->func_param));
		if (!desc) {
			status = -EINVAL;
			goto err;
		}
		*cap_flags = le32_to_cpu(desc->cap_flags);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, cmd.size,
			    cmd.va, cmd.dma);
	return status;
}

/* Uses sync mcc */
int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
			      u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_profile_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;
	req->desc_count = cpu_to_le32(1);

	req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
	req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
	req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
	req->nic_desc.pf_num = adapter->pf_number;
	req->nic_desc.vf_num = domain;

	/* Mark fields invalid */
	req->nic_desc.unicast_mac_count = 0xFFFF;
	req->nic_desc.mcc_count = 0xFFFF;
	req->nic_desc.vlan_count = 0xFFFF;
	req->nic_desc.mcast_mac_count = 0xFFFF;
	req->nic_desc.txq_count = 0xFFFF;
	req->nic_desc.rq_count = 0xFFFF;
	req->nic_desc.rssq_count = 0xFFFF;
	req->nic_desc.lro_count = 0xFFFF;
	req->nic_desc.cq_count = 0xFFFF;
	req->nic_desc.toe_conn_count = 0xFFFF;
	req->nic_desc.eq_count = 0xFFFF;
	req->nic_desc.link_param = 0xFF;
	req->nic_desc.bw_min = 0xFFFFFFFF;
	req->nic_desc.acpi_params = 0xFF;
	req->nic_desc.wol_param = 0x0F;

	/* Change BW */
	req->nic_desc.bw_min = cpu_to_le32(bps);
	req->nic_desc.bw_max = cpu_to_le32(bps);
	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
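
/*
 * Usage sketch (illustrative only): capping a VF's bandwidth, e.g. from an
 * ndo_set_vf_tx_rate-style handler. "max_rate" and "vf" are hypothetical;
 * the rate is applied to both bw_min and bw_max above.
 *
 *	status = be_cmd_set_profile_config(adapter, max_rate, vf + 1);
 */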

int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
		     int vf_num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_iface_list *req;
	struct be_cmd_resp_get_iface_list *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
			       wrb, NULL);
	req->hdr.domain = vf_num + 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		resp = (struct be_cmd_resp_get_iface_list *)req;
		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_enable_disable_vf *req;
	int status;

	if (!lancer_chip(adapter))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;
	req->enable = 1;
	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			       hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);
	if (cmd_status)
		*cmd_status = (status & 0xffff);
	if (ext_status)
		*ext_status = 0;
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);