/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
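
/* Ring the MCC queue doorbell to tell the adapter that newly posted WRBs are
 * ready for processing. The wmb() ensures the WRB contents written above are
 * visible to the device before the doorbell write is issued.
 */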
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_error(adapter))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}
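
/* wrb->tag0/tag1 hold the low/high 32 bits of the virtual address of the
 * request header, stored there by be_wrb_cmd_hdr_prepare(). The shift below
 * is split into two 16-bit steps because a single 32-bit shift of an
 * "unsigned long" would be undefined behaviour on 32-bit platforms.
 */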
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
	     (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
	    (subsystem == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
		     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
		    (subsystem == CMD_SUBSYSTEM_ETH)) {
			be_parse_stats(adapter);
			adapter->stats_cmd_sent = false;
		}
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
		    subsystem == CMD_SUBSYSTEM_COMMON) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
				(void *)resp_hdr;
			adapter->drv_stats.be_on_die_temperature =
				resp->on_die_temperature;
		}
	} else {
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
			adapter->be_get_temp_freq = 0;

		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
		    compl_status == MCC_STATUS_ILLEGAL_REQUEST)
			goto done;

		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "opcode %d-%d is not permitted\n",
				 opcode, subsystem);
		} else {
			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, compl_status, extd_status);
		}
	}
done:
	return compl_status;
}
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_async_event_link_state *evt)
{
	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* Ignore physical link event */
	if (lancer_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter, evt->port_link_status);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
		struct be_async_event_grp5_cos_priority *evt)
{
	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
		struct be_async_event_grp5_qos_link_speed *evt)
{
	if (evt->physical_port == adapter->port_num) {
		/* qos_link_speed is in units of 10 Mbps */
		adapter->phy.link_speed = evt->qos_link_speed * 10;
	}
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
		struct be_async_event_grp5_pvid_state *evt)
{
	if (evt->enabled)
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
	else
		adapter->pvid = 0;
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
		u32 trailer, struct be_mcc_compl *evt)
{
	u8 event_type = 0;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
			(struct be_async_event_grp5_cos_priority *)evt);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
			(struct be_async_event_grp5_qos_link_speed *)evt);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter,
			(struct be_async_event_grp5_pvid_state *)evt);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
		break;
	}
}

static inline bool is_link_state_evt(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_GRP_5);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
					compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}
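
/* The poll loop below budgets mcc_timeout iterations of udelay(100), i.e.
 * 120000 * 100us = 12s. BHs are disabled around be_process_mcc() because
 * that path takes mcc_cq_lock with a plain spin_lock().
 */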
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout	120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = resp->status;
out:
	return status;
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem;

	if (lancer_chip(adapter))
		sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
	else
		sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}

int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(1000);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		return status;
	}

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(dev, "POST error; stage=0x%x\n", stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			if (msleep_interruptible(2000)) {
				dev_err(dev, "Waiting for POST aborted\n");
				return -EINTR;
			}
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}
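
/* The request header's virtual address is stashed in wrb->tag0/tag1 below so
 * that be_decode_resp_hdr() can locate the response when the completion
 * arrives (the firmware writes the response over the request buffer).
 */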
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len,
				struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
	struct be_sge *sge;
	unsigned long addr = (unsigned long)req_hdr;
	u64 req_addr = addr;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;

	wrb->tag0 = req_addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(req_addr);

	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE	651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value.*/
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}
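
/* Worked example: usec_delay = 96 gives interrupt_rate = 1000000 / 96 = 10416,
 * so multiplier = round((651042 - 10416) / 10416) = round(61.5) = 62,
 * computed with the x10 fixed-point rounding above and clamped to 1023.
 */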
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
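
/* The be_cmd_* routines below all follow roughly the same flow (mbox-based
 * commands take mbox_lock and use be_mbox_notify_wait() instead):
 *
 *	spin_lock_bh(&adapter->mcc_lock);
 *	wrb = wrb_from_mccq(adapter);		// reserve a WRB; may fail
 *	req = embedded_payload(wrb);		// or a separate DMA buffer
 *	be_wrb_cmd_hdr_prepare(...);		// fill the common header
 *	... set command-specific fields ...
 *	status = be_mcc_notify_wait(adapter);	// or be_mcc_notify() if async
 *	spin_unlock_bh(&adapter->mcc_lock);
 */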
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
								no_delay);
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
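
/* For power-of-2 lengths fls() yields log2(len) + 1; e.g. q_len = 256 encodes
 * to 9, while the value 16 (i.e. q_len = 32768) wraps to the encoding 0.
 */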
int be_cmd_mccq_ext_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
								ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
								ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

int be_cmd_mccq_org_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && !lancer_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			"or newer to avoid conflicting priorities between NIC "
			"and FCoE traffic");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}

int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
					adapter->if_handle);
	}

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
				NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	if (!status)
		q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses MCCQ
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u32 *if_handle, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);

	req->pmac_invalid = true;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
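/* The GET_STATISTICS completion is consumed asynchronously in
 * be_mcc_compl_process(), which calls be_parse_stats() and clears
 * adapter->stats_cmd_sent.
 */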
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);

	if (adapter->generation == BE_GEN3)
		hdr->version = 1;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
			nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
			     u16 *link_speed, u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	if (link_status)
		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);

	if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			if (link_speed)
				*link_speed = le16_to_cpu(resp->link_speed);
			if (mac_speed)
				*mac_speed = resp->mac_speed;
		}
		if (link_status)
			*link_status = resp->logical_link_status;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Uses async mcc; the completion is consumed in be_mcc_compl_process() */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_cntl_addnl_attribs *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
		wrb, NULL);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Uses synchronous mcc */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
	req->fat_operation = cpu_to_le32(QUERY_FAT);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
		if (log_size && resp->log_size)
			*log_size = le32_to_cpu(resp->log_size) -
					sizeof(u32);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
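
/* The FAT log is retrieved in chunks of at most 60KB per command, matching
 * the DMA buffer size allocated below; read_log_offset advances through the
 * log across iterations.
 */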
void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	u32 offset = 0, total_size, buf_size,
		log_offset = sizeof(u32), payload_len;
	int status;

	if (buf_len == 0)
		return;

	total_size = buf_len;

	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
			get_fat_cmd.size,
			&get_fat_cmd.dma);
	if (!get_fat_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while retrieving FAT data\n");
		return;
	}

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60*1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = get_fat_cmd.va;

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
				&get_fat_cmd);

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
		if (!status) {
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
			memcpy(buf + offset,
				resp->data_buffer,
				le32_to_cpu(resp->read_log_length));
		} else {
			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
			goto err;
		}
		offset += buf_size;
		log_offset += buf_size;
	}
err:
	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
			get_fat_cmd.va,
			get_fat_cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
}
/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
			char *fw_on_flash)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strcpy(fw_ver, resp->firmware_version_string);
		if (fw_on_flash)
			strcpy(fw_on_flash, resp->fw_on_flash_version_string);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
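
/* Uses synchronous mcc */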
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
				wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	if (flags & IFF_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS);
		if (value == ON)
			req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
						BE_IF_FLAGS_VLAN_PROMISCUOUS);
	} else if (flags & IFF_ALLMULTI) {
		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else {
		struct netdev_hw_addr *ha;
		int i = 0;

		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MULTICAST);

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		if (!lancer_chip(adapter) || be_physfn(adapter))
			req->if_flags_mask |=
				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);

		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
		u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter in non-recoverable error\n");
		}
		return status;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
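
/* Uses mbox */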
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;
	u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
			0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
			0x3ea83c02, 0x4a110304};
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				      RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);

	if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
		req->hdr.version = 1;
		req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
					       RSS_ENABLE_UDP_IPV6);
	}

	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, myhash, sizeof(myhash));
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
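
/* Uses MCCQ; the command is posted async and completion is signalled
 * via adapter->flash_compl (30 sec timeout)
 */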
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 data_size, u32 data_offset,
			const char *obj_name, u32 *data_written,
			u8 *change_status, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_write_object *req;
	struct lancer_cmd_resp_write_object *resp;
	void *ctxt = NULL;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_WRITE_OBJECT,
				sizeof(struct lancer_cmd_req_write_object), wrb,
				NULL);

	ctxt = &req->context;
	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			write_length, ctxt, data_size);

	if (data_size == 0)
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
				eof, ctxt, 1);
	else
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
				eof, ctxt, 0);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));
	req->write_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma +
				sizeof(struct lancer_cmd_req_write_object))
				& 0xFFFFFFFF);
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
				sizeof(struct lancer_cmd_req_write_object)));

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(30000)))
		status = -1;
	else
		status = adapter->flash_status;

	resp = embedded_payload(wrb);
	if (!status) {
		*data_written = le32_to_cpu(resp->actual_write_len);
		*change_status = resp->change_status;
	} else {
		*addn_status = resp->additional_status;
	}

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
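
/* Uses sync mcc */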
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
		u32 data_size, u32 data_offset, const char *obj_name,
		u32 *data_read, u32 *eof, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_read_object *req;
	struct lancer_cmd_resp_read_object *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_READ_OBJECT,
			sizeof(struct lancer_cmd_req_read_object), wrb,
			NULL);

	req->desired_read_len = cpu_to_le32(data_size);
	req->read_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));

	status = be_mcc_notify_wait(adapter);

	resp = embedded_payload(wrb);
	if (!status) {
		*data_read = le32_to_cpu(resp->actual_read_len);
		*eof = le32_to_cpu(resp->eof);
	} else {
		*addn_status = resp->additional_status;
	}

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
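
/* Uses MCCQ; the command is posted async and completion is signalled
 * via adapter->flash_compl (40 sec timeout)
 */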
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(40000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
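
/* Uses sync mcc */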
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);

	req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
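
/* Uses sync mcc */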
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
			struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
		nonemb_cmd);
	memcpy(req->magic_mac, mac, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
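
/* Uses sync mcc */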
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
			NULL);

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
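
/* Uses sync mcc */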
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
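
/* Uses sync mcc */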
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
			u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;
		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
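
/* Uses sync mcc */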
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
			struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
			nonemb_cmd);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
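
/* Uses sync mcc */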
int be_cmd_get_phy_info(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_phy_info *req;
	struct be_dma_mem cmd;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
					&cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		status = -ENOMEM;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
			       wrb, &cmd);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_phy_info *resp_phy_info =
				cmd.va + sizeof(struct be_cmd_req_hdr);
		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
		adapter->phy.interface_type =
			le16_to_cpu(resp_phy_info->interface_type);
		adapter->phy.auto_speeds_supported =
			le16_to_cpu(resp_phy_info->auto_speeds_supported);
		adapter->phy.fixed_speeds_supported =
			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
		adapter->phy.misc_params =
			le32_to_cpu(resp_phy_info->misc_params);
	}
	pci_free_consistent(adapter->pdev, cmd.size,
				cmd.va, cmd.dma);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
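
/* Uses sync mcc */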
int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
	req->max_bps_nic = cpu_to_le32(bps);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
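
/* Uses mbox */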
int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cntl_attribs *req;
	struct be_cmd_resp_cntl_attribs *resp;
	int status;
	int payload_len = max(sizeof(*req), sizeof(*resp));
	struct mgmt_controller_attrib *attribs;
	struct be_dma_mem attribs_cmd;

	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
						&attribs_cmd.dma);
	if (!attribs_cmd.va) {
		dev_err(&adapter->pdev->dev,
				"Memory allocation failure\n");
		return -ENOMEM;
	}

	/* Free the DMA memory on the lock-failure path too; returning
	 * here directly would leak attribs_cmd.
	 */
	if (mutex_lock_interruptible(&adapter->mbox_lock)) {
		status = -1;
		goto free_mem;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = attribs_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
			&attribs_cmd);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
		adapter->hba_port_num = attribs->hba_attribs.phy_port;
	}

err:
	mutex_unlock(&adapter->mbox_lock);
free_mem:
	pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
				attribs_cmd.dma);
	return status;
}

/* Uses mbox */
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_func_cap *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);

	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
				CAPABILITY_BE3_NATIVE_ERX_API);
	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
					CAPABILITY_BE3_NATIVE_ERX_API;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
			bool *pmac_id_active, u32 *pmac_id, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_mac_list *req;
	int status;
	int mac_count;
	struct be_dma_mem get_mac_list_cmd;
	int i;

	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
			get_mac_list_cmd.size,
			&get_mac_list_cmd.dma);

	if (!get_mac_list_cmd.va) {
		dev_err(&adapter->pdev->dev,
				"Memory allocation failure during GET_MAC_LIST\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto out;
	}

	req = get_mac_list_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
				wrb, &get_mac_list_cmd);

	req->hdr.domain = domain;
	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
	req->perm_override = 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_mac_list *resp =
						get_mac_list_cmd.va;
		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
		/* The returned list may contain one or more active mac_ids
		 * and/or one or more true or pseudo permanent mac addresses.
		 * If an active mac_id is present, return the first one found.
		 */
		for (i = 0; i < mac_count; i++) {
			struct get_list_macaddr *mac_entry;
			u16 mac_addr_size;
			u32 mac_id;

			mac_entry = &resp->macaddr_list[i];
			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
			/* mac_id is a 32 bit value and mac_addr size
			 * is 6 bytes
			 */
			if (mac_addr_size == sizeof(u32)) {
				*pmac_id_active = true;
				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
				*pmac_id = le32_to_cpu(mac_id);
				goto out;
			}
		}
		/* If no active mac_id was found, return the first mac addr */
		*pmac_id_active = false;
		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
								ETH_ALEN);
	}

out:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
			get_mac_list_cmd.va, get_mac_list_cmd.dma);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
			u8 mac_count, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_mac_list *req;
	int status;
	struct be_dma_mem cmd;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
			&cmd.dma, GFP_KERNEL);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
				wrb, &cmd);

	req->hdr.domain = domain;
	req->mac_count = mac_count;
	if (mac_count)
		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);

	status = be_mcc_notify_wait(adapter);

err:
	/* Drop the mcc lock before freeing; dma_free_coherent should not
	 * be called while holding the spinlock.
	 */
	spin_unlock_bh(&adapter->mcc_lock);
	dma_free_coherent(&adapter->pdev->dev, cmd.size,
				cmd.va, cmd.dma);
	return status;
}
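
/* Set Hyper switch config; uses sync mcc */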
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
			u32 domain, u16 intf_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_hsw_config *req;
	void *ctxt;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
	if (pvid) {
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
	}

	be_dws_cpu_to_le(req->context, sizeof(req->context));
	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
			u32 domain, u16 intf_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_hsw_config *req;
	void *ctxt;
	int status;
	u16 vid;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
		      intf_id);
	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_hsw_config *resp =
						embedded_payload(wrb);
		be_dws_le_to_cpu(&resp->context,
				sizeof(resp->context));
		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
				pvid, &resp->context);
		*pvid = le16_to_cpu(vid);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
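
/* Uses mbox */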
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
	int status;
	int payload_len = sizeof(*req);
	struct be_dma_mem cmd;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
					&cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev,
				"Memory allocation failure\n");
		return -ENOMEM;
	}

	/* Free the DMA memory on the lock-failure path too; returning
	 * here directly would leak cmd.
	 */
	if (mutex_lock_interruptible(&adapter->mbox_lock)) {
		status = -1;
		goto free_mem;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			       payload_len, wrb, &cmd);

	req->hdr.version = 1;
	req->query_options = BE_GET_WOL_CAP;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;

		/* The command can succeed misleadingly on old f/w
		 * that is not aware of the V1 version; fake an error.
		 */
		if (resp->hdr.response_length < payload_len) {
			status = -1;
			goto err;
		}
		adapter->wol_cap = resp->wol_settings;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
free_mem:
	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
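
/* Uses mbox */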
int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_ext_fat_caps *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);
	req->parameter_type = cpu_to_le32(1);

	status = be_mbox_notify_wait(adapter);
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}
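
/* Uses sync mcc */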
int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd,
				   struct be_fat_conf_params *configs)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ext_fat_caps *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
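
/* Uses sync mcc */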
int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_port_name *req;
	int status;

	if (!lancer_chip(adapter)) {
		*port_name = adapter->hba_port_num + '0';
		return 0;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
			       NULL);
	req->hdr.version = 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
		*port_name = resp->port_name[adapter->hba_port_num];
	} else {
		*port_name = adapter->hba_port_num + '0';
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
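
/* Issues a pass-through MCC command built by the (RoCE) caller;
 * uses sync mcc
 */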
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			       hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);
	if (cmd_status)
		*cmd_status = (status & 0xffff);
	if (ext_status)
		*ext_status = 0;
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);