/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include "be.h"
#include "be_cmds.h"

static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

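/* Note: the doorbell word above encodes the MCCQ ring id in its low bits and
 * the count of newly posted WRBs at DB_MCCQ_NUM_POSTED_SHIFT, so each
 * be_mcc_notify() call publishes exactly one WRB to the firmware.
 */
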
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
		struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
	    (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			struct be_cmd_resp_get_stats *resp =
						adapter->stats.cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
		}
	} else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
			"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
}

static inline bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			BUG_ON(!is_link_state_evt(compl->flags));

			/* Interpret compl as an async link evt */
			be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			*status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}
	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}

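/* Note for callers: be_process_mcc() only consumes CQ entries; it does not
 * rearm the MCC CQ. Callers (the polling loop in be_mcc_wait_compl() below,
 * and any deferred-work path that drains this queue) are expected to follow
 * up with be_cq_notify() using the returned entry count.
 */
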
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout 120000 /* 12s timeout */
	int i, num, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		num = be_process_mcc(adapter, &status);
		if (num)
			be_cq_notify(adapter, mcc_obj->cq.id,
				mcc_obj->rearm_cq, num);

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		ready = ioread32(db);
		if (ready == 0xffffffff) {
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			return -1;
		}

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1));
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

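/* Worked example of the two-step address split above (illustrative value):
 * for mbox_mem->dma = 0x0000001234567890 (16-byte aligned, since the low 4
 * bits are dropped), the first write posts MPU_MAILBOX_DB_HI_MASK |
 * ((0x00000012 >> 2) << 2) = HI | 0x10 (dma bits 34-63 at bits 2-31), and
 * the second posts ((u32)(dma >> 4)) << 2 = 0x23456789 << 2 = 0x8d159e24
 * (dma bits 4-33 at bits 2-31).
 */
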
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}

int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 40);

	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	wrb->tag0 = opcode;
	be_dws_cpu_to_le(wrb, 8);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value. */
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}

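/* Worked example (illustrative): usec_delay = 8 gives interrupt_rate =
 * 1000000 / 8 = 125000; multiplier = (651042 - 125000) * 10 / 125000 = 42
 * in integer arithmetic, then (42 + 5) / 10 = 4, i.e. an 8us delay maps to
 * a delaymult of 4.
 */
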
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;

	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

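/* Convention used by the command functions below: wrb_from_mbox() (under
 * mbox_lock) serves the one-slot bootstrap mailbox, used before the MCCQ
 * exists and for queue create/destroy commands; everything else allocates a
 * WRB from the MCC queue via wrb_from_mccq() under mcc_lock and may
 * legitimately see NULL when the ring is full.
 */
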
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4-byte EQE */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

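/* Typical bring-up order, sketched below (illustrative only and compiled
 * out; the eq field name and queue wiring are assumptions, not part of this
 * file): an EQ is created first, a CQ is bound to it, and the MCCQ is bound
 * to that CQ before any MCC-path command can run.
 */
#if 0
static int be_example_mcc_bringup(struct be_adapter *adapter)
{
	struct be_queue_info *eq = &adapter->mcc_obj.eq;	/* hypothetical field */
	struct be_queue_info *cq = &adapter->mcc_obj.cq;
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	int status;

	status = be_cmd_eq_create(adapter, eq, 0);	/* no interrupt delay */
	if (status)
		return status;
	status = be_cmd_cq_create(adapter, cq, eq, true, false, 0);
	if (status)
		return status;
	return be_cmd_mccq_create(adapter, mccq, cq);
}
#endif
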
/* Uses mbox */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

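/* Example: a 256-entry ring has fls(256) = 9 and is encoded as 9; the
 * largest ring, 32768 entries, has fls() = 16 and wraps to the special
 * encoding 0. Ring lengths are expected to be powers of 2 here.
 */
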
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

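/* Note: req->frag_size above carries log2 of the rx buffer fragment size
 * (fls(frag_size) - 1), e.g. 2048-byte fragments encode as 11, so frag_size
 * must be a power of 2. num_pages is fixed at 2 for the rx ring.
 */
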
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
		u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_DESTROY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_GET_STATISTICS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

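/* The stats response is consumed asynchronously: when the completion for
 * OPCODE_ETH_GET_STATISTICS arrives, be_mcc_compl_process() byte-swaps
 * adapter->stats.cmd.va in place and calls netdev_stats_update(), which is
 * why this function returns without waiting.
 */
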
/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter,
			bool *link_up, u8 *mac_speed, u16 *link_speed)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	*link_up = false;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			*link_up = true;
			*link_speed = le16_to_cpu(resp->link_speed);
			*mac_speed = resp->mac_speed;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FW_VERSION);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Set the EQ delay interval of an EQ to the specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MODIFY_EQ_DELAY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_VLAN_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	/* In FW versions X.102.149/X.101.487 and later,
	 * the port setting associated only with the
	 * issuing pci function will take effect
	 */
	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/*
 * Uses MCC for this command as it may be called in BH context
 * (mc == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
		struct net_device *netdev, struct be_dma_mem *mem)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	sge = nonembedded_sgl(wrb);
	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_NTWK_MULTICAST_SET);
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (netdev) {
		int i;
		struct netdev_hw_addr *ha;

		req->num_mac = cpu_to_le16(netdev_mc_count(netdev));

		/* advance the index per address so entries aren't all
		 * written to slot 0 */
		i = 0;
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*cap = le32_to_cpu(resp->function_cap);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_FUNCTION_RESET);

	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_ENABLE_DISABLE_BEACON);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_BEACON_STATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
				u8 *connector)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_port_type *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
			OPCODE_COMMON_READ_TRANSRECV_DATA);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));

	req->port = cpu_to_le32(port);
	req->page_num = cpu_to_le32(TR_PAGE_A0);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
		*connector = resp->data.connector;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);
	wrb->tag1 = CMD_SUBSYSTEM_COMMON;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(12000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

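/* The 12s wait above pairs with the special case in be_mcc_compl_process():
 * a completion tagged (OPCODE_COMMON_WRITE_FLASHROM, CMD_SUBSYSTEM_COMMON)
 * stores its status in adapter->flash_status and signals
 * adapter->flash_compl, which is what wait_for_completion_timeout() blocks
 * on here.
 */
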
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
			OPCODE_COMMON_READ_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));

	memcpy(req->magic_mac, mac, ETH_ALEN);

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
			sizeof(*req));

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_LOOPBACK_TEST);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
	req->hdr.timeout = 4;

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
			u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	struct be_sge *sge;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_LOWLEVEL_HOST_DDR_DMA);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;

		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

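/* Example of the pattern fill above: with pattern = 0x1122334455667788 the
 * send buffer is written as the repeating little-endian byte sequence
 * 0x88 0x77 0x66 0x55 0x44 0x33 0x22 0x11, restarting every 8 bytes.
 */
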
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
			struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		/* unlike its siblings this path originally skipped the NULL
		 * check; wrb_from_mccq() can return NULL when the ring is
		 * full, so bail out the same way the other MCC commands do */
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_SEEPROM_READ);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}