/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include "be.h"
#include "be_cmds.h"
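
/* Ring the MCC doorbell: the queue id plus a num-posted count of one
 * tells the firmware that one more WRB is ready for processing */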
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}
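
/* Process one MCC completion: swap it to host endian, signal a pending
 * flashrom write if this completes one, refresh netdev stats after a
 * successful GET_STATISTICS, and warn on any other error status.
 * Returns the completion status */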
static int be_mcc_compl_process(struct be_adapter *adapter,
		struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
		(compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			struct be_cmd_resp_get_stats *resp =
						adapter->stats.cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
		}
	} else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
			"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
}

static inline bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE);
}
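
/* Return the next unprocessed completion in the MCC CQ (advancing the
 * tail), or NULL if there is none */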
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}
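
/* Arm the MCC CQ so that async events and completions are notified */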
void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}
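
/* Drain the MCC CQ: dispatch async link-state events and command
 * completions. Returns the number of CQ entries processed; *status
 * carries the status of the last command completion seen */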
int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			BUG_ON(!is_link_state_evt(compl->flags));

			/* Interpret compl as an async link evt */
			be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			*status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, num, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		num = be_process_mcc(adapter, &status);
		if (num)
			be_cq_notify(adapter, mcc_obj->cq.id,
				mcc_obj->rearm_cq, num);

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}
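
/* Poll the mailbox doorbell until the firmware sets the ready bit.
 * Returns -1 on timeout or when the register reads back all-ones
 * (pci slot disconnected) */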
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int cnt = 0, wait = 5;
	u32 ready;

	do {
		ready = ioread32(db);
		if (ready == 0xffffffff) {
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 4000000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			return -1;
		}

		if (cnt > 50)
			wait = 200;
		cnt += wait;
		udelay(wait);
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
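
/* Read the current POST stage from the MPU semaphore register;
 * returns -1 if the POST error bit is set */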
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}
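
/* Wait for the firmware's power-on self test to finish, polling the
 * POST stage every 2 seconds for up to 40 seconds */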
int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 40);

	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	wrb->tag0 = opcode;
	be_dws_cpu_to_le(wrb, 8);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value. */
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}
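
/*
 * Worked example for eq_delay_to_mult(): usec_delay = 96 gives
 * interrupt_rate = 1000000 / 96 = 10416, so the raw multiplier is
 * (651042 - 10416) * 10 / 10416 = 615, which rounds to
 * (615 + 5) / 10 = 62.
 */

/* Returns the mailbox WRB, zeroed out; callers serialize via mbox_lock */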
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;

	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
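
/* Allocate the next free WRB in the MCC queue, or return NULL when all
 * WRBs are in use; callers serialize via mcc_lock */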
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}
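
/* Create an event queue; uses mbox */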
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4 byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}
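
/* Ring lengths are powers of 2, so fls() yields log2(len) + 1;
 * the largest supported length (fls == 16) is encoded as 0 */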
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
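
/* Create the MCC queue and bind it to the given completion queue;
 * uses mbox */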
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}
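
/* Create an ethernet tx queue bound to the given completion queue;
 * uses mbox */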
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
		u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_DESTROY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * the WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_GET_STATISTICS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter,
			bool *link_up, u8 *mac_speed, u16 *link_speed)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	*link_up = false;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			*link_up = true;
			*link_speed = le16_to_cpu(resp->link_speed);
			*mac_speed = resp->mac_speed;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FW_VERSION);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MODIFY_EQ_DELAY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_VLAN_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	/* In FW versions X.102.149/X.101.487 and later,
	 * the port setting associated only with the
	 * issuing pci function will take effect
	 */
	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/*
 * Uses MCC for this command as it may be called in BH context
 * (netdev == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
		struct net_device *netdev, struct be_dma_mem *mem)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	sge = nonembedded_sgl(wrb);
	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_NTWK_MULTICAST_SET);
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (netdev) {
		int i;
		struct netdev_hw_addr *ha;

		req->num_mac = cpu_to_le16(netdev_mc_count(netdev));

		/* copy each address into its own slot */
		i = 0;
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*cap = le32_to_cpu(resp->function_cap);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_FUNCTION_RESET);

	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_ENABLE_DISABLE_BEACON);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_BEACON_STATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
				u8 *connector)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_port_type *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
			OPCODE_COMMON_READ_TRANSRECV_DATA);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));

	req->port = cpu_to_le32(port);
	req->page_num = cpu_to_le32(TR_PAGE_A0);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
		*connector = resp->data.connector;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
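
/* Write a flash region via a non-embedded (separate DMA buffer) cmd.
 * The result arrives asynchronously through adapter->flash_compl (see
 * be_mcc_compl_process()); waits up to 12s for it */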
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);
	wrb->tag1 = CMD_SUBSYSTEM_COMMON;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(12000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
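
/* Read back the 4 CRC bytes of the flashed redboot image at the given
 * offset; uses sync mcc */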
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
			OPCODE_COMMON_READ_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
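
/* Configure magic-packet wake-on-lan for the given MAC; non-embedded
 * cmd, uses sync mcc */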
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
			struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));

	memcpy(req->magic_mac, mac, ETH_ALEN);

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
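
/* Set (or clear) the loopback mode of a port; uses sync mcc */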
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
			sizeof(*req));

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
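
/* Run a fw loopback test on a port with the given packet size, count
 * and payload pattern; returns the test status reported by fw.
 * Uses sync mcc */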
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_LOOPBACK_TEST);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
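
/* DMA the byte pattern to the card's DDR and back, and fail if the
 * received buffer does not match what was sent; uses sync mcc */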
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
			u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	struct be_sge *sge;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_LOWLEVEL_HOST_DDR_DMA);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;

		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
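
/* Read the SEEPROM contents into the caller's DMA buffer; uses sync mcc */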
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		/* guard against MCCQ exhaustion, as the other MCCQ cmds do */
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_SEEPROM_READ);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}