be_cmds.c

/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include "be.h"
#include "be_cmds.h"
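
/* Ring the MCC doorbell: write the queue id and a posted-WRB count of 1
 * so the ARM firmware starts processing the newly queued WRB.
 */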
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb
	 */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;
	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			struct be_cmd_resp_get_stats *resp =
						adapter->stats.cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
		}
	} else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
			"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
}

static inline bool is_link_state_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}
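
/* Drain the MCC completion queue: async link-state events are handled in
 * place, command completions decrement the pending-WRB count, and the CQ
 * is finally re-armed with the number of entries consumed.
 */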
int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			BUG_ON(!is_link_state_evt(compl->flags));

			/* Interpret compl as an async link evt */
			be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&adapter->mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status;

	for (i = 0; i < mcc_timeout; i++) {
		status = be_process_mcc(adapter);
		if (status)
			return status;

		if (atomic_read(&adapter->mcc_obj.q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return 0;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}
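
/* Poll the mailbox doorbell's ready bit.  The poll interval starts at
 * 5us and backs off to 200us after the first ~50us; the loop gives up
 * once roughly 4 seconds (cnt > 4000000 us) have elapsed.
 */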
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int cnt = 0, wait = 5;
	u32 ready;

	do {
		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 4000000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			return -1;
		}

		if (cnt > 50)
			wait = 200;
		cnt += wait;
		udelay(wait);
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}
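
/* Poll the POST stage reported in the EP semaphore register until the
 * ARM firmware is ready, sleeping 2 seconds between polls for up to
 * 20 seconds.
 */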
int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 20);

	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	wrb->tag0 = opcode;
	be_dws_cpu_to_le(wrb, 20);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value.*/
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}
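
/* Worked example for eq_delay_to_mult(): usec_delay = 96 gives
 * interrupt_rate = 1000000 / 96 = 10416.  The scaled value is
 * (651042 - 10416) * 10 / 10416 = 615 (integer division), which the
 * rounding step turns into (615 + 5) / 10 = 62; that is below the
 * 1023 cap, so 62 is returned.
 */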

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;

	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
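
/* Unlike the single bootstrap WRB in the mailbox above, MCCQ WRBs come
 * from a ring and can run out; callers must handle a NULL return.
 */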
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
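
/* Typical flow for an MCCQ-based command, as followed by the be_cmd_*
 * routines below (a sketch, not a real function):
 *
 *	spin_lock_bh(&adapter->mcc_lock);
 *	wrb = wrb_from_mccq(adapter);	(may be NULL: bail out with -EBUSY)
 *	req = embedded_payload(wrb);
 *	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
 *	be_cmd_hdr_prepare(&req->hdr, subsystem, opcode, sizeof(*req));
 *	... fill req fields ...
 *	status = be_mcc_notify_wait(adapter);	(or be_mcc_notify() if async)
 *	spin_unlock_bh(&adapter->mcc_lock);
 */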

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}
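
/* Uses mbox */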
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
			be_pci_func(adapter));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			__ilog2_u32(cq->len/256));
	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
	AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
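
/* Uses mbox */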
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MCC_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	spin_unlock(&adapter->mbox_lock);

	return status;
}
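
/* Uses mbox */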
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
			be_pci_func(adapter));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_DESTROY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->interface_id = cpu_to_le32(interface_id);

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_GET_STATISTICS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter,
			bool *link_up, u8 *mac_speed, u16 *link_speed)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	*link_up = false;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			*link_up = true;
			*link_speed = le16_to_cpu(resp->link_speed);
			*mac_speed = resp->mac_speed;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FW_VERSION);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MODIFY_EQ_DELAY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_VLAN_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/*
 * Uses MCC for this command as it may be called in BH context
 * (mc == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
		struct dev_mc_list *mc_list, u32 mc_count,
		struct be_dma_mem *mem)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	sge = nonembedded_sgl(wrb);
	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_NTWK_MULTICAST_SET);
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (mc_list) {
		int i;
		struct dev_mc_list *mc;

		req->num_mac = cpu_to_le16(mc_count);

		for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
			memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*cap = le32_to_cpu(resp->function_cap);
	}

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	spin_lock(&adapter->mbox_lock);

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_FUNCTION_RESET);

	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

	status = be_mbox_notify_wait(adapter);

	spin_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_ENABLE_DISABLE_BEACON);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_BEACON_STATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
				u8 *connector)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_port_type *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
			OPCODE_COMMON_READ_TRANSRECV_DATA);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));

	req->port = cpu_to_le32(port);
	req->page_num = cpu_to_le32(TR_PAGE_A0);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
		*connector = resp->data.connector;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
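
/* Write a firmware image region to flash.  Non-embedded: the image data
 * travels in a separate DMA buffer described by a single SGE.
 * Uses synchronous mcc
 */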
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
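
/* Report (read back) the 4 bytes at the end of the redboot flash region
 * (fixed offset 0x3FFFC), where the flashed image's CRC is stored.
 * Uses sync mcc
 */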
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
			OPCODE_COMMON_READ_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

	req->params.op_type = cpu_to_le32(FLASHROM_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = 0x3FFFC;
	req->params.data_buf_size = 0x4;

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
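
/* Configure the MAC address on which a magic wake-on-LAN packet should
 * trigger.  Non-embedded command.
 * Uses sync mcc
 */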
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
			struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
	memcpy(req->magic_mac, mac, ETH_ALEN);

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
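
/* Put a port into (or take it out of) the given loopback mode.
 * Uses sync mcc
 */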
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
			sizeof(*req));

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
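
/* Fire num_pkts packets of pkt_size bytes filled with the given pattern
 * through the selected loopback path; the firmware's verdict comes back
 * in the response status.
 * Uses sync mcc
 */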
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_LOOPBACK_TEST);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
	req->hdr.timeout = 4;

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
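
/* DMA a buffer filled with the repeating 8-byte pattern to adapter DDR
 * and back, then compare the received buffer against what was sent.
 * Non-embedded command.
 * Uses sync mcc
 */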
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
			u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	struct be_sge *sge;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_LOWLEVEL_HOST_DDR_DMA);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;

		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}