be_cmds.c

/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
#include "be.h"
#include "be_cmds.h"
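
/* Ring the MCC-queue doorbell: tells the controller that one more WRB
 * has been posted on ring mccq->id.
 */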
static void be_mcc_notify(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;

        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
        iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
        if (compl->flags != 0) {
                compl->flags = le32_to_cpu(compl->flags);
                BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
                return true;
        } else {
                return false;
        }
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
        compl->flags = 0;
}
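
/* Handle one MCC completion: swap it to host endian, refresh netdev stats
 * on a GET_STATISTICS response and warn on any unexpected error status.
 * Returns the completion status.
 */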
static int be_mcc_compl_process(struct be_adapter *adapter,
                struct be_mcc_compl *compl)
{
        u16 compl_status, extd_status;

        /* Just swap the status to host endian; mcc tag is opaquely copied
         * from mcc_wrb
         */
        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                                CQE_STATUS_COMPL_MASK;
        if (compl_status == MCC_STATUS_SUCCESS) {
                if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
                        struct be_cmd_resp_get_stats *resp =
                                                adapter->stats.cmd.va;
                        be_dws_le_to_cpu(&resp->hw_stats,
                                                sizeof(resp->hw_stats));
                        netdev_stats_update(adapter);
                }
        } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
                extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                CQE_STATUS_EXTD_MASK;
                dev_warn(&adapter->pdev->dev,
                        "Error in cmd completion - opcode %d, compl %d, extd %d\n",
                        compl->tag0, compl_status, extd_status);
        }
        return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
                struct be_async_event_link_state *evt)
{
        be_link_status_update(adapter,
                evt->port_link_status == ASYNC_EVENT_LINK_UP);
}

static inline bool is_link_state_evt(u32 trailer)
{
        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
                ASYNC_TRAILER_EVENT_CODE_MASK) ==
                ASYNC_EVENT_CODE_LINK_STATE);
}
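
/* Return the next new (unconsumed) completion on the MCC CQ, or NULL;
 * called with mcc_cq_lock held.
 */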
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
        struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
        struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

        if (be_mcc_compl_is_new(compl)) {
                queue_tail_inc(mcc_cq);
                return compl;
        }
        return NULL;
}
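
/* Arm the MCC CQ so that async events and completions are notified again */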
void be_async_mcc_enable(struct be_adapter *adapter)
{
        spin_lock_bh(&adapter->mcc_cq_lock);

        be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
        adapter->mcc_obj.rearm_cq = true;

        spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
        adapter->mcc_obj.rearm_cq = false;
}
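
/* Drain the MCC CQ: process async link-state events and command
 * completions. Returns the number of CQ entries consumed; *status holds
 * the status of the last command completion seen.
 */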
int be_process_mcc(struct be_adapter *adapter, int *status)
{
        struct be_mcc_compl *compl;
        int num = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

        spin_lock_bh(&adapter->mcc_cq_lock);
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
                        BUG_ON(!is_link_state_evt(compl->flags));

                        /* Interpret compl as an async link evt */
                        be_async_link_state_process(adapter,
                                (struct be_async_event_link_state *) compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                        *status = be_mcc_compl_process(adapter, compl);
                        atomic_dec(&mcc_obj->q.used);
                }
                be_mcc_compl_use(compl);
                num++;
        }
        spin_unlock_bh(&adapter->mcc_cq_lock);
        return num;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout     120000 /* 12s timeout */
        int i, num, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

        for (i = 0; i < mcc_timeout; i++) {
                num = be_process_mcc(adapter, &status);
                if (num)
                        be_cq_notify(adapter, mcc_obj->cq.id,
                                mcc_obj->rearm_cq, num);

                if (atomic_read(&mcc_obj->q.used) == 0)
                        break;
                udelay(100);
        }
        if (i == mcc_timeout) {
                dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
                return -1;
        }
        return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
        be_mcc_notify(adapter);
        return be_mcc_wait_compl(adapter);
}
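
/* Poll the mailbox doorbell until the ready bit is set; bail out early if
 * the register reads back all ones (PCI slot disconnected).
 */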
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
        int cnt = 0, wait = 5;
        u32 ready;

        do {
                ready = ioread32(db);
                if (ready == 0xffffffff) {
                        dev_err(&adapter->pdev->dev,
                                "pci slot disconnected\n");
                        return -1;
                }

                ready &= MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;

                if (cnt > 4000000) {
                        dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
                        return -1;
                }

                if (cnt > 50)
                        wait = 200;
                cnt += wait;
                udelay(wait);
        } while (true);

        return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
        int status;
        u32 val = 0;
        void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        val = 0;
        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
        val |= (u32)(mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        /* A cq entry has been made now */
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(adapter, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status)
                        return status;
        } else {
                dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
                return -1;
        }
        return 0;
}
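
/* Read the current POST stage from the EP semaphore register; returns -1
 * if the POST error bit is set.
 */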
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
        u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

        *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
        if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
                return -1;
        else
                return 0;
}
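
/* Wait (up to ~20s, in 2s steps) for the fw to report POST_STAGE_ARMFW_RDY */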
int be_cmd_POST(struct be_adapter *adapter)
{
        u16 stage;
        int status, timeout = 0;

        do {
                status = be_POST_stage_get(adapter, &stage);
                if (status) {
                        dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
                                stage);
                        return -1;
                } else if (stage != POST_STAGE_ARMFW_RDY) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(2 * HZ);
                        timeout += 2;
                } else {
                        return 0;
                }
        } while (timeout < 20);

        dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
        return -1;
}

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
        return wrb->payload.embedded_payload;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
        return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
                                bool embedded, u8 sge_cnt, u32 opcode)
{
        if (embedded)
                wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
        else
                wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
                                MCC_WRB_SGE_CNT_SHIFT;
        wrb->payload_length = payload_len;
        wrb->tag0 = opcode;
        be_dws_cpu_to_le(wrb, 8);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                                u8 subsystem, u8 opcode, int cmd_len)
{
        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
        req_hdr->version = 0;
}
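
/* Fill a phys_addr array with the 4K pages spanned by a DMA buffer,
 * capped at max_pages.
 */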
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
                        struct be_dma_mem *mem)
{
        int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
        u64 dma = (u64)mem->dma;

        for (i = 0; i < buf_pages; i++) {
                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
                dma += PAGE_SIZE_4K;
        }
}

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE   651042
        const u32 round = 10;
        u32 multiplier;

        if (usec_delay == 0)
                multiplier = 0;
        else {
                u32 interrupt_rate = 1000000 / usec_delay;
                /* Max delay, corresponding to the lowest interrupt rate */
                if (interrupt_rate == 0)
                        multiplier = 1023;
                else {
                        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
                        multiplier /= interrupt_rate;
                        /* Round the multiplier to the closest value. */
                        multiplier = (multiplier + round/2) / round;
                        multiplier = min(multiplier, (u32)1023);
                }
        }
        return multiplier;
}
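
/* Return the (zeroed) WRB embedded in the mailbox; mbox_lock must be held */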
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_wrb *wrb
                = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;

        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}
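
/* Allocate the next free WRB on the MCC queue, or NULL if the queue is
 * full; mcc_lock must be held.
 */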
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        struct be_mcc_wrb *wrb;

        if (atomic_read(&mccq->used) >= mccq->len) {
                dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
                return NULL;
        }

        wrb = queue_head_node(mccq);
        queue_head_inc(mccq);
        atomic_inc(&mccq->used);
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
        u8 *wrb;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = (u8 *)wrb_from_mbox(adapter);
        *wrb++ = 0xFF;
        *wrb++ = 0x12;
        *wrb++ = 0x34;
        *wrb++ = 0xFF;
        *wrb++ = 0xFF;
        *wrb++ = 0x56;
        *wrb++ = 0x78;
        *wrb = 0xFF;

        status = be_mbox_notify_wait(adapter);

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
        u8 *wrb;
        int status;

        if (adapter->eeh_err)
                return -EIO;

        spin_lock(&adapter->mbox_lock);

        wrb = (u8 *)wrb_from_mbox(adapter);
        *wrb++ = 0xFF;
        *wrb++ = 0xAA;
        *wrb++ = 0xBB;
        *wrb++ = 0xFF;
        *wrb++ = 0xFF;
        *wrb++ = 0xCC;
        *wrb++ = 0xDD;
        *wrb = 0xFF;

        status = be_mbox_notify_wait(adapter);

        spin_unlock(&adapter->mbox_lock);
        return status;
}
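
/* Uses mbox */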
int be_cmd_eq_create(struct be_adapter *adapter,
                struct be_queue_info *eq, int eq_delay)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eq_create *req;
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_EQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_eq_context, func, req->context,
                        be_pci_func(adapter));
        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
        /* 4byte eqe */
        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
                        __ilog2_u32(eq->len/256));
        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
                        eq_delay_to_mult(eq_delay));
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Uses mbox */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
                        u8 type, bool permanent, u32 if_handle)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mac_query *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_MAC_QUERY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

        req->type = type;
        if (permanent) {
                req->permanent = 1;
        } else {
                req->if_id = cpu_to_le16((u16) if_handle);
                req->permanent = 0;
        }

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
                memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                u32 if_id, u32 *pmac_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_add *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_PMAC_ADD);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

        req->if_id = cpu_to_le32(if_id);
        memcpy(req->mac_address, mac_addr, ETH_ALEN);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
                *pmac_id = le32_to_cpu(resp->pmac_id);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_del *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_PMAC_DEL);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

        req->if_id = cpu_to_le32(if_id);
        req->pmac_id = cpu_to_le32(pmac_id);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter,
                struct be_queue_info *cq, struct be_queue_info *eq,
                bool sol_evts, bool no_delay, int coalesce_wm)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cq_create *req;
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_CQ_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_CQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
        AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
        AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
                        __ilog2_u32(cq->len/256));
        AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
        AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
        AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
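
/* Encode a queue length for the hw ring-size field: fls() yields
 * log2(len) + 1 for power-of-2 lengths; the largest code (16) apparently
 * wraps to the encoding 0.
 */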
static u32 be_encoded_q_len(int q_len)
{
        u32 len_encoded = fls(q_len); /* log2(len) + 1 */

        if (len_encoded == 16)
                len_encoded = 0;
        return len_encoded;
}
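
/* Uses mbox */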
int be_cmd_mccq_create(struct be_adapter *adapter,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_MCC_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_MCC_CREATE, sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

        AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
        AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
                be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
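
/* Uses mbox */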
int be_cmd_txq_create(struct be_adapter *adapter,
                        struct be_queue_info *txq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_tx_create *req;
        struct be_dma_mem *q_mem = &txq->dma_mem;
        void *ctxt;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_ETH_TX_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
                sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;

        AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
                be_encoded_q_len(txq->len));
        AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
                        be_pci_func(adapter));
        AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
                txq->id = le16_to_cpu(resp->cid);
                txq->created = true;
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Uses mbox */
int be_cmd_rxq_create(struct be_adapter *adapter,
                struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
                u16 max_frame_size, u32 if_id, u32 rss)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_rx_create *req;
        struct be_dma_mem *q_mem = &rxq->dma_mem;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_ETH_RX_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
                sizeof(*req));

        req->cq_id = cpu_to_le16(cq_id);
        req->frag_size = fls(frag_size) - 1;
        req->num_pages = 2;
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
        req->interface_id = cpu_to_le32(if_id);
        req->max_frame_size = cpu_to_le16(max_frame_size);
        req->rss_queue = cpu_to_le32(rss);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
                rxq->id = le16_to_cpu(resp->id);
                rxq->created = true;
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
                int queue_type)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
        u8 subsys = 0, opcode = 0;
        int status;

        if (adapter->eeh_err)
                return -EIO;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_TXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_TX_DESTROY;
                break;
        case QTYPE_RXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_RX_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        default:
                BUG();
        }

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
        req->id = cpu_to_le16(q->id);

        status = be_mbox_notify_wait(adapter);

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
                u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_create *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_INTERFACE_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

        req->capability_flags = cpu_to_le32(cap_flags);
        req->enable_flags = cpu_to_le32(en_flags);
        req->pmac_invalid = pmac_invalid;
        if (!pmac_invalid)
                memcpy(req->mac_addr, mac, ETH_ALEN);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
                *if_handle = le32_to_cpu(resp->interface_id);
                if (!pmac_invalid)
                        *pmac_id = le32_to_cpu(resp->pmac_id);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Uses mbox */
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_destroy *req;
        int status;

        if (adapter->eeh_err)
                return -EIO;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_INTERFACE_DESTROY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

        req->interface_id = cpu_to_le32(interface_id);

        status = be_mbox_notify_wait(adapter);

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_stats *req;
        struct be_sge *sge;
        int status = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                        OPCODE_ETH_GET_STATISTICS);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_GET_STATISTICS, sizeof(*req));
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        be_mcc_notify(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter,
                        bool *link_up, u8 *mac_speed, u16 *link_speed)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_link_status *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        *link_up = false;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
                if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
                        *link_up = true;
                        *link_speed = le16_to_cpu(resp->link_speed);
                        *mac_speed = resp->mac_speed;
                }
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses Mbox */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fw_version *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_GET_FW_VERSION);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
                strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Set the EQ delay interval of an EQ to the specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_modify_eq_delay *req;
        int status = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_MODIFY_EQ_DELAY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

        req->num_eq = cpu_to_le32(1);
        req->delay[0].eq_id = cpu_to_le32(eq_id);
        req->delay[0].phase = 0;
        req->delay[0].delay_multiplier = cpu_to_le32(eqd);

        be_mcc_notify(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
                u32 num, bool untagged, bool promiscuous)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_vlan_config *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_VLAN_CONFIG);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

        req->interface_id = if_id;
        req->promiscuous = promiscuous;
        req->untagged = untagged;
        req->num_vlan = num;
        if (!promiscuous) {
                memcpy(req->normal_vlan, vtag_array,
                        req->num_vlan * sizeof(vtag_array[0]));
        }

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_promiscuous_config *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_PROMISCUOUS, sizeof(*req));

        if (port_num)
                req->port1_promiscuous = en;
        else
                req->port0_promiscuous = en;

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/*
 * Uses MCC for this command as it may be called in BH context
 * (netdev == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
                struct net_device *netdev, struct be_dma_mem *mem)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcast_mac_config *req = mem->va;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        sge = nonembedded_sgl(wrb);
        memset(req, 0, sizeof(*req));

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                        OPCODE_COMMON_NTWK_MULTICAST_SET);
        sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
        sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(mem->size);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

        req->interface_id = if_id;
        if (netdev) {
                int i;
                struct dev_mc_list *mc;

                req->num_mac = cpu_to_le16(netdev_mc_count(netdev));

                i = 0;
                netdev_for_each_mc_addr(mc, netdev)
                        memcpy(req->mac[i++].byte, mc->dmi_addr, ETH_ALEN);
        } else {
                req->promiscuous = 1;
        }

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_flow_control *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_SET_FLOW_CONTROL);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

        req->tx_flow_control = cpu_to_le16((u16)tx_fc);
        req->rx_flow_control = cpu_to_le16((u16)rx_fc);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_flow_control *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_GET_FLOW_CONTROL);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_flow_control *resp =
                                                embedded_payload(wrb);
                *tx_fc = le16_to_cpu(resp->tx_flow_control);
                *rx_fc = le16_to_cpu(resp->rx_flow_control);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_query_fw_cfg *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
                *port_num = le32_to_cpu(resp->phys_port);
                *cap = le32_to_cpu(resp->function_cap);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_hdr *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_FUNCTION_RESET);

        be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

        status = be_mbox_notify_wait(adapter);

        spin_unlock(&adapter->mbox_lock);
        return status;
}

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
                        u8 bcn, u8 sts, u8 state)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_enable_disable_beacon *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_ENABLE_DISABLE_BEACON);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

        req->port_num = port_num;
        req->beacon_state = state;
        req->beacon_duration = bcn;
        req->status_duration = sts;

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_beacon_state *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_GET_BEACON_STATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

        req->port_num = port_num;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_beacon_state *resp =
                                                embedded_payload(wrb);
                *state = resp->beacon_state;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses sync mcc */
int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
                                u8 *connector)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_port_type *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        /* Size the wrb payload for the response, which is larger than
         * the request */
        be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0,
                        OPCODE_COMMON_READ_TRANSRECV_DATA);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));

        req->port = cpu_to_le32(port);
        req->page_num = cpu_to_le32(TR_PAGE_A0);
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
                *connector = resp->data.connector;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
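
/* Uses synchronous mcc */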
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
                        u32 flash_type, u32 flash_opcode, u32 buf_size)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
                        OPCODE_COMMON_WRITE_FLASHROM);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
        sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(cmd->size);

        req->params.op_type = cpu_to_le32(flash_type);
        req->params.op_code = cpu_to_le32(flash_opcode);
        req->params.data_buf_size = cpu_to_le32(buf_size);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
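
/* Read back the 4-byte CRC stored at the given offset of the redboot
 * flash image; uses synchronous mcc.
 */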
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
                        int offset)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
                        OPCODE_COMMON_READ_FLASHROM);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

        req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
        req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
        req->params.offset = cpu_to_le32(offset);
        req->params.data_buf_size = cpu_to_le32(0x4);

        status = be_mcc_notify_wait(adapter);
        if (!status)
                memcpy(flashed_crc, req->params.data_buf, 4);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
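
/* Program the magic-packet WOL MAC; the req lives in caller-provided DMA
 * memory (non-embedded). Uses synchronous mcc.
 */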
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
                        struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_acpi_wol_magic_config *req;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                        OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
        memcpy(req->magic_mac, mac, ETH_ALEN);

        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
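
/* Uses synchronous mcc */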
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
                        u8 loopback_type, u8 enable)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_lmode *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                        OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
                        sizeof(*req));

        req->src_port = port_num;
        req->dest_port = port_num;
        req->loopback_type = loopback_type;
        req->loopback_state = enable;

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
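
/* Uses synchronous mcc; on success, returns the loopback-test status
 * reported by the fw.
 */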
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
                u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_loopback_test *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_LOWLEVEL_LOOPBACK_TEST);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                        OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
        req->hdr.timeout = 4;

        req->pattern = cpu_to_le64(pattern);
        req->src_port = cpu_to_le32(port_num);
        req->dest_port = cpu_to_le32(port_num);
        req->pkt_size = cpu_to_le32(pkt_size);
        req->num_pkts = cpu_to_le32(num_pkts);
        req->loopback_type = cpu_to_le32(loopback_type);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
                status = le32_to_cpu(resp->status);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
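
/* DMA the given pattern out to controller memory and compare the echoed
 * buffer against what was sent; uses synchronous mcc.
 */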
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
                        u32 byte_cnt, struct be_dma_mem *cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_ddrdma_test *req;
        struct be_sge *sge;
        int status;
        int i, j = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = cmd->va;
        sge = nonembedded_sgl(wrb);
        be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
                        OPCODE_LOWLEVEL_HOST_DDR_DMA);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                        OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);

        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
        sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(cmd->size);

        req->pattern = cpu_to_le64(pattern);
        req->byte_count = cpu_to_le32(byte_cnt);
        for (i = 0; i < byte_cnt; i++) {
                req->snd_buff[i] = (u8)(pattern >> (j*8));
                j++;
                if (j > 7)
                        j = 0;
        }

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_ddrdma_test *resp;

                resp = cmd->va;
                if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
                                resp->snd_err) {
                        status = -1;
                }
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
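
/* Read the SEEPROM contents into caller-provided DMA memory; uses
 * synchronous mcc.
 */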
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
                        struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_seeprom_read *req;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                        OPCODE_COMMON_SEEPROM_READ);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_SEEPROM_READ, sizeof(*req));

        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}