mcdi.c 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279
  1. /****************************************************************************
  2. * Driver for Solarflare Solarstorm network controllers and boards
  3. * Copyright 2008-2011 Solarflare Communications Inc.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published
  7. * by the Free Software Foundation, incorporated herein by reference.
  8. */
  9. #include <linux/delay.h>
  10. #include "net_driver.h"
  11. #include "nic.h"
  12. #include "io.h"
  13. #include "farch_regs.h"
  14. #include "mcdi_pcol.h"
  15. #include "phy.h"
  16. /**************************************************************************
  17. *
  18. * Management-Controller-to-Driver Interface
  19. *
  20. **************************************************************************
  21. */
/* Maximum time to wait for any single MCDI request to complete */
#define MCDI_RPC_TIMEOUT (10 * HZ)
/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 10ms for the status word to be set. */
#define MCDI_STATUS_DELAY_US 100
#define MCDI_STATUS_DELAY_COUNT 100
/* Total status-poll budget (count * per-iteration delay) in milliseconds */
#define MCDI_STATUS_SLEEP_MS \
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
/* Mask covering the SEQ field of the MCDI request/response header */
#define SEQ_MASK \
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
  32. static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
  33. {
  34. EFX_BUG_ON_PARANOID(!efx->mcdi);
  35. return &efx->mcdi->iface;
  36. }
  37. int efx_mcdi_init(struct efx_nic *efx)
  38. {
  39. struct efx_mcdi_iface *mcdi;
  40. efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
  41. if (!efx->mcdi)
  42. return -ENOMEM;
  43. mcdi = efx_mcdi(efx);
  44. init_waitqueue_head(&mcdi->wq);
  45. spin_lock_init(&mcdi->iface_lock);
  46. atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
  47. mcdi->mode = MCDI_MODE_POLL;
  48. (void) efx_mcdi_poll_reboot(efx);
  49. /* Recover from a failed assertion before probing */
  50. return efx_mcdi_handle_assertion(efx);
  51. }
/* Free the MCDI state allocated by efx_mcdi_init().  Tearing down while
 * a command is still in flight (state != QUIESCENT) is a driver bug.
 */
void efx_mcdi_fini(struct efx_nic *efx)
{
	BUG_ON(efx->mcdi &&
	       atomic_read(&efx->mcdi->iface.state) != MCDI_STATE_QUIESCENT);
	kfree(efx->mcdi);
}
/* Build the MCDI request header (v1 or v2 depending on NIC support) and
 * hand the header plus payload to the NIC-type transport hook.
 * @cmd: MCDI command code; @inbuf/@inlen: request payload.
 * The caller must already own the interface (state != QUIESCENT).
 */
static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
			    const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	/* In event mode, ask the MC to signal completion with an event */
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1: single-dword header carries command and length */
		EFX_POPULATE_DWORD_6(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
		hdr_len = 4;
	} else {
		/* MCDI v2: escape code in dword 0, real command and the
		 * (larger) actual length in dword 1 */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_6(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
}
/* Copy @outlen bytes of response payload into @outbuf, skipping the
 * response header (resp_hdr_len must have been set by
 * efx_mcdi_read_response_header() first).  The caller must own the
 * interface.
 */
static void
efx_mcdi_copyout(struct efx_nic *efx, efx_dword_t *outbuf, size_t outlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);

	efx->type->mcdi_read_response(efx, outbuf, mcdi->resp_hdr_len, outlen);
}
  104. static int efx_mcdi_errno(unsigned int mcdi_err)
  105. {
  106. switch (mcdi_err) {
  107. case 0:
  108. return 0;
  109. #define TRANSLATE_ERROR(name) \
  110. case MC_CMD_ERR_ ## name: \
  111. return -name;
  112. TRANSLATE_ERROR(EPERM);
  113. TRANSLATE_ERROR(ENOENT);
  114. TRANSLATE_ERROR(EINTR);
  115. TRANSLATE_ERROR(EAGAIN);
  116. TRANSLATE_ERROR(EACCES);
  117. TRANSLATE_ERROR(EBUSY);
  118. TRANSLATE_ERROR(EINVAL);
  119. TRANSLATE_ERROR(EDEADLK);
  120. TRANSLATE_ERROR(ENOSYS);
  121. TRANSLATE_ERROR(ETIME);
  122. TRANSLATE_ERROR(EALREADY);
  123. TRANSLATE_ERROR(ENOSPC);
  124. #undef TRANSLATE_ERROR
  125. case MC_CMD_ERR_ALLOC_FAIL:
  126. return -ENOBUFS;
  127. case MC_CMD_ERR_MAC_EXIST:
  128. return -EADDRINUSE;
  129. default:
  130. return -EPROTO;
  131. }
  132. }
/* Parse the response header from shared memory and record the outcome
 * in mcdi->resprc / resp_hdr_len / resp_data_len.  Handles both v1
 * (4-byte) and v2 (8-byte) headers, validates the sequence number, and
 * extracts the error code when the ERROR flag is set.
 */
static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
	efx_dword_t hdr;

	/* First dword is common to v1 and v2 headers */
	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		/* v2: real length lives in the second header dword */
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

	if (error && mcdi->resp_data_len == 0) {
		/* Error flag with no payload indicates an MC reboot */
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		/* Response does not match the request we issued */
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		/* Error code is the first dword of the response payload */
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc =
			efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
	} else {
		mcdi->resprc = 0;
	}
}
/* Busy-wait for the current MCDI request to complete, then parse the
 * response header.  Returns 0 on completion (even if the MC reported an
 * error - see mcdi->resprc) or -ETIMEDOUT.
 */
static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		/* Rebooted: synthesise a failed, payload-less completion */
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		return 0;
	}

	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
	 * because generally mcdi responses are fast. After that, back off
	 * and poll once a jiffy (approximately)
	 */
	spins = TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		/* Order the jiffies read before the response-complete read */
		rmb();
		if (efx->type->mcdi_poll_response(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	efx_mcdi_read_response_header(efx);

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}
  205. /* Test and clear MC-rebooted flag for this port/function; reset
  206. * software state as necessary.
  207. */
  208. int efx_mcdi_poll_reboot(struct efx_nic *efx)
  209. {
  210. int rc;
  211. if (!efx->mcdi)
  212. return 0;
  213. rc = efx->type->mcdi_poll_reboot(efx);
  214. if (!rc)
  215. return 0;
  216. /* MAC statistics have been cleared on the NIC; clear our copy
  217. * so that efx_update_diff_stat() can continue to work.
  218. */
  219. memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
  220. return rc;
  221. }
/* Block until the interface is free and claim it for the caller.  The
 * cmpxchg both observes QUIESCENT and atomically moves the state to
 * RUNNING, so exactly one waiter wins when several race here.
 */
static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING. */
	wait_event(mcdi->wq,
		   atomic_cmpxchg(&mcdi->state,
				  MCDI_STATE_QUIESCENT,
				  MCDI_STATE_RUNNING)
		   == MCDI_STATE_QUIESCENT);
}
/* Sleep until the current request is COMPLETED (event mode), falling
 * back to polling if the mode was switched underneath us.  Returns 0 on
 * completion or -ETIMEDOUT.
 */
static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(
		    mcdi->wq,
		    atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
		    MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
	 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
	 * completed the request first, then we'll just end up completing the
	 * request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}
/* Mark the current request COMPLETED and wake waiters.  Returns true if
 * this call performed the transition, false if the request was already
 * completed (duplicate completion).
 */
static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
{
	/* If the interface is RUNNING, then move to COMPLETED and wake any
	 * waiters. If the interface isn't in RUNNING then we've received a
	 * duplicate completion after we've already transitioned back to
	 * QUIESCENT. [A subsequent invocation would increment seqno, so would
	 * have failed the seqno check].
	 */
	if (atomic_cmpxchg(&mcdi->state,
			   MCDI_STATE_RUNNING,
			   MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}
/* Return the interface to QUIESCENT and wake the next queued requestor
 * blocked in efx_mcdi_acquire().
 */
static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	wake_up(&mcdi->wq);
}
/* Handle a CMDDONE event: validate the sequence number, record the
 * result under iface_lock, then (outside the lock) complete the request
 * and wake the waiter.
 * @seqno: sequence number from the event; @datalen/@mcdi_err: response
 * length and error code as carried in the event (v1 only).
 */
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			/* v1: result and length are carried in the event */
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	/* Wake outside the lock; efx_mcdi_complete() handles duplicates */
	if (wake)
		efx_mcdi_complete(mcdi);
}
  302. int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
  303. const efx_dword_t *inbuf, size_t inlen,
  304. efx_dword_t *outbuf, size_t outlen,
  305. size_t *outlen_actual)
  306. {
  307. int rc;
  308. rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
  309. if (rc)
  310. return rc;
  311. return efx_mcdi_rpc_finish(efx, cmd, inlen,
  312. outbuf, outlen, outlen_actual);
  313. }
  314. int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
  315. const efx_dword_t *inbuf, size_t inlen)
  316. {
  317. struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
  318. if (efx->type->mcdi_max_ver < 0 ||
  319. (efx->type->mcdi_max_ver < 2 &&
  320. cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
  321. return -EINVAL;
  322. if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
  323. (efx->type->mcdi_max_ver < 2 &&
  324. inlen > MCDI_CTL_SDU_LEN_MAX_V1))
  325. return -EMSGSIZE;
  326. efx_mcdi_acquire(mcdi);
  327. /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
  328. spin_lock_bh(&mcdi->iface_lock);
  329. ++mcdi->seqno;
  330. spin_unlock_bh(&mcdi->iface_lock);
  331. efx_mcdi_copyin(efx, cmd, inbuf, inlen);
  332. return 0;
  333. }
  334. int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
  335. efx_dword_t *outbuf, size_t outlen,
  336. size_t *outlen_actual)
  337. {
  338. struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
  339. int rc;
  340. if (mcdi->mode == MCDI_MODE_POLL)
  341. rc = efx_mcdi_poll(efx);
  342. else
  343. rc = efx_mcdi_await_completion(efx);
  344. if (rc != 0) {
  345. /* Close the race with efx_mcdi_ev_cpl() executing just too late
  346. * and completing a request we've just cancelled, by ensuring
  347. * that the seqno check therein fails.
  348. */
  349. spin_lock_bh(&mcdi->iface_lock);
  350. ++mcdi->seqno;
  351. ++mcdi->credits;
  352. spin_unlock_bh(&mcdi->iface_lock);
  353. netif_err(efx, hw, efx->net_dev,
  354. "MC command 0x%x inlen %d mode %d timed out\n",
  355. cmd, (int)inlen, mcdi->mode);
  356. } else {
  357. size_t resplen;
  358. /* At the very least we need a memory barrier here to ensure
  359. * we pick up changes from efx_mcdi_ev_cpl(). Protect against
  360. * a spurious efx_mcdi_ev_cpl() running concurrently by
  361. * acquiring the iface_lock. */
  362. spin_lock_bh(&mcdi->iface_lock);
  363. rc = mcdi->resprc;
  364. resplen = mcdi->resp_data_len;
  365. spin_unlock_bh(&mcdi->iface_lock);
  366. BUG_ON(rc > 0);
  367. if (rc == 0) {
  368. efx_mcdi_copyout(efx, outbuf,
  369. min(outlen, mcdi->resp_data_len));
  370. if (outlen_actual != NULL)
  371. *outlen_actual = resplen;
  372. } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
  373. ; /* Don't reset if MC_CMD_REBOOT returns EIO */
  374. else if (rc == -EIO || rc == -EINTR) {
  375. netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
  376. -rc);
  377. efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
  378. } else
  379. netif_dbg(efx, hw, efx->net_dev,
  380. "MC command 0x%x inlen %d failed rc=%d\n",
  381. cmd, (int)inlen, -rc);
  382. if (rc == -EIO || rc == -EINTR) {
  383. msleep(MCDI_STATUS_SLEEP_MS);
  384. efx_mcdi_poll_reboot(efx);
  385. }
  386. }
  387. efx_mcdi_release(mcdi);
  388. return rc;
  389. }
/* Switch the MCDI interface to polled completions.  Safe to call even
 * while a request is outstanding, because responses always land in
 * shared memory regardless of completion mode.
 */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete(mcdi);
}
/* Switch the MCDI interface to event-driven completions.  Unlike the
 * reverse switch, this must wait for any outstanding request to finish,
 * because the completion method is baked into each request header.
 */
void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}
/* Handle a BADASSERT or REBOOT event: fail any outstanding request with
 * @rc, or - if nothing was outstanding - schedule a reset and consume
 * the reboot status word ourselves.
 */
static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * There's a race here with efx_mcdi_rpc(), because we might receive
	 * a REBOOT event *before* the request has been copied out. In polled
	 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
	 * is ignored. In event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI request. Did the mc
	 * reboot before or after the copyout? The best we can do always is
	 * just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			/* Fail the waiter; credit a possible late CMDDONE so
			 * efx_mcdi_ev_cpl() treats it as cancelled */
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
	}

	spin_unlock(&mcdi->iface_lock);
}
/* Called from falcon_process_eventq for MCDI events.
 * Dispatch on the MCDI event code: command completions, link changes,
 * sensor/PM/scheduler notifications, MC death (assert/reboot), FLR and
 * PTP events.  Unknown codes are logged and ignored.
 */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	/* NB: BADSSERT is the spelling used by the MCDI protocol headers */
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_info(efx, hw, efx->net_dev,
			   "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gather lazily. We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;

	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}
  520. /**************************************************************************
  521. *
  522. * Specific request functions
  523. *
  524. **************************************************************************
  525. */
  526. void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
  527. {
  528. MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
  529. size_t outlength;
  530. const __le16 *ver_words;
  531. int rc;
  532. BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
  533. rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
  534. outbuf, sizeof(outbuf), &outlength);
  535. if (rc)
  536. goto fail;
  537. if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
  538. rc = -EIO;
  539. goto fail;
  540. }
  541. ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
  542. snprintf(buf, len, "%u.%u.%u.%u",
  543. le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
  544. le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
  545. return;
  546. fail:
  547. netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  548. buf[0] = 0;
  549. }
  550. int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
  551. bool *was_attached)
  552. {
  553. MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
  554. MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
  555. size_t outlen;
  556. int rc;
  557. MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
  558. driver_operating ? 1 : 0);
  559. MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
  560. MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
  561. rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
  562. outbuf, sizeof(outbuf), &outlen);
  563. if (rc)
  564. goto fail;
  565. if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
  566. rc = -EIO;
  567. goto fail;
  568. }
  569. if (was_attached != NULL)
  570. *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
  571. return 0;
  572. fail:
  573. netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  574. return rc;
  575. }
/* Fetch board configuration from the MC.  Any of @mac_address (ETH_ALEN
 * bytes), @fw_subtype_list (MAXNUM entries; zero-padded) and
 * @capabilities may be NULL to skip that item.  Port-specific fields are
 * selected by efx_port_num().  Returns 0 or a negative errno.
 */
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen, i;
	int port_num = efx_port_num(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	if (mac_address)
		memcpy(mac_address,
		       port_num ?
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
		       ETH_ALEN);
	if (fw_subtype_list) {
		/* Copy as many entries as the (variable-length) response
		 * carries, then zero-fill the remainder of the caller's
		 * fixed-size array */
		for (i = 0;
		     i < MCDI_VAR_ARRAY_LEN(outlen,
					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
		     i++)
			fw_subtype_list[i] = MCDI_ARRAY_WORD(
				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
			fw_subtype_list[i] = 0;
	}
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);
	return rc;
}
  622. int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
  623. {
  624. MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
  625. u32 dest = 0;
  626. int rc;
  627. if (uart)
  628. dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
  629. if (evq)
  630. dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
  631. MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
  632. MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
  633. BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
  634. rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
  635. NULL, 0, NULL);
  636. if (rc)
  637. goto fail;
  638. return 0;
  639. fail:
  640. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  641. return rc;
  642. }
  643. int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
  644. {
  645. MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
  646. size_t outlen;
  647. int rc;
  648. BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
  649. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
  650. outbuf, sizeof(outbuf), &outlen);
  651. if (rc)
  652. goto fail;
  653. if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
  654. rc = -EIO;
  655. goto fail;
  656. }
  657. *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
  658. return 0;
  659. fail:
  660. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
  661. __func__, rc);
  662. return rc;
  663. }
/* Query size, erase granularity and write-protection of NVRAM partition
 * @type into *@size_out, *@erase_size_out and *@protected_out.
 * Returns 0 or a negative errno.
 */
int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			size_t *size_out, size_t *erase_size_out,
			bool *protected_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
	/* PROTECTED is a single bit within the FLAGS dword */
	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
			    (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
  690. int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
  691. {
  692. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
  693. int rc;
  694. MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
  695. BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
  696. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
  697. NULL, 0, NULL);
  698. if (rc)
  699. goto fail;
  700. return 0;
  701. fail:
  702. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  703. return rc;
  704. }
  705. int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
  706. loff_t offset, u8 *buffer, size_t length)
  707. {
  708. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
  709. MCDI_DECLARE_BUF(outbuf,
  710. MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
  711. size_t outlen;
  712. int rc;
  713. MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
  714. MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
  715. MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
  716. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
  717. outbuf, sizeof(outbuf), &outlen);
  718. if (rc)
  719. goto fail;
  720. memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
  721. return 0;
  722. fail:
  723. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  724. return rc;
  725. }
/* Write @length bytes from @buffer to NVRAM partition @type at @offset.
 * @length must not exceed EFX_MCDI_NVRAM_LEN_MAX.  Returns 0 or a
 * negative errno.
 */
int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
			 loff_t offset, const u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

	/* Send only the used portion of inbuf, rounded up to dword size as
	 * the MCDI transport requires - not the full max-sized buffer */
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
  747. int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
  748. loff_t offset, size_t length)
  749. {
  750. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
  751. int rc;
  752. MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
  753. MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
  754. MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
  755. BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
  756. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
  757. NULL, 0, NULL);
  758. if (rc)
  759. goto fail;
  760. return 0;
  761. fail:
  762. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  763. return rc;
  764. }
  765. int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
  766. {
  767. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
  768. int rc;
  769. MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
  770. BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
  771. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
  772. NULL, 0, NULL);
  773. if (rc)
  774. goto fail;
  775. return 0;
  776. fail:
  777. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  778. return rc;
  779. }
  780. static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
  781. {
  782. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
  783. MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
  784. int rc;
  785. MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
  786. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
  787. outbuf, sizeof(outbuf), NULL);
  788. if (rc)
  789. return rc;
  790. switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
  791. case MC_CMD_NVRAM_TEST_PASS:
  792. case MC_CMD_NVRAM_TEST_NOTSUPP:
  793. return 0;
  794. default:
  795. return -EIO;
  796. }
  797. }
  798. int efx_mcdi_nvram_test_all(struct efx_nic *efx)
  799. {
  800. u32 nvram_types;
  801. unsigned int type;
  802. int rc;
  803. rc = efx_mcdi_nvram_types(efx, &nvram_types);
  804. if (rc)
  805. goto fail1;
  806. type = 0;
  807. while (nvram_types != 0) {
  808. if (nvram_types & 1) {
  809. rc = efx_mcdi_nvram_test(efx, type);
  810. if (rc)
  811. goto fail2;
  812. }
  813. type++;
  814. nvram_types >>= 1;
  815. }
  816. return 0;
  817. fail2:
  818. netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
  819. __func__, type);
  820. fail1:
  821. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  822. return rc;
  823. }
/* Read and log any assertion state stored by the MC firmware.
 * GET_ASSERTS is issued with the CLEAR flag, so a recorded assertion is
 * consumed by this call.  Returns 0 if there was no assertion or it was
 * reported successfully, or a negative error code if the command failed
 * or returned a short response.
 */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				  outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	/* Classify the failure for the log message */
	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers (register numbering in the log is
	 * 1-based, hence "1 + index") */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 0;
}
/* Tell an asserted MC to reboot out of the assertion handler.
 * Best-effort: the return code from MC_CMD_REBOOT is deliberately
 * ignored (see comment below).
 */
static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot. We set a flag that makes the command a no-op if it
	 * has already done so. We don't know what return code to
	 * expect (0 or -EIO), so ignore it.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
			    NULL, 0, NULL);
}
  889. int efx_mcdi_handle_assertion(struct efx_nic *efx)
  890. {
  891. int rc;
  892. rc = efx_mcdi_read_assertion(efx);
  893. if (rc)
  894. return rc;
  895. efx_mcdi_exit_assertion(efx);
  896. return 0;
  897. }
/* Set the board's identification LED to @mode (off/on/default).
 * Failures are logged but not reported to the caller.
 */
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
	int rc;

	/* efx_led_mode values are defined to match the MCDI LED states,
	 * so @mode can be passed straight through */
	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);

	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
}
/* Reset this port's datapath entity via MC_CMD_ENTITY_RESET.
 * Returns 0 on success or a negative MCDI error code (also logged).
 */
static int efx_mcdi_reset_port(struct efx_nic *efx)
{
	int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);

	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
	return rc;
}
  921. static int efx_mcdi_reset_mc(struct efx_nic *efx)
  922. {
  923. MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
  924. int rc;
  925. BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
  926. MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
  927. rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
  928. NULL, 0, NULL);
  929. /* White is black, and up is down */
  930. if (rc == -EIO)
  931. return 0;
  932. if (rc == 0)
  933. rc = -EIO;
  934. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  935. return rc;
  936. }
/* Map a requested reset reason to the reset type to perform.
 * Every reason maps to RESET_TYPE_RECOVER_OR_ALL here; @reason is
 * currently unused.
 */
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}
  941. int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
  942. {
  943. int rc;
  944. /* Recover from a failed assertion pre-reset */
  945. rc = efx_mcdi_handle_assertion(efx);
  946. if (rc)
  947. return rc;
  948. if (method == RESET_TYPE_WORLD)
  949. return efx_mcdi_reset_mc(efx);
  950. else
  951. return efx_mcdi_reset_port(efx);
  952. }
  953. static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
  954. const u8 *mac, int *id_out)
  955. {
  956. MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
  957. MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
  958. size_t outlen;
  959. int rc;
  960. MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
  961. MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
  962. MC_CMD_FILTER_MODE_SIMPLE);
  963. memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
  964. rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
  965. outbuf, sizeof(outbuf), &outlen);
  966. if (rc)
  967. goto fail;
  968. if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
  969. rc = -EIO;
  970. goto fail;
  971. }
  972. *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
  973. return 0;
  974. fail:
  975. *id_out = -1;
  976. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  977. return rc;
  978. }
/* Install a Wake-on-LAN magic-packet filter for @mac.
 * On success *@id_out receives the new filter ID; on failure it is
 * set to -1 and a negative error code is returned.
 */
int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
{
	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}
  984. int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
  985. {
  986. MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
  987. size_t outlen;
  988. int rc;
  989. rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
  990. outbuf, sizeof(outbuf), &outlen);
  991. if (rc)
  992. goto fail;
  993. if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
  994. rc = -EIO;
  995. goto fail;
  996. }
  997. *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
  998. return 0;
  999. fail:
  1000. *id_out = -1;
  1001. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1002. return rc;
  1003. }
  1004. int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
  1005. {
  1006. MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
  1007. int rc;
  1008. MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
  1009. rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
  1010. NULL, 0, NULL);
  1011. if (rc)
  1012. goto fail;
  1013. return 0;
  1014. fail:
  1015. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1016. return rc;
  1017. }
/* Ask the MC to flush every RX queue marked flush_pending.
 * For each such queue the flush_pending flag is cleared and
 * efx->rxq_flush_pending is decremented as its ID is added to the
 * request, so this consumes the pending state even if the RPC fails.
 * Returns 0 on success or a negative MCDI error code.
 */
int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	/* The request buffer is sized for one RX queue per channel, so
	 * the command's QID array must be able to hold that many */
	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	/* Collect the IDs of all queues with a flush pending */
	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
			}
		}
	}

	/* Request length covers only the entries actually filled in */
	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);

	return rc;
}
  1045. int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
  1046. {
  1047. int rc;
  1048. rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
  1049. if (rc)
  1050. goto fail;
  1051. return 0;
  1052. fail:
  1053. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1054. return rc;
  1055. }