mcdi.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399
  1. /****************************************************************************
  2. * Driver for Solarflare Solarstorm network controllers and boards
  3. * Copyright 2008-2011 Solarflare Communications Inc.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published
  7. * by the Free Software Foundation, incorporated herein by reference.
  8. */
  9. #include <linux/delay.h>
  10. #include <asm/cmpxchg.h>
  11. #include "net_driver.h"
  12. #include "nic.h"
  13. #include "io.h"
  14. #include "farch_regs.h"
  15. #include "mcdi_pcol.h"
  16. #include "phy.h"
  17. /**************************************************************************
  18. *
  19. * Management-Controller-to-Driver Interface
  20. *
  21. **************************************************************************
  22. */
/* Maximum time to wait for any single MCDI request to complete */
#define MCDI_RPC_TIMEOUT	(10 * HZ)

/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 20ms for the status word to be set.
 */
#define MCDI_STATUS_DELAY_US		100
#define MCDI_STATUS_DELAY_COUNT		200
#define MCDI_STATUS_SLEEP_MS						\
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)

/* Mask for the sequence-number field of the MCDI header */
#define SEQ_MASK							\
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
  34. static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
  35. {
  36. EFX_BUG_ON_PARANOID(!efx->mcdi);
  37. return &efx->mcdi->iface;
  38. }
  39. int efx_mcdi_init(struct efx_nic *efx)
  40. {
  41. struct efx_mcdi_iface *mcdi;
  42. efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
  43. if (!efx->mcdi)
  44. return -ENOMEM;
  45. mcdi = efx_mcdi(efx);
  46. init_waitqueue_head(&mcdi->wq);
  47. spin_lock_init(&mcdi->iface_lock);
  48. mcdi->state = MCDI_STATE_QUIESCENT;
  49. mcdi->mode = MCDI_MODE_POLL;
  50. (void) efx_mcdi_poll_reboot(efx);
  51. mcdi->new_epoch = true;
  52. /* Recover from a failed assertion before probing */
  53. return efx_mcdi_handle_assertion(efx);
  54. }
  55. void efx_mcdi_fini(struct efx_nic *efx)
  56. {
  57. BUG_ON(efx->mcdi && efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
  58. kfree(efx->mcdi);
  59. }
/* Write an MCDI request into shared memory and kick the MC.
 *
 * Framing is v1 or v2 depending on the NIC type's maximum supported MCDI
 * version. The caller must already own the interface (state != QUIESCENT);
 * completion arrives later, by polling or by CMDDONE event.
 */
static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
				  const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	spin_unlock_bh(&mcdi->iface_lock);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	/* In event mode, ask the MC to signal completion with an event */
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1: command code and length fit in one header dword */
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		hdr_len = 4;
	} else {
		/* MCDI v2: escape into the extended command space; the real
		 * command code and length live in the second header dword.
		 */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);

	/* The first request after a reboot carried NOT_EPOCH=0 above;
	 * all subsequent requests set it.
	 */
	mcdi->new_epoch = false;
}
  106. static int efx_mcdi_errno(unsigned int mcdi_err)
  107. {
  108. switch (mcdi_err) {
  109. case 0:
  110. return 0;
  111. #define TRANSLATE_ERROR(name) \
  112. case MC_CMD_ERR_ ## name: \
  113. return -name;
  114. TRANSLATE_ERROR(EPERM);
  115. TRANSLATE_ERROR(ENOENT);
  116. TRANSLATE_ERROR(EINTR);
  117. TRANSLATE_ERROR(EAGAIN);
  118. TRANSLATE_ERROR(EACCES);
  119. TRANSLATE_ERROR(EBUSY);
  120. TRANSLATE_ERROR(EINVAL);
  121. TRANSLATE_ERROR(EDEADLK);
  122. TRANSLATE_ERROR(ENOSYS);
  123. TRANSLATE_ERROR(ETIME);
  124. TRANSLATE_ERROR(EALREADY);
  125. TRANSLATE_ERROR(ENOSPC);
  126. #undef TRANSLATE_ERROR
  127. case MC_CMD_ERR_ALLOC_FAIL:
  128. return -ENOBUFS;
  129. case MC_CMD_ERR_MAC_EXIST:
  130. return -EADDRINUSE;
  131. default:
  132. return -EPROTO;
  133. }
  134. }
/* Parse the response header now sitting in shared memory and record the
 * outcome (resprc, resp_hdr_len, resp_data_len) in the MCDI state.
 *
 * Caller holds mcdi->iface_lock, so these fields cannot be torn by a
 * concurrent completion path.
 */
static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		/* v1 framing: payload length is in the first header dword */
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		/* v2 framing: the real length is in the second header dword */
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

	if (error && mcdi->resp_data_len == 0) {
		/* An error response with no payload indicates an MC reboot */
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		/* Response is not for the request we sent */
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		/* The MCDI error code is the first dword of the payload */
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc =
			efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
	} else {
		mcdi->resprc = 0;
	}
}
/* Wait for the current request to complete by polling shared memory.
 *
 * Returns 0 once a result (success, error, or reboot) has been recorded
 * in the MCDI state — mirroring wait_event_timeout()'s convention — or
 * -ETIMEDOUT if nothing completed within MCDI_RPC_TIMEOUT.
 */
static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		/* Record the reboot as the result of this request */
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
	 * because generally mcdi responses are fast. After that, back off
	 * and poll once a jiffy (approximately)
	 */
	spins = TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		/* Sample the clock before checking for completion, so a
		 * response that arrives just at the deadline still wins */
		time = jiffies;

		rmb();
		if (efx->type->mcdi_poll_response(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	spin_lock_bh(&mcdi->iface_lock);
	efx_mcdi_read_response_header(efx);
	spin_unlock_bh(&mcdi->iface_lock);

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}
  211. /* Test and clear MC-rebooted flag for this port/function; reset
  212. * software state as necessary.
  213. */
  214. int efx_mcdi_poll_reboot(struct efx_nic *efx)
  215. {
  216. if (!efx->mcdi)
  217. return 0;
  218. return efx->type->mcdi_poll_reboot(efx);
  219. }
/* Claim exclusive ownership of the MCDI interface for one request,
 * sleeping until it is free. The cmpxchg() means only one of several
 * concurrent claimants can observe the QUIESCENT->RUNNING transition.
 */
static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING. */
	wait_event(mcdi->wq,
		   cmpxchg(&mcdi->state,
			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING) ==
		   MCDI_STATE_QUIESCENT);
}
/* Sleep until the outstanding request completes (via event, or via a
 * concurrent switch back to polled mode), or MCDI_RPC_TIMEOUT elapses.
 * Returns 0 on completion or -ETIMEDOUT.
 */
static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
			       MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
	 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
	 * completed the request first, then we'll just end up completing the
	 * request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}
/* Mark the outstanding request as completed and wake its waiter.
 * Returns true if this call performed the completion, false if the
 * request had already been completed (or none was running).
 */
static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
{
	/* If the interface is RUNNING, then move to COMPLETED and wake any
	 * waiters. If the interface isn't in RUNNING then we've received a
	 * duplicate completion after we've already transitioned back to
	 * QUIESCENT. [A subsequent invocation would increment seqno, so would
	 * have failed the seqno check].
	 */
	if (cmpxchg(&mcdi->state, MCDI_STATE_RUNNING, MCDI_STATE_COMPLETED) ==
	    MCDI_STATE_RUNNING) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}
/* Return the interface to QUIESCENT, then wake anyone sleeping in
 * efx_mcdi_acquire() so the next requester can claim it.
 */
static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	mcdi->state = MCDI_STATE_QUIESCENT;
	wake_up(&mcdi->wq);
}
/* Handle a CMDDONE event, matching it against the outstanding request by
 * sequence number and recording the result.
 *
 * A seqno mismatch either means the request was cancelled after a timeout
 * (accounted for by consuming one of mcdi->credits) or is a genuine
 * protocol error worth logging.
 */
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx "
				  "seq 0x%x\n", seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	/* Wake the waiter only after the result fields are published */
	if (wake)
		efx_mcdi_complete(mcdi);
}
  296. static int
  297. efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
  298. {
  299. if (efx->type->mcdi_max_ver < 0 ||
  300. (efx->type->mcdi_max_ver < 2 &&
  301. cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
  302. return -EINVAL;
  303. if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
  304. (efx->type->mcdi_max_ver < 2 &&
  305. inlen > MCDI_CTL_SDU_LEN_MAX_V1))
  306. return -EMSGSIZE;
  307. return 0;
  308. }
  309. int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
  310. const efx_dword_t *inbuf, size_t inlen,
  311. efx_dword_t *outbuf, size_t outlen,
  312. size_t *outlen_actual)
  313. {
  314. int rc;
  315. rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
  316. if (rc)
  317. return rc;
  318. return efx_mcdi_rpc_finish(efx, cmd, inlen,
  319. outbuf, outlen, outlen_actual);
  320. }
  321. int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
  322. const efx_dword_t *inbuf, size_t inlen)
  323. {
  324. struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
  325. int rc;
  326. rc = efx_mcdi_check_supported(efx, cmd, inlen);
  327. if (rc)
  328. return rc;
  329. efx_mcdi_acquire(mcdi);
  330. efx_mcdi_send_request(efx, cmd, inbuf, inlen);
  331. return 0;
  332. }
/* Wait for the request posted by efx_mcdi_rpc_start() to complete, copy
 * out up to @outlen bytes of response, and release the interface.
 *
 * On timeout the request is cancelled: seqno is bumped so that a late
 * completion fails its sequence check, and credits records that one such
 * stale completion must be swallowed by efx_mcdi_ev_cpl().
 */
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);

		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);
	} else {
		size_t hdr_len, data_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock. */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		spin_unlock_bh(&mcdi->iface_lock);

		/* resprc is always 0 or a negative errno */
		BUG_ON(rc > 0);

		if (rc == 0) {
			/* Copy out at most the caller's buffer size */
			efx->type->mcdi_read_response(efx, outbuf, hdr_len,
						      min(outlen, data_len));
			if (outlen_actual != NULL)
				*outlen_actual = data_len;
		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
			; /* Don't reset if MC_CMD_REBOOT returns EIO */
		else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else
			netif_dbg(efx, hw, efx->net_dev,
				  "MC command 0x%x inlen %d failed rc=%d\n",
				  cmd, (int)inlen, -rc);

		if (rc == -EIO || rc == -EINTR) {
			/* The MC rebooted or asserted: give the status word
			 * time to settle, consume it, and start a new epoch
			 * with the next request.
			 */
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
			mcdi->new_epoch = true;
		}
	}

	efx_mcdi_release(mcdi);
	return rc;
}
/* Switch the MCDI interface to polled-completion mode. */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete(mcdi);
}
/* Switch the MCDI interface to event-based completion mode. */
void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}
/* Handle an MC death notification (BADASSERT or REBOOT event): fail any
 * outstanding request with @rc, or schedule a full reset if no request
 * was waiting.
 */
static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * There's a race here with efx_mcdi_rpc(), because we might receive
	 * a REBOOT event *before* the request has been copied out. In polled
	 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
	 * is ignored. In event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI request. Did the mc
	 * reboot before or after the copyout? The best we can do always is
	 * just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			/* Fail the waiter; credit the CMDDONE event that
			 * will never arrive (or arrives stale).
			 */
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
		mcdi->new_epoch = true;
	}

	spin_unlock(&mcdi->iface_lock);
}
/* Called from falcon_process_eventq for MCDI events.
 *
 * Demultiplexes MC-generated events: command completions, link/sensor
 * notifications, fatal conditions (assertion, reboot, DMA errors) and
 * PTP/SR-IOV events. Unknown event codes are logged and dropped.
 */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_info(efx, hw, efx->net_dev,
			   "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily. We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;

	case MCDI_EVENT_CODE_TX_ERR:
	case MCDI_EVENT_CODE_RX_ERR:
		netif_err(efx, hw, efx->net_dev,
			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
			  EFX_QWORD_VAL(*event));
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}
  530. /**************************************************************************
  531. *
  532. * Specific request functions
  533. *
  534. **************************************************************************
  535. */
  536. void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
  537. {
  538. MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
  539. size_t outlength;
  540. const __le16 *ver_words;
  541. int rc;
  542. BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
  543. rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
  544. outbuf, sizeof(outbuf), &outlength);
  545. if (rc)
  546. goto fail;
  547. if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
  548. rc = -EIO;
  549. goto fail;
  550. }
  551. ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
  552. snprintf(buf, len, "%u.%u.%u.%u",
  553. le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
  554. le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
  555. return;
  556. fail:
  557. netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  558. buf[0] = 0;
  559. }
  560. int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
  561. bool *was_attached)
  562. {
  563. MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
  564. MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
  565. size_t outlen;
  566. int rc;
  567. MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
  568. driver_operating ? 1 : 0);
  569. MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
  570. MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
  571. rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
  572. outbuf, sizeof(outbuf), &outlen);
  573. if (rc)
  574. goto fail;
  575. if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
  576. rc = -EIO;
  577. goto fail;
  578. }
  579. if (was_attached != NULL)
  580. *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
  581. return 0;
  582. fail:
  583. netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  584. return rc;
  585. }
/* Fetch board configuration from the MC.
 *
 * @mac_address: if non-NULL, receives this port's base MAC address
 *	(ETH_ALEN bytes)
 * @fw_subtype_list: if non-NULL, receives
 *	MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM words; entries
 *	beyond what the MC returned are zero-filled
 * @capabilities: if non-NULL, receives this port's capability mask
 */
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen, i;
	int port_num = efx_port_num(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	/* Pick the per-port MAC address block */
	if (mac_address)
		memcpy(mac_address,
		       port_num ?
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
		       ETH_ALEN);
	if (fw_subtype_list) {
		/* Copy what the MC returned, then zero-pad the rest */
		for (i = 0;
		     i < MCDI_VAR_ARRAY_LEN(outlen,
					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
		     i++)
			fw_subtype_list[i] = MCDI_ARRAY_WORD(
				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
			fw_subtype_list[i] = 0;
	}
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);

	return rc;
}
  632. int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
  633. {
  634. MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
  635. u32 dest = 0;
  636. int rc;
  637. if (uart)
  638. dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
  639. if (evq)
  640. dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
  641. MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
  642. MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
  643. BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
  644. rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
  645. NULL, 0, NULL);
  646. if (rc)
  647. goto fail;
  648. return 0;
  649. fail:
  650. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  651. return rc;
  652. }
  653. int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
  654. {
  655. MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
  656. size_t outlen;
  657. int rc;
  658. BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
  659. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
  660. outbuf, sizeof(outbuf), &outlen);
  661. if (rc)
  662. goto fail;
  663. if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
  664. rc = -EIO;
  665. goto fail;
  666. }
  667. *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
  668. return 0;
  669. fail:
  670. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
  671. __func__, rc);
  672. return rc;
  673. }
  674. int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
  675. size_t *size_out, size_t *erase_size_out,
  676. bool *protected_out)
  677. {
  678. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
  679. MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
  680. size_t outlen;
  681. int rc;
  682. MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
  683. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
  684. outbuf, sizeof(outbuf), &outlen);
  685. if (rc)
  686. goto fail;
  687. if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
  688. rc = -EIO;
  689. goto fail;
  690. }
  691. *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
  692. *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
  693. *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
  694. (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
  695. return 0;
  696. fail:
  697. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  698. return rc;
  699. }
  700. static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
  701. {
  702. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
  703. MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
  704. int rc;
  705. MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
  706. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
  707. outbuf, sizeof(outbuf), NULL);
  708. if (rc)
  709. return rc;
  710. switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
  711. case MC_CMD_NVRAM_TEST_PASS:
  712. case MC_CMD_NVRAM_TEST_NOTSUPP:
  713. return 0;
  714. default:
  715. return -EIO;
  716. }
  717. }
  718. int efx_mcdi_nvram_test_all(struct efx_nic *efx)
  719. {
  720. u32 nvram_types;
  721. unsigned int type;
  722. int rc;
  723. rc = efx_mcdi_nvram_types(efx, &nvram_types);
  724. if (rc)
  725. goto fail1;
  726. type = 0;
  727. while (nvram_types != 0) {
  728. if (nvram_types & 1) {
  729. rc = efx_mcdi_nvram_test(efx, type);
  730. if (rc)
  731. goto fail2;
  732. }
  733. type++;
  734. nvram_types >>= 1;
  735. }
  736. return 0;
  737. fail2:
  738. netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
  739. __func__, type);
  740. fail1:
  741. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  742. return rc;
  743. }
/* Read (and clear) any stored firmware assertion state, logging the
 * assertion reason, program counter, thread and GP registers if one is
 * found. Returns 0 if there was no assertion or it was reported
 * successfully, otherwise a negative errno.
 */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				  outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	/* Map the failure class to a human-readable description */
	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 0;
}
  794. static void efx_mcdi_exit_assertion(struct efx_nic *efx)
  795. {
  796. MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
  797. /* If the MC is running debug firmware, it might now be
  798. * waiting for a debugger to attach, but we just want it to
  799. * reboot. We set a flag that makes the command a no-op if it
  800. * has already done so. We don't know what return code to
  801. * expect (0 or -EIO), so ignore it.
  802. */
  803. BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
  804. MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
  805. MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
  806. (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
  807. NULL, 0, NULL);
  808. }
  809. int efx_mcdi_handle_assertion(struct efx_nic *efx)
  810. {
  811. int rc;
  812. rc = efx_mcdi_read_assertion(efx);
  813. if (rc)
  814. return rc;
  815. efx_mcdi_exit_assertion(efx);
  816. return 0;
  817. }
  818. void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
  819. {
  820. MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
  821. int rc;
  822. BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
  823. BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
  824. BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
  825. BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
  826. MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
  827. rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
  828. NULL, 0, NULL);
  829. if (rc)
  830. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
  831. __func__, rc);
  832. }
  833. static int efx_mcdi_reset_port(struct efx_nic *efx)
  834. {
  835. int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
  836. if (rc)
  837. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
  838. __func__, rc);
  839. return rc;
  840. }
  841. static int efx_mcdi_reset_mc(struct efx_nic *efx)
  842. {
  843. MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
  844. int rc;
  845. BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
  846. MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
  847. rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
  848. NULL, 0, NULL);
  849. /* White is black, and up is down */
  850. if (rc == -EIO)
  851. return 0;
  852. if (rc == 0)
  853. rc = -EIO;
  854. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  855. return rc;
  856. }
  857. enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
  858. {
  859. return RESET_TYPE_RECOVER_OR_ALL;
  860. }
  861. int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
  862. {
  863. int rc;
  864. /* Recover from a failed assertion pre-reset */
  865. rc = efx_mcdi_handle_assertion(efx);
  866. if (rc)
  867. return rc;
  868. if (method == RESET_TYPE_WORLD)
  869. return efx_mcdi_reset_mc(efx);
  870. else
  871. return efx_mcdi_reset_port(efx);
  872. }
  873. static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
  874. const u8 *mac, int *id_out)
  875. {
  876. MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
  877. MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
  878. size_t outlen;
  879. int rc;
  880. MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
  881. MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
  882. MC_CMD_FILTER_MODE_SIMPLE);
  883. memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
  884. rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
  885. outbuf, sizeof(outbuf), &outlen);
  886. if (rc)
  887. goto fail;
  888. if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
  889. rc = -EIO;
  890. goto fail;
  891. }
  892. *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
  893. return 0;
  894. fail:
  895. *id_out = -1;
  896. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  897. return rc;
  898. }
  899. int
  900. efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
  901. {
  902. return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
  903. }
  904. int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
  905. {
  906. MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
  907. size_t outlen;
  908. int rc;
  909. rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
  910. outbuf, sizeof(outbuf), &outlen);
  911. if (rc)
  912. goto fail;
  913. if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
  914. rc = -EIO;
  915. goto fail;
  916. }
  917. *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
  918. return 0;
  919. fail:
  920. *id_out = -1;
  921. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  922. return rc;
  923. }
  924. int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
  925. {
  926. MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
  927. int rc;
  928. MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
  929. rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
  930. NULL, 0, NULL);
  931. if (rc)
  932. goto fail;
  933. return 0;
  934. fail:
  935. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  936. return rc;
  937. }
/* Ask the MC to flush every RX queue that has a flush pending.
 * All pending queue IDs are gathered into a single FLUSH_RX_QUEUES
 * request.  Note each queue's flush_pending flag is cleared and
 * efx->rxq_flush_pending decremented as the queue is added, i.e.
 * before the RPC is issued, so the bookkeeping is updated even if the
 * RPC subsequently fails.
 */
int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	/* Request buffer sized for the worst case of EFX_MAX_CHANNELS
	 * queue IDs.
	 */
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
			}
		}
	}

	/* Only the portion of the buffer actually filled in is sent */
	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);
	return rc;
}
  965. int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
  966. {
  967. int rc;
  968. rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
  969. if (rc)
  970. goto fail;
  971. return 0;
  972. fail:
  973. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  974. return rc;
  975. }
  976. #ifdef CONFIG_SFC_MTD
  977. #define EFX_MCDI_NVRAM_LEN_MAX 128
  978. static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
  979. {
  980. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
  981. int rc;
  982. MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
  983. BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
  984. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
  985. NULL, 0, NULL);
  986. if (rc)
  987. goto fail;
  988. return 0;
  989. fail:
  990. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  991. return rc;
  992. }
  993. static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
  994. loff_t offset, u8 *buffer, size_t length)
  995. {
  996. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
  997. MCDI_DECLARE_BUF(outbuf,
  998. MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
  999. size_t outlen;
  1000. int rc;
  1001. MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
  1002. MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
  1003. MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
  1004. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
  1005. outbuf, sizeof(outbuf), &outlen);
  1006. if (rc)
  1007. goto fail;
  1008. memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
  1009. return 0;
  1010. fail:
  1011. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1012. return rc;
  1013. }
/* Write @length bytes from @buffer to NVRAM partition @type at
 * @offset.  @length must not exceed EFX_MCDI_NVRAM_LEN_MAX, which
 * sizes the request buffer (callers chunk their writes accordingly).
 * Returns 0 on success or a negative error code.
 */
static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
				loff_t offset, const u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

	/* Send only the used part of the request buffer, with the
	 * length rounded up to a 4-byte boundary.
	 */
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}
  1035. static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
  1036. loff_t offset, size_t length)
  1037. {
  1038. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
  1039. int rc;
  1040. MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
  1041. MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
  1042. MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
  1043. BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
  1044. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
  1045. NULL, 0, NULL);
  1046. if (rc)
  1047. goto fail;
  1048. return 0;
  1049. fail:
  1050. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1051. return rc;
  1052. }
  1053. static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
  1054. {
  1055. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
  1056. int rc;
  1057. MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
  1058. BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
  1059. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
  1060. NULL, 0, NULL);
  1061. if (rc)
  1062. goto fail;
  1063. return 0;
  1064. fail:
  1065. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1066. return rc;
  1067. }
  1068. int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
  1069. size_t len, size_t *retlen, u8 *buffer)
  1070. {
  1071. struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
  1072. struct efx_nic *efx = mtd->priv;
  1073. loff_t offset = start;
  1074. loff_t end = min_t(loff_t, start + len, mtd->size);
  1075. size_t chunk;
  1076. int rc = 0;
  1077. while (offset < end) {
  1078. chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
  1079. rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
  1080. buffer, chunk);
  1081. if (rc)
  1082. goto out;
  1083. offset += chunk;
  1084. buffer += chunk;
  1085. }
  1086. out:
  1087. *retlen = offset - start;
  1088. return rc;
  1089. }
  1090. int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
  1091. {
  1092. struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
  1093. struct efx_nic *efx = mtd->priv;
  1094. loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
  1095. loff_t end = min_t(loff_t, start + len, mtd->size);
  1096. size_t chunk = part->common.mtd.erasesize;
  1097. int rc = 0;
  1098. if (!part->updating) {
  1099. rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
  1100. if (rc)
  1101. goto out;
  1102. part->updating = true;
  1103. }
  1104. /* The MCDI interface can in fact do multiple erase blocks at once;
  1105. * but erasing may be slow, so we make multiple calls here to avoid
  1106. * tripping the MCDI RPC timeout. */
  1107. while (offset < end) {
  1108. rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
  1109. chunk);
  1110. if (rc)
  1111. goto out;
  1112. offset += chunk;
  1113. }
  1114. out:
  1115. return rc;
  1116. }
  1117. int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
  1118. size_t len, size_t *retlen, const u8 *buffer)
  1119. {
  1120. struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
  1121. struct efx_nic *efx = mtd->priv;
  1122. loff_t offset = start;
  1123. loff_t end = min_t(loff_t, start + len, mtd->size);
  1124. size_t chunk;
  1125. int rc = 0;
  1126. if (!part->updating) {
  1127. rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
  1128. if (rc)
  1129. goto out;
  1130. part->updating = true;
  1131. }
  1132. while (offset < end) {
  1133. chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
  1134. rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
  1135. buffer, chunk);
  1136. if (rc)
  1137. goto out;
  1138. offset += chunk;
  1139. buffer += chunk;
  1140. }
  1141. out:
  1142. *retlen = offset - start;
  1143. return rc;
  1144. }
  1145. int efx_mcdi_mtd_sync(struct mtd_info *mtd)
  1146. {
  1147. struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
  1148. struct efx_nic *efx = mtd->priv;
  1149. int rc = 0;
  1150. if (part->updating) {
  1151. part->updating = false;
  1152. rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
  1153. }
  1154. return rc;
  1155. }
/* Build the MTD partition name as "<nic-name> <type-name>:<subtype>"
 * in part->name (truncated by snprintf if it exceeds the buffer).
 */
void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
{
	struct efx_mcdi_mtd_partition *mcdi_part =
		container_of(part, struct efx_mcdi_mtd_partition, common);
	struct efx_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
		 efx->name, part->type_name, mcdi_part->fw_subtype);
}
  1164. #endif /* CONFIG_SFC_MTD */