mcdi.c

  1. /****************************************************************************
  2. * Driver for Solarflare network controllers and boards
  3. * Copyright 2008-2013 Solarflare Communications Inc.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms of the GNU General Public License version 2 as published
  7. * by the Free Software Foundation, incorporated herein by reference.
  8. */
  9. #include <linux/delay.h>
  10. #include <asm/cmpxchg.h>
  11. #include "net_driver.h"
  12. #include "nic.h"
  13. #include "io.h"
  14. #include "farch_regs.h"
  15. #include "mcdi_pcol.h"
  16. #include "phy.h"
  17. /**************************************************************************
  18. *
  19. * Management-Controller-to-Driver Interface
  20. *
  21. **************************************************************************
  22. */
  23. #define MCDI_RPC_TIMEOUT (10 * HZ)
  24. /* A reboot/assertion causes the MCDI status word to be set after the
  25. * command word is set or a REBOOT event is sent. If we notice a reboot
  26. * via these mechanisms then wait 250ms for the status word to be set.
  27. */
  28. #define MCDI_STATUS_DELAY_US 100
  29. #define MCDI_STATUS_DELAY_COUNT 2500
  30. #define MCDI_STATUS_SLEEP_MS \
  31. (MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
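/* i.e. 100 us * 2500 iterations = 250 ms, the wait described in the comment above. */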
  32. #define SEQ_MASK \
  33. EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
  34. struct efx_mcdi_async_param {
  35. struct list_head list;
  36. unsigned int cmd;
  37. size_t inlen;
  38. size_t outlen;
  39. efx_mcdi_async_completer *complete;
  40. unsigned long cookie;
  41. /* followed by request/response buffer */
  42. };
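/* Layout note: the request/response buffer sits immediately after this
 * structure. efx_mcdi_rpc_async() allocates sizeof(*async) +
 * ALIGN(max(inlen, outlen), 4) bytes, and the buffer is reached as
 * (efx_dword_t *)(async + 1) both when the request is copied in and when
 * the response is read back in efx_mcdi_complete_async().
 */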
  43. static void efx_mcdi_timeout_async(unsigned long context);
  44. static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
  45. bool *was_attached_out);
  46. static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
  47. {
  48. EFX_BUG_ON_PARANOID(!efx->mcdi);
  49. return &efx->mcdi->iface;
  50. }
  51. int efx_mcdi_init(struct efx_nic *efx)
  52. {
  53. struct efx_mcdi_iface *mcdi;
  54. bool already_attached;
  55. int rc;
  56. efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
  57. if (!efx->mcdi)
  58. return -ENOMEM;
  59. mcdi = efx_mcdi(efx);
  60. mcdi->efx = efx;
  61. init_waitqueue_head(&mcdi->wq);
  62. spin_lock_init(&mcdi->iface_lock);
  63. mcdi->state = MCDI_STATE_QUIESCENT;
  64. mcdi->mode = MCDI_MODE_POLL;
  65. spin_lock_init(&mcdi->async_lock);
  66. INIT_LIST_HEAD(&mcdi->async_list);
  67. setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
  68. (unsigned long)mcdi);
  69. (void) efx_mcdi_poll_reboot(efx);
  70. mcdi->new_epoch = true;
  71. /* Recover from a failed assertion before probing */
  72. rc = efx_mcdi_handle_assertion(efx);
  73. if (rc)
  74. return rc;
  75. /* Let the MC (and BMC, if this is a LOM) know that the driver
  76. * is loaded. We should do this before we reset the NIC.
  77. */
  78. rc = efx_mcdi_drv_attach(efx, true, &already_attached);
  79. if (rc) {
  80. netif_err(efx, probe, efx->net_dev,
  81. "Unable to register driver with MCPU\n");
  82. return rc;
  83. }
  84. if (already_attached)
  85. /* Not a fatal error */
  86. netif_err(efx, probe, efx->net_dev,
  87. "Host already registered with MCPU\n");
  88. return 0;
  89. }
  90. void efx_mcdi_fini(struct efx_nic *efx)
  91. {
  92. if (!efx->mcdi)
  93. return;
  94. BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
  95. /* Relinquish the device (back to the BMC, if this is a LOM) */
  96. efx_mcdi_drv_attach(efx, false, NULL);
  97. kfree(efx->mcdi);
  98. }
  99. static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
  100. const efx_dword_t *inbuf, size_t inlen)
  101. {
  102. struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
  103. efx_dword_t hdr[2];
  104. size_t hdr_len;
  105. u32 xflags, seqno;
  106. BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);
  107. /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
  108. spin_lock_bh(&mcdi->iface_lock);
  109. ++mcdi->seqno;
  110. spin_unlock_bh(&mcdi->iface_lock);
  111. seqno = mcdi->seqno & SEQ_MASK;
  112. xflags = 0;
  113. if (mcdi->mode == MCDI_MODE_EVENTS)
  114. xflags |= MCDI_HEADER_XFLAGS_EVREQ;
  115. if (efx->type->mcdi_max_ver == 1) {
  116. /* MCDI v1 */
  117. EFX_POPULATE_DWORD_7(hdr[0],
  118. MCDI_HEADER_RESPONSE, 0,
  119. MCDI_HEADER_RESYNC, 1,
  120. MCDI_HEADER_CODE, cmd,
  121. MCDI_HEADER_DATALEN, inlen,
  122. MCDI_HEADER_SEQ, seqno,
  123. MCDI_HEADER_XFLAGS, xflags,
  124. MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
  125. hdr_len = 4;
  126. } else {
  127. /* MCDI v2 */
  128. BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
  129. EFX_POPULATE_DWORD_7(hdr[0],
  130. MCDI_HEADER_RESPONSE, 0,
  131. MCDI_HEADER_RESYNC, 1,
  132. MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
  133. MCDI_HEADER_DATALEN, 0,
  134. MCDI_HEADER_SEQ, seqno,
  135. MCDI_HEADER_XFLAGS, xflags,
  136. MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
  137. EFX_POPULATE_DWORD_2(hdr[1],
  138. MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
  139. MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
  140. hdr_len = 8;
  141. }
  142. efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
  143. mcdi->new_epoch = false;
  144. }
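/* Header format summary (as encoded above): an MCDI v1 request uses a single
 * 4-byte header dword carrying the command code and payload length directly,
 * so it is limited to the v1 command space and SDU size. An MCDI v2 request
 * puts MC_CMD_V2_EXTN in the first dword and carries the real command and
 * length in a second dword, allowing the larger command numbers and payloads
 * checked for in efx_mcdi_check_supported() below.
 */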
  145. static int efx_mcdi_errno(unsigned int mcdi_err)
  146. {
  147. switch (mcdi_err) {
  148. case 0:
  149. return 0;
  150. #define TRANSLATE_ERROR(name) \
  151. case MC_CMD_ERR_ ## name: \
  152. return -name;
  153. TRANSLATE_ERROR(EPERM);
  154. TRANSLATE_ERROR(ENOENT);
  155. TRANSLATE_ERROR(EINTR);
  156. TRANSLATE_ERROR(EAGAIN);
  157. TRANSLATE_ERROR(EACCES);
  158. TRANSLATE_ERROR(EBUSY);
  159. TRANSLATE_ERROR(EINVAL);
  160. TRANSLATE_ERROR(EDEADLK);
  161. TRANSLATE_ERROR(ENOSYS);
  162. TRANSLATE_ERROR(ETIME);
  163. TRANSLATE_ERROR(EALREADY);
  164. TRANSLATE_ERROR(ENOSPC);
  165. #undef TRANSLATE_ERROR
  166. case MC_CMD_ERR_ALLOC_FAIL:
  167. return -ENOBUFS;
  168. case MC_CMD_ERR_MAC_EXIST:
  169. return -EADDRINUSE;
  170. default:
  171. return -EPROTO;
  172. }
  173. }
  174. static void efx_mcdi_read_response_header(struct efx_nic *efx)
  175. {
  176. struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
  177. unsigned int respseq, respcmd, error;
  178. efx_dword_t hdr;
  179. efx->type->mcdi_read_response(efx, &hdr, 0, 4);
  180. respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
  181. respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
  182. error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);
  183. if (respcmd != MC_CMD_V2_EXTN) {
  184. mcdi->resp_hdr_len = 4;
  185. mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
  186. } else {
  187. efx->type->mcdi_read_response(efx, &hdr, 4, 4);
  188. mcdi->resp_hdr_len = 8;
  189. mcdi->resp_data_len =
  190. EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
  191. }
  192. if (error && mcdi->resp_data_len == 0) {
  193. netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
  194. mcdi->resprc = -EIO;
  195. } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
  196. netif_err(efx, hw, efx->net_dev,
  197. "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
  198. respseq, mcdi->seqno);
  199. mcdi->resprc = -EIO;
  200. } else if (error) {
  201. efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
  202. mcdi->resprc =
  203. efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
  204. } else {
  205. mcdi->resprc = 0;
  206. }
  207. }
  208. static int efx_mcdi_poll(struct efx_nic *efx)
  209. {
  210. struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
  211. unsigned long time, finish;
  212. unsigned int spins;
  213. int rc;
  214. /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
  215. rc = efx_mcdi_poll_reboot(efx);
  216. if (rc) {
  217. spin_lock_bh(&mcdi->iface_lock);
  218. mcdi->resprc = rc;
  219. mcdi->resp_hdr_len = 0;
  220. mcdi->resp_data_len = 0;
  221. spin_unlock_bh(&mcdi->iface_lock);
  222. return 0;
  223. }
  224. /* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
  225. * because generally mcdi responses are fast. After that, back off
  226. * and poll once a jiffy (approximately)
  227. */
  228. spins = TICK_USEC;
  229. finish = jiffies + MCDI_RPC_TIMEOUT;
  230. while (1) {
  231. if (spins != 0) {
  232. --spins;
  233. udelay(1);
  234. } else {
  235. schedule_timeout_uninterruptible(1);
  236. }
  237. time = jiffies;
  238. rmb();
  239. if (efx->type->mcdi_poll_response(efx))
  240. break;
  241. if (time_after(time, finish))
  242. return -ETIMEDOUT;
  243. }
  244. spin_lock_bh(&mcdi->iface_lock);
  245. efx_mcdi_read_response_header(efx);
  246. spin_unlock_bh(&mcdi->iface_lock);
  247. /* Return rc=0 like wait_event_timeout() */
  248. return 0;
  249. }
  250. /* Test and clear MC-rebooted flag for this port/function; reset
  251. * software state as necessary.
  252. */
  253. int efx_mcdi_poll_reboot(struct efx_nic *efx)
  254. {
  255. if (!efx->mcdi)
  256. return 0;
  257. return efx->type->mcdi_poll_reboot(efx);
  258. }
  259. static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
  260. {
  261. return cmpxchg(&mcdi->state,
  262. MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
  263. MCDI_STATE_QUIESCENT;
  264. }
  265. static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
  266. {
  267. /* Wait until the interface becomes QUIESCENT and we win the race
  268. * to mark it RUNNING_SYNC.
  269. */
  270. wait_event(mcdi->wq,
  271. cmpxchg(&mcdi->state,
  272. MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
  273. MCDI_STATE_QUIESCENT);
  274. }
  275. static int efx_mcdi_await_completion(struct efx_nic *efx)
  276. {
  277. struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
  278. if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
  279. MCDI_RPC_TIMEOUT) == 0)
  280. return -ETIMEDOUT;
281. /* Check if efx_mcdi_set_mode() switched us back to polled completions,
282. * in which case poll for completions directly. If efx_mcdi_ev_cpl()
  283. * completed the request first, then we'll just end up completing the
  284. * request again, which is safe.
  285. *
  286. * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
  287. * wait_event_timeout() implicitly provides.
  288. */
  289. if (mcdi->mode == MCDI_MODE_POLL)
  290. return efx_mcdi_poll(efx);
  291. return 0;
  292. }
  293. /* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
  294. * requester. Return whether this was done. Does not take any locks.
  295. */
  296. static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
  297. {
  298. if (cmpxchg(&mcdi->state,
  299. MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
  300. MCDI_STATE_RUNNING_SYNC) {
  301. wake_up(&mcdi->wq);
  302. return true;
  303. }
  304. return false;
  305. }
  306. static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
  307. {
  308. if (mcdi->mode == MCDI_MODE_EVENTS) {
  309. struct efx_mcdi_async_param *async;
  310. struct efx_nic *efx = mcdi->efx;
  311. /* Process the asynchronous request queue */
  312. spin_lock_bh(&mcdi->async_lock);
  313. async = list_first_entry_or_null(
  314. &mcdi->async_list, struct efx_mcdi_async_param, list);
  315. if (async) {
  316. mcdi->state = MCDI_STATE_RUNNING_ASYNC;
  317. efx_mcdi_send_request(efx, async->cmd,
  318. (const efx_dword_t *)(async + 1),
  319. async->inlen);
  320. mod_timer(&mcdi->async_timer,
  321. jiffies + MCDI_RPC_TIMEOUT);
  322. }
  323. spin_unlock_bh(&mcdi->async_lock);
  324. if (async)
  325. return;
  326. }
  327. mcdi->state = MCDI_STATE_QUIESCENT;
  328. wake_up(&mcdi->wq);
  329. }
  330. /* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
  331. * asynchronous completion function, and release the interface.
  332. * Return whether this was done. Must be called in bh-disabled
  333. * context. Will take iface_lock and async_lock.
  334. */
  335. static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
  336. {
  337. struct efx_nic *efx = mcdi->efx;
  338. struct efx_mcdi_async_param *async;
  339. size_t hdr_len, data_len;
  340. efx_dword_t *outbuf;
  341. int rc;
  342. if (cmpxchg(&mcdi->state,
  343. MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
  344. MCDI_STATE_RUNNING_ASYNC)
  345. return false;
  346. spin_lock(&mcdi->iface_lock);
  347. if (timeout) {
  348. /* Ensure that if the completion event arrives later,
  349. * the seqno check in efx_mcdi_ev_cpl() will fail
  350. */
  351. ++mcdi->seqno;
  352. ++mcdi->credits;
  353. rc = -ETIMEDOUT;
  354. hdr_len = 0;
  355. data_len = 0;
  356. } else {
  357. rc = mcdi->resprc;
  358. hdr_len = mcdi->resp_hdr_len;
  359. data_len = mcdi->resp_data_len;
  360. }
  361. spin_unlock(&mcdi->iface_lock);
  362. /* Stop the timer. In case the timer function is running, we
  363. * must wait for it to return so that there is no possibility
  364. * of it aborting the next request.
  365. */
  366. if (!timeout)
  367. del_timer_sync(&mcdi->async_timer);
  368. spin_lock(&mcdi->async_lock);
  369. async = list_first_entry(&mcdi->async_list,
  370. struct efx_mcdi_async_param, list);
  371. list_del(&async->list);
  372. spin_unlock(&mcdi->async_lock);
  373. outbuf = (efx_dword_t *)(async + 1);
  374. efx->type->mcdi_read_response(efx, outbuf, hdr_len,
  375. min(async->outlen, data_len));
  376. async->complete(efx, async->cookie, rc, outbuf, data_len);
  377. kfree(async);
  378. efx_mcdi_release(mcdi);
  379. return true;
  380. }
  381. static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
  382. unsigned int datalen, unsigned int mcdi_err)
  383. {
  384. struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
  385. bool wake = false;
  386. spin_lock(&mcdi->iface_lock);
  387. if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
  388. if (mcdi->credits)
  389. /* The request has been cancelled */
  390. --mcdi->credits;
  391. else
  392. netif_err(efx, hw, efx->net_dev,
  393. "MC response mismatch tx seq 0x%x rx "
  394. "seq 0x%x\n", seqno, mcdi->seqno);
  395. } else {
  396. if (efx->type->mcdi_max_ver >= 2) {
  397. /* MCDI v2 responses don't fit in an event */
  398. efx_mcdi_read_response_header(efx);
  399. } else {
  400. mcdi->resprc = efx_mcdi_errno(mcdi_err);
  401. mcdi->resp_hdr_len = 4;
  402. mcdi->resp_data_len = datalen;
  403. }
  404. wake = true;
  405. }
  406. spin_unlock(&mcdi->iface_lock);
  407. if (wake) {
  408. if (!efx_mcdi_complete_async(mcdi, false))
  409. (void) efx_mcdi_complete_sync(mcdi);
  410. /* If the interface isn't RUNNING_ASYNC or
  411. * RUNNING_SYNC then we've received a duplicate
  412. * completion after we've already transitioned back to
  413. * QUIESCENT. [A subsequent invocation would increment
  414. * seqno, so would have failed the seqno check].
  415. */
  416. }
  417. }
  418. static void efx_mcdi_timeout_async(unsigned long context)
  419. {
  420. struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;
  421. efx_mcdi_complete_async(mcdi, true);
  422. }
  423. static int
  424. efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
  425. {
  426. if (efx->type->mcdi_max_ver < 0 ||
  427. (efx->type->mcdi_max_ver < 2 &&
  428. cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
  429. return -EINVAL;
  430. if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
  431. (efx->type->mcdi_max_ver < 2 &&
  432. inlen > MCDI_CTL_SDU_LEN_MAX_V1))
  433. return -EMSGSIZE;
  434. return 0;
  435. }
  436. int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
  437. const efx_dword_t *inbuf, size_t inlen,
  438. efx_dword_t *outbuf, size_t outlen,
  439. size_t *outlen_actual)
  440. {
  441. int rc;
  442. rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
  443. if (rc)
  444. return rc;
  445. return efx_mcdi_rpc_finish(efx, cmd, inlen,
  446. outbuf, outlen, outlen_actual);
  447. }
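/* Illustrative synchronous usage, a sketch only (efx_mcdi_nvram_types()
 * below is a real caller following the same pattern):
 *
 *	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
 *	size_t outlen;
 *	u32 types;
 *	int rc;
 *
 *	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
 *			  outbuf, sizeof(outbuf), &outlen);
 *	if (rc == 0 && outlen >= MC_CMD_NVRAM_TYPES_OUT_LEN)
 *		types = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
 */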
  448. int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
  449. const efx_dword_t *inbuf, size_t inlen)
  450. {
  451. struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
  452. int rc;
  453. rc = efx_mcdi_check_supported(efx, cmd, inlen);
  454. if (rc)
  455. return rc;
  456. efx_mcdi_acquire_sync(mcdi);
  457. efx_mcdi_send_request(efx, cmd, inbuf, inlen);
  458. return 0;
  459. }
  460. /**
  461. * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
  462. * @efx: NIC through which to issue the command
  463. * @cmd: Command type number
  464. * @inbuf: Command parameters
  465. * @inlen: Length of command parameters, in bytes
  466. * @outlen: Length to allocate for response buffer, in bytes
  467. * @complete: Function to be called on completion or cancellation.
  468. * @cookie: Arbitrary value to be passed to @complete.
  469. *
  470. * This function does not sleep and therefore may be called in atomic
  471. * context. It will fail if event queues are disabled or if MCDI
  472. * event completions have been disabled due to an error.
  473. *
  474. * If it succeeds, the @complete function will be called exactly once
  475. * in atomic context, when one of the following occurs:
  476. * (a) the completion event is received (in NAPI context)
  477. * (b) event queues are disabled (in the process that disables them)
  478. * (c) the request times-out (in timer context)
  479. */
  480. int
  481. efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
  482. const efx_dword_t *inbuf, size_t inlen, size_t outlen,
  483. efx_mcdi_async_completer *complete, unsigned long cookie)
  484. {
  485. struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
  486. struct efx_mcdi_async_param *async;
  487. int rc;
  488. rc = efx_mcdi_check_supported(efx, cmd, inlen);
  489. if (rc)
  490. return rc;
  491. async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
  492. GFP_ATOMIC);
  493. if (!async)
  494. return -ENOMEM;
  495. async->cmd = cmd;
  496. async->inlen = inlen;
  497. async->outlen = outlen;
  498. async->complete = complete;
  499. async->cookie = cookie;
  500. memcpy(async + 1, inbuf, inlen);
  501. spin_lock_bh(&mcdi->async_lock);
  502. if (mcdi->mode == MCDI_MODE_EVENTS) {
  503. list_add_tail(&async->list, &mcdi->async_list);
  504. /* If this is at the front of the queue, try to start it
  505. * immediately
  506. */
  507. if (mcdi->async_list.next == &async->list &&
  508. efx_mcdi_acquire_async(mcdi)) {
  509. efx_mcdi_send_request(efx, cmd, inbuf, inlen);
  510. mod_timer(&mcdi->async_timer,
  511. jiffies + MCDI_RPC_TIMEOUT);
  512. }
  513. } else {
  514. kfree(async);
  515. rc = -ENETDOWN;
  516. }
  517. spin_unlock_bh(&mcdi->async_lock);
  518. return rc;
  519. }
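/* Illustrative asynchronous usage, a sketch only (there is no such caller in
 * this file). my_version_complete and parse_version are hypothetical names;
 * the completer prototype is assumed from how async->complete() is invoked
 * in efx_mcdi_complete_async() and efx_mcdi_flush_async():
 *
 *	static void my_version_complete(struct efx_nic *efx,
 *					unsigned long cookie, int rc,
 *					efx_dword_t *outbuf,
 *					size_t outlen_actual)
 *	{
 *		if (rc == 0 && outlen_actual >= MC_CMD_GET_VERSION_OUT_LEN)
 *			parse_version(outbuf);
 *	}
 *
 *	int rc = efx_mcdi_rpc_async(efx, MC_CMD_GET_VERSION, NULL, 0,
 *				    MC_CMD_GET_VERSION_OUT_LEN,
 *				    my_version_complete, 0);
 */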
  520. int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
  521. efx_dword_t *outbuf, size_t outlen,
  522. size_t *outlen_actual)
  523. {
  524. struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
  525. int rc;
  526. if (mcdi->mode == MCDI_MODE_POLL)
  527. rc = efx_mcdi_poll(efx);
  528. else
  529. rc = efx_mcdi_await_completion(efx);
  530. if (rc != 0) {
  531. /* Close the race with efx_mcdi_ev_cpl() executing just too late
  532. * and completing a request we've just cancelled, by ensuring
  533. * that the seqno check therein fails.
  534. */
  535. spin_lock_bh(&mcdi->iface_lock);
  536. ++mcdi->seqno;
  537. ++mcdi->credits;
  538. spin_unlock_bh(&mcdi->iface_lock);
  539. netif_err(efx, hw, efx->net_dev,
  540. "MC command 0x%x inlen %d mode %d timed out\n",
  541. cmd, (int)inlen, mcdi->mode);
  542. } else {
  543. size_t hdr_len, data_len;
  544. /* At the very least we need a memory barrier here to ensure
  545. * we pick up changes from efx_mcdi_ev_cpl(). Protect against
  546. * a spurious efx_mcdi_ev_cpl() running concurrently by
  547. * acquiring the iface_lock. */
  548. spin_lock_bh(&mcdi->iface_lock);
  549. rc = mcdi->resprc;
  550. hdr_len = mcdi->resp_hdr_len;
  551. data_len = mcdi->resp_data_len;
  552. spin_unlock_bh(&mcdi->iface_lock);
  553. BUG_ON(rc > 0);
  554. if (rc == 0) {
  555. efx->type->mcdi_read_response(efx, outbuf, hdr_len,
  556. min(outlen, data_len));
  557. if (outlen_actual != NULL)
  558. *outlen_actual = data_len;
  559. } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
  560. ; /* Don't reset if MC_CMD_REBOOT returns EIO */
  561. else if (rc == -EIO || rc == -EINTR) {
  562. netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
  563. -rc);
  564. efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
  565. } else
  566. netif_dbg(efx, hw, efx->net_dev,
  567. "MC command 0x%x inlen %d failed rc=%d\n",
  568. cmd, (int)inlen, -rc);
  569. if (rc == -EIO || rc == -EINTR) {
  570. msleep(MCDI_STATUS_SLEEP_MS);
  571. efx_mcdi_poll_reboot(efx);
  572. mcdi->new_epoch = true;
  573. }
  574. }
  575. efx_mcdi_release(mcdi);
  576. return rc;
  577. }
  578. /* Switch to polled MCDI completions. This can be called in various
  579. * error conditions with various locks held, so it must be lockless.
  580. * Caller is responsible for flushing asynchronous requests later.
  581. */
  582. void efx_mcdi_mode_poll(struct efx_nic *efx)
  583. {
  584. struct efx_mcdi_iface *mcdi;
  585. if (!efx->mcdi)
  586. return;
  587. mcdi = efx_mcdi(efx);
  588. if (mcdi->mode == MCDI_MODE_POLL)
  589. return;
  590. /* We can switch from event completion to polled completion, because
  591. * mcdi requests are always completed in shared memory. We do this by
  592. * switching the mode to POLL'd then completing the request.
  593. * efx_mcdi_await_completion() will then call efx_mcdi_poll().
  594. *
  595. * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
  596. * which efx_mcdi_complete_sync() provides for us.
  597. */
  598. mcdi->mode = MCDI_MODE_POLL;
  599. efx_mcdi_complete_sync(mcdi);
  600. }
  601. /* Flush any running or queued asynchronous requests, after event processing
  602. * is stopped
  603. */
  604. void efx_mcdi_flush_async(struct efx_nic *efx)
  605. {
  606. struct efx_mcdi_async_param *async, *next;
  607. struct efx_mcdi_iface *mcdi;
  608. if (!efx->mcdi)
  609. return;
  610. mcdi = efx_mcdi(efx);
  611. /* We must be in polling mode so no more requests can be queued */
  612. BUG_ON(mcdi->mode != MCDI_MODE_POLL);
  613. del_timer_sync(&mcdi->async_timer);
  614. /* If a request is still running, make sure we give the MC
  615. * time to complete it so that the response won't overwrite our
  616. * next request.
  617. */
  618. if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
  619. efx_mcdi_poll(efx);
  620. mcdi->state = MCDI_STATE_QUIESCENT;
  621. }
  622. /* Nothing else will access the async list now, so it is safe
  623. * to walk it without holding async_lock. If we hold it while
  624. * calling a completer then lockdep may warn that we have
  625. * acquired locks in the wrong order.
  626. */
  627. list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
  628. async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
  629. list_del(&async->list);
  630. kfree(async);
  631. }
  632. }
  633. void efx_mcdi_mode_event(struct efx_nic *efx)
  634. {
  635. struct efx_mcdi_iface *mcdi;
  636. if (!efx->mcdi)
  637. return;
  638. mcdi = efx_mcdi(efx);
  639. if (mcdi->mode == MCDI_MODE_EVENTS)
  640. return;
  641. /* We can't switch from polled to event completion in the middle of a
  642. * request, because the completion method is specified in the request.
  643. * So acquire the interface to serialise the requestors. We don't need
  644. * to acquire the iface_lock to change the mode here, but we do need a
645. * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
646. * efx_mcdi_acquire_sync() provides.
  647. */
  648. efx_mcdi_acquire_sync(mcdi);
  649. mcdi->mode = MCDI_MODE_EVENTS;
  650. efx_mcdi_release(mcdi);
  651. }
  652. static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
  653. {
  654. struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
  655. /* If there is an outstanding MCDI request, it has been terminated
  656. * either by a BADASSERT or REBOOT event. If the mcdi interface is
  657. * in polled mode, then do nothing because the MC reboot handler will
  658. * set the header correctly. However, if the mcdi interface is waiting
  659. * for a CMDDONE event it won't receive it [and since all MCDI events
  660. * are sent to the same queue, we can't be racing with
  661. * efx_mcdi_ev_cpl()]
  662. *
  663. * If there is an outstanding asynchronous request, we can't
664. * complete it now (efx_mcdi_complete_async() would deadlock). The
  665. * reset process will take care of this.
  666. *
  667. * There's a race here with efx_mcdi_send_request(), because
  668. * we might receive a REBOOT event *before* the request has
  669. * been copied out. In polled mode (during startup) this is
  670. * irrelevant, because efx_mcdi_complete_sync() is ignored. In
  671. * event mode, this condition is just an edge-case of
  672. * receiving a REBOOT event after posting the MCDI
  673. * request. Did the mc reboot before or after the copyout? The
674. * best we can always do is just return failure.
  675. */
  676. spin_lock(&mcdi->iface_lock);
  677. if (efx_mcdi_complete_sync(mcdi)) {
  678. if (mcdi->mode == MCDI_MODE_EVENTS) {
  679. mcdi->resprc = rc;
  680. mcdi->resp_hdr_len = 0;
  681. mcdi->resp_data_len = 0;
  682. ++mcdi->credits;
  683. }
  684. } else {
  685. int count;
  686. /* Consume the status word since efx_mcdi_rpc_finish() won't */
  687. for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
  688. if (efx_mcdi_poll_reboot(efx))
  689. break;
  690. udelay(MCDI_STATUS_DELAY_US);
  691. }
  692. mcdi->new_epoch = true;
  693. /* Nobody was waiting for an MCDI request, so trigger a reset */
  694. efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
  695. }
  696. spin_unlock(&mcdi->iface_lock);
  697. }
  698. /* Called from falcon_process_eventq for MCDI events */
  699. void efx_mcdi_process_event(struct efx_channel *channel,
  700. efx_qword_t *event)
  701. {
  702. struct efx_nic *efx = channel->efx;
  703. int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
  704. u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);
  705. switch (code) {
  706. case MCDI_EVENT_CODE_BADSSERT:
  707. netif_err(efx, hw, efx->net_dev,
  708. "MC watchdog or assertion failure at 0x%x\n", data);
  709. efx_mcdi_ev_death(efx, -EINTR);
  710. break;
  711. case MCDI_EVENT_CODE_PMNOTICE:
  712. netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
  713. break;
  714. case MCDI_EVENT_CODE_CMDDONE:
  715. efx_mcdi_ev_cpl(efx,
  716. MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
  717. MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
  718. MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
  719. break;
  720. case MCDI_EVENT_CODE_LINKCHANGE:
  721. efx_mcdi_process_link_change(efx, event);
  722. break;
  723. case MCDI_EVENT_CODE_SENSOREVT:
  724. efx_mcdi_sensor_event(efx, event);
  725. break;
  726. case MCDI_EVENT_CODE_SCHEDERR:
  727. netif_info(efx, hw, efx->net_dev,
  728. "MC Scheduler error address=0x%x\n", data);
  729. break;
  730. case MCDI_EVENT_CODE_REBOOT:
  731. case MCDI_EVENT_CODE_MC_REBOOT:
  732. netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
  733. efx_mcdi_ev_death(efx, -EIO);
  734. break;
  735. case MCDI_EVENT_CODE_MAC_STATS_DMA:
736. /* MAC stats are gathered lazily. We can ignore this. */
  737. break;
  738. case MCDI_EVENT_CODE_FLR:
  739. efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
  740. break;
  741. case MCDI_EVENT_CODE_PTP_RX:
  742. case MCDI_EVENT_CODE_PTP_FAULT:
  743. case MCDI_EVENT_CODE_PTP_PPS:
  744. efx_ptp_event(efx, event);
  745. break;
  746. case MCDI_EVENT_CODE_TX_FLUSH:
  747. case MCDI_EVENT_CODE_RX_FLUSH:
  748. /* Two flush events will be sent: one to the same event
  749. * queue as completions, and one to event queue 0.
  750. * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
  751. * flag will be set, and we should ignore the event
  752. * because we want to wait for all completions.
  753. */
  754. BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
  755. MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
  756. if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
  757. efx_ef10_handle_drain_event(efx);
  758. break;
  759. case MCDI_EVENT_CODE_TX_ERR:
  760. case MCDI_EVENT_CODE_RX_ERR:
  761. netif_err(efx, hw, efx->net_dev,
  762. "%s DMA error (event: "EFX_QWORD_FMT")\n",
  763. code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
  764. EFX_QWORD_VAL(*event));
  765. efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
  766. break;
  767. default:
  768. netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
  769. code);
  770. }
  771. }
  772. /**************************************************************************
  773. *
  774. * Specific request functions
  775. *
  776. **************************************************************************
  777. */
  778. void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
  779. {
  780. MCDI_DECLARE_BUF(outbuf,
  781. max(MC_CMD_GET_VERSION_OUT_LEN,
  782. MC_CMD_GET_CAPABILITIES_OUT_LEN));
  783. size_t outlength;
  784. const __le16 *ver_words;
  785. size_t offset;
  786. int rc;
  787. BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
  788. rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
  789. outbuf, sizeof(outbuf), &outlength);
  790. if (rc)
  791. goto fail;
  792. if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
  793. rc = -EIO;
  794. goto fail;
  795. }
  796. ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
  797. offset = snprintf(buf, len, "%u.%u.%u.%u",
  798. le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
  799. le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
  800. /* EF10 may have multiple datapath firmware variants within a
  801. * single version. Report which variants are running.
  802. */
  803. if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
  804. BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
  805. rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
  806. outbuf, sizeof(outbuf), &outlength);
  807. if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
  808. offset += snprintf(
  809. buf + offset, len - offset, " rx? tx?");
  810. else
  811. offset += snprintf(
  812. buf + offset, len - offset, " rx%x tx%x",
  813. MCDI_WORD(outbuf,
  814. GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
  815. MCDI_WORD(outbuf,
  816. GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
  817. /* It's theoretically possible for the string to exceed 31
  818. * characters, though in practice the first three version
  819. * components are short enough that this doesn't happen.
  820. */
  821. if (WARN_ON(offset >= len))
  822. buf[0] = 0;
  823. }
  824. return;
  825. fail:
  826. netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  827. buf[0] = 0;
  828. }
  829. static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
  830. bool *was_attached)
  831. {
  832. MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
  833. MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
  834. size_t outlen;
  835. int rc;
  836. MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
  837. driver_operating ? 1 : 0);
  838. MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
  839. MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
  840. rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
  841. outbuf, sizeof(outbuf), &outlen);
  842. if (rc)
  843. goto fail;
  844. if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
  845. rc = -EIO;
  846. goto fail;
  847. }
  848. if (was_attached != NULL)
  849. *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
  850. return 0;
  851. fail:
  852. netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  853. return rc;
  854. }
  855. int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
  856. u16 *fw_subtype_list, u32 *capabilities)
  857. {
  858. MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
  859. size_t outlen, i;
  860. int port_num = efx_port_num(efx);
  861. int rc;
  862. BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
  863. rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
  864. outbuf, sizeof(outbuf), &outlen);
  865. if (rc)
  866. goto fail;
  867. if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
  868. rc = -EIO;
  869. goto fail;
  870. }
  871. if (mac_address)
  872. memcpy(mac_address,
  873. port_num ?
  874. MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
  875. MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
  876. ETH_ALEN);
  877. if (fw_subtype_list) {
  878. for (i = 0;
  879. i < MCDI_VAR_ARRAY_LEN(outlen,
  880. GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
  881. i++)
  882. fw_subtype_list[i] = MCDI_ARRAY_WORD(
  883. outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
  884. for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
  885. fw_subtype_list[i] = 0;
  886. }
  887. if (capabilities) {
  888. if (port_num)
  889. *capabilities = MCDI_DWORD(outbuf,
  890. GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
  891. else
  892. *capabilities = MCDI_DWORD(outbuf,
  893. GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
  894. }
  895. return 0;
  896. fail:
  897. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
  898. __func__, rc, (int)outlen);
  899. return rc;
  900. }
  901. int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
  902. {
  903. MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
  904. u32 dest = 0;
  905. int rc;
  906. if (uart)
  907. dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
  908. if (evq)
  909. dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
  910. MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
  911. MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
  912. BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
  913. rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
  914. NULL, 0, NULL);
  915. if (rc)
  916. goto fail;
  917. return 0;
  918. fail:
  919. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  920. return rc;
  921. }
  922. int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
  923. {
  924. MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
  925. size_t outlen;
  926. int rc;
  927. BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
  928. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
  929. outbuf, sizeof(outbuf), &outlen);
  930. if (rc)
  931. goto fail;
  932. if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
  933. rc = -EIO;
  934. goto fail;
  935. }
  936. *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
  937. return 0;
  938. fail:
  939. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
  940. __func__, rc);
  941. return rc;
  942. }
  943. int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
  944. size_t *size_out, size_t *erase_size_out,
  945. bool *protected_out)
  946. {
  947. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
  948. MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
  949. size_t outlen;
  950. int rc;
  951. MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
  952. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
  953. outbuf, sizeof(outbuf), &outlen);
  954. if (rc)
  955. goto fail;
  956. if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
  957. rc = -EIO;
  958. goto fail;
  959. }
  960. *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
  961. *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
  962. *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
  963. (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
  964. return 0;
  965. fail:
  966. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  967. return rc;
  968. }
  969. static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
  970. {
  971. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
  972. MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
  973. int rc;
  974. MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
  975. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
  976. outbuf, sizeof(outbuf), NULL);
  977. if (rc)
  978. return rc;
  979. switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
  980. case MC_CMD_NVRAM_TEST_PASS:
  981. case MC_CMD_NVRAM_TEST_NOTSUPP:
  982. return 0;
  983. default:
  984. return -EIO;
  985. }
  986. }
  987. int efx_mcdi_nvram_test_all(struct efx_nic *efx)
  988. {
  989. u32 nvram_types;
  990. unsigned int type;
  991. int rc;
  992. rc = efx_mcdi_nvram_types(efx, &nvram_types);
  993. if (rc)
  994. goto fail1;
  995. type = 0;
  996. while (nvram_types != 0) {
  997. if (nvram_types & 1) {
  998. rc = efx_mcdi_nvram_test(efx, type);
  999. if (rc)
  1000. goto fail2;
  1001. }
  1002. type++;
  1003. nvram_types >>= 1;
  1004. }
  1005. return 0;
  1006. fail2:
  1007. netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
  1008. __func__, type);
  1009. fail1:
  1010. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1011. return rc;
  1012. }
  1013. static int efx_mcdi_read_assertion(struct efx_nic *efx)
  1014. {
  1015. MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
  1016. MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
  1017. unsigned int flags, index;
  1018. const char *reason;
  1019. size_t outlen;
  1020. int retry;
  1021. int rc;
  1022. /* Attempt to read any stored assertion state before we reboot
  1023. * the mcfw out of the assertion handler. Retry twice, once
  1024. * because a boot-time assertion might cause this command to fail
  1025. * with EINTR. And once again because GET_ASSERTS can race with
  1026. * MC_CMD_REBOOT running on the other port. */
  1027. retry = 2;
  1028. do {
  1029. MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
  1030. rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
  1031. inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
  1032. outbuf, sizeof(outbuf), &outlen);
  1033. } while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
  1034. if (rc)
  1035. return rc;
  1036. if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
  1037. return -EIO;
  1038. /* Print out any recorded assertion state */
  1039. flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
  1040. if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
  1041. return 0;
  1042. reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
  1043. ? "system-level assertion"
  1044. : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
  1045. ? "thread-level assertion"
  1046. : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
  1047. ? "watchdog reset"
  1048. : "unknown assertion";
  1049. netif_err(efx, hw, efx->net_dev,
  1050. "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
  1051. MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
  1052. MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
  1053. /* Print out the registers */
  1054. for (index = 0;
  1055. index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
  1056. index++)
  1057. netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
  1058. 1 + index,
  1059. MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
  1060. index));
  1061. return 0;
  1062. }
  1063. static void efx_mcdi_exit_assertion(struct efx_nic *efx)
  1064. {
  1065. MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
  1066. /* If the MC is running debug firmware, it might now be
  1067. * waiting for a debugger to attach, but we just want it to
  1068. * reboot. We set a flag that makes the command a no-op if it
  1069. * has already done so. We don't know what return code to
  1070. * expect (0 or -EIO), so ignore it.
  1071. */
  1072. BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
  1073. MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
  1074. MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
  1075. (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
  1076. NULL, 0, NULL);
  1077. }
  1078. int efx_mcdi_handle_assertion(struct efx_nic *efx)
  1079. {
  1080. int rc;
  1081. rc = efx_mcdi_read_assertion(efx);
  1082. if (rc)
  1083. return rc;
  1084. efx_mcdi_exit_assertion(efx);
  1085. return 0;
  1086. }
  1087. void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
  1088. {
  1089. MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
  1090. int rc;
  1091. BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
  1092. BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
  1093. BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
  1094. BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
  1095. MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
  1096. rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
  1097. NULL, 0, NULL);
  1098. if (rc)
  1099. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
  1100. __func__, rc);
  1101. }
  1102. static int efx_mcdi_reset_port(struct efx_nic *efx)
  1103. {
  1104. int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
  1105. if (rc)
  1106. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
  1107. __func__, rc);
  1108. return rc;
  1109. }
  1110. static int efx_mcdi_reset_mc(struct efx_nic *efx)
  1111. {
  1112. MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
  1113. int rc;
  1114. BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
  1115. MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
  1116. rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
  1117. NULL, 0, NULL);
  1118. /* White is black, and up is down */
  1119. if (rc == -EIO)
  1120. return 0;
  1121. if (rc == 0)
  1122. rc = -EIO;
  1123. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1124. return rc;
  1125. }
  1126. enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
  1127. {
  1128. return RESET_TYPE_RECOVER_OR_ALL;
  1129. }
  1130. int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
  1131. {
  1132. int rc;
  1133. /* Recover from a failed assertion pre-reset */
  1134. rc = efx_mcdi_handle_assertion(efx);
  1135. if (rc)
  1136. return rc;
  1137. if (method == RESET_TYPE_WORLD)
  1138. return efx_mcdi_reset_mc(efx);
  1139. else
  1140. return efx_mcdi_reset_port(efx);
  1141. }
  1142. static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
  1143. const u8 *mac, int *id_out)
  1144. {
  1145. MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
  1146. MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
  1147. size_t outlen;
  1148. int rc;
  1149. MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
  1150. MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
  1151. MC_CMD_FILTER_MODE_SIMPLE);
  1152. memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);
  1153. rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
  1154. outbuf, sizeof(outbuf), &outlen);
  1155. if (rc)
  1156. goto fail;
  1157. if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
  1158. rc = -EIO;
  1159. goto fail;
  1160. }
  1161. *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
  1162. return 0;
  1163. fail:
  1164. *id_out = -1;
  1165. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1166. return rc;
  1167. }
  1168. int
  1169. efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
  1170. {
  1171. return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
  1172. }
  1173. int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
  1174. {
  1175. MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
  1176. size_t outlen;
  1177. int rc;
  1178. rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
  1179. outbuf, sizeof(outbuf), &outlen);
  1180. if (rc)
  1181. goto fail;
  1182. if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
  1183. rc = -EIO;
  1184. goto fail;
  1185. }
  1186. *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
  1187. return 0;
  1188. fail:
  1189. *id_out = -1;
  1190. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1191. return rc;
  1192. }
  1193. int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
  1194. {
  1195. MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
  1196. int rc;
  1197. MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
  1198. rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
  1199. NULL, 0, NULL);
  1200. if (rc)
  1201. goto fail;
  1202. return 0;
  1203. fail:
  1204. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1205. return rc;
  1206. }
  1207. int efx_mcdi_flush_rxqs(struct efx_nic *efx)
  1208. {
  1209. struct efx_channel *channel;
  1210. struct efx_rx_queue *rx_queue;
  1211. MCDI_DECLARE_BUF(inbuf,
  1212. MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
  1213. int rc, count;
  1214. BUILD_BUG_ON(EFX_MAX_CHANNELS >
  1215. MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
  1216. count = 0;
  1217. efx_for_each_channel(channel, efx) {
  1218. efx_for_each_channel_rx_queue(rx_queue, channel) {
  1219. if (rx_queue->flush_pending) {
  1220. rx_queue->flush_pending = false;
  1221. atomic_dec(&efx->rxq_flush_pending);
  1222. MCDI_SET_ARRAY_DWORD(
  1223. inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
  1224. count, efx_rx_queue_index(rx_queue));
  1225. count++;
  1226. }
  1227. }
  1228. }
  1229. rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
  1230. MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
  1231. WARN_ON(rc < 0);
  1232. return rc;
  1233. }
  1234. int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
  1235. {
  1236. int rc;
  1237. rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
  1238. if (rc)
  1239. goto fail;
  1240. return 0;
  1241. fail:
  1242. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1243. return rc;
  1244. }
  1245. int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
  1246. {
  1247. MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
  1248. BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
  1249. MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
  1250. MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
  1251. return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
  1252. NULL, 0, NULL);
  1253. }
  1254. #ifdef CONFIG_SFC_MTD
  1255. #define EFX_MCDI_NVRAM_LEN_MAX 128
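/* NVRAM reads and writes below are chunked to at most this many bytes per
 * MCDI call; efx_mcdi_mtd_read() and efx_mcdi_mtd_write() loop over the
 * requested range in EFX_MCDI_NVRAM_LEN_MAX-sized pieces.
 */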
  1256. static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
  1257. {
  1258. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
  1259. int rc;
  1260. MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
  1261. BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
  1262. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
  1263. NULL, 0, NULL);
  1264. if (rc)
  1265. goto fail;
  1266. return 0;
  1267. fail:
  1268. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1269. return rc;
  1270. }
  1271. static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
  1272. loff_t offset, u8 *buffer, size_t length)
  1273. {
  1274. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
  1275. MCDI_DECLARE_BUF(outbuf,
  1276. MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
  1277. size_t outlen;
  1278. int rc;
  1279. MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
  1280. MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
  1281. MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
  1282. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
  1283. outbuf, sizeof(outbuf), &outlen);
  1284. if (rc)
  1285. goto fail;
  1286. memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
  1287. return 0;
  1288. fail:
  1289. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1290. return rc;
  1291. }
  1292. static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
  1293. loff_t offset, const u8 *buffer, size_t length)
  1294. {
  1295. MCDI_DECLARE_BUF(inbuf,
  1296. MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
  1297. int rc;
  1298. MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
  1299. MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
  1300. MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
  1301. memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
  1302. BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
  1303. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
  1304. ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
  1305. NULL, 0, NULL);
  1306. if (rc)
  1307. goto fail;
  1308. return 0;
  1309. fail:
  1310. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1311. return rc;
  1312. }
  1313. static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
  1314. loff_t offset, size_t length)
  1315. {
  1316. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
  1317. int rc;
  1318. MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
  1319. MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
  1320. MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
  1321. BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
  1322. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
  1323. NULL, 0, NULL);
  1324. if (rc)
  1325. goto fail;
  1326. return 0;
  1327. fail:
  1328. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1329. return rc;
  1330. }
  1331. static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
  1332. {
  1333. MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
  1334. int rc;
  1335. MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
  1336. BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
  1337. rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
  1338. NULL, 0, NULL);
  1339. if (rc)
  1340. goto fail;
  1341. return 0;
  1342. fail:
  1343. netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
  1344. return rc;
  1345. }
  1346. int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
  1347. size_t len, size_t *retlen, u8 *buffer)
  1348. {
  1349. struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
  1350. struct efx_nic *efx = mtd->priv;
  1351. loff_t offset = start;
  1352. loff_t end = min_t(loff_t, start + len, mtd->size);
  1353. size_t chunk;
  1354. int rc = 0;
  1355. while (offset < end) {
  1356. chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
  1357. rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
  1358. buffer, chunk);
  1359. if (rc)
  1360. goto out;
  1361. offset += chunk;
  1362. buffer += chunk;
  1363. }
  1364. out:
  1365. *retlen = offset - start;
  1366. return rc;
  1367. }
  1368. int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
  1369. {
  1370. struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
  1371. struct efx_nic *efx = mtd->priv;
  1372. loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
  1373. loff_t end = min_t(loff_t, start + len, mtd->size);
  1374. size_t chunk = part->common.mtd.erasesize;
  1375. int rc = 0;
  1376. if (!part->updating) {
  1377. rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
  1378. if (rc)
  1379. goto out;
  1380. part->updating = true;
  1381. }
  1382. /* The MCDI interface can in fact do multiple erase blocks at once;
  1383. * but erasing may be slow, so we make multiple calls here to avoid
  1384. * tripping the MCDI RPC timeout. */
  1385. while (offset < end) {
  1386. rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
  1387. chunk);
  1388. if (rc)
  1389. goto out;
  1390. offset += chunk;
  1391. }
  1392. out:
  1393. return rc;
  1394. }
  1395. int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
  1396. size_t len, size_t *retlen, const u8 *buffer)
  1397. {
  1398. struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
  1399. struct efx_nic *efx = mtd->priv;
  1400. loff_t offset = start;
  1401. loff_t end = min_t(loff_t, start + len, mtd->size);
  1402. size_t chunk;
  1403. int rc = 0;
  1404. if (!part->updating) {
  1405. rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
  1406. if (rc)
  1407. goto out;
  1408. part->updating = true;
  1409. }
  1410. while (offset < end) {
  1411. chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
  1412. rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
  1413. buffer, chunk);
  1414. if (rc)
  1415. goto out;
  1416. offset += chunk;
  1417. buffer += chunk;
  1418. }
  1419. out:
  1420. *retlen = offset - start;
  1421. return rc;
  1422. }
  1423. int efx_mcdi_mtd_sync(struct mtd_info *mtd)
  1424. {
  1425. struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
  1426. struct efx_nic *efx = mtd->priv;
  1427. int rc = 0;
  1428. if (part->updating) {
  1429. part->updating = false;
  1430. rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
  1431. }
  1432. return rc;
  1433. }
  1434. void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
  1435. {
  1436. struct efx_mcdi_mtd_partition *mcdi_part =
  1437. container_of(part, struct efx_mcdi_mtd_partition, common);
  1438. struct efx_nic *efx = part->mtd.priv;
  1439. snprintf(part->name, sizeof(part->name), "%s %s:%02x",
  1440. efx->name, part->type_name, mcdi_part->fw_subtype);
  1441. }
  1442. #endif /* CONFIG_SFC_MTD */