/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2008-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <asm/cmpxchg.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
#include "farch_regs.h"
#include "mcdi_pcol.h"
#include "phy.h"

/**************************************************************************
 *
 * Management-Controller-to-Driver Interface
 *
 **************************************************************************
 */

#define MCDI_RPC_TIMEOUT	(10 * HZ)

/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 20ms for the status word to be set.
 */
#define MCDI_STATUS_DELAY_US		100
#define MCDI_STATUS_DELAY_COUNT		200
#define MCDI_STATUS_SLEEP_MS						\
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)

#define SEQ_MASK							\
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))

struct efx_mcdi_async_param {
	struct list_head list;
	unsigned int cmd;
	size_t inlen;
	size_t outlen;
	efx_mcdi_async_completer *complete;
	unsigned long cookie;
	/* followed by request/response buffer */
};
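
/* Note on layout: the request/response buffer lives immediately after
 * this structure in the same allocation ("async + 1" in the code below).
 * efx_mcdi_rpc_async() sizes it as max(inlen, outlen) rounded up to a
 * whole dword, so the response overwrites the request on completion.
 */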

static void efx_mcdi_timeout_async(unsigned long context);

static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
	EFX_BUG_ON_PARANOID(!efx->mcdi);
	return &efx->mcdi->iface;
}

int efx_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
	if (!efx->mcdi)
		return -ENOMEM;

	mcdi = efx_mcdi(efx);
	mcdi->efx = efx;
	init_waitqueue_head(&mcdi->wq);
	spin_lock_init(&mcdi->iface_lock);
	mcdi->state = MCDI_STATE_QUIESCENT;
	mcdi->mode = MCDI_MODE_POLL;
	spin_lock_init(&mcdi->async_lock);
	INIT_LIST_HEAD(&mcdi->async_list);
	setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
		    (unsigned long)mcdi);

	(void) efx_mcdi_poll_reboot(efx);
	mcdi->new_epoch = true;

	/* Recover from a failed assertion before probing */
	return efx_mcdi_handle_assertion(efx);
}

void efx_mcdi_fini(struct efx_nic *efx)
{
	BUG_ON(efx->mcdi && efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
	kfree(efx->mcdi);
}

static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
				  const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	spin_unlock_bh(&mcdi->iface_lock);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1 */
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		hdr_len = 4;
	} else {
		/* MCDI v2 */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);

	mcdi->new_epoch = false;
}
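
/* A sketch of the wire headers built above, for reference (derived from
 * the code, not from the MCDI spec): a v1 request is one header dword
 * carrying the command and length directly; a v2 request escapes via
 * MC_CMD_V2_EXTN and carries them in a second header dword.
 *
 *	v1: [hdr0: CODE=cmd, DATALEN=inlen, SEQ, XFLAGS][payload]
 *	v2: [hdr0: CODE=MC_CMD_V2_EXTN, DATALEN=0, SEQ, XFLAGS]
 *	    [hdr1: EXTENDED_CMD=cmd, ACTUAL_LEN=inlen][payload]
 */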

static int efx_mcdi_errno(unsigned int mcdi_err)
{
	switch (mcdi_err) {
	case 0:
		return 0;
#define TRANSLATE_ERROR(name)					\
	case MC_CMD_ERR_ ## name:				\
		return -name;
	TRANSLATE_ERROR(EPERM);
	TRANSLATE_ERROR(ENOENT);
	TRANSLATE_ERROR(EINTR);
	TRANSLATE_ERROR(EAGAIN);
	TRANSLATE_ERROR(EACCES);
	TRANSLATE_ERROR(EBUSY);
	TRANSLATE_ERROR(EINVAL);
	TRANSLATE_ERROR(EDEADLK);
	TRANSLATE_ERROR(ENOSYS);
	TRANSLATE_ERROR(ETIME);
	TRANSLATE_ERROR(EALREADY);
	TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
	case MC_CMD_ERR_ALLOC_FAIL:
		return -ENOBUFS;
	case MC_CMD_ERR_MAC_EXIST:
		return -EADDRINUSE;
	default:
		return -EPROTO;
	}
}

static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

	if (error && mcdi->resp_data_len == 0) {
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc =
			efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
	} else {
		mcdi->resprc = 0;
	}
}

static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to reading the
	 * response header below.
	 */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once per microsecond) for
	 * the first jiffy, because MCDI responses are generally fast.
	 * After that, back off and poll approximately once per jiffy.
	 */
	spins = TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		rmb();
		if (efx->type->mcdi_poll_response(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	spin_lock_bh(&mcdi->iface_lock);
	efx_mcdi_read_response_header(efx);
	spin_unlock_bh(&mcdi->iface_lock);

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}

/* Test and clear MC-rebooted flag for this port/function; reset
 * software state as necessary.
 */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return 0;

	return efx->type->mcdi_poll_reboot(efx);
}
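
/* A summary of the interface state machine enforced by the cmpxchg()
 * transitions below (informal; the state enum lives in the driver
 * headers):
 *
 *	QUIESCENT  --acquire-->  RUNNING_SYNC or RUNNING_ASYNC
 *	RUNNING_*  --complete--> COMPLETED
 *	COMPLETED  --release-->  QUIESCENT, or straight to RUNNING_ASYNC
 *	                         if another async request is queued
 */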

static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
{
	return cmpxchg(&mcdi->state,
		       MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
		MCDI_STATE_QUIESCENT;
}

static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING_SYNC.
	 */
	wait_event(mcdi->wq,
		   cmpxchg(&mcdi->state,
			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
		   MCDI_STATE_QUIESCENT);
}

static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
			       MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_mode_poll() switched us back to polled
	 * completions.  In which case, poll for completions directly.  If
	 * efx_mcdi_ev_cpl() completed the request first, then we'll just
	 * end up completing the request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(),
	 * which wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}

/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
 * requester.  Return whether this was done.  Does not take any locks.
 */
static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
{
	if (cmpxchg(&mcdi->state,
		    MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
	    MCDI_STATE_RUNNING_SYNC) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}

static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	if (mcdi->mode == MCDI_MODE_EVENTS) {
		struct efx_mcdi_async_param *async;
		struct efx_nic *efx = mcdi->efx;

		/* Process the asynchronous request queue */
		spin_lock_bh(&mcdi->async_lock);
		async = list_first_entry_or_null(
			&mcdi->async_list, struct efx_mcdi_async_param, list);
		if (async) {
			mcdi->state = MCDI_STATE_RUNNING_ASYNC;
			efx_mcdi_send_request(efx, async->cmd,
					      (const efx_dword_t *)(async + 1),
					      async->inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
		spin_unlock_bh(&mcdi->async_lock);

		if (async)
			return;
	}

	mcdi->state = MCDI_STATE_QUIESCENT;
	wake_up(&mcdi->wq);
}

/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
 * asynchronous completion function, and release the interface.
 * Return whether this was done.  Must be called in bh-disabled
 * context.  Will take iface_lock and async_lock.
 */
static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
{
	struct efx_nic *efx = mcdi->efx;
	struct efx_mcdi_async_param *async;
	size_t hdr_len, data_len;
	efx_dword_t *outbuf;
	int rc;

	if (cmpxchg(&mcdi->state,
		    MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
	    MCDI_STATE_RUNNING_ASYNC)
		return false;

	spin_lock(&mcdi->iface_lock);
	if (timeout) {
		/* Ensure that if the completion event arrives later,
		 * the seqno check in efx_mcdi_ev_cpl() will fail
		 */
		++mcdi->seqno;
		++mcdi->credits;
		rc = -ETIMEDOUT;
		hdr_len = 0;
		data_len = 0;
	} else {
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
	}
	spin_unlock(&mcdi->iface_lock);

	/* Stop the timer.  In case the timer function is running, we
	 * must wait for it to return so that there is no possibility
	 * of it aborting the next request.
	 */
	if (!timeout)
		del_timer_sync(&mcdi->async_timer);

	spin_lock(&mcdi->async_lock);
	async = list_first_entry(&mcdi->async_list,
				 struct efx_mcdi_async_param, list);
	list_del(&async->list);
	spin_unlock(&mcdi->async_lock);

	outbuf = (efx_dword_t *)(async + 1);
	efx->type->mcdi_read_response(efx, outbuf, hdr_len,
				      min(async->outlen, data_len));
	async->complete(efx, async->cookie, rc, outbuf, data_len);
	kfree(async);

	efx_mcdi_release(mcdi);

	return true;
}

static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
				  seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	if (wake) {
		if (!efx_mcdi_complete_async(mcdi, false))
			(void) efx_mcdi_complete_sync(mcdi);

		/* If the interface isn't RUNNING_ASYNC or
		 * RUNNING_SYNC then we've received a duplicate
		 * completion after we've already transitioned back to
		 * QUIESCENT. [A subsequent invocation would increment
		 * seqno, so would have failed the seqno check.]
		 */
	}
}

static void efx_mcdi_timeout_async(unsigned long context)
{
	struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;

	efx_mcdi_complete_async(mcdi, true);
}

static int
efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
{
	if (efx->type->mcdi_max_ver < 0 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
		return -EINVAL;

	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
		return -EMSGSIZE;

	return 0;
}

int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
		 const efx_dword_t *inbuf, size_t inlen,
		 efx_dword_t *outbuf, size_t outlen,
		 size_t *outlen_actual)
{
	int rc;

	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
	if (rc)
		return rc;

	return efx_mcdi_rpc_finish(efx, cmd, inlen,
				   outbuf, outlen, outlen_actual);
}
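
/* Illustrative synchronous usage, modelled on efx_mcdi_nvram_types()
 * further down in this file (not itself part of the driver):
 *
 *	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
 *	size_t outlen;
 *	int rc;
 *
 *	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
 *			  outbuf, sizeof(outbuf), &outlen);
 *	if (rc == 0 && outlen >= MC_CMD_NVRAM_TYPES_OUT_LEN)
 *		types = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
 */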

int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	efx_mcdi_acquire_sync(mcdi);
	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
	return 0;
}

/**
 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
 * @efx: NIC through which to issue the command
 * @cmd: Command type number
 * @inbuf: Command parameters
 * @inlen: Length of command parameters, in bytes
 * @outlen: Length to allocate for response buffer, in bytes
 * @complete: Function to be called on completion or cancellation.
 * @cookie: Arbitrary value to be passed to @complete.
 *
 * This function does not sleep and therefore may be called in atomic
 * context.  It will fail if event queues are disabled or if MCDI
 * event completions have been disabled due to an error.
 *
 * If it succeeds, the @complete function will be called exactly once
 * in atomic context, when one of the following occurs:
 * (a) the completion event is received (in NAPI context)
 * (b) event queues are disabled (in the process that disables them)
 * (c) the request times out (in timer context)
 */
int
efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
		   const efx_dword_t *inbuf, size_t inlen, size_t outlen,
		   efx_mcdi_async_completer *complete, unsigned long cookie)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	struct efx_mcdi_async_param *async;
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
			GFP_ATOMIC);
	if (!async)
		return -ENOMEM;

	async->cmd = cmd;
	async->inlen = inlen;
	async->outlen = outlen;
	async->complete = complete;
	async->cookie = cookie;
	memcpy(async + 1, inbuf, inlen);

	spin_lock_bh(&mcdi->async_lock);

	if (mcdi->mode == MCDI_MODE_EVENTS) {
		list_add_tail(&async->list, &mcdi->async_list);

		/* If this is at the front of the queue, try to start it
		 * immediately
		 */
		if (mcdi->async_list.next == &async->list &&
		    efx_mcdi_acquire_async(mcdi)) {
			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
	} else {
		kfree(async);
		rc = -ENETDOWN;
	}

	spin_unlock_bh(&mcdi->async_lock);

	return rc;
}
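
/* Illustrative asynchronous usage (the completer below is made up for
 * this example; its signature follows efx_mcdi_async_completer as called
 * from efx_mcdi_complete_async() above):
 *
 *	static void example_complete(struct efx_nic *efx,
 *				     unsigned long cookie, int rc,
 *				     efx_dword_t *outbuf,
 *				     size_t outlen_actual)
 *	{
 *		// Called exactly once, in atomic context; outbuf is
 *		// only valid for the duration of this call.
 *	}
 *
 *	rc = efx_mcdi_rpc_async(efx, MC_CMD_GET_VERSION, NULL, 0,
 *				MC_CMD_GET_VERSION_OUT_LEN,
 *				example_complete, 0);
 */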

int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		/* Close the race with efx_mcdi_ev_cpl() executing just too
		 * late and completing a request we've just cancelled, by
		 * ensuring that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);

		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);
	} else {
		size_t hdr_len, data_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		spin_unlock_bh(&mcdi->iface_lock);

		BUG_ON(rc > 0);

		if (rc == 0) {
			efx->type->mcdi_read_response(efx, outbuf, hdr_len,
						      min(outlen, data_len));
			if (outlen_actual != NULL)
				*outlen_actual = data_len;
		} else if (cmd == MC_CMD_REBOOT && rc == -EIO) {
			; /* Don't reset if MC_CMD_REBOOT returns EIO */
		} else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else {
			netif_dbg(efx, hw, efx->net_dev,
				  "MC command 0x%x inlen %d failed rc=%d\n",
				  cmd, (int)inlen, -rc);
		}

		if (rc == -EIO || rc == -EINTR) {
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
			mcdi->new_epoch = true;
		}
	}

	efx_mcdi_release(mcdi);
	return rc;
}
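
/* To summarise the error handling above: -EIO or -EINTR indicates the MC
 * has died or rebooted, so we wait for the status word, start a new
 * epoch, and (except when the failing command was MC_CMD_REBOOT, where
 * -EIO is expected) schedule an MC_FAILURE reset.
 */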

/* Switch to polled MCDI completions.  This can be called in various
 * error conditions with various locks held, so it must be lockless.
 * Caller is responsible for flushing asynchronous requests later.
 */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory.  We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete_sync() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete_sync(mcdi);
}

/* Flush any running or queued asynchronous requests, after event processing
 * is stopped
 */
void efx_mcdi_flush_async(struct efx_nic *efx)
{
	struct efx_mcdi_async_param *async, *next;
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	/* We must be in polling mode so no more requests can be queued */
	BUG_ON(mcdi->mode != MCDI_MODE_POLL);

	del_timer_sync(&mcdi->async_timer);

	/* If a request is still running, make sure we give the MC
	 * time to complete it so that the response won't overwrite our
	 * next request.
	 */
	if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
		efx_mcdi_poll(efx);
		mcdi->state = MCDI_STATE_QUIESCENT;
	}

	/* Nothing else will access the async list now, so it is safe
	 * to walk it without holding async_lock.  If we hold it while
	 * calling a completer then lockdep may warn that we have
	 * acquired locks in the wrong order.
	 */
	list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
		async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
		list_del(&async->list);
		kfree(async);
	}
}

void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors.  We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire_sync() provides.
	 */
	efx_mcdi_acquire_sync(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}

static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event.  If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly.  However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * If there is an outstanding asynchronous request, we can't
	 * complete it now (efx_mcdi_complete_async() would deadlock).
	 * The reset process will take care of this.
	 *
	 * There's a race here with efx_mcdi_send_request(), because
	 * we might receive a REBOOT event *before* the request has
	 * been copied out.  In polled mode (during startup) this is
	 * irrelevant, because efx_mcdi_complete_sync() is ignored.  In
	 * event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI
	 * request.  Did the mc reboot before or after the copyout?  The
	 * best we can do always is just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete_sync(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
		mcdi->new_epoch = true;
	}

	spin_unlock(&mcdi->iface_lock);
}

/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_info(efx, hw, efx->net_dev,
			   "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily.  We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;
	case MCDI_EVENT_CODE_TX_ERR:
	case MCDI_EVENT_CODE_RX_ERR:
		netif_err(efx, hw, efx->net_dev,
			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
			  EFX_QWORD_VAL(*event));
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}

/**************************************************************************
 *
 * Specific request functions
 *
 **************************************************************************
 */

void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
	size_t outlength;
	const __le16 *ver_words;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		goto fail;

	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
	snprintf(buf, len, "%u.%u.%u.%u",
		 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
		 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
	return;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	buf[0] = 0;
}

int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			bool *was_attached)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
		       driver_operating ? 1 : 0);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);

	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	if (was_attached != NULL)
		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
	return 0;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen, i;
	int port_num = efx_port_num(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	if (mac_address)
		memcpy(mac_address,
		       port_num ?
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
		       ETH_ALEN);
	if (fw_subtype_list) {
		for (i = 0;
		     i < MCDI_VAR_ARRAY_LEN(outlen,
					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
		     i++)
			fw_subtype_list[i] = MCDI_ARRAY_WORD(
				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
			fw_subtype_list[i] = 0;
	}
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);

	return rc;
}

int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
	u32 dest = 0;
	int rc;

	if (uart)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
	if (evq)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;

	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);

	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
		  __func__, rc);
	return rc;
}

int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			size_t *size_out, size_t *erase_size_out,
			bool *protected_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
				(1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
	case MC_CMD_NVRAM_TEST_PASS:
	case MC_CMD_NVRAM_TEST_NOTSUPP:
		return 0;
	default:
		return -EIO;
	}
}

int efx_mcdi_nvram_test_all(struct efx_nic *efx)
{
	u32 nvram_types;
	unsigned int type;
	int rc;

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		goto fail1;

	type = 0;
	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = efx_mcdi_nvram_test(efx, type);
			if (rc)
				goto fail2;
		}
		type++;
		nvram_types >>= 1;
	}

	return 0;

fail2:
	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
		  __func__, type);
fail1:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler.  Retry twice: once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR, and once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port.
	 */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				  outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 0;
}

static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot.  We set a flag that makes the command a no-op if it
	 * has already done so.  We don't know what return code to
	 * expect (0 or -EIO), so ignore it.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
			    NULL, 0, NULL);
}

int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_read_assertion(efx);
	if (rc)
		return rc;

	efx_mcdi_exit_assertion(efx);
	return 0;
}

void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
	int rc;

	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);

	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
}

static int efx_mcdi_reset_port(struct efx_nic *efx)
{
	int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);

	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
	return rc;
}

static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down: -EIO means the MC really did
	 * reboot (killing the command in flight), so report success;
	 * a clean return means the reboot did not happen.
	 */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}

int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc;

	/* Recover from a failed assertion pre-reset */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	if (method == RESET_TYPE_WORLD)
		return efx_mcdi_reset_mc(efx);
	else
		return efx_mcdi_reset_port(efx);
}

static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
				   const u8 *mac, int *id_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
		       MC_CMD_FILTER_MODE_SIMPLE);
	memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
{
	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}

int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
			}
		}
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);

	return rc;
}

int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

#ifdef CONFIG_SFC_MTD

#define EFX_MCDI_NVRAM_LEN_MAX 128
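
/* NVRAM is accessed in chunks of at most EFX_MCDI_NVRAM_LEN_MAX bytes:
 * the MTD read/write loops below split requests at this size so that no
 * single MCDI RPC grows large or slow enough to approach
 * MCDI_RPC_TIMEOUT (erase is similarly split, one erase block per call).
 */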

static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
			       loff_t offset, u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf,
			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
				loff_t offset, const u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
				loff_t offset, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
		      size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
					 buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->common.mtd.erasesize;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	/* The MCDI interface can in fact do multiple erase blocks at once;
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout.
	 */
	while (offset < end) {
		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
					  chunk);
		if (rc)
			goto out;
		offset += chunk;
	}
out:
	return rc;
}

int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
		       size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
					  buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	int rc = 0;

	if (part->updating) {
		part->updating = false;
		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
	}

	return rc;
}

void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
{
	struct efx_mcdi_mtd_partition *mcdi_part =
		container_of(part, struct efx_mcdi_mtd_partition, common);
	struct efx_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
		 efx->name, part->type_name, mcdi_part->fw_subtype);
}

#endif /* CONFIG_SFC_MTD */