mcdi.c

/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2008-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/delay.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
#include "farch_regs.h"
#include "mcdi_pcol.h"
#include "phy.h"

/**************************************************************************
 *
 * Management-Controller-to-Driver Interface
 *
 **************************************************************************
 */

#define MCDI_RPC_TIMEOUT	(10 * HZ)

/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 20ms for the status word to be set.
 */
#define MCDI_STATUS_DELAY_US		100
#define MCDI_STATUS_DELAY_COUNT		200
#define MCDI_STATUS_SLEEP_MS						\
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)

#define SEQ_MASK							\
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
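
/* Requests and responses are matched by the sequence number carried in
 * the header SEQ field; SEQ_MASK masks the driver's running seqno counter
 * down to the width of that field. Only one request may be outstanding
 * per MCDI interface at a time, serialised by the QUIESCENT/RUNNING/
 * COMPLETED state machine further below.
 */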

static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
	EFX_BUG_ON_PARANOID(!efx->mcdi);
	return &efx->mcdi->iface;
}
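
/* Allocate the MCDI state, initialise the wait queue and lock, default to
 * polled completions, and recover from any assertion left over from a
 * previous driver instance before probing continues.
 */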
int efx_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
	if (!efx->mcdi)
		return -ENOMEM;

	mcdi = efx_mcdi(efx);
	init_waitqueue_head(&mcdi->wq);
	spin_lock_init(&mcdi->iface_lock);
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	mcdi->mode = MCDI_MODE_POLL;

	(void) efx_mcdi_poll_reboot(efx);
	mcdi->new_epoch = true;

	/* Recover from a failed assertion before probing */
	return efx_mcdi_handle_assertion(efx);
}

void efx_mcdi_fini(struct efx_nic *efx)
{
	BUG_ON(efx->mcdi &&
	       atomic_read(&efx->mcdi->iface.state) != MCDI_STATE_QUIESCENT);
	kfree(efx->mcdi);
}
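
/* Write a request into the MC's shared memory. An MCDI v1 request is a
 * single header dword (command, length, sequence number, flags) followed
 * by the payload; a v2 request wraps the real command in MC_CMD_V2_EXTN,
 * carrying the extended command and actual length in a second header
 * dword.
 */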
static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
			    const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1 */
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		hdr_len = 4;
	} else {
		/* MCDI v2 */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
}

static int efx_mcdi_errno(unsigned int mcdi_err)
{
	switch (mcdi_err) {
	case 0:
		return 0;
#define TRANSLATE_ERROR(name)					\
	case MC_CMD_ERR_ ## name:				\
		return -name;
	TRANSLATE_ERROR(EPERM);
	TRANSLATE_ERROR(ENOENT);
	TRANSLATE_ERROR(EINTR);
	TRANSLATE_ERROR(EAGAIN);
	TRANSLATE_ERROR(EACCES);
	TRANSLATE_ERROR(EBUSY);
	TRANSLATE_ERROR(EINVAL);
	TRANSLATE_ERROR(EDEADLK);
	TRANSLATE_ERROR(ENOSYS);
	TRANSLATE_ERROR(ETIME);
	TRANSLATE_ERROR(EALREADY);
	TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
	case MC_CMD_ERR_ALLOC_FAIL:
		return -ENOBUFS;
	case MC_CMD_ERR_MAC_EXIST:
		return -EADDRINUSE;
	default:
		return -EPROTO;
	}
}

static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

	if (error && mcdi->resp_data_len == 0) {
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc =
			efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
	} else {
		mcdi->resprc = 0;
	}
}

static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once per microsecond) for the
	 * first jiffy, because MCDI responses are generally fast. After
	 * that, back off and poll approximately once per jiffy.
	 */
	spins = TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		rmb();
		if (efx->type->mcdi_poll_response(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	spin_lock_bh(&mcdi->iface_lock);
	efx_mcdi_read_response_header(efx);
	spin_unlock_bh(&mcdi->iface_lock);

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}

/* Test and clear MC-rebooted flag for this port/function; reset
 * software state as necessary.
 */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return 0;

	return efx->type->mcdi_poll_reboot(efx);
}
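
/* The interface state machine: a requestor moves the interface from
 * QUIESCENT to RUNNING in efx_mcdi_acquire(); a completion (event or
 * poll) moves it from RUNNING to COMPLETED in efx_mcdi_complete(); the
 * requestor then returns it to QUIESCENT in efx_mcdi_release(), waking
 * any other thread waiting to issue a request.
 */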

static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING.
	 */
	wait_event(mcdi->wq,
		   atomic_cmpxchg(&mcdi->state,
				  MCDI_STATE_QUIESCENT,
				  MCDI_STATE_RUNNING)
		   == MCDI_STATE_QUIESCENT);
}

static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(
		    mcdi->wq,
		    atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
		    MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_mode_poll() switched us back to polled
	 * completions. In which case, poll for completions directly. If
	 * efx_mcdi_ev_cpl() completed the request first, then we'll just
	 * end up completing the request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}

static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
{
	/* If the interface is RUNNING, then move to COMPLETED and wake any
	 * waiters. If the interface isn't in RUNNING then we've received a
	 * duplicate completion after we've already transitioned back to
	 * QUIESCENT. [A subsequent invocation would increment seqno, so would
	 * have failed the seqno check].
	 */
	if (atomic_cmpxchg(&mcdi->state,
			   MCDI_STATE_RUNNING,
			   MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}

static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
	wake_up(&mcdi->wq);
}

static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
				  seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	if (wake)
		efx_mcdi_complete(mcdi);
}
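
/* Illustrative use of the RPC entry point (this mirrors the pattern of
 * the request helpers later in this file):
 *
 *	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
 *	size_t outlen;
 *	int rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
 *			      outbuf, sizeof(outbuf), &outlen);
 *
 * On success the response payload is available in outbuf and its actual
 * length in outlen.
 */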
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
		 const efx_dword_t *inbuf, size_t inlen,
		 efx_dword_t *outbuf, size_t outlen,
		 size_t *outlen_actual)
{
	int rc;

	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
	if (rc)
		return rc;

	return efx_mcdi_rpc_finish(efx, cmd, inlen,
				   outbuf, outlen, outlen_actual);
}

int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (efx->type->mcdi_max_ver < 0 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
		return -EINVAL;

	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
		return -EMSGSIZE;

	efx_mcdi_acquire(mcdi);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	spin_unlock_bh(&mcdi->iface_lock);

	efx_mcdi_copyin(efx, cmd, inbuf, inlen);
	mcdi->new_epoch = false;
	return 0;
}

int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);

		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);
	} else {
		size_t hdr_len, data_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		spin_unlock_bh(&mcdi->iface_lock);

		BUG_ON(rc > 0);

		if (rc == 0) {
			efx->type->mcdi_read_response(efx, outbuf, hdr_len,
						      min(outlen, data_len));
			if (outlen_actual != NULL)
				*outlen_actual = data_len;
		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
			; /* Don't reset if MC_CMD_REBOOT returns EIO */
		else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else
			netif_dbg(efx, hw, efx->net_dev,
				  "MC command 0x%x inlen %d failed rc=%d\n",
				  cmd, (int)inlen, -rc);

		if (rc == -EIO || rc == -EINTR) {
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
			mcdi->new_epoch = true;
		}
	}

	efx_mcdi_release(mcdi);
	return rc;
}

void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete(mcdi);
}

void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}

static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * There's a race here with efx_mcdi_rpc(), because we might receive
	 * a REBOOT event *before* the request has been copied out. In polled
	 * mode (during startup) this is irrelevant, because efx_mcdi_complete()
	 * is ignored. In event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI request. Did the mc
	 * reboot before or after the copyout? The best we can do always is
	 * just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
		mcdi->new_epoch = true;
	}

	spin_unlock(&mcdi->iface_lock);
}

/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_info(efx, hw, efx->net_dev,
			   "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily. We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;
	case MCDI_EVENT_CODE_TX_ERR:
	case MCDI_EVENT_CODE_RX_ERR:
		netif_err(efx, hw, efx->net_dev,
			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
			  EFX_QWORD_VAL(*event));
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}

/**************************************************************************
 *
 * Specific request functions
 *
 **************************************************************************
 */
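
/* The request helpers below share a pattern: declare aligned input/output
 * buffers with MCDI_DECLARE_BUF(), fill the input with MCDI_SET_DWORD()
 * and friends, issue efx_mcdi_rpc(), then validate the response length
 * before extracting fields with MCDI_DWORD()/MCDI_PTR().
 */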
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
	size_t outlength;
	const __le16 *ver_words;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		goto fail;

	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
	snprintf(buf, len, "%u.%u.%u.%u",
		 le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
		 le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
	return;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	buf[0] = 0;
}

int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			bool *was_attached)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
		       driver_operating ? 1 : 0);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);

	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	if (was_attached != NULL)
		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
	return 0;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen, i;
	int port_num = efx_port_num(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	if (mac_address)
		memcpy(mac_address,
		       port_num ?
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
		       ETH_ALEN);
	if (fw_subtype_list) {
		for (i = 0;
		     i < MCDI_VAR_ARRAY_LEN(outlen,
					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
		     i++)
			fw_subtype_list[i] = MCDI_ARRAY_WORD(
				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
			fw_subtype_list[i] = 0;
	}
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);
	return rc;
}

int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
	u32 dest = 0;
	int rc;

	if (uart)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
	if (evq)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;

	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);

	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
		  __func__, rc);
	return rc;
}

int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			size_t *size_out, size_t *erase_size_out,
			bool *protected_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
			    (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
	case MC_CMD_NVRAM_TEST_PASS:
	case MC_CMD_NVRAM_TEST_NOTSUPP:
		return 0;
	default:
		return -EIO;
	}
}

int efx_mcdi_nvram_test_all(struct efx_nic *efx)
{
	u32 nvram_types;
	unsigned int type;
	int rc;

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		goto fail1;

	type = 0;
	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = efx_mcdi_nvram_test(efx, type);
			if (rc)
				goto fail2;
		}
		type++;
		nvram_types >>= 1;
	}

	return 0;

fail2:
	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
		  __func__, type);
fail1:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port.
	 */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				  outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 0;
}

static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot. We set a flag that makes the command a no-op if it
	 * has already done so. We don't know what return code to
	 * expect (0 or -EIO), so ignore it.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
			    NULL, 0, NULL);
}

int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_read_assertion(efx);
	if (rc)
		return rc;

	efx_mcdi_exit_assertion(efx);
	return 0;
}

void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
	int rc;

	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);

	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
}

static int efx_mcdi_reset_port(struct efx_nic *efx)
{
	int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);

	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
	return rc;
}

static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down: the MC reboots as soon as it
	 * processes this command, so -EIO (the connection was lost) means
	 * success, and an apparently successful response means the reboot
	 * did not happen.
	 */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}

int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc;

	/* Recover from a failed assertion pre-reset */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	if (method == RESET_TYPE_WORLD)
		return efx_mcdi_reset_mc(efx);
	else
		return efx_mcdi_reset_port(efx);
}

static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
				   const u8 *mac, int *id_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
		       MC_CMD_FILTER_MODE_SIMPLE);
	memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
{
	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}

int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
			}
		}
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);

	return rc;
}

int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

#ifdef CONFIG_SFC_MTD

#define EFX_MCDI_NVRAM_LEN_MAX 128
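
/* NVRAM reads and writes below are chunked to EFX_MCDI_NVRAM_LEN_MAX
 * bytes so that each transfer fits within a single MCDI request and
 * completes well inside MCDI_RPC_TIMEOUT.
 */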

static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
			       loff_t offset, u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf,
			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
				loff_t offset, const u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
				loff_t offset, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
		      size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
					 buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->common.mtd.erasesize;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	/* The MCDI interface can in fact do multiple erase blocks at once;
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout.
	 */
	while (offset < end) {
		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
					  chunk);
		if (rc)
			goto out;
		offset += chunk;
	}
out:
	return rc;
}

int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
		       size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
					  buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	int rc = 0;

	if (part->updating) {
		part->updating = false;
		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
	}
	return rc;
}

void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
{
	struct efx_mcdi_mtd_partition *mcdi_part =
		container_of(part, struct efx_mcdi_mtd_partition, common);
	struct efx_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
		 efx->name, part->type_name, mcdi_part->fw_subtype);
}

#endif /* CONFIG_SFC_MTD */