mcdi.c

/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2008-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <asm/cmpxchg.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
#include "farch_regs.h"
#include "mcdi_pcol.h"
#include "phy.h"

/**************************************************************************
 *
 * Management-Controller-to-Driver Interface
 *
 **************************************************************************
 */

#define MCDI_RPC_TIMEOUT	(10 * HZ)

/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 250ms for the status word to be set.
 */
#define MCDI_STATUS_DELAY_US		100
#define MCDI_STATUS_DELAY_COUNT		2500
#define MCDI_STATUS_SLEEP_MS						\
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)

#define SEQ_MASK							\
	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))

struct efx_mcdi_async_param {
	struct list_head list;
	unsigned int cmd;
	size_t inlen;
	size_t outlen;
	efx_mcdi_async_completer *complete;
	unsigned long cookie;
	/* followed by request/response buffer */
};

static void efx_mcdi_timeout_async(unsigned long context);
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			       bool *was_attached_out);

static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
	EFX_BUG_ON_PARANOID(!efx->mcdi);
	return &efx->mcdi->iface;
}

int efx_mcdi_init(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;
	bool already_attached;
	int rc;

	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
	if (!efx->mcdi)
		return -ENOMEM;

	mcdi = efx_mcdi(efx);
	mcdi->efx = efx;
	init_waitqueue_head(&mcdi->wq);
	spin_lock_init(&mcdi->iface_lock);
	mcdi->state = MCDI_STATE_QUIESCENT;
	mcdi->mode = MCDI_MODE_POLL;
	spin_lock_init(&mcdi->async_lock);
	INIT_LIST_HEAD(&mcdi->async_list);
	setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
		    (unsigned long)mcdi);

	(void) efx_mcdi_poll_reboot(efx);
	mcdi->new_epoch = true;

	/* Recover from a failed assertion before probing */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	/* Let the MC (and BMC, if this is a LOM) know that the driver
	 * is loaded. We should do this before we reset the NIC.
	 */
	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Unable to register driver with MCPU\n");
		return rc;
	}
	if (already_attached)
		/* Not a fatal error */
		netif_err(efx, probe, efx->net_dev,
			  "Host already registered with MCPU\n");

	return 0;
}

void efx_mcdi_fini(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return;

	BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);

	/* Relinquish the device (back to the BMC, if this is a LOM) */
	efx_mcdi_drv_attach(efx, false, NULL);

	kfree(efx->mcdi);
}

static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
				  const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	efx_dword_t hdr[2];
	size_t hdr_len;
	u32 xflags, seqno;

	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);

	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
	spin_lock_bh(&mcdi->iface_lock);
	++mcdi->seqno;
	spin_unlock_bh(&mcdi->iface_lock);

	seqno = mcdi->seqno & SEQ_MASK;
	xflags = 0;
	if (mcdi->mode == MCDI_MODE_EVENTS)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	if (efx->type->mcdi_max_ver == 1) {
		/* MCDI v1 */
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, cmd,
				     MCDI_HEADER_DATALEN, inlen,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		hdr_len = 4;
	} else {
		/* MCDI v2 */
		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
		EFX_POPULATE_DWORD_7(hdr[0],
				     MCDI_HEADER_RESPONSE, 0,
				     MCDI_HEADER_RESYNC, 1,
				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
				     MCDI_HEADER_DATALEN, 0,
				     MCDI_HEADER_SEQ, seqno,
				     MCDI_HEADER_XFLAGS, xflags,
				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
		EFX_POPULATE_DWORD_2(hdr[1],
				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
		hdr_len = 8;
	}

	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);

	mcdi->new_epoch = false;
}

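/* Translate an MCDI firmware error code into a negative Linux error
 * number; unrecognised codes become -EPROTO.
 */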
static int efx_mcdi_errno(unsigned int mcdi_err)
{
	switch (mcdi_err) {
	case 0:
		return 0;
#define TRANSLATE_ERROR(name)					\
	case MC_CMD_ERR_ ## name:				\
		return -name;
	TRANSLATE_ERROR(EPERM);
	TRANSLATE_ERROR(ENOENT);
	TRANSLATE_ERROR(EINTR);
	TRANSLATE_ERROR(EAGAIN);
	TRANSLATE_ERROR(EACCES);
	TRANSLATE_ERROR(EBUSY);
	TRANSLATE_ERROR(EINVAL);
	TRANSLATE_ERROR(EDEADLK);
	TRANSLATE_ERROR(ENOSYS);
	TRANSLATE_ERROR(ETIME);
	TRANSLATE_ERROR(EALREADY);
	TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
	case MC_CMD_ERR_ALLOC_FAIL:
		return -ENOBUFS;
	case MC_CMD_ERR_MAC_EXIST:
		return -EADDRINUSE;
	default:
		return -EPROTO;
	}
}

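/* Parse the header of a completed response out of shared memory and
 * record the result code and response length in the MCDI interface
 * state.  Called with iface_lock held.
 */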
static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned int respseq, respcmd, error;
	efx_dword_t hdr;

	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);

	if (respcmd != MC_CMD_V2_EXTN) {
		mcdi->resp_hdr_len = 4;
		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
	} else {
		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
		mcdi->resp_hdr_len = 8;
		mcdi->resp_data_len =
			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

	if (error && mcdi->resp_data_len == 0) {
		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
		mcdi->resprc = -EIO;
	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
		netif_err(efx, hw, efx->net_dev,
			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
			  respseq, mcdi->seqno);
		mcdi->resprc = -EIO;
	} else if (error) {
		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
		mcdi->resprc =
			efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
	} else {
		mcdi->resprc = 0;
	}
}

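/* Poll for completion of the current request.  Returns 0 once a
 * response (or an MC reboot) has been seen, or -ETIMEDOUT if
 * MCDI_RPC_TIMEOUT expires first.
 */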
static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once per microsecond) for
	 * the first jiffy, because MCDI responses are generally fast.
	 * After that, back off and poll approximately once per jiffy.
	 */
	spins = TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		rmb();
		if (efx->type->mcdi_poll_response(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	spin_lock_bh(&mcdi->iface_lock);
	efx_mcdi_read_response_header(efx);
	spin_unlock_bh(&mcdi->iface_lock);

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}

/* Test and clear MC-rebooted flag for this port/function; reset
 * software state as necessary.
 */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
	if (!efx->mcdi)
		return 0;

	return efx->type->mcdi_poll_reboot(efx);
}

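/* Try to claim the interface for an asynchronous request; returns true
 * if we won the race from QUIESCENT to RUNNING_ASYNC.
 */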
static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
{
	return cmpxchg(&mcdi->state,
		       MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
		MCDI_STATE_QUIESCENT;
}

static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
{
	/* Wait until the interface becomes QUIESCENT and we win the race
	 * to mark it RUNNING_SYNC.
	 */
	wait_event(mcdi->wq,
		   cmpxchg(&mcdi->state,
			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
		   MCDI_STATE_QUIESCENT);
}

static int efx_mcdi_await_completion(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
			       MCDI_RPC_TIMEOUT) == 0)
		return -ETIMEDOUT;

	/* Check if efx_mcdi_set_mode() switched us back to polled completions.
	 * In which case, poll for completions directly. If efx_mcdi_ev_cpl()
	 * completed the request first, then we'll just end up completing the
	 * request again, which is safe.
	 *
	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
	 * wait_event_timeout() implicitly provides.
	 */
	if (mcdi->mode == MCDI_MODE_POLL)
		return efx_mcdi_poll(efx);

	return 0;
}

/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
 * requester.  Return whether this was done.  Does not take any locks.
 */
static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
{
	if (cmpxchg(&mcdi->state,
		    MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
	    MCDI_STATE_RUNNING_SYNC) {
		wake_up(&mcdi->wq);
		return true;
	}

	return false;
}

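/* Release the interface.  In event-completion mode, if any asynchronous
 * requests are queued, start the next one instead of returning to
 * QUIESCENT.
 */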
static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
	if (mcdi->mode == MCDI_MODE_EVENTS) {
		struct efx_mcdi_async_param *async;
		struct efx_nic *efx = mcdi->efx;

		/* Process the asynchronous request queue */
		spin_lock_bh(&mcdi->async_lock);
		async = list_first_entry_or_null(
			&mcdi->async_list, struct efx_mcdi_async_param, list);
		if (async) {
			mcdi->state = MCDI_STATE_RUNNING_ASYNC;
			efx_mcdi_send_request(efx, async->cmd,
					      (const efx_dword_t *)(async + 1),
					      async->inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
		spin_unlock_bh(&mcdi->async_lock);

		if (async)
			return;
	}

	mcdi->state = MCDI_STATE_QUIESCENT;
	wake_up(&mcdi->wq);
}

/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
 * asynchronous completion function, and release the interface.
 * Return whether this was done.  Must be called in bh-disabled
 * context.  Will take iface_lock and async_lock.
 */
static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
{
	struct efx_nic *efx = mcdi->efx;
	struct efx_mcdi_async_param *async;
	size_t hdr_len, data_len;
	efx_dword_t *outbuf;
	int rc;

	if (cmpxchg(&mcdi->state,
		    MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
	    MCDI_STATE_RUNNING_ASYNC)
		return false;

	spin_lock(&mcdi->iface_lock);
	if (timeout) {
		/* Ensure that if the completion event arrives later,
		 * the seqno check in efx_mcdi_ev_cpl() will fail
		 */
		++mcdi->seqno;
		++mcdi->credits;
		rc = -ETIMEDOUT;
		hdr_len = 0;
		data_len = 0;
	} else {
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
	}
	spin_unlock(&mcdi->iface_lock);

	/* Stop the timer.  In case the timer function is running, we
	 * must wait for it to return so that there is no possibility
	 * of it aborting the next request.
	 */
	if (!timeout)
		del_timer_sync(&mcdi->async_timer);

	spin_lock(&mcdi->async_lock);
	async = list_first_entry(&mcdi->async_list,
				 struct efx_mcdi_async_param, list);
	list_del(&async->list);
	spin_unlock(&mcdi->async_lock);

	outbuf = (efx_dword_t *)(async + 1);
	efx->type->mcdi_read_response(efx, outbuf, hdr_len,
				      min(async->outlen, data_len));
	async->complete(efx, async->cookie, rc, outbuf, data_len);
	kfree(async);

	efx_mcdi_release(mcdi);

	return true;
}

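/* Handle a CMDDONE event: match the sequence number against the
 * outstanding request and, if it matches, record the response and
 * complete the request (asynchronous or synchronous).
 */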
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
			    unsigned int datalen, unsigned int mcdi_err)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	bool wake = false;

	spin_lock(&mcdi->iface_lock);

	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
		if (mcdi->credits)
			/* The request has been cancelled */
			--mcdi->credits;
		else
			netif_err(efx, hw, efx->net_dev,
				  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
				  seqno, mcdi->seqno);
	} else {
		if (efx->type->mcdi_max_ver >= 2) {
			/* MCDI v2 responses don't fit in an event */
			efx_mcdi_read_response_header(efx);
		} else {
			mcdi->resprc = efx_mcdi_errno(mcdi_err);
			mcdi->resp_hdr_len = 4;
			mcdi->resp_data_len = datalen;
		}

		wake = true;
	}

	spin_unlock(&mcdi->iface_lock);

	if (wake) {
		if (!efx_mcdi_complete_async(mcdi, false))
			(void) efx_mcdi_complete_sync(mcdi);

		/* If the interface isn't RUNNING_ASYNC or
		 * RUNNING_SYNC then we've received a duplicate
		 * completion after we've already transitioned back to
		 * QUIESCENT. [A subsequent invocation would increment
		 * seqno, so would have failed the seqno check].
		 */
	}
}

static void efx_mcdi_timeout_async(unsigned long context)
{
	struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;

	efx_mcdi_complete_async(mcdi, true);
}

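/* Validate that the command number and request length are within the
 * limits of the MCDI version supported by this NIC type.
 */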
static int
efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
{
	if (efx->type->mcdi_max_ver < 0 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
		return -EINVAL;

	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
	    (efx->type->mcdi_max_ver < 2 &&
	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
		return -EMSGSIZE;

	return 0;
}

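/* Issue an MCDI command and wait synchronously for its completion;
 * equivalent to efx_mcdi_rpc_start() followed by efx_mcdi_rpc_finish().
 */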
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
		 const efx_dword_t *inbuf, size_t inlen,
		 efx_dword_t *outbuf, size_t outlen,
		 size_t *outlen_actual)
{
	int rc;

	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
	if (rc)
		return rc;

	return efx_mcdi_rpc_finish(efx, cmd, inlen,
				   outbuf, outlen, outlen_actual);
}

int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
		       const efx_dword_t *inbuf, size_t inlen)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	efx_mcdi_acquire_sync(mcdi);
	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
	return 0;
}

/**
 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
 * @efx: NIC through which to issue the command
 * @cmd: Command type number
 * @inbuf: Command parameters
 * @inlen: Length of command parameters, in bytes
 * @outlen: Length to allocate for response buffer, in bytes
 * @complete: Function to be called on completion or cancellation.
 * @cookie: Arbitrary value to be passed to @complete.
 *
 * This function does not sleep and therefore may be called in atomic
 * context.  It will fail if event queues are disabled or if MCDI
 * event completions have been disabled due to an error.
 *
 * If it succeeds, the @complete function will be called exactly once
 * in atomic context, when one of the following occurs:
 * (a) the completion event is received (in NAPI context)
 * (b) event queues are disabled (in the process that disables them)
 * (c) the request times out (in timer context)
 */
int
efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
		   const efx_dword_t *inbuf, size_t inlen, size_t outlen,
		   efx_mcdi_async_completer *complete, unsigned long cookie)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	struct efx_mcdi_async_param *async;
	int rc;

	rc = efx_mcdi_check_supported(efx, cmd, inlen);
	if (rc)
		return rc;

	async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
			GFP_ATOMIC);
	if (!async)
		return -ENOMEM;

	async->cmd = cmd;
	async->inlen = inlen;
	async->outlen = outlen;
	async->complete = complete;
	async->cookie = cookie;
	memcpy(async + 1, inbuf, inlen);

	spin_lock_bh(&mcdi->async_lock);

	if (mcdi->mode == MCDI_MODE_EVENTS) {
		list_add_tail(&async->list, &mcdi->async_list);

		/* If this is at the front of the queue, try to start it
		 * immediately
		 */
		if (mcdi->async_list.next == &async->list &&
		    efx_mcdi_acquire_async(mcdi)) {
			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
			mod_timer(&mcdi->async_timer,
				  jiffies + MCDI_RPC_TIMEOUT);
		}
	} else {
		kfree(async);
		rc = -ENETDOWN;
	}

	spin_unlock_bh(&mcdi->async_lock);

	return rc;
}

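/* Wait for completion of a request previously started with
 * efx_mcdi_rpc_start(), copy out up to @outlen bytes of response, and
 * release the interface.  On timeout the request is cancelled so that
 * a late completion event will be ignored.
 */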
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
			efx_dword_t *outbuf, size_t outlen,
			size_t *outlen_actual)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	int rc;

	if (mcdi->mode == MCDI_MODE_POLL)
		rc = efx_mcdi_poll(efx);
	else
		rc = efx_mcdi_await_completion(efx);

	if (rc != 0) {
		/* Close the race with efx_mcdi_ev_cpl() executing just too late
		 * and completing a request we've just cancelled, by ensuring
		 * that the seqno check therein fails.
		 */
		spin_lock_bh(&mcdi->iface_lock);
		++mcdi->seqno;
		++mcdi->credits;
		spin_unlock_bh(&mcdi->iface_lock);

		netif_err(efx, hw, efx->net_dev,
			  "MC command 0x%x inlen %d mode %d timed out\n",
			  cmd, (int)inlen, mcdi->mode);
	} else {
		size_t hdr_len, data_len;

		/* At the very least we need a memory barrier here to ensure
		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
		 * a spurious efx_mcdi_ev_cpl() running concurrently by
		 * acquiring the iface_lock. */
		spin_lock_bh(&mcdi->iface_lock);
		rc = mcdi->resprc;
		hdr_len = mcdi->resp_hdr_len;
		data_len = mcdi->resp_data_len;
		spin_unlock_bh(&mcdi->iface_lock);

		BUG_ON(rc > 0);

		if (rc == 0) {
			efx->type->mcdi_read_response(efx, outbuf, hdr_len,
						      min(outlen, data_len));
			if (outlen_actual != NULL)
				*outlen_actual = data_len;
		} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
			; /* Don't reset if MC_CMD_REBOOT returns EIO */
		else if (rc == -EIO || rc == -EINTR) {
			netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
				  -rc);
			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
		} else
			netif_dbg(efx, hw, efx->net_dev,
				  "MC command 0x%x inlen %d failed rc=%d\n",
				  cmd, (int)inlen, -rc);

		if (rc == -EIO || rc == -EINTR) {
			msleep(MCDI_STATUS_SLEEP_MS);
			efx_mcdi_poll_reboot(efx);
			mcdi->new_epoch = true;
		}
	}

	efx_mcdi_release(mcdi);
	return rc;
}

/* Switch to polled MCDI completions.  This can be called in various
 * error conditions with various locks held, so it must be lockless.
 * Caller is responsible for flushing asynchronous requests later.
 */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_POLL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete_sync() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete_sync(mcdi);
}

/* Flush any running or queued asynchronous requests, after event processing
 * is stopped
 */
void efx_mcdi_flush_async(struct efx_nic *efx)
{
	struct efx_mcdi_async_param *async, *next;
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	/* We must be in polling mode so no more requests can be queued */
	BUG_ON(mcdi->mode != MCDI_MODE_POLL);

	del_timer_sync(&mcdi->async_timer);

	/* If a request is still running, make sure we give the MC
	 * time to complete it so that the response won't overwrite our
	 * next request.
	 */
	if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
		efx_mcdi_poll(efx);
		mcdi->state = MCDI_STATE_QUIESCENT;
	}

	/* Nothing else will access the async list now, so it is safe
	 * to walk it without holding async_lock.  If we hold it while
	 * calling a completer then lockdep may warn that we have
	 * acquired locks in the wrong order.
	 */
	list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
		async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
		list_del(&async->list);
		kfree(async);
	}
}

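/* Switch to event-driven MCDI completions. */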
void efx_mcdi_mode_event(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	if (mcdi->mode == MCDI_MODE_EVENTS)
		return;

	/* We can't switch from polled to event completion in the middle of a
	 * request, because the completion method is specified in the request.
	 * So acquire the interface to serialise the requestors. We don't need
	 * to acquire the iface_lock to change the mode here, but we do need a
	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
	 * efx_mcdi_acquire() provides.
	 */
	efx_mcdi_acquire_sync(mcdi);
	mcdi->mode = MCDI_MODE_EVENTS;
	efx_mcdi_release(mcdi);
}

static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* If there is an outstanding MCDI request, it has been terminated
	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
	 * in polled mode, then do nothing because the MC reboot handler will
	 * set the header correctly. However, if the mcdi interface is waiting
	 * for a CMDDONE event it won't receive it [and since all MCDI events
	 * are sent to the same queue, we can't be racing with
	 * efx_mcdi_ev_cpl()]
	 *
	 * If there is an outstanding asynchronous request, we can't
	 * complete it now (efx_mcdi_complete() would deadlock).  The
	 * reset process will take care of this.
	 *
	 * There's a race here with efx_mcdi_send_request(), because
	 * we might receive a REBOOT event *before* the request has
	 * been copied out. In polled mode (during startup) this is
	 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
	 * event mode, this condition is just an edge-case of
	 * receiving a REBOOT event after posting the MCDI
	 * request. Did the mc reboot before or after the copyout? The
	 * best we can always do is just return failure.
	 */
	spin_lock(&mcdi->iface_lock);
	if (efx_mcdi_complete_sync(mcdi)) {
		if (mcdi->mode == MCDI_MODE_EVENTS) {
			mcdi->resprc = rc;
			mcdi->resp_hdr_len = 0;
			mcdi->resp_data_len = 0;
			++mcdi->credits;
		}
	} else {
		int count;

		/* Consume the status word since efx_mcdi_rpc_finish() won't */
		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
			if (efx_mcdi_poll_reboot(efx))
				break;
			udelay(MCDI_STATUS_DELAY_US);
		}
		mcdi->new_epoch = true;

		/* Nobody was waiting for an MCDI request, so trigger a reset */
		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
	}

	spin_unlock(&mcdi->iface_lock);
}

/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;

	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;

	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_mcdi_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_info(efx, hw, efx->net_dev,
			   "MC Scheduler error address=0x%x\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
	case MCDI_EVENT_CODE_MC_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gathered lazily.  We can ignore this. */
		break;
	case MCDI_EVENT_CODE_FLR:
		efx_sriov_flr(efx, MCDI_EVENT_FIELD(*event, FLR_VF));
		break;
	case MCDI_EVENT_CODE_PTP_RX:
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;
	case MCDI_EVENT_CODE_TX_FLUSH:
	case MCDI_EVENT_CODE_RX_FLUSH:
		/* Two flush events will be sent: one to the same event
		 * queue as completions, and one to event queue 0.
		 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
		 * flag will be set, and we should ignore the event
		 * because we want to wait for all completions.
		 */
		BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
			     MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
		if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
			efx_ef10_handle_drain_event(efx);
		break;
	case MCDI_EVENT_CODE_TX_ERR:
	case MCDI_EVENT_CODE_RX_ERR:
		netif_err(efx, hw, efx->net_dev,
			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
			  EFX_QWORD_VAL(*event));
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	default:
		netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
			  code);
	}
}

/**************************************************************************
 *
 * Specific request functions
 *
 **************************************************************************
 */

void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
	MCDI_DECLARE_BUF(outbuf,
			 max(MC_CMD_GET_VERSION_OUT_LEN,
			     MC_CMD_GET_CAPABILITIES_OUT_LEN));
	size_t outlength;
	const __le16 *ver_words;
	size_t offset;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
			  outbuf, sizeof(outbuf), &outlength);
	if (rc)
		goto fail;
	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
	offset = snprintf(buf, len, "%u.%u.%u.%u",
			  le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
			  le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));

	/* EF10 may have multiple datapath firmware variants within a
	 * single version.  Report which variants are running.
	 */
	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
		BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
				  outbuf, sizeof(outbuf), &outlength);
		if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
			offset += snprintf(
				buf + offset, len - offset, " rx? tx?");
		else
			offset += snprintf(
				buf + offset, len - offset, " rx%x tx%x",
				MCDI_WORD(outbuf,
					  GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
				MCDI_WORD(outbuf,
					  GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));

		/* It's theoretically possible for the string to exceed 31
		 * characters, though in practice the first three version
		 * components are short enough that this doesn't happen.
		 */
		if (WARN_ON(offset >= len))
			buf[0] = 0;
	}

	return;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	buf[0] = 0;
}

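/* Tell the MC whether a driver is now attached to this function, and
 * optionally return the previous attachment state via @was_attached.
 */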
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
			       bool *was_attached)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
		       driver_operating ? 1 : 0);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);

	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	/* We currently assume we have control of the external link
	 * and are completely trusted by firmware.  Abort probing
	 * if that's not true for this function.
	 */
	if (driver_operating &&
	    outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
	    (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
	     (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
	      1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) !=
	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
	     1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED)) {
		netif_err(efx, probe, efx->net_dev,
			  "This driver version only supports one function per port\n");
		return -ENODEV;
	}

	if (was_attached != NULL)
		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
	return 0;

fail:
	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

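/* Read the board configuration: per-port MAC address, firmware subtype
 * list and capability flags.  Any of the output pointers may be NULL.
 */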
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
			   u16 *fw_subtype_list, u32 *capabilities)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen, i;
	int port_num = efx_port_num(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = -EIO;
		goto fail;
	}

	if (mac_address)
		memcpy(mac_address,
		       port_num ?
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
		       MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
		       ETH_ALEN);
	if (fw_subtype_list) {
		for (i = 0;
		     i < MCDI_VAR_ARRAY_LEN(outlen,
					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
		     i++)
			fw_subtype_list[i] = MCDI_ARRAY_WORD(
				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
			fw_subtype_list[i] = 0;
	}
	if (capabilities) {
		if (port_num)
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		else
			*capabilities = MCDI_DWORD(outbuf,
					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
		  __func__, rc, (int)outlen);

	return rc;
}

int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
	u32 dest = 0;
	int rc;

	if (uart)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
	if (evq)
		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;

	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);

	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
		  __func__, rc);
	return rc;
}

int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
			size_t *size_out, size_t *erase_size_out,
			bool *protected_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
			    (1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
	case MC_CMD_NVRAM_TEST_PASS:
	case MC_CMD_NVRAM_TEST_NOTSUPP:
		return 0;
	default:
		return -EIO;
	}
}

int efx_mcdi_nvram_test_all(struct efx_nic *efx)
{
	u32 nvram_types;
	unsigned int type;
	int rc;

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		goto fail1;

	type = 0;
	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = efx_mcdi_nvram_test(efx, type);
			if (rc)
				goto fail2;
		}
		type++;
		nvram_types >>= 1;
	}

	return 0;

fail2:
	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
		  __func__, type);
fail1:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port. */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
				  inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
				  outbuf, sizeof(outbuf), &outlen);
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	netif_err(efx, hw, efx->net_dev,
		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 0;
}

static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot.  We set a flag that makes the command a no-op if it
	 * has already done so.  We don't know what return code to
	 * expect (0 or -EIO), so ignore it.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	(void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
			    NULL, 0, NULL);
}

int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_read_assertion(efx);
	if (rc)
		return rc;

	efx_mcdi_exit_assertion(efx);
	return 0;
}

void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
	int rc;

	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);

	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
}

static int efx_mcdi_reset_port(struct efx_nic *efx)
{
	int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);

	if (rc)
		netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
			  __func__, rc);
	return rc;
}

static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

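/* MCDI NICs recover from all reset reasons in the same way, so map any
 * scheduled reset to RESET_TYPE_RECOVER_OR_ALL.
 */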
enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
{
	return RESET_TYPE_RECOVER_OR_ALL;
}

int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
{
	int rc;

	/* Recover from a failed assertion pre-reset */
	rc = efx_mcdi_handle_assertion(efx);
	if (rc)
		return rc;

	if (method == RESET_TYPE_WORLD)
		return efx_mcdi_reset_mc(efx);
	else
		return efx_mcdi_reset_port(efx);
}

static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
				   const u8 *mac, int *id_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
		       MC_CMD_FILTER_MODE_SIMPLE);
	memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int
efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
{
	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
}

int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
		rc = -EIO;
		goto fail;
	}

	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);

	return 0;

fail:
	*id_out = -1;
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
	int rc, count;

	BUILD_BUG_ON(EFX_MAX_CHANNELS >
		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);

	count = 0;
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flush_pending) {
				rx_queue->flush_pending = false;
				atomic_dec(&efx->rxq_flush_pending);
				MCDI_SET_ARRAY_DWORD(
					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
					count, efx_rx_queue_index(rx_queue));
				count++;
			}
		}
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
	WARN_ON(rc < 0);

	return rc;
}

int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);

	BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
	return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}

#ifdef CONFIG_SFC_MTD

#define EFX_MCDI_NVRAM_LEN_MAX 128

static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
			       loff_t offset, u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf,
			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
				loff_t offset, const u8 *buffer, size_t length)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
				loff_t offset, size_t length)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);

	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);

	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		goto fail;

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
		      size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
					 buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->common.mtd.erasesize;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	/* The MCDI interface can in fact do multiple erase blocks at once;
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout. */
	while (offset < end) {
		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
					  chunk);
		if (rc)
			goto out;
		offset += chunk;
	}
out:
	return rc;
}

int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
		       size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	if (!part->updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
		if (rc)
			goto out;
		part->updating = true;
	}

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
					  buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

int efx_mcdi_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	int rc = 0;

	if (part->updating) {
		part->updating = false;
		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
	}

	return rc;
}

void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
{
	struct efx_mcdi_mtd_partition *mcdi_part =
		container_of(part, struct efx_mcdi_mtd_partition, common);
	struct efx_nic *efx = part->mtd.priv;

	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
		 efx->name, part->type_name, mcdi_part->fw_subtype);
}

#endif /* CONFIG_SFC_MTD */