/* mtd.c */
  1. /****************************************************************************
  2. * Driver for Solarflare Solarstorm network controllers and boards
  3. * Copyright 2005-2006 Fen Systems Ltd.
  4. * Copyright 2006-2010 Solarflare Communications Inc.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published
  8. * by the Free Software Foundation, incorporated herein by reference.
  9. */
  10. #include <linux/bitops.h>
  11. #include <linux/module.h>
  12. #include <linux/mtd/mtd.h>
  13. #include <linux/delay.h>
  14. #include <linux/slab.h>
  15. #include <linux/rtnetlink.h>
  16. #include "net_driver.h"
  17. #include "spi.h"
  18. #include "efx.h"
  19. #include "nic.h"
  20. #include "mcdi.h"
  21. #include "mcdi_pcol.h"
  22. #define EFX_SPI_VERIFY_BUF_LEN 16
/* A single MTD partition exposed by this driver.  Embeds the mtd_info
 * that is registered with the MTD core; to_efx_mtd_partition() maps it
 * back to this structure. */
struct efx_mtd_partition {
	struct mtd_info mtd;
	union {
		struct {
			/* True while an MCDI NVRAM update transaction is
			 * open (Siena); closed by the sync op */
			bool updating;
			u8 nvram_type;
			u16 fw_subtype;
		} mcdi;
		/* Partition start within the SPI device (Falcon) */
		size_t offset;
	};
	/* Static type name, e.g. "sfc_flash_bootrom" */
	const char *type_name;
	/* Full name built by efx_mtd_rename_device():
	 * "<ifname> <type_name>[:<fw_subtype>]" */
	char name[IFNAMSIZ + 20];
};
/* Per-NIC-generation implementations of the MTD operations.
 * Offsets (@start) are relative to the start of the partition. */
struct efx_mtd_ops {
	int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
		    size_t *retlen, u8 *buffer);
	int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
	int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
		     size_t *retlen, const u8 *buffer);
	/* Wait for pending writes / close an open update transaction */
	int (*sync)(struct mtd_info *mtd);
};
  44. struct efx_mtd {
  45. struct list_head node;
  46. struct efx_nic *efx;
  47. const struct efx_spi_device *spi;
  48. const char *name;
  49. const struct efx_mtd_ops *ops;
  50. size_t n_parts;
  51. struct efx_mtd_partition part[0];
  52. };
/* Iterate over all partitions of an efx_mtd device */
#define efx_for_each_partition(part, efx_mtd)			\
	for ((part) = &(efx_mtd)->part[0];			\
	     (part) != &(efx_mtd)->part[(efx_mtd)->n_parts];	\
	     (part)++)

/* Map an embedded mtd_info back to its containing partition */
#define to_efx_mtd_partition(mtd)				\
	container_of(mtd, struct efx_mtd_partition, mtd)

static int falcon_mtd_probe(struct efx_nic *efx);
static int siena_mtd_probe(struct efx_nic *efx);
  61. /* SPI utilities */
/* Wait up to 4 seconds for the SPI flash/EEPROM to finish a slow
 * operation (e.g. erase or status-register write), polling the status
 * register roughly every 100ms.
 *
 * Returns 0 once the device reports ready, -EINTR if a signal is
 * pending, -ETIMEDOUT after 40 polls, or a falcon_spi_cmd() error.
 */
static int
efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
{
	struct efx_mtd *efx_mtd = part->mtd.priv;
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);

		/* Read the status register; NRDY clear means done */
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;

		/* NOTE(review): signal check happens even in the
		 * uninterruptible case — confirm this is intended */
		if (signal_pending(current))
			return -EINTR;
	}
	pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
	return -ETIMEDOUT;
}
  87. static int
  88. efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
  89. {
  90. const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
  91. SPI_STATUS_BP0);
  92. u8 status;
  93. int rc;
  94. rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
  95. &status, sizeof(status));
  96. if (rc)
  97. return rc;
  98. if (!(status & unlock_mask))
  99. return 0; /* already unlocked */
  100. rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
  101. if (rc)
  102. return rc;
  103. rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
  104. if (rc)
  105. return rc;
  106. status &= ~unlock_mask;
  107. rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
  108. NULL, sizeof(status));
  109. if (rc)
  110. return rc;
  111. rc = falcon_spi_wait_write(efx, spi);
  112. if (rc)
  113. return rc;
  114. return 0;
  115. }
  116. static int
  117. efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
  118. {
  119. struct efx_mtd *efx_mtd = part->mtd.priv;
  120. const struct efx_spi_device *spi = efx_mtd->spi;
  121. struct efx_nic *efx = efx_mtd->efx;
  122. unsigned pos, block_len;
  123. u8 empty[EFX_SPI_VERIFY_BUF_LEN];
  124. u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
  125. int rc;
  126. if (len != spi->erase_size)
  127. return -EINVAL;
  128. if (spi->erase_command == 0)
  129. return -EOPNOTSUPP;
  130. rc = efx_spi_unlock(efx, spi);
  131. if (rc)
  132. return rc;
  133. rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
  134. if (rc)
  135. return rc;
  136. rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
  137. NULL, 0);
  138. if (rc)
  139. return rc;
  140. rc = efx_spi_slow_wait(part, false);
  141. /* Verify the entire region has been wiped */
  142. memset(empty, 0xff, sizeof(empty));
  143. for (pos = 0; pos < len; pos += block_len) {
  144. block_len = min(len - pos, sizeof(buffer));
  145. rc = falcon_spi_read(efx, spi, start + pos, block_len,
  146. NULL, buffer);
  147. if (rc)
  148. return rc;
  149. if (memcmp(empty, buffer, block_len))
  150. return -EIO;
  151. /* Avoid locking up the system */
  152. cond_resched();
  153. if (signal_pending(current))
  154. return -EINTR;
  155. }
  156. return rc;
  157. }
  158. /* MTD interface */
  159. static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
  160. {
  161. struct efx_mtd *efx_mtd = mtd->priv;
  162. int rc;
  163. rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
  164. if (rc == 0) {
  165. erase->state = MTD_ERASE_DONE;
  166. } else {
  167. erase->state = MTD_ERASE_FAILED;
  168. erase->fail_addr = 0xffffffff;
  169. }
  170. mtd_erase_callback(erase);
  171. return rc;
  172. }
  173. static void efx_mtd_sync(struct mtd_info *mtd)
  174. {
  175. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  176. struct efx_mtd *efx_mtd = mtd->priv;
  177. int rc;
  178. rc = efx_mtd->ops->sync(mtd);
  179. if (rc)
  180. pr_err("%s: %s sync failed (%d)\n",
  181. part->name, efx_mtd->name, rc);
  182. }
  183. static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
  184. {
  185. int rc;
  186. for (;;) {
  187. rc = mtd_device_unregister(&part->mtd);
  188. if (rc != -EBUSY)
  189. break;
  190. ssleep(1);
  191. }
  192. WARN_ON(rc);
  193. }
  194. static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
  195. {
  196. struct efx_mtd_partition *part;
  197. efx_for_each_partition(part, efx_mtd)
  198. efx_mtd_remove_partition(part);
  199. list_del(&efx_mtd->node);
  200. kfree(efx_mtd);
  201. }
  202. static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
  203. {
  204. struct efx_mtd_partition *part;
  205. efx_for_each_partition(part, efx_mtd)
  206. if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
  207. snprintf(part->name, sizeof(part->name),
  208. "%s %s:%02x", efx_mtd->efx->name,
  209. part->type_name, part->mcdi.fw_subtype);
  210. else
  211. snprintf(part->name, sizeof(part->name),
  212. "%s %s", efx_mtd->efx->name,
  213. part->type_name);
  214. }
/* Register every partition of @efx_mtd with the MTD core and add the
 * device to the NIC's MTD list.  On registration failure, any
 * partitions registered so far are rolled back; the caller remains
 * responsible for freeing @efx_mtd.  Returns 0 or -ENOMEM. */
static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_mtd->efx = efx;
	/* Names must be set before registration */
	efx_mtd_rename_device(efx_mtd);

	efx_for_each_partition(part, efx_mtd) {
		part->mtd.writesize = 1;
		part->mtd.owner = THIS_MODULE;
		part->mtd.priv = efx_mtd;
		part->mtd.name = part->name;
		part->mtd.erase = efx_mtd_erase;
		part->mtd.read = efx_mtd->ops->read;
		part->mtd.write = efx_mtd->ops->write;
		part->mtd.sync = efx_mtd_sync;
		if (mtd_device_register(&part->mtd, NULL, 0))
			goto fail;
	}
	list_add(&efx_mtd->node, &efx->mtd_list);
	return 0;

fail:
	/* Unregister the partitions that did register, in reverse order */
	while (part != &efx_mtd->part[0]) {
		--part;
		efx_mtd_remove_partition(part);
	}
	/* mtd_device_register() returns 1 if the MTD table is full */
	return -ENOMEM;
}
  242. void efx_mtd_remove(struct efx_nic *efx)
  243. {
  244. struct efx_mtd *efx_mtd, *next;
  245. WARN_ON(efx_dev_registered(efx));
  246. list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
  247. efx_mtd_remove_device(efx_mtd);
  248. }
  249. void efx_mtd_rename(struct efx_nic *efx)
  250. {
  251. struct efx_mtd *efx_mtd;
  252. ASSERT_RTNL();
  253. list_for_each_entry(efx_mtd, &efx->mtd_list, node)
  254. efx_mtd_rename_device(efx_mtd);
  255. }
  256. int efx_mtd_probe(struct efx_nic *efx)
  257. {
  258. if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
  259. return siena_mtd_probe(efx);
  260. else
  261. return falcon_mtd_probe(efx);
  262. }
  263. /* Implementation of MTD operations for Falcon */
  264. static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
  265. size_t len, size_t *retlen, u8 *buffer)
  266. {
  267. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  268. struct efx_mtd *efx_mtd = mtd->priv;
  269. const struct efx_spi_device *spi = efx_mtd->spi;
  270. struct efx_nic *efx = efx_mtd->efx;
  271. struct falcon_nic_data *nic_data = efx->nic_data;
  272. int rc;
  273. rc = mutex_lock_interruptible(&nic_data->spi_lock);
  274. if (rc)
  275. return rc;
  276. rc = falcon_spi_read(efx, spi, part->offset + start, len,
  277. retlen, buffer);
  278. mutex_unlock(&nic_data->spi_lock);
  279. return rc;
  280. }
  281. static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
  282. {
  283. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  284. struct efx_mtd *efx_mtd = mtd->priv;
  285. struct efx_nic *efx = efx_mtd->efx;
  286. struct falcon_nic_data *nic_data = efx->nic_data;
  287. int rc;
  288. rc = mutex_lock_interruptible(&nic_data->spi_lock);
  289. if (rc)
  290. return rc;
  291. rc = efx_spi_erase(part, part->offset + start, len);
  292. mutex_unlock(&nic_data->spi_lock);
  293. return rc;
  294. }
  295. static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
  296. size_t len, size_t *retlen, const u8 *buffer)
  297. {
  298. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  299. struct efx_mtd *efx_mtd = mtd->priv;
  300. const struct efx_spi_device *spi = efx_mtd->spi;
  301. struct efx_nic *efx = efx_mtd->efx;
  302. struct falcon_nic_data *nic_data = efx->nic_data;
  303. int rc;
  304. rc = mutex_lock_interruptible(&nic_data->spi_lock);
  305. if (rc)
  306. return rc;
  307. rc = falcon_spi_write(efx, spi, part->offset + start, len,
  308. retlen, buffer);
  309. mutex_unlock(&nic_data->spi_lock);
  310. return rc;
  311. }
  312. static int falcon_mtd_sync(struct mtd_info *mtd)
  313. {
  314. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  315. struct efx_mtd *efx_mtd = mtd->priv;
  316. struct efx_nic *efx = efx_mtd->efx;
  317. struct falcon_nic_data *nic_data = efx->nic_data;
  318. int rc;
  319. mutex_lock(&nic_data->spi_lock);
  320. rc = efx_spi_slow_wait(part, true);
  321. mutex_unlock(&nic_data->spi_lock);
  322. return rc;
  323. }
  324. static struct efx_mtd_ops falcon_mtd_ops = {
  325. .read = falcon_mtd_read,
  326. .erase = falcon_mtd_erase,
  327. .write = falcon_mtd_write,
  328. .sync = falcon_mtd_sync,
  329. };
/* Probe Falcon MTD partitions: the boot ROM region of the SPI flash
 * and the boot-config region of the SPI EEPROM, each exposed as a
 * single-partition MTD device.  Returns 0 if the last region probed
 * registered successfully, -ENODEV if no region is present, or an
 * allocation/registration error.  Caller must hold the RTNL lock. */
static int falcon_mtd_probe(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct efx_spi_device *spi;
	struct efx_mtd *efx_mtd;
	int rc = -ENODEV;

	ASSERT_RTNL();

	/* Boot ROM partition in SPI flash, after the bootcode area */
	spi = &nic_data->spi_flash;
	if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
		efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
				  GFP_KERNEL);
		if (!efx_mtd)
			return -ENOMEM;
		efx_mtd->spi = spi;
		efx_mtd->name = "flash";
		efx_mtd->ops = &falcon_mtd_ops;
		efx_mtd->n_parts = 1;
		efx_mtd->part[0].mtd.type = MTD_NORFLASH;
		efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
		efx_mtd->part[0].mtd.size =
			spi->size - FALCON_FLASH_BOOTCODE_START;
		efx_mtd->part[0].mtd.erasesize = spi->erase_size;
		efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
		efx_mtd->part[0].type_name = "sfc_flash_bootrom";
		rc = efx_mtd_probe_device(efx, efx_mtd);
		if (rc) {
			/* probe_device rolled back its registrations;
			 * we still own the allocation */
			kfree(efx_mtd);
			return rc;
		}
	}

	/* Boot configuration partition in SPI EEPROM */
	spi = &nic_data->spi_eeprom;
	if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
		efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
				  GFP_KERNEL);
		if (!efx_mtd)
			return -ENOMEM;
		efx_mtd->spi = spi;
		efx_mtd->name = "EEPROM";
		efx_mtd->ops = &falcon_mtd_ops;
		efx_mtd->n_parts = 1;
		efx_mtd->part[0].mtd.type = MTD_RAM;
		efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
		/* Clamp to the EEPROM size if it ends early */
		efx_mtd->part[0].mtd.size =
			min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
			EFX_EEPROM_BOOTCONFIG_START;
		efx_mtd->part[0].mtd.erasesize = spi->erase_size;
		efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
		efx_mtd->part[0].type_name = "sfc_bootconfig";
		rc = efx_mtd_probe_device(efx, efx_mtd);
		if (rc) {
			kfree(efx_mtd);
			return rc;
		}
	}

	return rc;
}
  385. /* Implementation of MTD operations for Siena */
  386. static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
  387. size_t len, size_t *retlen, u8 *buffer)
  388. {
  389. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  390. struct efx_mtd *efx_mtd = mtd->priv;
  391. struct efx_nic *efx = efx_mtd->efx;
  392. loff_t offset = start;
  393. loff_t end = min_t(loff_t, start + len, mtd->size);
  394. size_t chunk;
  395. int rc = 0;
  396. while (offset < end) {
  397. chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
  398. rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
  399. buffer, chunk);
  400. if (rc)
  401. goto out;
  402. offset += chunk;
  403. buffer += chunk;
  404. }
  405. out:
  406. *retlen = offset - start;
  407. return rc;
  408. }
  409. static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
  410. {
  411. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  412. struct efx_mtd *efx_mtd = mtd->priv;
  413. struct efx_nic *efx = efx_mtd->efx;
  414. loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
  415. loff_t end = min_t(loff_t, start + len, mtd->size);
  416. size_t chunk = part->mtd.erasesize;
  417. int rc = 0;
  418. if (!part->mcdi.updating) {
  419. rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
  420. if (rc)
  421. goto out;
  422. part->mcdi.updating = 1;
  423. }
  424. /* The MCDI interface can in fact do multiple erase blocks at once;
  425. * but erasing may be slow, so we make multiple calls here to avoid
  426. * tripping the MCDI RPC timeout. */
  427. while (offset < end) {
  428. rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
  429. chunk);
  430. if (rc)
  431. goto out;
  432. offset += chunk;
  433. }
  434. out:
  435. return rc;
  436. }
  437. static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
  438. size_t len, size_t *retlen, const u8 *buffer)
  439. {
  440. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  441. struct efx_mtd *efx_mtd = mtd->priv;
  442. struct efx_nic *efx = efx_mtd->efx;
  443. loff_t offset = start;
  444. loff_t end = min_t(loff_t, start + len, mtd->size);
  445. size_t chunk;
  446. int rc = 0;
  447. if (!part->mcdi.updating) {
  448. rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
  449. if (rc)
  450. goto out;
  451. part->mcdi.updating = 1;
  452. }
  453. while (offset < end) {
  454. chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
  455. rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
  456. buffer, chunk);
  457. if (rc)
  458. goto out;
  459. offset += chunk;
  460. buffer += chunk;
  461. }
  462. out:
  463. *retlen = offset - start;
  464. return rc;
  465. }
  466. static int siena_mtd_sync(struct mtd_info *mtd)
  467. {
  468. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  469. struct efx_mtd *efx_mtd = mtd->priv;
  470. struct efx_nic *efx = efx_mtd->efx;
  471. int rc = 0;
  472. if (part->mcdi.updating) {
  473. part->mcdi.updating = 0;
  474. rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
  475. }
  476. return rc;
  477. }
  478. static struct efx_mtd_ops siena_mtd_ops = {
  479. .read = siena_mtd_read,
  480. .erase = siena_mtd_erase,
  481. .write = siena_mtd_write,
  482. .sync = siena_mtd_sync,
  483. };
/* Describes one MCDI NVRAM type: which port it belongs to and the
 * static partition type name used when building the MTD name. */
struct siena_nvram_type_info {
	int port;
	const char *name;
};

/* Indexed by MC_CMD_NVRAM_TYPE_*; types for the other port are
 * filtered out in siena_mtd_probe_partition() */
static struct siena_nvram_type_info siena_nvram_types[] = {
	[MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO]	= { 0, "sfc_dummy_phy" },
	[MC_CMD_NVRAM_TYPE_MC_FW]		= { 0, "sfc_mcfw" },
	[MC_CMD_NVRAM_TYPE_MC_FW_BACKUP]	= { 0, "sfc_mcfw_backup" },
	[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0]	= { 0, "sfc_static_cfg" },
	[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1]	= { 1, "sfc_static_cfg" },
	[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0]	= { 0, "sfc_dynamic_cfg" },
	[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1]	= { 1, "sfc_dynamic_cfg" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM]		= { 0, "sfc_exp_rom" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0]	= { 0, "sfc_exp_rom_cfg" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1]	= { 1, "sfc_exp_rom_cfg" },
	[MC_CMD_NVRAM_TYPE_PHY_PORT0]		= { 0, "sfc_phy_fw" },
	[MC_CMD_NVRAM_TYPE_PHY_PORT1]		= { 1, "sfc_phy_fw" },
};
  502. static int siena_mtd_probe_partition(struct efx_nic *efx,
  503. struct efx_mtd *efx_mtd,
  504. unsigned int part_id,
  505. unsigned int type)
  506. {
  507. struct efx_mtd_partition *part = &efx_mtd->part[part_id];
  508. struct siena_nvram_type_info *info;
  509. size_t size, erase_size;
  510. bool protected;
  511. int rc;
  512. if (type >= ARRAY_SIZE(siena_nvram_types))
  513. return -ENODEV;
  514. info = &siena_nvram_types[type];
  515. if (info->port != efx_port_num(efx))
  516. return -ENODEV;
  517. rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
  518. if (rc)
  519. return rc;
  520. if (protected)
  521. return -ENODEV; /* hide it */
  522. part->mcdi.nvram_type = type;
  523. part->type_name = info->name;
  524. part->mtd.type = MTD_NORFLASH;
  525. part->mtd.flags = MTD_CAP_NORFLASH;
  526. part->mtd.size = size;
  527. part->mtd.erasesize = erase_size;
  528. return 0;
  529. }
  530. static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
  531. struct efx_mtd *efx_mtd)
  532. {
  533. struct efx_mtd_partition *part;
  534. uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN /
  535. sizeof(uint16_t)];
  536. int rc;
  537. rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list);
  538. if (rc)
  539. return rc;
  540. efx_for_each_partition(part, efx_mtd)
  541. part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];
  542. return 0;
  543. }
/* Probe Siena MTD partitions: enumerate the NVRAM types advertised by
 * the MC, build one partition per usable type, and register them all
 * as a single MTD device.  Caller must hold the RTNL lock. */
static int siena_mtd_probe(struct efx_nic *efx)
{
	struct efx_mtd *efx_mtd;
	int rc = -ENODEV;
	u32 nvram_types;
	unsigned int type;

	ASSERT_RTNL();

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		return rc;

	/* Allocate for the worst case: one partition per set bit */
	efx_mtd = kzalloc(sizeof(*efx_mtd) +
			  hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
			  GFP_KERNEL);
	if (!efx_mtd)
		return -ENOMEM;

	efx_mtd->name = "Siena NVRAM manager";
	efx_mtd->ops = &siena_mtd_ops;

	/* Walk the type bitmask; -ENODEV from probe_partition means
	 * the type is hidden (other port / protected), not an error */
	type = 0;
	efx_mtd->n_parts = 0;
	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = siena_mtd_probe_partition(efx, efx_mtd,
						       efx_mtd->n_parts, type);
			if (rc == 0)
				efx_mtd->n_parts++;
			else if (rc != -ENODEV)
				goto fail;
		}
		type++;
		nvram_types >>= 1;
	}

	rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
	if (rc)
		goto fail;

	rc = efx_mtd_probe_device(efx, efx_mtd);
fail:
	/* probe_device rolls back registrations on failure; we free */
	if (rc)
		kfree(efx_mtd);
	return rc;
}