/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2009 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/delay.h>
#include <linux/rtnetlink.h>

/* Defined before net_driver.h, which presumably uses it for logging --
 * NOTE(review): confirm against net_driver.h */
#define EFX_DRIVER_NAME "sfc_mtd"
#include "net_driver.h"
#include "spi.h"
#include "efx.h"
#include "nic.h"
#include "mcdi.h"
#include "mcdi_pcol.h"

/* Chunk size used when reading back an erased region to verify it */
#define EFX_SPI_VERIFY_BUF_LEN 16
/* One exported MTD partition, embedded in its parent efx_mtd device.
 * The union holds per-architecture state: MCDI fields for Siena,
 * a flash byte offset for Falcon. */
struct efx_mtd_partition {
	struct mtd_info mtd;
	union {
		struct {
			bool updating;	/* NVRAM update transaction open */
			u8 nvram_type;	/* MC_CMD_NVRAM_TYPE_* value */
			u16 fw_subtype;	/* shown in the partition name */
		} mcdi;
		size_t offset;		/* Falcon: offset into SPI flash */
	};
	const char *type_name;		/* e.g. "sfc_flash_bootrom" */
	char name[IFNAMSIZ + 20];	/* "<ifname> <type>[:subtype]" */
};
/* Per-architecture (Falcon SPI vs Siena MCDI) MTD access operations.
 * read/write/sync are installed directly as mtd_info ops by
 * efx_mtd_probe_device(); erase is wrapped by efx_mtd_erase(). */
struct efx_mtd_ops {
	int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
		    size_t *retlen, u8 *buffer);
	int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
	int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
		     size_t *retlen, const u8 *buffer);
	int (*sync)(struct mtd_info *mtd);
};
  44. struct efx_mtd {
  45. struct list_head node;
  46. struct efx_nic *efx;
  47. const struct efx_spi_device *spi;
  48. const char *name;
  49. const struct efx_mtd_ops *ops;
  50. size_t n_parts;
  51. struct efx_mtd_partition part[0];
  52. };
/* Iterate over every partition of an efx_mtd device. */
#define efx_for_each_partition(part, efx_mtd)			\
	for ((part) = &(efx_mtd)->part[0];			\
	     (part) != &(efx_mtd)->part[(efx_mtd)->n_parts];	\
	     (part)++)

/* Recover the containing partition from its embedded mtd_info. */
#define to_efx_mtd_partition(mtd)				\
	container_of(mtd, struct efx_mtd_partition, mtd)

static int falcon_mtd_probe(struct efx_nic *efx);
static int siena_mtd_probe(struct efx_nic *efx);
  61. /* SPI utilities */
/* Poll the SPI status register every 100ms, for up to 4s, until the
 * flash/EEPROM finishes a slow operation (erase or buffered write).
 * Returns 0 when the device reports ready, a falcon_spi_cmd() error,
 * -EINTR if a signal arrives while interruptible, or -ETIMEDOUT.
 */
static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
{
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		/* Sleep 100ms before each status poll */
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		/* NRDY clear => the slow operation has completed */
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (signal_pending(current))
			return -EINTR;
	}
	EFX_ERR(efx, "timed out waiting for %s\n", efx_mtd->name);
	return -ETIMEDOUT;
}
/* Clear the block-protection bits (BP0-2) in the SPI status register
 * so that erase/write operations are permitted.  Returns 0 if the
 * device was already unlocked or once the new status has been written
 * and committed.
 */
static int
efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
{
	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
				SPI_STATUS_BP0);
	u8 status;
	int rc;

	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
			    &status, sizeof(status));
	if (rc)
		return rc;

	if (!(status & unlock_mask))
		return 0; /* already unlocked */

	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	/* SST parts need EWSR before the status register may be
	 * written -- NOTE(review): presumably harmless on other
	 * vendors' parts; confirm against supported devices. */
	rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
	if (rc)
		return rc;

	status &= ~unlock_mask;
	rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
			    NULL, sizeof(status));
	if (rc)
		return rc;
	rc = falcon_spi_wait_write(efx, spi);
	if (rc)
		return rc;
	return 0;
}
  114. static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
  115. {
  116. const struct efx_spi_device *spi = efx_mtd->spi;
  117. struct efx_nic *efx = efx_mtd->efx;
  118. unsigned pos, block_len;
  119. u8 empty[EFX_SPI_VERIFY_BUF_LEN];
  120. u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
  121. int rc;
  122. if (len != spi->erase_size)
  123. return -EINVAL;
  124. if (spi->erase_command == 0)
  125. return -EOPNOTSUPP;
  126. rc = efx_spi_unlock(efx, spi);
  127. if (rc)
  128. return rc;
  129. rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
  130. if (rc)
  131. return rc;
  132. rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
  133. NULL, 0);
  134. if (rc)
  135. return rc;
  136. rc = efx_spi_slow_wait(efx_mtd, false);
  137. /* Verify the entire region has been wiped */
  138. memset(empty, 0xff, sizeof(empty));
  139. for (pos = 0; pos < len; pos += block_len) {
  140. block_len = min(len - pos, sizeof(buffer));
  141. rc = falcon_spi_read(efx, spi, start + pos, block_len,
  142. NULL, buffer);
  143. if (rc)
  144. return rc;
  145. if (memcmp(empty, buffer, block_len))
  146. return -EIO;
  147. /* Avoid locking up the system */
  148. cond_resched();
  149. if (signal_pending(current))
  150. return -EINTR;
  151. }
  152. return rc;
  153. }
  154. /* MTD interface */
  155. static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
  156. {
  157. struct efx_mtd *efx_mtd = mtd->priv;
  158. int rc;
  159. rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
  160. if (rc == 0) {
  161. erase->state = MTD_ERASE_DONE;
  162. } else {
  163. erase->state = MTD_ERASE_FAILED;
  164. erase->fail_addr = 0xffffffff;
  165. }
  166. mtd_erase_callback(erase);
  167. return rc;
  168. }
  169. static void efx_mtd_sync(struct mtd_info *mtd)
  170. {
  171. struct efx_mtd *efx_mtd = mtd->priv;
  172. struct efx_nic *efx = efx_mtd->efx;
  173. int rc;
  174. rc = efx_mtd->ops->sync(mtd);
  175. if (rc)
  176. EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc);
  177. }
  178. static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
  179. {
  180. int rc;
  181. for (;;) {
  182. rc = del_mtd_device(&part->mtd);
  183. if (rc != -EBUSY)
  184. break;
  185. ssleep(1);
  186. }
  187. WARN_ON(rc);
  188. }
  189. static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
  190. {
  191. struct efx_mtd_partition *part;
  192. efx_for_each_partition(part, efx_mtd)
  193. efx_mtd_remove_partition(part);
  194. list_del(&efx_mtd->node);
  195. kfree(efx_mtd);
  196. }
  197. static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
  198. {
  199. struct efx_mtd_partition *part;
  200. efx_for_each_partition(part, efx_mtd)
  201. if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
  202. snprintf(part->name, sizeof(part->name),
  203. "%s %s:%02x", efx_mtd->efx->name,
  204. part->type_name, part->mcdi.fw_subtype);
  205. else
  206. snprintf(part->name, sizeof(part->name),
  207. "%s %s", efx_mtd->efx->name,
  208. part->type_name);
  209. }
/* Register all partitions of @efx_mtd with the MTD core and add the
 * device to the NIC's MTD list.  On failure, unregisters the
 * partitions already added and returns -ENOMEM.
 */
static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_mtd->efx = efx;
	efx_mtd_rename_device(efx_mtd);

	efx_for_each_partition(part, efx_mtd) {
		part->mtd.writesize = 1;	/* byte-granular writes */
		part->mtd.owner = THIS_MODULE;
		part->mtd.priv = efx_mtd;
		part->mtd.name = part->name;
		part->mtd.erase = efx_mtd_erase;
		part->mtd.read = efx_mtd->ops->read;
		part->mtd.write = efx_mtd->ops->write;
		part->mtd.sync = efx_mtd_sync;
		if (add_mtd_device(&part->mtd))
			goto fail;
	}
	list_add(&efx_mtd->node, &efx->mtd_list);
	return 0;

fail:
	/* Roll back the partitions registered so far; part points at
	 * the one whose registration failed. */
	while (part != &efx_mtd->part[0]) {
		--part;
		efx_mtd_remove_partition(part);
	}
	/* add_mtd_device() returns 1 if the MTD table is full */
	return -ENOMEM;
}
  237. void efx_mtd_remove(struct efx_nic *efx)
  238. {
  239. struct efx_mtd *efx_mtd, *next;
  240. WARN_ON(efx_dev_registered(efx));
  241. list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
  242. efx_mtd_remove_device(efx_mtd);
  243. }
  244. void efx_mtd_rename(struct efx_nic *efx)
  245. {
  246. struct efx_mtd *efx_mtd;
  247. ASSERT_RTNL();
  248. list_for_each_entry(efx_mtd, &efx->mtd_list, node)
  249. efx_mtd_rename_device(efx_mtd);
  250. }
  251. int efx_mtd_probe(struct efx_nic *efx)
  252. {
  253. if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
  254. return siena_mtd_probe(efx);
  255. else
  256. return falcon_mtd_probe(efx);
  257. }
  258. /* Implementation of MTD operations for Falcon */
  259. static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
  260. size_t len, size_t *retlen, u8 *buffer)
  261. {
  262. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  263. struct efx_mtd *efx_mtd = mtd->priv;
  264. const struct efx_spi_device *spi = efx_mtd->spi;
  265. struct efx_nic *efx = efx_mtd->efx;
  266. int rc;
  267. rc = mutex_lock_interruptible(&efx->spi_lock);
  268. if (rc)
  269. return rc;
  270. rc = falcon_spi_read(efx, spi, part->offset + start, len,
  271. retlen, buffer);
  272. mutex_unlock(&efx->spi_lock);
  273. return rc;
  274. }
  275. static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
  276. {
  277. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  278. struct efx_mtd *efx_mtd = mtd->priv;
  279. struct efx_nic *efx = efx_mtd->efx;
  280. int rc;
  281. rc = mutex_lock_interruptible(&efx->spi_lock);
  282. if (rc)
  283. return rc;
  284. rc = efx_spi_erase(efx_mtd, part->offset + start, len);
  285. mutex_unlock(&efx->spi_lock);
  286. return rc;
  287. }
  288. static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
  289. size_t len, size_t *retlen, const u8 *buffer)
  290. {
  291. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  292. struct efx_mtd *efx_mtd = mtd->priv;
  293. const struct efx_spi_device *spi = efx_mtd->spi;
  294. struct efx_nic *efx = efx_mtd->efx;
  295. int rc;
  296. rc = mutex_lock_interruptible(&efx->spi_lock);
  297. if (rc)
  298. return rc;
  299. rc = falcon_spi_write(efx, spi, part->offset + start, len,
  300. retlen, buffer);
  301. mutex_unlock(&efx->spi_lock);
  302. return rc;
  303. }
  304. static int falcon_mtd_sync(struct mtd_info *mtd)
  305. {
  306. struct efx_mtd *efx_mtd = mtd->priv;
  307. struct efx_nic *efx = efx_mtd->efx;
  308. int rc;
  309. mutex_lock(&efx->spi_lock);
  310. rc = efx_spi_slow_wait(efx_mtd, true);
  311. mutex_unlock(&efx->spi_lock);
  312. return rc;
  313. }
  314. static struct efx_mtd_ops falcon_mtd_ops = {
  315. .read = falcon_mtd_read,
  316. .erase = falcon_mtd_erase,
  317. .write = falcon_mtd_write,
  318. .sync = falcon_mtd_sync,
  319. };
/* Probe the Falcon boot flash and expose the region above the
 * bootcode start as a single MTD partition.  Returns -ENODEV when no
 * flash is present or it is too small, -ENOMEM on allocation failure,
 * otherwise the result of efx_mtd_probe_device().
 */
static int falcon_mtd_probe(struct efx_nic *efx)
{
	struct efx_spi_device *spi = efx->spi_flash;
	struct efx_mtd *efx_mtd;
	int rc;

	ASSERT_RTNL();

	if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START)
		return -ENODEV;

	/* Single-partition device: allocate space for one partition */
	efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
			  GFP_KERNEL);
	if (!efx_mtd)
		return -ENOMEM;

	efx_mtd->spi = spi;
	efx_mtd->name = "flash";
	efx_mtd->ops = &falcon_mtd_ops;
	efx_mtd->n_parts = 1;

	efx_mtd->part[0].mtd.type = MTD_NORFLASH;
	efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
	efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
	efx_mtd->part[0].mtd.erasesize = spi->erase_size;
	efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
	efx_mtd->part[0].type_name = "sfc_flash_bootrom";

	rc = efx_mtd_probe_device(efx, efx_mtd);
	if (rc)
		kfree(efx_mtd);
	return rc;
}
  347. /* Implementation of MTD operations for Siena */
  348. static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
  349. size_t len, size_t *retlen, u8 *buffer)
  350. {
  351. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  352. struct efx_mtd *efx_mtd = mtd->priv;
  353. struct efx_nic *efx = efx_mtd->efx;
  354. loff_t offset = start;
  355. loff_t end = min_t(loff_t, start + len, mtd->size);
  356. size_t chunk;
  357. int rc = 0;
  358. while (offset < end) {
  359. chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
  360. rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
  361. buffer, chunk);
  362. if (rc)
  363. goto out;
  364. offset += chunk;
  365. buffer += chunk;
  366. }
  367. out:
  368. *retlen = offset - start;
  369. return rc;
  370. }
/* Erase part of a Siena NVRAM partition via MCDI.  The start offset
 * is rounded down to an erase-block boundary and the end is clamped
 * to the partition size.  Opens an NVRAM update transaction on first
 * use; the transaction is committed later by siena_mtd_sync().
 */
static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->mtd.erasesize;
	int rc = 0;

	if (!part->mcdi.updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
		if (rc)
			goto out;
		part->mcdi.updating = 1;
	}

	/* The MCDI interface can in fact do multiple erase blocks at once;
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout. */
	while (offset < end) {
		rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
					  chunk);
		if (rc)
			goto out;
		offset += chunk;
	}
out:
	return rc;
}
/* Write to a Siena NVRAM partition via MCDI, in chunks of at most
 * EFX_MCDI_NVRAM_LEN_MAX bytes, clamped to the partition size.
 * Opens an NVRAM update transaction on first use; committed later by
 * siena_mtd_sync().  *retlen reports how much was actually written.
 */
static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
			   size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	if (!part->mcdi.updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
		if (rc)
			goto out;
		part->mcdi.updating = 1;
	}

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
					  buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}
  428. static int siena_mtd_sync(struct mtd_info *mtd)
  429. {
  430. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  431. struct efx_mtd *efx_mtd = mtd->priv;
  432. struct efx_nic *efx = efx_mtd->efx;
  433. int rc = 0;
  434. if (part->mcdi.updating) {
  435. part->mcdi.updating = 0;
  436. rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
  437. }
  438. return rc;
  439. }
  440. static struct efx_mtd_ops siena_mtd_ops = {
  441. .read = siena_mtd_read,
  442. .erase = siena_mtd_erase,
  443. .write = siena_mtd_write,
  444. .sync = siena_mtd_sync,
  445. };
/* Static description of one MC_CMD_NVRAM_TYPE_* value: which NIC port
 * owns it and the partition type name to export. */
struct siena_nvram_type_info {
	int port;		/* owning port number */
	const char *name;	/* MTD partition type name */
};
/* Table indexed by MC_CMD_NVRAM_TYPE_*; types absent here (or on the
 * other port) are hidden by siena_mtd_probe_partition(). */
static struct siena_nvram_type_info siena_nvram_types[] = {
	[MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO]	= { 0, "sfc_dummy_phy" },
	[MC_CMD_NVRAM_TYPE_MC_FW]		= { 0, "sfc_mcfw" },
	[MC_CMD_NVRAM_TYPE_MC_FW_BACKUP]	= { 0, "sfc_mcfw_backup" },
	[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0]	= { 0, "sfc_static_cfg" },
	[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1]	= { 1, "sfc_static_cfg" },
	[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0]	= { 0, "sfc_dynamic_cfg" },
	[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1]	= { 1, "sfc_dynamic_cfg" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM]		= { 0, "sfc_exp_rom" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0]	= { 0, "sfc_exp_rom_cfg" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1]	= { 1, "sfc_exp_rom_cfg" },
	[MC_CMD_NVRAM_TYPE_PHY_PORT0]		= { 0, "sfc_phy_fw" },
	[MC_CMD_NVRAM_TYPE_PHY_PORT1]		= { 1, "sfc_phy_fw" },
};
/* Fill in partition @part_id of @efx_mtd for NVRAM type @type.
 * Returns -ENODEV (partition hidden) when the type is unknown,
 * belongs to the other port, or is write-protected; otherwise 0 or an
 * MCDI error from the NVRAM-info query.
 */
static int siena_mtd_probe_partition(struct efx_nic *efx,
				     struct efx_mtd *efx_mtd,
				     unsigned int part_id,
				     unsigned int type)
{
	struct efx_mtd_partition *part = &efx_mtd->part[part_id];
	struct siena_nvram_type_info *info;
	size_t size, erase_size;
	bool protected;
	int rc;

	if (type >= ARRAY_SIZE(siena_nvram_types))
		return -ENODEV;

	info = &siena_nvram_types[type];
	if (info->port != efx_port_num(efx))
		return -ENODEV;

	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
	if (rc)
		return rc;
	if (protected)
		return -ENODEV; /* hide it */

	part->mcdi.nvram_type = type;
	part->type_name = info->name;

	part->mtd.type = MTD_NORFLASH;
	part->mtd.flags = MTD_CAP_NORFLASH;
	part->mtd.size = size;
	part->mtd.erasesize = erase_size;

	return 0;
}
/* Fetch the per-NVRAM-type firmware subtype list from the board
 * configuration and record each partition's subtype (used when
 * building partition names).
 */
static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
				     struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;
	uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN /
				 sizeof(uint16_t)];
	int rc;

	rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list);
	if (rc)
		return rc;

	efx_for_each_partition(part, efx_mtd)
		part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];

	return 0;
}
/* Enumerate the NVRAM types advertised by the management controller
 * and register those visible on this port as MTD partitions.
 */
static int siena_mtd_probe(struct efx_nic *efx)
{
	struct efx_mtd *efx_mtd;
	int rc = -ENODEV;
	u32 nvram_types;
	unsigned int type;

	ASSERT_RTNL();

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		return rc;

	/* Allocate one partition slot per advertised type; hidden
	 * types leave n_parts smaller than the allocation. */
	efx_mtd = kzalloc(sizeof(*efx_mtd) +
			  hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
			  GFP_KERNEL);
	if (!efx_mtd)
		return -ENOMEM;

	efx_mtd->name = "Siena NVRAM manager";
	efx_mtd->ops = &siena_mtd_ops;

	type = 0;
	efx_mtd->n_parts = 0;

	/* Walk the type bitmask: bit n set => NVRAM type n exists. */
	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = siena_mtd_probe_partition(efx, efx_mtd,
						       efx_mtd->n_parts, type);
			if (rc == 0)
				efx_mtd->n_parts++;
			else if (rc != -ENODEV)	/* -ENODEV just hides it */
				goto fail;
		}
		type++;
		nvram_types >>= 1;
	}

	rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
	if (rc)
		goto fail;

	rc = efx_mtd_probe_device(efx, efx_mtd);
fail:
	if (rc)
		kfree(efx_mtd);
	return rc;
}