/* mtd.c */
  1. /****************************************************************************
  2. * Driver for Solarflare Solarstorm network controllers and boards
  3. * Copyright 2005-2006 Fen Systems Ltd.
  4. * Copyright 2006-2009 Solarflare Communications Inc.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published
  8. * by the Free Software Foundation, incorporated herein by reference.
  9. */
  10. #include <linux/bitops.h>
  11. #include <linux/module.h>
  12. #include <linux/mtd/mtd.h>
  13. #include <linux/delay.h>
  14. #include <linux/rtnetlink.h>
  15. #define EFX_DRIVER_NAME "sfc_mtd"
  16. #include "net_driver.h"
  17. #include "spi.h"
  18. #include "efx.h"
  19. #include "nic.h"
  20. #include "mcdi.h"
  21. #include "mcdi_pcol.h"
  22. #define EFX_SPI_VERIFY_BUF_LEN 16
  23. #define EFX_MCDI_CHUNK_LEN 128
/* Per-partition state: one MTD device exposed to the MTD core.
 * The union reflects the two backends: MCDI-managed NVRAM partitions
 * on Siena vs. a raw byte offset into the SPI flash on Falcon.
 */
struct efx_mtd_partition {
	struct mtd_info mtd;
	union {
		struct {
			bool updating;	/* NVRAM update transaction is open */
			u8 nvram_type;	/* MC_CMD_NVRAM_TYPE_* value */
			u16 fw_subtype;	/* firmware subtype, shown in name */
		} mcdi;
		size_t offset;		/* Falcon: start offset in SPI flash */
	};
	const char *type_name;		/* e.g. "sfc_exp_rom" */
	char name[IFNAMSIZ + 20];	/* "<ifname> <type>[:subtype]" */
};
/* Backend operations implementing MTD access for one NIC family
 * (Falcon SPI or Siena MCDI).  All return 0 or a negative errno.
 */
struct efx_mtd_ops {
	int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
		    size_t *retlen, u8 *buffer);
	int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
	int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
		     size_t *retlen, const u8 *buffer);
	int (*sync)(struct mtd_info *mtd);
};
/* One flash/NVRAM device with its set of partitions, linked into
 * efx_nic's mtd_list.  Allocated with a trailing variable-length
 * part[] array sized at probe time.
 */
struct efx_mtd {
	struct list_head node;			/* entry in efx->mtd_list */
	struct efx_nic *efx;
	const struct efx_spi_device *spi;	/* Falcon only; left NULL by
						 * the (kzalloc'd) Siena probe */
	const char *name;			/* name used in log messages */
	const struct efx_mtd_ops *ops;
	size_t n_parts;
	struct efx_mtd_partition part[0];	/* trailing partition array */
};
/* Iterate over all n_parts partitions of an efx_mtd device */
#define efx_for_each_partition(part, efx_mtd)			\
	for ((part) = &(efx_mtd)->part[0];			\
	     (part) != &(efx_mtd)->part[(efx_mtd)->n_parts];	\
	     (part)++)

/* Map a struct mtd_info back to its containing partition */
#define to_efx_mtd_partition(mtd)				\
	container_of(mtd, struct efx_mtd_partition, mtd)

static int falcon_mtd_probe(struct efx_nic *efx);
static int siena_mtd_probe(struct efx_nic *efx);
/* SPI utilities */

/* Wait for the SPI flash/EEPROM to complete a slow operation (erase or
 * status-register write), polling the status register every 100ms for
 * up to 4 seconds.
 * Returns 0 when the device reports ready, -EINTR if a signal arrived
 * (only possible when @uninterruptible is false), -ETIMEDOUT after 4s,
 * or the error from the SPI status read.
 */
static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
{
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (signal_pending(current))
			return -EINTR;
	}
	EFX_ERR(efx, "timed out waiting for %s\n", efx_mtd->name);
	return -ETIMEDOUT;
}
  86. static int
  87. efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
  88. {
  89. const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
  90. SPI_STATUS_BP0);
  91. u8 status;
  92. int rc;
  93. rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
  94. &status, sizeof(status));
  95. if (rc)
  96. return rc;
  97. if (!(status & unlock_mask))
  98. return 0; /* already unlocked */
  99. rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
  100. if (rc)
  101. return rc;
  102. rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
  103. if (rc)
  104. return rc;
  105. status &= ~unlock_mask;
  106. rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
  107. NULL, sizeof(status));
  108. if (rc)
  109. return rc;
  110. rc = falcon_spi_wait_write(efx, spi);
  111. if (rc)
  112. return rc;
  113. return 0;
  114. }
  115. static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
  116. {
  117. const struct efx_spi_device *spi = efx_mtd->spi;
  118. struct efx_nic *efx = efx_mtd->efx;
  119. unsigned pos, block_len;
  120. u8 empty[EFX_SPI_VERIFY_BUF_LEN];
  121. u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
  122. int rc;
  123. if (len != spi->erase_size)
  124. return -EINVAL;
  125. if (spi->erase_command == 0)
  126. return -EOPNOTSUPP;
  127. rc = efx_spi_unlock(efx, spi);
  128. if (rc)
  129. return rc;
  130. rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
  131. if (rc)
  132. return rc;
  133. rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
  134. NULL, 0);
  135. if (rc)
  136. return rc;
  137. rc = efx_spi_slow_wait(efx_mtd, false);
  138. /* Verify the entire region has been wiped */
  139. memset(empty, 0xff, sizeof(empty));
  140. for (pos = 0; pos < len; pos += block_len) {
  141. block_len = min(len - pos, sizeof(buffer));
  142. rc = falcon_spi_read(efx, spi, start + pos, block_len,
  143. NULL, buffer);
  144. if (rc)
  145. return rc;
  146. if (memcmp(empty, buffer, block_len))
  147. return -EIO;
  148. /* Avoid locking up the system */
  149. cond_resched();
  150. if (signal_pending(current))
  151. return -EINTR;
  152. }
  153. return rc;
  154. }
  155. /* MTD interface */
  156. static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
  157. {
  158. struct efx_mtd *efx_mtd = mtd->priv;
  159. int rc;
  160. rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
  161. if (rc == 0) {
  162. erase->state = MTD_ERASE_DONE;
  163. } else {
  164. erase->state = MTD_ERASE_FAILED;
  165. erase->fail_addr = 0xffffffff;
  166. }
  167. mtd_erase_callback(erase);
  168. return rc;
  169. }
  170. static void efx_mtd_sync(struct mtd_info *mtd)
  171. {
  172. struct efx_mtd *efx_mtd = mtd->priv;
  173. struct efx_nic *efx = efx_mtd->efx;
  174. int rc;
  175. rc = efx_mtd->ops->sync(mtd);
  176. if (rc)
  177. EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc);
  178. }
  179. static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
  180. {
  181. int rc;
  182. for (;;) {
  183. rc = del_mtd_device(&part->mtd);
  184. if (rc != -EBUSY)
  185. break;
  186. ssleep(1);
  187. }
  188. WARN_ON(rc);
  189. }
/* Unregister all partitions of a device, unlink it from the NIC's
 * MTD list and free it. */
static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_for_each_partition(part, efx_mtd)
		efx_mtd_remove_partition(part);
	list_del(&efx_mtd->node);
	kfree(efx_mtd);
}
  198. static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
  199. {
  200. struct efx_mtd_partition *part;
  201. efx_for_each_partition(part, efx_mtd)
  202. if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
  203. snprintf(part->name, sizeof(part->name),
  204. "%s %s:%02x", efx_mtd->efx->name,
  205. part->type_name, part->mcdi.fw_subtype);
  206. else
  207. snprintf(part->name, sizeof(part->name),
  208. "%s %s", efx_mtd->efx->name,
  209. part->type_name);
  210. }
/* Register all partitions of @efx_mtd with the MTD core and link the
 * device into @efx->mtd_list.
 * On failure, unregisters any partitions already added and returns
 * -ENOMEM (add_mtd_device() does not return a useful errno).
 */
static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_mtd->efx = efx;
	efx_mtd_rename_device(efx_mtd);
	efx_for_each_partition(part, efx_mtd) {
		part->mtd.writesize = 1;	/* byte-granularity writes */
		part->mtd.owner = THIS_MODULE;
		part->mtd.priv = efx_mtd;
		part->mtd.name = part->name;
		part->mtd.erase = efx_mtd_erase;
		part->mtd.read = efx_mtd->ops->read;
		part->mtd.write = efx_mtd->ops->write;
		part->mtd.sync = efx_mtd_sync;
		if (add_mtd_device(&part->mtd))
			goto fail;
	}
	list_add(&efx_mtd->node, &efx->mtd_list);
	return 0;

fail:
	/* Unwind only the partitions registered before the failure */
	while (part != &efx_mtd->part[0]) {
		--part;
		efx_mtd_remove_partition(part);
	}
	/* add_mtd_device() returns 1 if the MTD table is full */
	return -ENOMEM;
}
/* Remove all MTD devices belonging to this NIC.  Expected to run after
 * the net device has been unregistered (hence the WARN_ON). */
void efx_mtd_remove(struct efx_nic *efx)
{
	struct efx_mtd *efx_mtd, *next;

	WARN_ON(efx_dev_registered(efx));
	list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
		efx_mtd_remove_device(efx_mtd);
}
/* Refresh all partition names after the net device is renamed.
 * Caller must hold the RTNL (asserted below). */
void efx_mtd_rename(struct efx_nic *efx)
{
	struct efx_mtd *efx_mtd;

	ASSERT_RTNL();
	list_for_each_entry(efx_mtd, &efx->mtd_list, node)
		efx_mtd_rename_device(efx_mtd);
}
  252. int efx_mtd_probe(struct efx_nic *efx)
  253. {
  254. if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
  255. return siena_mtd_probe(efx);
  256. else
  257. return falcon_mtd_probe(efx);
  258. }
  259. /* Implementation of MTD operations for Falcon */
  260. static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
  261. size_t len, size_t *retlen, u8 *buffer)
  262. {
  263. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  264. struct efx_mtd *efx_mtd = mtd->priv;
  265. const struct efx_spi_device *spi = efx_mtd->spi;
  266. struct efx_nic *efx = efx_mtd->efx;
  267. int rc;
  268. rc = mutex_lock_interruptible(&efx->spi_lock);
  269. if (rc)
  270. return rc;
  271. rc = falcon_spi_read(efx, spi, part->offset + start, len,
  272. retlen, buffer);
  273. mutex_unlock(&efx->spi_lock);
  274. return rc;
  275. }
  276. static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
  277. {
  278. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  279. struct efx_mtd *efx_mtd = mtd->priv;
  280. struct efx_nic *efx = efx_mtd->efx;
  281. int rc;
  282. rc = mutex_lock_interruptible(&efx->spi_lock);
  283. if (rc)
  284. return rc;
  285. rc = efx_spi_erase(efx_mtd, part->offset + start, len);
  286. mutex_unlock(&efx->spi_lock);
  287. return rc;
  288. }
  289. static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
  290. size_t len, size_t *retlen, const u8 *buffer)
  291. {
  292. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  293. struct efx_mtd *efx_mtd = mtd->priv;
  294. const struct efx_spi_device *spi = efx_mtd->spi;
  295. struct efx_nic *efx = efx_mtd->efx;
  296. int rc;
  297. rc = mutex_lock_interruptible(&efx->spi_lock);
  298. if (rc)
  299. return rc;
  300. rc = falcon_spi_write(efx, spi, part->offset + start, len,
  301. retlen, buffer);
  302. mutex_unlock(&efx->spi_lock);
  303. return rc;
  304. }
/* Wait (uninterruptibly) for any in-progress flash operation to finish.
 * Takes the SPI lock unconditionally: sync must not be aborted. */
static int falcon_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	int rc;

	mutex_lock(&efx->spi_lock);
	rc = efx_spi_slow_wait(efx_mtd, true);
	mutex_unlock(&efx->spi_lock);
	return rc;
}
  315. static struct efx_mtd_ops falcon_mtd_ops = {
  316. .read = falcon_mtd_read,
  317. .erase = falcon_mtd_erase,
  318. .write = falcon_mtd_write,
  319. .sync = falcon_mtd_sync,
  320. };
/* Probe the Falcon SPI flash and expose everything past
 * FALCON_FLASH_BOOTCODE_START as a single boot-ROM MTD partition.
 * Returns -ENODEV if there is no flash or it is too small. */
static int falcon_mtd_probe(struct efx_nic *efx)
{
	struct efx_spi_device *spi = efx->spi_flash;
	struct efx_mtd *efx_mtd;
	int rc;

	ASSERT_RTNL();

	if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START)
		return -ENODEV;

	/* One device with exactly one partition */
	efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
			  GFP_KERNEL);
	if (!efx_mtd)
		return -ENOMEM;

	efx_mtd->spi = spi;
	efx_mtd->name = "flash";
	efx_mtd->ops = &falcon_mtd_ops;

	efx_mtd->n_parts = 1;
	efx_mtd->part[0].mtd.type = MTD_NORFLASH;
	efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
	efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
	efx_mtd->part[0].mtd.erasesize = spi->erase_size;
	efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
	efx_mtd->part[0].type_name = "sfc_flash_bootrom";

	rc = efx_mtd_probe_device(efx, efx_mtd);
	if (rc)
		kfree(efx_mtd);
	return rc;
}
  348. /* Implementation of MTD operations for Siena */
  349. static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
  350. size_t len, size_t *retlen, u8 *buffer)
  351. {
  352. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  353. struct efx_mtd *efx_mtd = mtd->priv;
  354. struct efx_nic *efx = efx_mtd->efx;
  355. loff_t offset = start;
  356. loff_t end = min_t(loff_t, start + len, mtd->size);
  357. size_t chunk;
  358. int rc = 0;
  359. while (offset < end) {
  360. chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
  361. rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
  362. buffer, chunk);
  363. if (rc)
  364. goto out;
  365. offset += chunk;
  366. buffer += chunk;
  367. }
  368. out:
  369. *retlen = offset - start;
  370. return rc;
  371. }
  372. static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
  373. {
  374. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  375. struct efx_mtd *efx_mtd = mtd->priv;
  376. struct efx_nic *efx = efx_mtd->efx;
  377. loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
  378. loff_t end = min_t(loff_t, start + len, mtd->size);
  379. size_t chunk = part->mtd.erasesize;
  380. int rc = 0;
  381. if (!part->mcdi.updating) {
  382. rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
  383. if (rc)
  384. goto out;
  385. part->mcdi.updating = 1;
  386. }
  387. /* The MCDI interface can in fact do multiple erase blocks at once;
  388. * but erasing may be slow, so we make multiple calls here to avoid
  389. * tripping the MCDI RPC timeout. */
  390. while (offset < end) {
  391. rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
  392. chunk);
  393. if (rc)
  394. goto out;
  395. offset += chunk;
  396. }
  397. out:
  398. return rc;
  399. }
  400. static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
  401. size_t len, size_t *retlen, const u8 *buffer)
  402. {
  403. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  404. struct efx_mtd *efx_mtd = mtd->priv;
  405. struct efx_nic *efx = efx_mtd->efx;
  406. loff_t offset = start;
  407. loff_t end = min_t(loff_t, start + len, mtd->size);
  408. size_t chunk;
  409. int rc = 0;
  410. if (!part->mcdi.updating) {
  411. rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
  412. if (rc)
  413. goto out;
  414. part->mcdi.updating = 1;
  415. }
  416. while (offset < end) {
  417. chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
  418. rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
  419. buffer, chunk);
  420. if (rc)
  421. goto out;
  422. offset += chunk;
  423. buffer += chunk;
  424. }
  425. out:
  426. *retlen = offset - start;
  427. return rc;
  428. }
  429. static int siena_mtd_sync(struct mtd_info *mtd)
  430. {
  431. struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
  432. struct efx_mtd *efx_mtd = mtd->priv;
  433. struct efx_nic *efx = efx_mtd->efx;
  434. int rc = 0;
  435. if (part->mcdi.updating) {
  436. part->mcdi.updating = 0;
  437. rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
  438. }
  439. return rc;
  440. }
  441. static struct efx_mtd_ops siena_mtd_ops = {
  442. .read = siena_mtd_read,
  443. .erase = siena_mtd_erase,
  444. .write = siena_mtd_write,
  445. .sync = siena_mtd_sync,
  446. };
/* Mapping from MC_CMD_NVRAM_TYPE_* to the owning port and the MTD
 * type name used when building partition names. */
struct siena_nvram_type_info {
	int port;		/* port (0/1) this partition belongs to */
	const char *name;	/* MTD type name */
};

static struct siena_nvram_type_info siena_nvram_types[] = {
	[MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO]	= { 0, "sfc_dummy_phy" },
	[MC_CMD_NVRAM_TYPE_MC_FW]		= { 0, "sfc_mcfw" },
	[MC_CMD_NVRAM_TYPE_MC_FW_BACKUP]	= { 0, "sfc_mcfw_backup" },
	[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0]	= { 0, "sfc_static_cfg" },
	[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1]	= { 1, "sfc_static_cfg" },
	[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0]	= { 0, "sfc_dynamic_cfg" },
	[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1]	= { 1, "sfc_dynamic_cfg" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM]		= { 0, "sfc_exp_rom" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0]	= { 0, "sfc_exp_rom_cfg" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1]	= { 1, "sfc_exp_rom_cfg" },
	[MC_CMD_NVRAM_TYPE_PHY_PORT0]		= { 0, "sfc_phy_fw" },
	[MC_CMD_NVRAM_TYPE_PHY_PORT1]		= { 1, "sfc_phy_fw" },
};
/* Initialise partition @part_id of @efx_mtd for NVRAM type @type.
 * Returns -ENODEV (caller silently skips the type) when the type is
 * unknown, belongs to the other port, or is write-protected; otherwise
 * 0 or the error from the MCDI info query. */
static int siena_mtd_probe_partition(struct efx_nic *efx,
				     struct efx_mtd *efx_mtd,
				     unsigned int part_id,
				     unsigned int type)
{
	struct efx_mtd_partition *part = &efx_mtd->part[part_id];
	struct siena_nvram_type_info *info;
	size_t size, erase_size;
	bool protected;
	int rc;

	if (type >= ARRAY_SIZE(siena_nvram_types))
		return -ENODEV;

	info = &siena_nvram_types[type];
	/* Per-port partitions are only exposed on their own port */
	if (info->port != efx_port_num(efx))
		return -ENODEV;

	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
	if (rc)
		return rc;
	if (protected)
		return -ENODEV; /* hide it */

	part->mcdi.nvram_type = type;
	part->type_name = info->name;

	part->mtd.type = MTD_NORFLASH;
	part->mtd.flags = MTD_CAP_NORFLASH;
	part->mtd.size = size;
	part->mtd.erasesize = erase_size;

	return 0;
}
/* Fetch the firmware subtype for every partition from the MC board
 * configuration; the subtype appears in the partition name. */
static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
				     struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;
	uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN /
				 sizeof(uint16_t)];
	int rc;

	rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list);
	if (rc)
		return rc;

	/* The subtype list is indexed by NVRAM type */
	efx_for_each_partition(part, efx_mtd)
		part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];

	return 0;
}
  507. static int siena_mtd_probe(struct efx_nic *efx)
  508. {
  509. struct efx_mtd *efx_mtd;
  510. int rc = -ENODEV;
  511. u32 nvram_types;
  512. unsigned int type;
  513. ASSERT_RTNL();
  514. rc = efx_mcdi_nvram_types(efx, &nvram_types);
  515. if (rc)
  516. return rc;
  517. efx_mtd = kzalloc(sizeof(*efx_mtd) +
  518. hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
  519. GFP_KERNEL);
  520. if (!efx_mtd)
  521. return -ENOMEM;
  522. efx_mtd->name = "Siena NVRAM manager";
  523. efx_mtd->ops = &siena_mtd_ops;
  524. type = 0;
  525. efx_mtd->n_parts = 0;
  526. while (nvram_types != 0) {
  527. if (nvram_types & 1) {
  528. rc = siena_mtd_probe_partition(efx, efx_mtd,
  529. efx_mtd->n_parts, type);
  530. if (rc == 0)
  531. efx_mtd->n_parts++;
  532. else if (rc != -ENODEV)
  533. goto fail;
  534. }
  535. type++;
  536. nvram_types >>= 1;
  537. }
  538. rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
  539. if (rc)
  540. goto fail;
  541. rc = efx_mtd_probe_device(efx, efx_mtd);
  542. fail:
  543. if (rc)
  544. kfree(efx_mtd);
  545. return rc;
  546. }