/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2008 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/delay.h>
#include <linux/rtnetlink.h>

#define EFX_DRIVER_NAME "sfc_mtd"
#include "net_driver.h"
#include "spi.h"
#include "efx.h"
#include "falcon.h"

#define EFX_SPI_VERIFY_BUF_LEN 16
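
/*
 * Each exported region of the SPI device is presented to the MTD core as
 * its own device.  A partition embeds the struct mtd_info registered with
 * the MTD layer, records its byte offset within the SPI device, and keeps
 * a name buffer large enough for "<interface name> <type name>".
 */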
struct efx_mtd_partition {
	struct mtd_info mtd;
	size_t offset;
	const char *type_name;
	char name[IFNAMSIZ + 20];
};
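
/*
 * Operation table for the underlying device implementation (the only one
 * in this file is falcon_mtd_ops).  read and write use the same prototypes
 * as mtd_info's read and write hooks, so efx_mtd_probe_device() installs
 * them directly; erase and sync are dispatched through the efx_mtd_erase()
 * and efx_mtd_sync() wrappers below.
 */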
struct efx_mtd_ops {
	int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
		    size_t *retlen, u8 *buffer);
	int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
	int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
		     size_t *retlen, const u8 *buffer);
	int (*sync)(struct mtd_info *mtd);
};
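
/*
 * State for one SPI device exposed through MTD.  Instances are linked into
 * efx->mtd_list so that efx_mtd_remove() and efx_mtd_rename() can find
 * them; the partitions live in the variable-length array at the end.
 */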
struct efx_mtd {
	struct list_head node;
	struct efx_nic *efx;
	const struct efx_spi_device *spi;
	const char *name;
	const struct efx_mtd_ops *ops;
	size_t n_parts;
	struct efx_mtd_partition part[0];
};

#define efx_for_each_partition(part, efx_mtd)			\
	for ((part) = &(efx_mtd)->part[0];			\
	     (part) != &(efx_mtd)->part[(efx_mtd)->n_parts];	\
	     (part)++)

#define to_efx_mtd_partition(mtd)				\
	container_of(mtd, struct efx_mtd_partition, mtd)

static int falcon_mtd_probe(struct efx_nic *efx);

/* SPI utilities */
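
/*
 * Poll the SPI status register every 100ms, for up to 4s, until the device
 * reports it is no longer busy.  The caller chooses whether the sleeps are
 * interruptible; a pending signal aborts the wait in either mode.
 */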
static int efx_spi_slow_wait(struct efx_mtd *efx_mtd, bool uninterruptible)
{
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);

		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (signal_pending(current))
			return -EINTR;
	}
	EFX_ERR(efx, "timed out waiting for %s\n", efx_mtd->name);
	return -ETIMEDOUT;
}
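
/*
 * Clear the block-protection bits (BP0-BP2) in the SPI status register so
 * that subsequent erase/write commands take effect: write-enable, issue
 * SPI_SST_EWSR to allow a status register write, then write back the
 * modified status and wait for the write to complete.
 */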
static int
efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
{
	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
				SPI_STATUS_BP0);
	u8 status;
	int rc;

	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
			    &status, sizeof(status));
	if (rc)
		return rc;

	if (!(status & unlock_mask))
		return 0; /* already unlocked */

	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
	if (rc)
		return rc;

	status &= ~unlock_mask;
	rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
			    NULL, sizeof(status));
	if (rc)
		return rc;
	rc = falcon_spi_wait_write(efx, spi);
	if (rc)
		return rc;

	return 0;
}
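
/*
 * Erase exactly one erase block and verify the result: the device is
 * unlocked and the block erased, then read back in EFX_SPI_VERIFY_BUF_LEN
 * chunks to confirm that every byte is now 0xff.
 */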
static int efx_spi_erase(struct efx_mtd *efx_mtd, loff_t start, size_t len)
{
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	unsigned pos, block_len;
	u8 empty[EFX_SPI_VERIFY_BUF_LEN];
	u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
	int rc;

	if (len != spi->erase_size)
		return -EINVAL;

	if (spi->erase_command == 0)
		return -EOPNOTSUPP;

	rc = efx_spi_unlock(efx, spi);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
			    NULL, 0);
	if (rc)
		return rc;
	rc = efx_spi_slow_wait(efx_mtd, false);

	/* Verify the entire region has been wiped */
	memset(empty, 0xff, sizeof(empty));
	for (pos = 0; pos < len; pos += block_len) {
		block_len = min(len - pos, sizeof(buffer));
		rc = falcon_spi_read(efx, spi, start + pos, block_len,
				     NULL, buffer);
		if (rc)
			return rc;
		if (memcmp(empty, buffer, block_len))
			return -EIO;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return rc;
}

/* MTD interface */
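
/*
 * Dispatch an erase to the device-type implementation and report the
 * outcome to the MTD core via the erase_info state and callback.
 */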
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	struct efx_mtd *efx_mtd = mtd->priv;
	int rc;

	rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
	if (rc == 0) {
		erase->state = MTD_ERASE_DONE;
	} else {
		erase->state = MTD_ERASE_FAILED;
		erase->fail_addr = 0xffffffff;
	}
	mtd_erase_callback(erase);
	return rc;
}

static void efx_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	int rc;

	rc = efx_mtd->ops->sync(mtd);
	if (rc)
		EFX_ERR(efx, "%s sync failed (%d)\n", efx_mtd->name, rc);
}
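
/*
 * del_mtd_device() fails with -EBUSY while the partition is still in use,
 * so keep retrying once a second until it can actually be removed.
 */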
static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
{
	int rc;

	for (;;) {
		rc = del_mtd_device(&part->mtd);
		if (rc != -EBUSY)
			break;
		ssleep(1);
	}
	WARN_ON(rc);
}

static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_for_each_partition(part, efx_mtd)
		efx_mtd_remove_partition(part);
	list_del(&efx_mtd->node);
	kfree(efx_mtd);
}

static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_for_each_partition(part, efx_mtd)
		snprintf(part->name, sizeof(part->name),
			 "%s %s", efx_mtd->efx->name,
			 part->type_name);
}
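
/*
 * Fill in the generic mtd_info fields for each partition and register it
 * with the MTD core.  On failure, any partitions already registered are
 * removed again before returning.
 */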
static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_mtd->efx = efx;

	efx_mtd_rename_device(efx_mtd);

	efx_for_each_partition(part, efx_mtd) {
		part->mtd.writesize = 1;

		part->mtd.owner = THIS_MODULE;
		part->mtd.priv = efx_mtd;
		part->mtd.name = part->name;
		part->mtd.erase = efx_mtd_erase;
		part->mtd.read = efx_mtd->ops->read;
		part->mtd.write = efx_mtd->ops->write;
		part->mtd.sync = efx_mtd_sync;

		if (add_mtd_device(&part->mtd))
			goto fail;
	}

	list_add(&efx_mtd->node, &efx->mtd_list);
	return 0;

fail:
	while (part != &efx_mtd->part[0]) {
		--part;
		efx_mtd_remove_partition(part);
	}
	/* add_mtd_device() returns 1 if the MTD table is full */
	return -ENOMEM;
}

void efx_mtd_remove(struct efx_nic *efx)
{
	struct efx_mtd *efx_mtd, *next;

	WARN_ON(efx_dev_registered(efx));

	list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
		efx_mtd_remove_device(efx_mtd);
}

void efx_mtd_rename(struct efx_nic *efx)
{
	struct efx_mtd *efx_mtd;

	ASSERT_RTNL();

	list_for_each_entry(efx_mtd, &efx->mtd_list, node)
		efx_mtd_rename_device(efx_mtd);
}

int efx_mtd_probe(struct efx_nic *efx)
{
	return falcon_mtd_probe(efx);
}

/* Implementation of MTD operations for Falcon */
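
/*
 * These wrappers serialise access to the SPI bus with efx->spi_lock and,
 * for read/erase/write, translate partition-relative offsets into
 * absolute offsets on the SPI device before calling the Falcon helpers.
 */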
static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
			   size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	int rc;

	rc = mutex_lock_interruptible(&efx->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_read(efx, spi, part->offset + start, len,
			     retlen, buffer);
	mutex_unlock(&efx->spi_lock);
	return rc;
}

static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	int rc;

	rc = mutex_lock_interruptible(&efx->spi_lock);
	if (rc)
		return rc;
	rc = efx_spi_erase(efx_mtd, part->offset + start, len);
	mutex_unlock(&efx->spi_lock);
	return rc;
}

static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
			    size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	int rc;

	rc = mutex_lock_interruptible(&efx->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_write(efx, spi, part->offset + start, len,
			      retlen, buffer);
	mutex_unlock(&efx->spi_lock);
	return rc;
}

static int falcon_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	int rc;

	mutex_lock(&efx->spi_lock);
	rc = efx_spi_slow_wait(efx_mtd, true);
	mutex_unlock(&efx->spi_lock);
	return rc;
}

static struct efx_mtd_ops falcon_mtd_ops = {
	.read	= falcon_mtd_read,
	.erase	= falcon_mtd_erase,
	.write	= falcon_mtd_write,
	.sync	= falcon_mtd_sync,
};
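
/*
 * Falcon exposes a single NOR-flash partition covering everything above
 * the boot code region.  For example (hypothetical sizes, not taken from
 * this driver): a 1 MiB flash with FALCON_FLASH_BOOTCODE_START at 0x8000
 * would yield one 0xF8000-byte "sfc_flash_bootrom" partition, erasable in
 * spi->erase_size blocks.
 */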
static int falcon_mtd_probe(struct efx_nic *efx)
{
	struct efx_spi_device *spi = efx->spi_flash;
	struct efx_mtd *efx_mtd;
	int rc;

	ASSERT_RTNL();

	if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START)
		return -ENODEV;

	efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
			  GFP_KERNEL);
	if (!efx_mtd)
		return -ENOMEM;

	efx_mtd->spi = spi;
	efx_mtd->name = "flash";
	efx_mtd->ops = &falcon_mtd_ops;

	efx_mtd->n_parts = 1;

	efx_mtd->part[0].mtd.type = MTD_NORFLASH;
	efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
	efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
	efx_mtd->part[0].mtd.erasesize = spi->erase_size;
	efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
	efx_mtd->part[0].type_name = "sfc_flash_bootrom";

	rc = efx_mtd_probe_device(efx, efx_mtd);
	if (rc)
		kfree(efx_mtd);
	return rc;
}