nand_ecc.c
/*
 * This file contains an ECC algorithm that detects and corrects 1 bit
 * errors in a 256 byte block of data.
 *
 * drivers/mtd/nand/nand_ecc.c
 *
 * Copyright © 2008 Koninklijke Philips Electronics NV.
 * Author: Frans Meulenbroeks
 *
 * Completely replaces the previous ECC implementation which was written by:
 * Steven J. Hill (sjhill@realitydiluted.com)
 * Thomas Gleixner (tglx@linutronix.de)
 *
 * Information on how this algorithm works and how it was developed
 * can be found in Documentation/mtd/nand_ecc.txt
 *
 * This file is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 or (at your option) any
 * later version.
 *
 * This file is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this file; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 */

/*
 * The STANDALONE macro is useful when running the code outside the kernel,
 * e.g. when running the code in a testbed or a benchmark program.
 * When STANDALONE is used, the module related macros are defined away
 * and the linux include files are not used.
 * Instead a private declaration of mtd_info is given to satisfy the compiler
 * (the code does not use mtd_info, so the code does not care).
 */
#ifndef STANDALONE
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/nand_ecc.h>
#include <asm/byteorder.h>
#else
#include <stdint.h>
struct mtd_info;
#define EXPORT_SYMBOL(x)  /* x */
#define MODULE_LICENSE(x)  /* x */
#define MODULE_AUTHOR(x)  /* x */
#define MODULE_DESCRIPTION(x)  /* x */
#define printk printf
#define KERN_ERR ""
#endif
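
/*
 * A standalone build might look like the following (an assumed invocation,
 * not part of the kernel build system):
 *
 *	gcc -DSTANDALONE -c nand_ecc.c
 *
 * Note that a STANDALONE testbench also needs a declaration of printf
 * (e.g. by including <stdio.h>), since printk is mapped to printf above.
 */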

/*
 * invparity is a 256 byte table that contains the odd parity
 * for each byte. So if the number of bits in a byte is even,
 * the array element is 1, and when the number of bits is odd
 * the array element is 0.
 */
static const char invparity[256] = {
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
};
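
/*
 * For illustration only (not used by the driver): invparity[i] is simply
 * the complement of the bit parity of i, so an equivalent table could be
 * regenerated with something like
 *
 *	for (i = 0; i < 256; i++) {
 *		v = i;
 *		v ^= v >> 4;
 *		v ^= v >> 2;
 *		v ^= v >> 1;
 *		invparity[i] = (v & 1) ^ 1;
 *	}
 */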

/*
 * bitsperbyte contains the number of bits per byte
 * this is only used for testing and repairing parity
 * (a precalculated value slightly improves performance)
 */
static const char bitsperbyte[256] = {
	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
};
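
/*
 * For illustration only: bitsperbyte[i] is the population count of i,
 * i.e. the table is equivalent to
 *
 *	for (i = 0; i < 256; i++)
 *		for (v = i, bitsperbyte[i] = 0; v; v >>= 1)
 *			bitsperbyte[i] += v & 1;
 */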

/*
 * addressbits is a lookup table to filter out the bits from the xor-ed
 * ecc data that identify the faulty location.
 * this is only used for repairing parity
 * see the comments in nand_correct_data for more details
 */
static const char addressbits[256] = {
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
};
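
/*
 * For illustration only: addressbits[b] packs bits 1, 3, 5 and 7 of b
 * into bits 0..3 of the result. For a single bit error the syndrome
 * bytes consist of complementary bit pairs, so the odd-numbered bits
 * alone carry the address information:
 *
 *	addressbits[b] == ((b >> 1) & 1) | ((b >> 2) & 2) |
 *			  ((b >> 3) & 4) | ((b >> 4) & 8)
 */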

/**
 * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block
 * @mtd: MTD block structure (unused)
 * @buf: raw data
 * @code: buffer for ECC
 */
int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
		       unsigned char *code)
{
	int i;
	const uint32_t *bp = (uint32_t *)buf;
	uint32_t cur;		/* current value in buffer */
	/* rp0..rp15 are the various accumulated parities (per byte) */
	uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
	uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15;
	uint32_t par;		/* the cumulative parity for all data */
	uint32_t tmppar;	/* the cumulative parity for this iteration;
				   for rp12 and rp14 at the end of the loop */

	par = 0;
	rp4 = 0;
	rp6 = 0;
	rp8 = 0;
	rp10 = 0;
	rp12 = 0;
	rp14 = 0;

	/*
	 * The loop is unrolled a number of times; this avoids if statements
	 * to decide on which rp value to update.
	 * Also we process the data by longwords.
	 * Note: passing unaligned data might give a performance penalty.
	 * It is assumed that the buffers are aligned.
	 * tmppar is the cumulative sum of this iteration;
	 * it is needed for calculating rp12, rp14 and par and
	 * also used as a performance improvement for rp6, rp8 and rp10.
	 */
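	/*
	 * For illustration only, the unrolled loop below is equivalent to
	 * the following straightforward (but slower) version, where each
	 * rpN accumulates the longwords whose index has the corresponding
	 * address bit cleared (rp0..rp3 are derived from par later on):
	 *
	 *	for (i = 0; i < 64; i++) {
	 *		cur = bp[i];
	 *		par ^= cur;
	 *		if ((i & 0x01) == 0) rp4 ^= cur;
	 *		if ((i & 0x02) == 0) rp6 ^= cur;
	 *		if ((i & 0x04) == 0) rp8 ^= cur;
	 *		if ((i & 0x08) == 0) rp10 ^= cur;
	 *		if ((i & 0x10) == 0) rp12 ^= cur;
	 *		if ((i & 0x20) == 0) rp14 ^= cur;
	 *	}
	 */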
	for (i = 0; i < 4; i++) {
		cur = *bp++;
		tmppar = cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= tmppar;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp8 ^= tmppar;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp10 ^= tmppar;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp6 ^= cur;
		rp8 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= cur;
		rp8 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp8 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp8 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp6 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		rp4 ^= cur;
		cur = *bp++;
		tmppar ^= cur;
		par ^= tmppar;
		if ((i & 0x1) == 0)
			rp12 ^= tmppar;
		if ((i & 0x2) == 0)
			rp14 ^= tmppar;
	}

	/*
	 * handle the fact that we use longword operations:
	 * we'll bring rp4..rp14 back to single byte entities by shifting and
	 * xoring, first folding the upper and lower 16 bits,
	 * then the upper and lower 8 bits.
	 */
	rp4 ^= (rp4 >> 16);
	rp4 ^= (rp4 >> 8);
	rp4 &= 0xff;
	rp6 ^= (rp6 >> 16);
	rp6 ^= (rp6 >> 8);
	rp6 &= 0xff;
	rp8 ^= (rp8 >> 16);
	rp8 ^= (rp8 >> 8);
	rp8 &= 0xff;
	rp10 ^= (rp10 >> 16);
	rp10 ^= (rp10 >> 8);
	rp10 &= 0xff;
	rp12 ^= (rp12 >> 16);
	rp12 ^= (rp12 >> 8);
	rp12 &= 0xff;
	rp14 ^= (rp14 >> 16);
	rp14 ^= (rp14 >> 8);
	rp14 &= 0xff;

	/*
	 * we also need to calculate the row parity for rp0..rp3
	 * This is present in par, because par is now
	 * rp3 rp3 rp2 rp2 in little endian and
	 * rp2 rp2 rp3 rp3 in big endian
	 * as well as
	 * rp1 rp0 rp1 rp0 in little endian and
	 * rp0 rp1 rp0 rp1 in big endian
	 * First calculate rp2 and rp3
	 */
#ifdef __BIG_ENDIAN
	rp2 = (par >> 16);
	rp2 ^= (rp2 >> 8);
	rp2 &= 0xff;
	rp3 = par & 0xffff;
	rp3 ^= (rp3 >> 8);
	rp3 &= 0xff;
#else
	rp3 = (par >> 16);
	rp3 ^= (rp3 >> 8);
	rp3 &= 0xff;
	rp2 = par & 0xffff;
	rp2 ^= (rp2 >> 8);
	rp2 &= 0xff;
#endif

	/* reduce par to 16 bits then calculate rp1 and rp0 */
	par ^= (par >> 16);
#ifdef __BIG_ENDIAN
	rp0 = (par >> 8) & 0xff;
	rp1 = (par & 0xff);
#else
	rp1 = (par >> 8) & 0xff;
	rp0 = (par & 0xff);
#endif

	/* finally reduce par to 8 bits */
	par ^= (par >> 8);
	par &= 0xff;

	/*
	 * and calculate rp5..rp15
	 * note that par = rp4 ^ rp5 and due to the commutative property
	 * of the ^ operator we can say:
	 * rp5 = (par ^ rp4);
	 * The & 0xff seems superfluous, but benchmarking showed that
	 * leaving it out gives slightly worse results. No idea why, probably
	 * it has to do with the way the pipeline in the Pentium is organized.
	 */
	rp5 = (par ^ rp4) & 0xff;
	rp7 = (par ^ rp6) & 0xff;
	rp9 = (par ^ rp8) & 0xff;
	rp11 = (par ^ rp10) & 0xff;
	rp13 = (par ^ rp12) & 0xff;
	rp15 = (par ^ rp14) & 0xff;

	/*
	 * Finally calculate the ECC bits.
	 * Again here it might seem that there are performance optimisations
	 * possible, but benchmarks showed that on the system this was
	 * developed on, the code below is the fastest.
	 */
#ifdef CONFIG_MTD_NAND_ECC_SMC
	code[0] =
	    (invparity[rp7] << 7) |
	    (invparity[rp6] << 6) |
	    (invparity[rp5] << 5) |
	    (invparity[rp4] << 4) |
	    (invparity[rp3] << 3) |
	    (invparity[rp2] << 2) |
	    (invparity[rp1] << 1) |
	    (invparity[rp0]);
	code[1] =
	    (invparity[rp15] << 7) |
	    (invparity[rp14] << 6) |
	    (invparity[rp13] << 5) |
	    (invparity[rp12] << 4) |
	    (invparity[rp11] << 3) |
	    (invparity[rp10] << 2) |
	    (invparity[rp9] << 1) |
	    (invparity[rp8]);
#else
	code[1] =
	    (invparity[rp7] << 7) |
	    (invparity[rp6] << 6) |
	    (invparity[rp5] << 5) |
	    (invparity[rp4] << 4) |
	    (invparity[rp3] << 3) |
	    (invparity[rp2] << 2) |
	    (invparity[rp1] << 1) |
	    (invparity[rp0]);
	code[0] =
	    (invparity[rp15] << 7) |
	    (invparity[rp14] << 6) |
	    (invparity[rp13] << 5) |
	    (invparity[rp12] << 4) |
	    (invparity[rp11] << 3) |
	    (invparity[rp10] << 2) |
	    (invparity[rp9] << 1) |
	    (invparity[rp8]);
#endif
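	/*
	 * code[2] holds the column parities: each mask selects a set of
	 * bit columns (0x55/0xaa the even/odd bits, 0x33/0xcc bits 0,1,4,5
	 * vs. 2,3,6,7, 0x0f/0xf0 the lower/upper nibble) and invparity
	 * turns the selected bits of par into a parity bit; the lowest
	 * two bits of code[2] are always 1.
	 */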
	code[2] =
	    (invparity[par & 0xf0] << 7) |
	    (invparity[par & 0x0f] << 6) |
	    (invparity[par & 0xcc] << 5) |
	    (invparity[par & 0x33] << 4) |
	    (invparity[par & 0xaa] << 3) |
	    (invparity[par & 0x55] << 2) |
	    3;

	return 0;
}
EXPORT_SYMBOL(nand_calculate_ecc);

/**
 * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
 * @mtd: MTD block structure (unused)
 * @buf: raw data read from the chip
 * @read_ecc: ECC from the chip
 * @calc_ecc: the ECC calculated from raw data
 *
 * Detect and correct a 1 bit error for a 256 byte block
 */
int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
		      unsigned char *read_ecc, unsigned char *calc_ecc)
{
	unsigned char b0, b1, b2;
	unsigned char byte_addr, bit_addr;

	/*
	 * b0 to b2 indicate which bit is faulty (if any)
	 * we might need the xor result more than once,
	 * so keep them in a local var
	 */
#ifdef CONFIG_MTD_NAND_ECC_SMC
	b0 = read_ecc[0] ^ calc_ecc[0];
	b1 = read_ecc[1] ^ calc_ecc[1];
#else
	b0 = read_ecc[1] ^ calc_ecc[1];
	b1 = read_ecc[0] ^ calc_ecc[0];
#endif
	b2 = read_ecc[2] ^ calc_ecc[2];

	/* check if there are any bitfaults */

	/* repeated if statements are slightly more efficient than switch ... */
	/* ordered in order of likelihood */
	if ((b0 | b1 | b2) == 0)
		return 0;	/* no error */
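
	/*
	 * For a single bit error every parity pair in the syndrome is
	 * complementary (exactly one of each rpN/rpN+1 pair flips), so
	 * b ^ (b >> 1) must have all pair bits set. The mask for b2 is
	 * 0x54 rather than 0x55 because its two lowest bits carry no
	 * parity information (they are always 1 in the third ecc byte).
	 */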
	if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
	    (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
	    (((b2 ^ (b2 >> 1)) & 0x54) == 0x54)) { /* single bit error */
		/*
		 * rp15/13/11/9/7/5/3/1 indicate which byte is the faulty byte
		 * cp 5/3/1 indicate the faulty bit.
		 * A lookup table (called addressbits) is used to filter
		 * the bits from the byte they are in.
		 * A marginal optimisation is possible by having three
		 * different lookup tables.
		 * One as we have now (for b0), one for b2
		 * (that would avoid the >> 1), and one for b1 (with all values
		 * << 4). However it was felt that introducing two more tables
		 * hardly justifies the gain.
		 *
		 * The b2 shift is there to get rid of the lowest two bits.
		 * We could also do addressbits[b2] >> 1 but for the
		 * performance it does not make any difference.
		 */
		byte_addr = (addressbits[b1] << 4) + addressbits[b0];
		bit_addr = addressbits[b2 >> 2];
		/* flip the bit */
		buf[byte_addr] ^= (1 << bit_addr);
		return 1;
	}

	/* count nr of bits; use table lookup, faster than calculating it */
	if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
		return 1;	/* error in ecc data; no action needed */

	printk(KERN_ERR "uncorrectable error : ");
	return -1;
}
EXPORT_SYMBOL(nand_correct_data);
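
/*
 * Typical usage is through the nand_chip ecc callbacks rather than by
 * calling these functions directly; a direct call sequence would be
 * roughly the following (a sketch, assuming 256 byte ecc steps):
 *
 *	unsigned char calc[3], stored[3];
 *
 *	nand_calculate_ecc(mtd, data, stored);	(when writing the page)
 *	nand_calculate_ecc(mtd, data, calc);	(after reading it back)
 *	if (nand_correct_data(mtd, data, stored, calc) < 0)
 *		the block has an uncorrectable error
 */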

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
MODULE_DESCRIPTION("Generic NAND ECC support");