/* lld_nand.c */
  1. /*
  2. * NAND Flash Controller Device Driver
  3. * Copyright (c) 2009, Intel Corporation and its suppliers.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program; if not, write to the Free Software Foundation, Inc.,
  16. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  17. *
  18. */
  19. #include "lld.h"
  20. #include "lld_nand.h"
  21. #include "lld_cdma.h"
  22. #include "spectraswconfig.h"
  23. #include "flash.h"
  24. #include "ffsdefs.h"
  25. #include <linux/interrupt.h>
  26. #include <linux/delay.h>
  27. #include <linux/wait.h>
  28. #include <linux/mutex.h>
  29. #include "nand_regs.h"
  30. #define SPECTRA_NAND_NAME "nd"
  31. #define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
  32. #define MAX_PAGES_PER_RW 128
  33. #define INT_IDLE_STATE 0
  34. #define INT_READ_PAGE_MAIN 0x01
  35. #define INT_WRITE_PAGE_MAIN 0x02
  36. #define INT_PIPELINE_READ_AHEAD 0x04
  37. #define INT_PIPELINE_WRITE_AHEAD 0x08
  38. #define INT_MULTI_PLANE_READ 0x10
  39. #define INT_MULTI_PLANE_WRITE 0x11
/* Non-zero when hardware ECC is enabled; NAND_Get_Bad_Block uses it to
 * locate the bad-block flag bytes in the spare area */
static u32 enable_ecc;

/* Driver-wide state shared with the rest of the Spectra stack */
struct mrst_nand_info info;

/* Number of flash banks that answered probing with a usable ID */
int totalUsedBanks;

/* Per-bank flag: 1 if the bank holds a usable device, 0 otherwise */
u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];

/* Mapped base of the controller register file and of the indexed
 * data-port window, respectively */
void __iomem *FlashReg;
void __iomem *FlashMem;

/*
 * Default controller configuration words.
 * NOTE(review): these are consumed elsewhere in the driver (not visible
 * in this chunk); each entry's meaning is register-specific -- confirm
 * against the controller datasheet before changing any value.
 */
u16 conf_parameters[] = {
	0x0000,
	0x0000,
	0x01F4,
	0x01F4,
	0x01F4,
	0x01F4,
	0x0000,
	0x0000,
	0x0001,
	0x0000,
	0x0000,
	0x0000,
	0x0000,
	0x0040,
	0x0001,
	0x000A,
	0x000A,
	0x000A,
	0x0000,
	0x0000,
	0x0005,
	0x0012,
	0x000C
};
  71. u16 NAND_Get_Bad_Block(u32 block)
  72. {
  73. u32 status = PASS;
  74. u32 flag_bytes = 0;
  75. u32 skip_bytes = DeviceInfo.wSpareSkipBytes;
  76. u32 page, i;
  77. u8 *pReadSpareBuf = buf_get_bad_block;
  78. if (enable_ecc)
  79. flag_bytes = DeviceInfo.wNumPageSpareFlag;
  80. for (page = 0; page < 2; page++) {
  81. status = NAND_Read_Page_Spare(pReadSpareBuf, block, page, 1);
  82. if (status != PASS)
  83. return READ_ERROR;
  84. for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
  85. if (pReadSpareBuf[i] != 0xff)
  86. return DEFECTIVE_BLOCK;
  87. }
  88. for (page = 1; page < 3; page++) {
  89. status = NAND_Read_Page_Spare(pReadSpareBuf, block,
  90. DeviceInfo.wPagesPerBlock - page , 1);
  91. if (status != PASS)
  92. return READ_ERROR;
  93. for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
  94. if (pReadSpareBuf[i] != 0xff)
  95. return DEFECTIVE_BLOCK;
  96. }
  97. return GOOD_BLOCK;
  98. }
/*
 * Reset every NAND bank behind the controller.
 *
 * For each bank: clear any stale reset-complete/time-out interrupt
 * bits, issue a DEVICE_RESET, busy-wait until the controller raises
 * either RST_COMP or TIME_OUT, then clear the status bits again on the
 * way out.  A per-bank time-out is only logged, never returned.
 *
 * Returns PASS unconditionally.
 *
 * NOTE(review): the four lookup tables below have exactly 4 entries,
 * so this code assumes LLD_MAX_FLASH_BANKS <= 4 -- confirm against its
 * definition in the headers.
 * NOTE(review): the polling loops are unbounded; they rely on the
 * controller always raising TIME_OUT eventually.
 */
u16 NAND_Flash_Reset(void)
{
	u32 i;
	u32 intr_status_rst_comp[4] = {INTR_STATUS0__RST_COMP,
		INTR_STATUS1__RST_COMP,
		INTR_STATUS2__RST_COMP,
		INTR_STATUS3__RST_COMP};
	u32 intr_status_time_out[4] = {INTR_STATUS0__TIME_OUT,
		INTR_STATUS1__TIME_OUT,
		INTR_STATUS2__TIME_OUT,
		INTR_STATUS3__TIME_OUT};
	u32 intr_status[4] = {INTR_STATUS0, INTR_STATUS1,
		INTR_STATUS2, INTR_STATUS3};
	u32 device_reset_banks[4] = {DEVICE_RESET__BANK0,
		DEVICE_RESET__BANK1,
		DEVICE_RESET__BANK2,
		DEVICE_RESET__BANK3};

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Clear stale completion/time-out bits before issuing resets */
	for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
		iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
			FlashReg + intr_status[i]);

	for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
		iowrite32(device_reset_banks[i], FlashReg + DEVICE_RESET);
		/* Spin until the bank reports completion or time-out */
		while (!(ioread32(FlashReg + intr_status[i]) &
			(intr_status_rst_comp[i] | intr_status_time_out[i])))
			;
		if (ioread32(FlashReg + intr_status[i]) &
			intr_status_time_out[i])
			nand_dbg_print(NAND_DBG_WARN,
			"NAND Reset operation timed out on bank %d\n", i);
	}

	/* Acknowledge (clear) the status bits raised by the resets */
	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
		iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
			FlashReg + intr_status[i]);

	return PASS;
}
  136. static void NAND_ONFi_Timing_Mode(u16 mode)
  137. {
  138. u16 Trea[6] = {40, 30, 25, 20, 20, 16};
  139. u16 Trp[6] = {50, 25, 17, 15, 12, 10};
  140. u16 Treh[6] = {30, 15, 15, 10, 10, 7};
  141. u16 Trc[6] = {100, 50, 35, 30, 25, 20};
  142. u16 Trhoh[6] = {0, 15, 15, 15, 15, 15};
  143. u16 Trloh[6] = {0, 0, 0, 0, 5, 5};
  144. u16 Tcea[6] = {100, 45, 30, 25, 25, 25};
  145. u16 Tadl[6] = {200, 100, 100, 100, 70, 70};
  146. u16 Trhw[6] = {200, 100, 100, 100, 100, 100};
  147. u16 Trhz[6] = {200, 100, 100, 100, 100, 100};
  148. u16 Twhr[6] = {120, 80, 80, 60, 60, 60};
  149. u16 Tcs[6] = {70, 35, 25, 25, 20, 15};
  150. u16 TclsRising = 1;
  151. u16 data_invalid_rhoh, data_invalid_rloh, data_invalid;
  152. u16 dv_window = 0;
  153. u16 en_lo, en_hi;
  154. u16 acc_clks;
  155. u16 addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
  156. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  157. __FILE__, __LINE__, __func__);
  158. en_lo = CEIL_DIV(Trp[mode], CLK_X);
  159. en_hi = CEIL_DIV(Treh[mode], CLK_X);
  160. #if ONFI_BLOOM_TIME
  161. if ((en_hi * CLK_X) < (Treh[mode] + 2))
  162. en_hi++;
  163. #endif
  164. if ((en_lo + en_hi) * CLK_X < Trc[mode])
  165. en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
  166. if ((en_lo + en_hi) < CLK_MULTI)
  167. en_lo += CLK_MULTI - en_lo - en_hi;
  168. while (dv_window < 8) {
  169. data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
  170. data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
  171. data_invalid =
  172. data_invalid_rhoh <
  173. data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
  174. dv_window = data_invalid - Trea[mode];
  175. if (dv_window < 8)
  176. en_lo++;
  177. }
  178. acc_clks = CEIL_DIV(Trea[mode], CLK_X);
  179. while (((acc_clks * CLK_X) - Trea[mode]) < 3)
  180. acc_clks++;
  181. if ((data_invalid - acc_clks * CLK_X) < 2)
  182. nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
  183. __FILE__, __LINE__);
  184. addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
  185. re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
  186. re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
  187. we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
  188. cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
  189. if (!TclsRising)
  190. cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
  191. if (cs_cnt == 0)
  192. cs_cnt = 1;
  193. if (Tcea[mode]) {
  194. while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
  195. cs_cnt++;
  196. }
  197. #if MODE5_WORKAROUND
  198. if (mode == 5)
  199. acc_clks = 5;
  200. #endif
  201. /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
  202. if ((ioread32(FlashReg + MANUFACTURER_ID) == 0) &&
  203. (ioread32(FlashReg + DEVICE_ID) == 0x88))
  204. acc_clks = 6;
  205. iowrite32(acc_clks, FlashReg + ACC_CLKS);
  206. iowrite32(re_2_we, FlashReg + RE_2_WE);
  207. iowrite32(re_2_re, FlashReg + RE_2_RE);
  208. iowrite32(we_2_re, FlashReg + WE_2_RE);
  209. iowrite32(addr_2_data, FlashReg + ADDR_2_DATA);
  210. iowrite32(en_lo, FlashReg + RDWR_EN_LO_CNT);
  211. iowrite32(en_hi, FlashReg + RDWR_EN_HI_CNT);
  212. iowrite32(cs_cnt, FlashReg + CS_SETUP_CNT);
  213. }
/*
 * Indexed write to the controller through the data-port window:
 * the target address goes to the window base, then the data word to
 * offset 0x10.  The two MMIO writes must stay in this order.
 */
static void index_addr(u32 address, u32 data)
{
	iowrite32(address, FlashMem);
	iowrite32(data, FlashMem + 0x10);
}
/*
 * Indexed read: write the target address to the data-port window base,
 * then read the result word back from offset 0x10 into *pdata.
 */
static void index_addr_read_data(u32 address, u32 *pdata)
{
	iowrite32(address, FlashMem);
	*pdata = ioread32(FlashMem + 0x10);
}
  224. static void set_ecc_config(void)
  225. {
  226. #if SUPPORT_8BITECC
  227. if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
  228. (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) <= 128))
  229. iowrite32(8, FlashReg + ECC_CORRECTION);
  230. #endif
  231. if ((ioread32(FlashReg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
  232. == 1) {
  233. DeviceInfo.wECCBytesPerSector = 4;
  234. DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
  235. DeviceInfo.wNumPageSpareFlag =
  236. DeviceInfo.wPageSpareSize -
  237. DeviceInfo.wPageDataSize /
  238. (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
  239. DeviceInfo.wECCBytesPerSector
  240. - DeviceInfo.wSpareSkipBytes;
  241. } else {
  242. DeviceInfo.wECCBytesPerSector =
  243. (ioread32(FlashReg + ECC_CORRECTION) &
  244. ECC_CORRECTION__VALUE) * 13 / 8;
  245. if ((DeviceInfo.wECCBytesPerSector) % 2 == 0)
  246. DeviceInfo.wECCBytesPerSector += 2;
  247. else
  248. DeviceInfo.wECCBytesPerSector += 1;
  249. DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
  250. DeviceInfo.wNumPageSpareFlag = DeviceInfo.wPageSpareSize -
  251. DeviceInfo.wPageDataSize /
  252. (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
  253. DeviceInfo.wECCBytesPerSector
  254. - DeviceInfo.wSpareSkipBytes;
  255. }
  256. }
/*
 * Read ONFI parameter data the controller has latched and configure
 * the interface timing accordingly.
 *
 * Sequence: reset banks 0..3 one after another (each bank is reset
 * only if the previous one completed without time-out), acknowledge
 * the time-out status bits, compute wTotalBlocks from the LUN count
 * and per-LUN block count, select the fastest advertised timing mode,
 * and read the 3rd ID byte to flag MLC devices.
 *
 * Returns PASS on success, FAIL when the device advertises no ONFI
 * timing mode.
 *
 * NOTE(review): the polling loops are unbounded; they rely on the
 * controller raising RST_COMP or TIME_OUT eventually.
 */
static u16 get_onfi_nand_para(void)
{
	int i;
	u16 blks_lun_l, blks_lun_h, n_of_luns;
	u32 blockperlun, id;

	/* Reset bank 0 and wait for completion or time-out */
	iowrite32(DEVICE_RESET__BANK0, FlashReg + DEVICE_RESET);
	while (!((ioread32(FlashReg + INTR_STATUS0) &
		INTR_STATUS0__RST_COMP) |
		(ioread32(FlashReg + INTR_STATUS0) &
		INTR_STATUS0__TIME_OUT)))
		;
	if (ioread32(FlashReg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
		/* Bank 0 OK: reset bank 1 */
		iowrite32(DEVICE_RESET__BANK1, FlashReg + DEVICE_RESET);
		while (!((ioread32(FlashReg + INTR_STATUS1) &
			INTR_STATUS1__RST_COMP) |
			(ioread32(FlashReg + INTR_STATUS1) &
			INTR_STATUS1__TIME_OUT)))
			;
		if (ioread32(FlashReg + INTR_STATUS1) &
			INTR_STATUS1__RST_COMP) {
			/* Bank 1 OK: reset bank 2 */
			iowrite32(DEVICE_RESET__BANK2,
				FlashReg + DEVICE_RESET);
			while (!((ioread32(FlashReg + INTR_STATUS2) &
				INTR_STATUS2__RST_COMP) |
				(ioread32(FlashReg + INTR_STATUS2) &
				INTR_STATUS2__TIME_OUT)))
				;
			if (ioread32(FlashReg + INTR_STATUS2) &
				INTR_STATUS2__RST_COMP) {
				/* Bank 2 OK: reset bank 3 */
				iowrite32(DEVICE_RESET__BANK3,
					FlashReg + DEVICE_RESET);
				while (!((ioread32(FlashReg + INTR_STATUS3) &
					INTR_STATUS3__RST_COMP) |
					(ioread32(FlashReg + INTR_STATUS3) &
					INTR_STATUS3__TIME_OUT)))
					;
			} else {
				printk(KERN_ERR "Getting a time out for bank 2!\n");
			}
		} else {
			printk(KERN_ERR "Getting a time out for bank 1!\n");
		}
	}

	/* Acknowledge any time-out status left behind by the resets */
	iowrite32(INTR_STATUS0__TIME_OUT, FlashReg + INTR_STATUS0);
	iowrite32(INTR_STATUS1__TIME_OUT, FlashReg + INTR_STATUS1);
	iowrite32(INTR_STATUS2__TIME_OUT, FlashReg + INTR_STATUS2);
	iowrite32(INTR_STATUS3__TIME_OUT, FlashReg + INTR_STATUS3);

	/* Capture the ONFI feature/timing words latched by the controller */
	DeviceInfo.wONFIDevFeatures =
		ioread32(FlashReg + ONFI_DEVICE_FEATURES);
	DeviceInfo.wONFIOptCommands =
		ioread32(FlashReg + ONFI_OPTIONAL_COMMANDS);
	DeviceInfo.wONFITimingMode =
		ioread32(FlashReg + ONFI_TIMING_MODE);
	DeviceInfo.wONFIPgmCacheTimingMode =
		ioread32(FlashReg + ONFI_PGM_CACHE_TIMING_MODE);

	/* Total blocks = LUN count * blocks per LUN (two 16-bit halves) */
	n_of_luns = ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
	blks_lun_l = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
	blks_lun_h = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
	blockperlun = (blks_lun_h << 16) | blks_lun_l;
	DeviceInfo.wTotalBlocks = n_of_luns * blockperlun;

	if (!(ioread32(FlashReg + ONFI_TIMING_MODE) &
		ONFI_TIMING_MODE__VALUE))
		return FAIL;

	/* Pick the highest (fastest) advertised timing mode */
	for (i = 5; i > 0; i--) {
		if (ioread32(FlashReg + ONFI_TIMING_MODE) & (0x01 << i))
			break;
	}
	NAND_ONFi_Timing_Mode(i);

	/* READ ID (0x90) at address 0; the 3rd ID byte carries the
	 * cell-type bits used to detect MLC parts */
	index_addr(MODE_11 | 0, 0x90);
	index_addr(MODE_11 | 1, 0);
	for (i = 0; i < 3; i++)
		index_addr_read_data(MODE_11 | 2, &id);
	nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
	DeviceInfo.MLCDevice = id & 0x0C;

	/* By now, all the ONFI devices we know support the page cache */
	/* rw feature. So here we enable the pipeline_rw_ahead feature */
	/* iowrite32(1, FlashReg + CACHE_WRITE_ENABLE); */
	/* iowrite32(1, FlashReg + CACHE_READ_ENABLE); */

	return PASS;
}
  338. static void get_samsung_nand_para(void)
  339. {
  340. u8 no_of_planes;
  341. u32 blk_size;
  342. u64 plane_size, capacity;
  343. u32 id_bytes[5];
  344. int i;
  345. index_addr((u32)(MODE_11 | 0), 0x90);
  346. index_addr((u32)(MODE_11 | 1), 0);
  347. for (i = 0; i < 5; i++)
  348. index_addr_read_data((u32)(MODE_11 | 2), &id_bytes[i]);
  349. nand_dbg_print(NAND_DBG_DEBUG,
  350. "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
  351. id_bytes[0], id_bytes[1], id_bytes[2],
  352. id_bytes[3], id_bytes[4]);
  353. if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
  354. /* Set timing register values according to datasheet */
  355. iowrite32(5, FlashReg + ACC_CLKS);
  356. iowrite32(20, FlashReg + RE_2_WE);
  357. iowrite32(12, FlashReg + WE_2_RE);
  358. iowrite32(14, FlashReg + ADDR_2_DATA);
  359. iowrite32(3, FlashReg + RDWR_EN_LO_CNT);
  360. iowrite32(2, FlashReg + RDWR_EN_HI_CNT);
  361. iowrite32(2, FlashReg + CS_SETUP_CNT);
  362. }
  363. no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
  364. plane_size = (u64)64 << ((id_bytes[4] & 0x70) >> 4);
  365. blk_size = 64 << ((ioread32(FlashReg + DEVICE_PARAM_1) & 0x30) >> 4);
  366. capacity = (u64)128 * plane_size * no_of_planes;
  367. DeviceInfo.wTotalBlocks = (u32)GLOB_u64_Div(capacity, blk_size);
  368. }
/*
 * Detect parameters of a Toshiba NAND device: apply the controller's
 * spare-size workaround where needed, then obtain the block count from
 * a platform scratch register (falling back to a default).
 */
static void get_toshiba_nand_para(void)
{
	void __iomem *scratch_reg;
	u32 tmp;

	/* Workaround to fix a controller bug which reports a wrong */
	/* spare area size for some kind of Toshiba NAND device */
	if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
		(ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) == 64)) {
		iowrite32(216, FlashReg + DEVICE_SPARE_AREA_SIZE);
		/* Logical spare size = per-device spare * device count */
		tmp = ioread32(FlashReg + DEVICES_CONNECTED) *
			ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
		iowrite32(tmp, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
#if SUPPORT_15BITECC
		iowrite32(15, FlashReg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		iowrite32(8, FlashReg + ECC_CORRECTION);
#endif
	}

	/* As Toshiba NAND can not provide it's block number, */
	/* so here we need user to provide the correct block */
	/* number in a scratch register before the Linux NAND */
	/* driver is loaded. If no valid value found in the scratch */
	/* register, then we use default block number value */
	scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
	if (!scratch_reg) {
		printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
			__FILE__, __LINE__);
		DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	} else {
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: ioremap reg address: 0x%p\n", scratch_reg);
		/* Scratch byte holds log2 of the block count; anything
		 * below 512 blocks is treated as invalid */
		DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
		if (DeviceInfo.wTotalBlocks < 512)
			DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
		iounmap(scratch_reg);
	}
}
/*
 * Detect parameters of a Hynix NAND device.
 *
 * For the known 0xD5/0xD7 device IDs the controller's auto-detected
 * geometry is overridden with datasheet values; unknown IDs keep the
 * controller defaults.  The total block count is then read from a
 * platform scratch register, as with Toshiba parts.
 */
static void get_hynix_nand_para(void)
{
	void __iomem *scratch_reg;
	u32 main_size, spare_size;

	switch (DeviceInfo.wDeviceID) {
	case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
	case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
		iowrite32(128, FlashReg + PAGES_PER_BLOCK);
		iowrite32(4096, FlashReg + DEVICE_MAIN_AREA_SIZE);
		iowrite32(224, FlashReg + DEVICE_SPARE_AREA_SIZE);
		/* Logical sizes scale with the number of parallel devices */
		main_size = 4096 * ioread32(FlashReg + DEVICES_CONNECTED);
		spare_size = 224 * ioread32(FlashReg + DEVICES_CONNECTED);
		iowrite32(main_size, FlashReg + LOGICAL_PAGE_DATA_SIZE);
		iowrite32(spare_size, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
		iowrite32(0, FlashReg + DEVICE_WIDTH);
#if SUPPORT_15BITECC
		iowrite32(15, FlashReg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		iowrite32(8, FlashReg + ECC_CORRECTION);
#endif
		DeviceInfo.MLCDevice = 1;
		break;
	default:
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
			"Will use default parameter values instead.\n",
			DeviceInfo.wDeviceID);
	}

	scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
	if (!scratch_reg) {
		printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
			__FILE__, __LINE__);
		DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	} else {
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: ioremap reg address: 0x%p\n", scratch_reg);
		/* Scratch byte holds log2 of the block count; anything
		 * below 512 blocks is treated as invalid */
		DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
		if (DeviceInfo.wTotalBlocks < 512)
			DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
		iounmap(scratch_reg);
	}
}
  448. static void find_valid_banks(void)
  449. {
  450. u32 id[LLD_MAX_FLASH_BANKS];
  451. int i;
  452. totalUsedBanks = 0;
  453. for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
  454. index_addr((u32)(MODE_11 | (i << 24) | 0), 0x90);
  455. index_addr((u32)(MODE_11 | (i << 24) | 1), 0);
  456. index_addr_read_data((u32)(MODE_11 | (i << 24) | 2), &id[i]);
  457. nand_dbg_print(NAND_DBG_DEBUG,
  458. "Return 1st ID for bank[%d]: %x\n", i, id[i]);
  459. if (i == 0) {
  460. if (id[i] & 0x0ff)
  461. GLOB_valid_banks[i] = 1;
  462. } else {
  463. if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
  464. GLOB_valid_banks[i] = 1;
  465. }
  466. totalUsedBanks += GLOB_valid_banks[i];
  467. }
  468. nand_dbg_print(NAND_DBG_DEBUG,
  469. "totalUsedBanks: %d\n", totalUsedBanks);
  470. }
  471. static void detect_partition_feature(void)
  472. {
  473. if (ioread32(FlashReg + FEATURES) & FEATURES__PARTITION) {
  474. if ((ioread32(FlashReg + PERM_SRC_ID_1) &
  475. PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
  476. DeviceInfo.wSpectraStartBlock =
  477. ((ioread32(FlashReg + MIN_MAX_BANK_1) &
  478. MIN_MAX_BANK_1__MIN_VALUE) *
  479. DeviceInfo.wTotalBlocks)
  480. +
  481. (ioread32(FlashReg + MIN_BLK_ADDR_1) &
  482. MIN_BLK_ADDR_1__VALUE);
  483. DeviceInfo.wSpectraEndBlock =
  484. (((ioread32(FlashReg + MIN_MAX_BANK_1) &
  485. MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
  486. DeviceInfo.wTotalBlocks)
  487. +
  488. (ioread32(FlashReg + MAX_BLK_ADDR_1) &
  489. MAX_BLK_ADDR_1__VALUE);
  490. DeviceInfo.wTotalBlocks *= totalUsedBanks;
  491. if (DeviceInfo.wSpectraEndBlock >=
  492. DeviceInfo.wTotalBlocks) {
  493. DeviceInfo.wSpectraEndBlock =
  494. DeviceInfo.wTotalBlocks - 1;
  495. }
  496. DeviceInfo.wDataBlockNum =
  497. DeviceInfo.wSpectraEndBlock -
  498. DeviceInfo.wSpectraStartBlock + 1;
  499. } else {
  500. DeviceInfo.wTotalBlocks *= totalUsedBanks;
  501. DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
  502. DeviceInfo.wSpectraEndBlock =
  503. DeviceInfo.wTotalBlocks - 1;
  504. DeviceInfo.wDataBlockNum =
  505. DeviceInfo.wSpectraEndBlock -
  506. DeviceInfo.wSpectraStartBlock + 1;
  507. }
  508. } else {
  509. DeviceInfo.wTotalBlocks *= totalUsedBanks;
  510. DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
  511. DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
  512. DeviceInfo.wDataBlockNum =
  513. DeviceInfo.wSpectraEndBlock -
  514. DeviceInfo.wSpectraStartBlock + 1;
  515. }
  516. }
/*
 * Dump every DeviceInfo field at NAND_DBG_DEBUG level.
 * Purely diagnostic; no side effects on driver state.
 */
static void dump_device_info(void)
{
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceInfo:\n");
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
		DeviceInfo.wDeviceMaker);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
		DeviceInfo.wDeviceID);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
		DeviceInfo.wDeviceType);
	nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
		DeviceInfo.wSpectraStartBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
		DeviceInfo.wSpectraEndBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
		DeviceInfo.wTotalBlocks);
	nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
		DeviceInfo.wPagesPerBlock);
	nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
		DeviceInfo.wPageSize);
	nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
		DeviceInfo.wPageDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
		DeviceInfo.wPageSpareSize);
	nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
		DeviceInfo.wNumPageSpareFlag);
	nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
		DeviceInfo.wECCBytesPerSector);
	nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
		DeviceInfo.wBlockSize);
	nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
		DeviceInfo.wBlockDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
		DeviceInfo.wDataBlockNum);
	nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
		DeviceInfo.bPlaneNum);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
		DeviceInfo.wDeviceMainAreaSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
		DeviceInfo.wDeviceSpareAreaSize);
	nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
		DeviceInfo.wDevicesConnected);
	nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
		DeviceInfo.wDeviceWidth);
	nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
		DeviceInfo.wHWRevision);
	nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
		DeviceInfo.wHWFeatures);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
		DeviceInfo.wONFIDevFeatures);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
		DeviceInfo.wONFIOptCommands);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
		DeviceInfo.wONFITimingMode);
	nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
		DeviceInfo.wONFIPgmCacheTimingMode);
	nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
		DeviceInfo.MLCDevice ? "Yes" : "No");
	nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
		DeviceInfo.wSpareSkipBytes);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
		DeviceInfo.nBitsInPageNumber);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
		DeviceInfo.nBitsInPageDataSize);
	nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
		DeviceInfo.nBitsInBlockDataSize);
}
/*
 * Identify the attached NAND device and fill in the global DeviceInfo
 * structure (geometry, features, ECC setup, plane count).
 *
 * Vendor parameter readout is tried in order: ONFI parameter page,
 * then Samsung/Toshiba/Hynix quirk handlers keyed on manufacturer ID;
 * unknown makers fall back to GLOB_HWCTL_DEFAULT_BLKS total blocks.
 *
 * Returns PASS on success, FAIL if the ONFI parameter read fails or
 * the controller reports an unsupported plane count.
 */
u16 NAND_Read_Device_ID(void)
{
	u16 status = PASS;
	u8 no_of_planes;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Skip the first 2 spare bytes per device and set the value the
	 * controller substitutes for them (presumably protecting the
	 * bad-block marker area -- confirm against datasheet). */
	iowrite32(0x02, FlashReg + SPARE_AREA_SKIP_BYTES);
	iowrite32(0xffff, FlashReg + SPARE_AREA_MARKER);
	DeviceInfo.wDeviceMaker = ioread32(FlashReg + MANUFACTURER_ID);
	DeviceInfo.wDeviceID = ioread32(FlashReg + DEVICE_ID);
	/* Non-zero cell-type bits (0x0c) in DEVICE_PARAM_0 => MLC flash */
	DeviceInfo.MLCDevice = ioread32(FlashReg + DEVICE_PARAM_0) & 0x0c;

	if (ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
		if (FAIL == get_onfi_nand_para())
			return FAIL;
	} else if (DeviceInfo.wDeviceMaker == 0xEC) { /* Samsung NAND */
		get_samsung_nand_para();
	} else if (DeviceInfo.wDeviceMaker == 0x98) { /* Toshiba NAND */
		get_toshiba_nand_para();
	} else if (DeviceInfo.wDeviceMaker == 0xAD) { /* Hynix NAND */
		get_hynix_nand_para();
	} else {
		DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
			"acc_clks: %d, re_2_we: %d, we_2_re: %d,"
			"addr_2_data: %d, rdwr_en_lo_cnt: %d, "
			"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
			ioread32(FlashReg + ACC_CLKS),
			ioread32(FlashReg + RE_2_WE),
			ioread32(FlashReg + WE_2_RE),
			ioread32(FlashReg + ADDR_2_DATA),
			ioread32(FlashReg + RDWR_EN_LO_CNT),
			ioread32(FlashReg + RDWR_EN_HI_CNT),
			ioread32(FlashReg + CS_SETUP_CNT));

	DeviceInfo.wHWRevision = ioread32(FlashReg + REVISION);
	DeviceInfo.wHWFeatures = ioread32(FlashReg + FEATURES);

	DeviceInfo.wDeviceMainAreaSize =
		ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE);
	DeviceInfo.wDeviceSpareAreaSize =
		ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);

	DeviceInfo.wPageDataSize =
		ioread32(FlashReg + LOGICAL_PAGE_DATA_SIZE);

	/* Note: When using the Micon 4K NAND device, the controller will report
	 * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
	 * And if force set it to 218 bytes, the controller can not work
	 * correctly. So just let it be. But keep in mind that this bug may
	 * cause
	 * other problems in future. - Yunpeng 2008-10-10
	 */
	DeviceInfo.wPageSpareSize =
		ioread32(FlashReg + LOGICAL_PAGE_SPARE_SIZE);

	DeviceInfo.wPagesPerBlock = ioread32(FlashReg + PAGES_PER_BLOCK);

	/* Derived geometry */
	DeviceInfo.wPageSize =
		DeviceInfo.wPageDataSize + DeviceInfo.wPageSpareSize;
	DeviceInfo.wBlockSize =
		DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
	DeviceInfo.wBlockDataSize =
		DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;

	DeviceInfo.wDeviceWidth = ioread32(FlashReg + DEVICE_WIDTH);
	DeviceInfo.wDeviceType =
		((ioread32(FlashReg + DEVICE_WIDTH) > 0) ? 16 : 8);

	DeviceInfo.wDevicesConnected = ioread32(FlashReg + DEVICES_CONNECTED);

	/* Skip bytes scale with the number of devices ganged on one
	 * chip select. */
	DeviceInfo.wSpareSkipBytes =
		ioread32(FlashReg + SPARE_AREA_SKIP_BYTES) *
		DeviceInfo.wDevicesConnected;

	DeviceInfo.nBitsInPageNumber =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
	DeviceInfo.nBitsInPageDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
	DeviceInfo.nBitsInBlockDataSize =
		(u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);

	set_ecc_config();

	/* NUMBER_OF_PLANES encodes plane count as (planes - 1);
	 * only 1/2/4/8 planes are accepted. */
	no_of_planes = ioread32(FlashReg + NUMBER_OF_PLANES) &
		NUMBER_OF_PLANES__VALUE;
	switch (no_of_planes) {
	case 0:
	case 1:
	case 3:
	case 7:
		DeviceInfo.bPlaneNum = no_of_planes + 1;
		break;
	default:
		status = FAIL;
		break;
	}

	find_valid_banks();
	detect_partition_feature();
	dump_device_info();

	return status;
}
/*
 * Unlock the whole flash array by programming the unlock range:
 * MODE_10 sub-command 0x10 sets the start address (block 0) and
 * sub-command 0x11 sets the end address (last block).
 * Always returns PASS.
 */
u16 NAND_UnlockArrayAll(void)
{
	u64 start_addr, end_addr;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	start_addr = 0;
	/* Byte address of the last block, converted to a page address
	 * by shifting out the page-data-size bits */
	end_addr = ((u64)DeviceInfo.wBlockSize *
		(DeviceInfo.wTotalBlocks - 1)) >>
		DeviceInfo.nBitsInPageDataSize;

	index_addr((u32)(MODE_10 | (u32)start_addr), 0x10);
	index_addr((u32)(MODE_10 | (u32)end_addr), 0x11);

	return PASS;
}
  687. void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE)
  688. {
  689. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  690. __FILE__, __LINE__, __func__);
  691. if (INT_ENABLE)
  692. iowrite32(1, FlashReg + GLOBAL_INT_ENABLE);
  693. else
  694. iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
  695. }
/*
 * Erase one flash block.
 * @block: global block number; split into a bank index and an
 *         in-bank address by dividing the block space evenly among
 *         the banks in use.
 *
 * Issues MODE_10 sub-command 1 (erase) and busy-waits on the bank's
 * ERASE_COMP/ERASE_FAIL interrupt status bits.
 * Returns PASS on success, FAIL for an out-of-range block or a
 * controller-reported erase failure.
 */
u16 NAND_Erase_Block(u32 block)
{
	u16 status = PASS;
	u64 flash_add;
	u16 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Byte address of the block within its bank */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize;
	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (block >= DeviceInfo.wTotalBlocks)
		status = FAIL;

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];

		/* Clear any stale erase status bits before starting */
		iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
			FlashReg + intr_status);

		/* Kick off the erase of the addressed block */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)), 1);

		/* Busy-wait until the erase completes or fails */
		while (!(ioread32(FlashReg + intr_status) &
			(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL)))
			;

		if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__ERASE_FAIL)
			status = FAIL;

		/* Acknowledge the status bits */
		iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
			FlashReg + intr_status);
	}

	return status;
}
  728. static u32 Boundary_Check_Block_Page(u32 block, u16 page,
  729. u16 page_count)
  730. {
  731. u32 status = PASS;
  732. if (block >= DeviceInfo.wTotalBlocks)
  733. status = FAIL;
  734. if (page + page_count > DeviceInfo.wPagesPerBlock)
  735. status = FAIL;
  736. return status;
  737. }
/*
 * Read the spare area of a single page (@page_count must be 1) into
 * @read_data.  The transfer is PIO: the page is loaded with spare
 * access enabled, then read 32 bits at a time through a bounce
 * buffer.  With ECC enabled, the spare flag bytes (stored physically
 * at the end of the spare area) are returned first, followed by the
 * remaining spare bytes.
 * Returns PASS, or FAIL on out-of-range arguments.
 */
u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
			u16 page_count)
{
	u32 status = PASS;
	u32 i;
	u64 flash_add;
	u32 PageSpareSize = DeviceInfo.wPageSpareSize;
	u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u8 *page_spare = buf_read_page_spare;

	if (block >= DeviceInfo.wTotalBlocks) {
		printk(KERN_ERR "block too big: %d\n", (int)block);
		status = FAIL;
	}

	if (page >= DeviceInfo.wPagesPerBlock) {
		printk(KERN_ERR "page too big: %d\n", page);
		status = FAIL;
	}

	if (page_count > 1) {
		printk(KERN_ERR "page count too big: %d\n", page_count);
		status = FAIL;
	}

	/* Byte address of the page within its bank */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;
	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];
		/* Acknowledge any stale interrupt status bits */
		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		/* Sub-command 0x41 then load (0x2000 | count): select
		 * spare access and load the page -- pairs with the 0x42
		 * (main access) restore below. */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)),
			0x41);
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)),
			0x2000 | page_count);

		/* Busy-wait for the page load to complete */
		while (!(ioread32(FlashReg + intr_status) &
			INTR_STATUS0__LOAD_COMP))
			;

		/* PIO read of the spare bytes, 32 bits at a time */
		iowrite32((u32)(MODE_01 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)),
			FlashMem);

		for (i = 0; i < (PageSpareSize / 4); i++)
			*((u32 *)page_spare + i) =
				ioread32(FlashMem + 0x10);

		if (enable_ecc) {
			/* Logical layout puts the spare flag bytes
			 * (physically last) at the front of the buffer */
			for (i = 0; i < spareFlagBytes; i++)
				read_data[i] =
					page_spare[PageSpareSize -
						spareFlagBytes + i];
			for (i = 0; i < (PageSpareSize - spareFlagBytes); i++)
				read_data[spareFlagBytes + i] =
					page_spare[i];
		} else {
			for (i = 0; i < PageSpareSize; i++)
				read_data[i] = page_spare[i];
		}

		/* Restore main-area access mode */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
	}

	return status;
}
  803. /* No use function. Should be removed later */
  804. u16 NAND_Write_Page_Spare(u8 *write_data, u32 block, u16 page,
  805. u16 page_count)
  806. {
  807. printk(KERN_ERR
  808. "Error! This function (NAND_Write_Page_Spare) should never"
  809. " be called!\n");
  810. return ERR;
  811. }
/* op value: 0 - DDMA read; 1 - DDMA write */
static void ddma_trans(u8 *data, u64 flash_add,
			u32 flash_bank, int op, u32 numPages)
{
	u32 data_addr;

	/* Map virtual address to bus address for DDMA */
	data_addr = virt_to_bus(data);

	/* Four-step DDMA setup via MODE_10 indexed writes; (2 << 12)
	 * in the data word selects the DMA command group, and the
	 * second nibble is the sub-step:
	 * step 1 (op, numPages): direction and page count at the
	 *         target flash page address;
	 * step 2: upper 16 bits of the DMA bus address;
	 * step 3: lower 16 bits of the DMA bus address;
	 * step 4: commit/kick (the 0x40 in the address field is taken
	 *         from the reference sequence -- presumably a burst/
	 *         interrupt flag; confirm against the controller
	 *         datasheet). */
	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)),
		(u16)(2 << 12) | (op << 8) | numPages);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		((u16)(0x0FFFF & (data_addr >> 16)) << 8)),
		(u16)(2 << 12) | (2 << 8) | 0);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		((u16)(0x0FFFF & data_addr) << 8)),
		(u16)(2 << 12) | (3 << 8) | 0);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(1 << 16) | (0x40 << 8)),
		(u16)(2 << 12) | (4 << 8) | 0);
}
  832. /* If data in buf are all 0xff, then return 1; otherwise return 0 */
  833. static int check_all_1(u8 *buf)
  834. {
  835. int i, j, cnt;
  836. for (i = 0; i < DeviceInfo.wPageDataSize; i++) {
  837. if (buf[i] != 0xff) {
  838. cnt = 0;
  839. nand_dbg_print(NAND_DBG_WARN,
  840. "the first non-0xff data byte is: %d\n", i);
  841. for (j = i; j < DeviceInfo.wPageDataSize; j++) {
  842. nand_dbg_print(NAND_DBG_WARN, "0x%x ", buf[j]);
  843. cnt++;
  844. if (cnt > 8)
  845. break;
  846. }
  847. nand_dbg_print(NAND_DBG_WARN, "\n");
  848. return 0;
  849. }
  850. }
  851. return 1;
  852. }
/*
 * Drain the controller's ECC error records for @bank and repair
 * correctable single-byte errors in @buf (the buffer holding data
 * read from @block/@page; block/page are used only for logging).
 *
 * Each ERR_CORRECTION_INFO read yields one record.  An uncorrectable
 * record (ERROR_TYPE bit set) is logged and fails the whole call; a
 * correctable one is fixed by XOR-ing the reported byte mask into the
 * decoded buffer position.  Loops until LAST_ERR_INFO marks the
 * final record.
 * Returns PASS if every error was correctable, FAIL otherwise.
 */
static int do_ecc_new(unsigned long bank, u8 *buf,
		u32 block, u16 page)
{
	int status = PASS;
	u16 err_page = 0;
	u16 err_byte;
	u8 err_sect;
	u8 err_dev;
	u16 err_fix_info;
	u16 err_addr;
	u32 ecc_sect_size;
	u8 *err_pos;
	u32 err_page_addr[4] = {ERR_PAGE_ADDR0,
		ERR_PAGE_ADDR1, ERR_PAGE_ADDR2, ERR_PAGE_ADDR3};

	/* One ECC sector spans ECC_SECTOR_SIZE bytes on each of the
	 * ganged devices */
	ecc_sect_size = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);

	do {
		err_page = ioread32(FlashReg + err_page_addr[bank]);
		err_addr = ioread32(FlashReg + ECC_ERROR_ADDRESS);
		err_byte = err_addr & ECC_ERROR_ADDRESS__OFFSET;
		err_sect = ((err_addr & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
		err_fix_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
		err_dev = ((err_fix_info & ERR_CORRECTION_INFO__DEVICE_NR)
			>> 8);
		if (err_fix_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
			/* Uncorrectable: log everything we know and
			 * report whether the page looks erased */
			nand_dbg_print(NAND_DBG_WARN,
				"%s, Line %d Uncorrectable ECC error "
				"when read block %d page %d."
				"PTN_INTR register: 0x%x "
				"err_page: %d, err_sect: %d, err_byte: %d, "
				"err_dev: %d, ecc_sect_size: %d, "
				"err_fix_info: 0x%x\n",
				__FILE__, __LINE__, block, page,
				ioread32(FlashReg + PTN_INTR),
				err_page, err_sect, err_byte, err_dev,
				ecc_sect_size, (u32)err_fix_info);

			if (check_all_1(buf))
				nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
					"All 0xff!\n",
					__FILE__, __LINE__);
			else
				nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
					"Not all 0xff!\n",
					__FILE__, __LINE__);
			status = FAIL;
		} else {
			nand_dbg_print(NAND_DBG_WARN,
				"%s, Line %d Found ECC error "
				"when read block %d page %d."
				"err_page: %d, err_sect: %d, err_byte: %d, "
				"err_dev: %d, ecc_sect_size: %d, "
				"err_fix_info: 0x%x\n",
				__FILE__, __LINE__, block, page,
				err_page, err_sect, err_byte, err_dev,
				ecc_sect_size, (u32)err_fix_info);

			/* err_byte >= ECC_SECTOR_SIZE means the flipped
			 * bit was in the ECC bytes themselves; nothing
			 * to patch in the data buffer */
			if (err_byte < ECC_SECTOR_SIZE) {
				err_pos = buf +
					(err_page - page) *
					DeviceInfo.wPageDataSize +
					err_sect * ecc_sect_size +
					err_byte *
					DeviceInfo.wDevicesConnected +
					err_dev;

				*err_pos ^= err_fix_info &
					ERR_CORRECTION_INFO__BYTEMASK;
			}
		}
	} while (!(err_fix_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));

	return status;
}
  922. u16 NAND_Read_Page_Main_Polling(u8 *read_data,
  923. u32 block, u16 page, u16 page_count)
  924. {
  925. u32 status = PASS;
  926. u64 flash_add;
  927. u32 intr_status = 0;
  928. u32 flash_bank;
  929. u32 intr_status_addresses[4] = {INTR_STATUS0,
  930. INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
  931. u8 *read_data_l;
  932. nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
  933. __FILE__, __LINE__, __func__);
  934. status = Boundary_Check_Block_Page(block, page, page_count);
  935. if (status != PASS)
  936. return status;
  937. flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
  938. * DeviceInfo.wBlockDataSize +
  939. (u64)page * DeviceInfo.wPageDataSize;
  940. flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
  941. iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
  942. intr_status = intr_status_addresses[flash_bank];
  943. iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
  944. if (page_count > 1) {
  945. read_data_l = read_data;
  946. while (page_count > MAX_PAGES_PER_RW) {
  947. if (ioread32(FlashReg + MULTIPLANE_OPERATION))
  948. status = NAND_Multiplane_Read(read_data_l,
  949. block, page, MAX_PAGES_PER_RW);
  950. else
  951. status = NAND_Pipeline_Read_Ahead_Polling(
  952. read_data_l, block, page,
  953. MAX_PAGES_PER_RW);
  954. if (status == FAIL)
  955. return status;
  956. read_data_l += DeviceInfo.wPageDataSize *
  957. MAX_PAGES_PER_RW;
  958. page_count -= MAX_PAGES_PER_RW;
  959. page += MAX_PAGES_PER_RW;
  960. }
  961. if (ioread32(FlashReg + MULTIPLANE_OPERATION))
  962. status = NAND_Multiplane_Read(read_data_l,
  963. block, page, page_count);
  964. else
  965. status = NAND_Pipeline_Read_Ahead_Polling(
  966. read_data_l, block, page, page_count);
  967. return status;
  968. }
  969. iowrite32(1, FlashReg + DMA_ENABLE);
  970. while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
  971. ;
  972. iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
  973. iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
  974. ddma_trans(read_data, flash_add, flash_bank, 0, 1);
  975. if (enable_ecc) {
  976. while (!(ioread32(FlashReg + intr_status) &
  977. (INTR_STATUS0__ECC_TRANSACTION_DONE |
  978. INTR_STATUS0__ECC_ERR)))
  979. ;
  980. if (ioread32(FlashReg + intr_status) &
  981. INTR_STATUS0__ECC_ERR) {
  982. iowrite32(INTR_STATUS0__ECC_ERR,
  983. FlashReg + intr_status);
  984. status = do_ecc_new(flash_bank, read_data,
  985. block, page);
  986. }
  987. if (ioread32(FlashReg + intr_status) &
  988. INTR_STATUS0__ECC_TRANSACTION_DONE &
  989. INTR_STATUS0__ECC_ERR)
  990. iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE |
  991. INTR_STATUS0__ECC_ERR,
  992. FlashReg + intr_status);
  993. else if (ioread32(FlashReg + intr_status) &
  994. INTR_STATUS0__ECC_TRANSACTION_DONE)
  995. iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
  996. FlashReg + intr_status);
  997. else if (ioread32(FlashReg + intr_status) &
  998. INTR_STATUS0__ECC_ERR)
  999. iowrite32(INTR_STATUS0__ECC_ERR,
  1000. FlashReg + intr_status);
  1001. } else {
  1002. while (!(ioread32(FlashReg + intr_status) &
  1003. INTR_STATUS0__DMA_CMD_COMP))
  1004. ;
  1005. iowrite32(INTR_STATUS0__DMA_CMD_COMP, FlashReg + intr_status);
  1006. }
  1007. iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
  1008. iowrite32(0, FlashReg + DMA_ENABLE);
  1009. while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
  1010. ;
  1011. return status;
  1012. }
/*
 * Polling-mode pipelined read-ahead of @page_count (must be >= 2)
 * consecutive pages into @read_data via DDMA.
 *
 * With ECC enabled, the loop services events until BOTH of
 * ECC_TRANSACTION_DONE and DMA_CMD_COMP have been observed
 * (tracked by ecc_done_OR_dma_comp); ECC errors are repaired by
 * do_ecc_new() as they arrive.  Without ECC, only DMA_CMD_COMP
 * is awaited.
 * Returns PASS, or FAIL on bad arguments / uncorrectable ECC error.
 */
u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
			u32 block, u16 page, u16 page_count)
{
	u32 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u32 ecc_done_OR_dma_comp;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	if (page_count < 2)
		status = FAIL;

	/* Byte address of the first page within its bank */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		*DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];
		/* Acknowledge stale status, enable the DMA engine */
		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(1, FlashReg + DMA_ENABLE);
		while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
			;

		iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

		/* Sub-command 0x42: main-area access mode */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
		ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

		ecc_done_OR_dma_comp = 0;
		while (1) {
			if (enable_ecc) {
				while (!ioread32(FlashReg + intr_status))
					;

				if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_ERR) {
					iowrite32(INTR_STATUS0__ECC_ERR,
						FlashReg + intr_status);
					status = do_ecc_new(flash_bank,
						read_data, block, page);
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP) {
					iowrite32(INTR_STATUS0__DMA_CMD_COMP,
						FlashReg + intr_status);

					/* Done only if ECC already finished */
					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_TRANSACTION_DONE) {
					iowrite32(
					INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);

					/* Done only if DMA already finished */
					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				}
			} else {
				while (!(ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP))
					;
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				break;
			}

			/* Clear everything except the three bits the
			 * loop is tracking */
			iowrite32((~INTR_STATUS0__ECC_ERR) &
				(~INTR_STATUS0__ECC_TRANSACTION_DONE) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);

		}

		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(0, FlashReg + DMA_ENABLE);

		while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
			;

	}
	return status;
}
/*
 * Interrupt-driven read of @page_count pages starting at @block/@page
 * into @read_data.  A single page is read with one DDMA transfer and
 * completion is awaited via the ISR (info.complete) with a 10-second
 * timeout; multi-page requests are delegated to
 * NAND_Multiplane_Read() or NAND_Pipeline_Read_Ahead() in
 * MAX_PAGES_PER_RW chunks.
 * Returns PASS, FAIL on bad arguments or read/ECC failure, ERR on
 * completion timeout.
 */
u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
			u16 page_count)
{
	u32 status = PASS;
	u64 flash_add;
	u32 intr_status = 0;
	u32 flash_bank;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;
	u8 *read_data_l;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);
	if (status != PASS)
		return status;

	/* Byte address of the page within its bank */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;
	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	intr_status = intr_status_addresses[flash_bank];
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	if (page_count > 1) {
		/* Delegate multi-page reads in bounded chunks */
		read_data_l = read_data;
		while (page_count > MAX_PAGES_PER_RW) {
			if (ioread32(FlashReg + MULTIPLANE_OPERATION))
				status = NAND_Multiplane_Read(read_data_l,
					block, page, MAX_PAGES_PER_RW);
			else
				status = NAND_Pipeline_Read_Ahead(
					read_data_l, block, page,
					MAX_PAGES_PER_RW);

			if (status == FAIL)
				return status;

			read_data_l += DeviceInfo.wPageDataSize *
				MAX_PAGES_PER_RW;
			page_count -= MAX_PAGES_PER_RW;
			page += MAX_PAGES_PER_RW;
		}
		if (ioread32(FlashReg + MULTIPLANE_OPERATION))
			status = NAND_Multiplane_Read(read_data_l,
				block, page, page_count);
		else
			status = NAND_Pipeline_Read_Ahead(
				read_data_l, block, page, page_count);

		return status;
	}

	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	/* Fill the mrst_nand_info structure */
	info.state = INT_READ_PAGE_MAIN;
	info.read_data = read_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	ddma_trans(read_data, flash_add, flash_bank, 0, 1);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */

	/* ISR signals info.complete and stores its verdict in info.ret */
	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;
	}

	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);
	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	return status;
}
  1168. void Conv_Spare_Data_Log2Phy_Format(u8 *data)
  1169. {
  1170. int i;
  1171. const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
  1172. const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
  1173. if (enable_ecc) {
  1174. for (i = spareFlagBytes - 1; i >= 0; i++)
  1175. data[PageSpareSize - spareFlagBytes + i] = data[i];
  1176. }
  1177. }
  1178. void Conv_Spare_Data_Phy2Log_Format(u8 *data)
  1179. {
  1180. int i;
  1181. const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
  1182. const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
  1183. if (enable_ecc) {
  1184. for (i = 0; i < spareFlagBytes; i++)
  1185. data[i] = data[PageSpareSize - spareFlagBytes + i];
  1186. }
  1187. }
/*
 * Convert @page_count full pages (main + spare) in @data from logical
 * layout (all main data, then spare) to the physical on-flash layout
 * where ECC bytes follow every ECC sector.  The expansion is done
 * in-place, so it works backwards: last page first, highest offsets
 * first within each page.  No-op when ECC is disabled.
 */
void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count)
{
	const u32 PageSize = DeviceInfo.wPageSize;
	const u32 PageDataSize = DeviceInfo.wPageDataSize;
	const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
	const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
	const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 eccSectorSize;
	u32 page_offset;
	int i, j;

	/* One ECC sector spans ECC_SECTOR_SIZE bytes per ganged device */
	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
	if (enable_ecc) {
		while (page_count > 0) {
			page_offset = (page_count - 1) * PageSize;
			/* Number of ECC sectors in the main data area */
			j = (DeviceInfo.wPageDataSize / eccSectorSize);
			/* Move the spare flag bytes to just after the
			 * last (expanded) ECC sector */
			for (i = spareFlagBytes - 1; i >= 0; i--)
				data[page_offset +
					(eccSectorSize + eccBytes) * j + i] =
					data[page_offset + PageDataSize + i];
			/* Shift each remaining sector up to leave room
			 * for its ECC bytes (sector 0 stays in place) */
			for (j--; j >= 1; j--) {
				for (i = eccSectorSize - 1; i >= 0; i--)
					data[page_offset +
					(eccSectorSize + eccBytes) * j + i] =
						data[page_offset +
						eccSectorSize * j + i];
			}
			/* Re-open the skipped spare bytes gap */
			for (i = (PageSize - spareSkipBytes) - 1;
				i >= PageDataSize; i--)
				data[page_offset + i + spareSkipBytes] =
					data[page_offset + i];
			page_count--;
		}
	}
}
/*
 * Inverse of Conv_Main_Spare_Data_Log2Phy_Format: convert
 * @page_count full pages in @data from the physical on-flash layout
 * (ECC bytes interleaved after every sector, spare skip gap present)
 * back to the logical layout (contiguous main data, then spare).
 * Compaction is done in-place in ascending order.  No-op when ECC is
 * disabled.
 */
void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count)
{
	const u32 PageSize = DeviceInfo.wPageSize;
	const u32 PageDataSize = DeviceInfo.wPageDataSize;
	const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
	const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
	const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 eccSectorSize;
	u32 page_offset;
	int i, j;

	/* One ECC sector spans ECC_SECTOR_SIZE bytes per ganged device */
	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
	if (enable_ecc) {
		while (page_count > 0) {
			page_offset = (page_count - 1) * PageSize;
			/* Close the spare skip gap */
			for (i = PageDataSize;
				i < PageSize - spareSkipBytes;
				i++)
				data[page_offset + i] =
					data[page_offset + i +
					spareSkipBytes];
			/* Compact each sector down over its ECC bytes
			 * (sector 0 is already in place) */
			for (j = 1;
				j < DeviceInfo.wPageDataSize / eccSectorSize;
				j++) {
				for (i = 0; i < eccSectorSize; i++)
					data[page_offset +
					eccSectorSize * j + i] =
						data[page_offset +
						(eccSectorSize + eccBytes) * j
						+ i];
			}
			/* j now equals the sector count: copy the spare
			 * flag bytes from after the last physical sector
			 * to the start of the logical spare area */
			for (i = 0; i < spareFlagBytes; i++)
				data[page_offset + PageDataSize + i] =
					data[page_offset +
					(eccSectorSize + eccBytes) * j + i];
			page_count--;
		}
	}
}
/* Un-tested function */
/*
 * Multi-plane DDMA read of @page_count pages starting at
 * @block/@page into @read_data.  MULTIPLANE_OPERATION is enabled for
 * the duration of the transfer and restored to 0 afterwards.  With
 * ECC enabled, events are serviced until BOTH ECC_TRANSACTION_DONE
 * and DMA_CMD_COMP have been seen (tracked by ecc_done_OR_dma_comp),
 * fixing ECC errors via do_ecc_new() as they arrive.
 * Returns PASS, or FAIL on bad arguments / uncorrectable ECC error.
 */
u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
			u16 page_count)
{
	u32 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u32 ecc_done_OR_dma_comp;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	/* Byte address of the first page within its bank */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;
	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];
		/* Acknowledge stale status, select multiplane mode and
		 * bring up the DMA engine */
		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
		iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);

		iowrite32(1, FlashReg + DMA_ENABLE);
		while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
			;

		/* Sub-command 0x42: main-area access mode */
		index_addr((u32)(MODE_10 | (flash_bank << 24) |
			(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
		ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

		ecc_done_OR_dma_comp = 0;
		while (1) {
			if (enable_ecc) {
				while (!ioread32(FlashReg + intr_status))
					;

				if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_ERR) {
					iowrite32(INTR_STATUS0__ECC_ERR,
						FlashReg + intr_status);
					status = do_ecc_new(flash_bank,
						read_data, block, page);
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP) {
					iowrite32(INTR_STATUS0__DMA_CMD_COMP,
						FlashReg + intr_status);

					/* Done only if ECC already finished */
					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				} else if (ioread32(FlashReg + intr_status) &
					INTR_STATUS0__ECC_TRANSACTION_DONE) {
					iowrite32(
					INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);

					/* Done only if DMA already finished */
					if (1 == ecc_done_OR_dma_comp)
						break;

					ecc_done_OR_dma_comp = 1;
				}
			} else {
				while (!(ioread32(FlashReg + intr_status) &
					INTR_STATUS0__DMA_CMD_COMP))
					;
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				break;
			}

			/* Clear everything except the three bits the
			 * loop is tracking */
			iowrite32((~INTR_STATUS0__ECC_ERR) &
				(~INTR_STATUS0__ECC_TRANSACTION_DONE) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);
		}

		iowrite32(ioread32(FlashReg + intr_status),
			FlashReg + intr_status);

		iowrite32(0, FlashReg + DMA_ENABLE);

		while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
			;

		iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
	}

	return status;
}
/*
 * Interrupt-driven pipelined read-ahead of @page_count (must be >= 2)
 * consecutive pages into @read_data via DDMA.  Completion is awaited
 * through the ISR (info.complete) with a 10-second timeout.
 * Returns PASS, FAIL on bad arguments or read/ECC failure, ERR on
 * completion timeout.
 */
u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block,
			u16 page, u16 page_count)
{
	u32 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	/* Pipelined read-ahead only makes sense for 2+ pages */
	if (page_count < 2)
		status = FAIL;

	if (status != PASS)
		return status;

	/* Byte address of the first page within its bank */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		*DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	intr_status = intr_status_addresses[flash_bank];
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	/* Fill the mrst_nand_info structure */
	info.state = INT_PIPELINE_READ_AHEAD;
	info.read_data = read_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	/* Sub-command 0x42: main-area access mode */
	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

	ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */

	/* ISR signals info.complete and stores its verdict in info.ret */
	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;
	}

	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);

	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	return status;
}
/*
 * NAND_Write_Page_Main - program main-area data into one or more pages
 * @write_data: source buffer, page_count * DeviceInfo.wPageDataSize bytes
 * @block:      absolute block number (spans all banks)
 * @page:       first page index within @block
 * @page_count: number of consecutive pages to program
 *
 * Single-page writes are performed here via descriptor DMA and the
 * ddma_isr completion; multi-page writes are delegated to
 * NAND_Multiplane_Write() or NAND_Pipeline_Write_Ahead() in chunks of
 * at most MAX_PAGES_PER_RW pages.
 *
 * Returns PASS, FAIL, or ERR on interrupt-completion timeout.
 */
u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
	u16 page_count)
{
	u32 status = PASS;
	u64 flash_add;
	u32 intr_status = 0;
	u32 flash_bank;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;
	u8 *write_data_l;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);
	if (status != PASS)
		return status;

	/* Banks interleave the block space equally; derive the in-bank
	 * byte address and the owning bank from the global block number. */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;
	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	intr_status = intr_status_addresses[flash_bank];

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	/* Interrupt-status registers are write-1-to-clear: drop any stale
	 * program-complete/fail bits before starting. */
	iowrite32(INTR_STATUS0__PROGRAM_COMP |
		INTR_STATUS0__PROGRAM_FAIL, FlashReg + intr_status);

	if (page_count > 1) {
		write_data_l = write_data;
		/* Transfer in MAX_PAGES_PER_RW chunks.
		 * NOTE(review): if page_count == MAX_PAGES_PER_RW + 1 the
		 * final chunk is a single page, which
		 * NAND_Pipeline_Write_Ahead() rejects (it requires >= 2
		 * pages) — confirm callers never hit this case. */
		while (page_count > MAX_PAGES_PER_RW) {
			if (ioread32(FlashReg + MULTIPLANE_OPERATION))
				status = NAND_Multiplane_Write(write_data_l,
					block, page, MAX_PAGES_PER_RW);
			else
				status = NAND_Pipeline_Write_Ahead(
					write_data_l, block, page,
					MAX_PAGES_PER_RW);
			if (status == FAIL)
				return status;

			write_data_l += DeviceInfo.wPageDataSize *
				MAX_PAGES_PER_RW;
			page_count -= MAX_PAGES_PER_RW;
			page += MAX_PAGES_PER_RW;
		}
		/* Remaining (<= MAX_PAGES_PER_RW) pages. */
		if (ioread32(FlashReg + MULTIPLANE_OPERATION))
			status = NAND_Multiplane_Write(write_data_l,
				block, page, page_count);
		else
			status = NAND_Pipeline_Write_Ahead(write_data_l,
				block, page, page_count);

		return status;
	}

	/* Single-page path: enable DMA and wait for the flag to latch. */
	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	/* Clear all pending status bits for this bank (W1C). */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	/* Fill the mrst_nand_info structure so ddma_isr() /
	 * handle_nand_int_write() know what operation is in flight. */
	info.state = INT_WRITE_PAGE_MAIN;
	info.write_data = write_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	ddma_trans(write_data, flash_add, flash_bank, 1, 1);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */

	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;
	}

	/* Ack residual status bits and tear down DMA. */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
	iowrite32(0, FlashReg + DMA_ENABLE);
	while (ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG)
		;

	return status;
}
  1470. void NAND_ECC_Ctrl(int enable)
  1471. {
  1472. if (enable) {
  1473. nand_dbg_print(NAND_DBG_WARN,
  1474. "Will enable ECC in %s, Line %d, Function: %s\n",
  1475. __FILE__, __LINE__, __func__);
  1476. iowrite32(1, FlashReg + ECC_ENABLE);
  1477. enable_ecc = 1;
  1478. } else {
  1479. nand_dbg_print(NAND_DBG_WARN,
  1480. "Will disable ECC in %s, Line %d, Function: %s\n",
  1481. __FILE__, __LINE__, __func__);
  1482. iowrite32(0, FlashReg + ECC_ENABLE);
  1483. enable_ecc = 0;
  1484. }
  1485. }
/*
 * NAND_Write_Page_Main_Spare - program main + spare area, page by page
 * @write_data: source buffer holding main data followed by spare data
 *              per page (PageSize bytes each)
 * @block:      absolute block number
 * @page:       first page index within @block
 * @page_count: number of pages to program
 *
 * Uses PIO (MODE_01 writes through FlashMem + 0x10), not DMA.  When ECC
 * is enabled the caller's layout (all main data, then spare-flag bytes,
 * then all ECC bytes) is re-interleaved into the controller's physical
 * layout (each ECC sector immediately followed by its ECC bytes, with
 * spareSkipBytes of 0xff inserted after the main area).
 *
 * Returns PASS, or FAIL on program failure / boundary-check failure.
 */
u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
	u16 page, u16 page_count)
{
	u32 status = PASS;
	u32 i, j, page_num = 0;
	u32 PageSize = DeviceInfo.wPageSize;
	u32 PageDataSize = DeviceInfo.wPageDataSize;
	u32 eccBytes = DeviceInfo.wECCBytesPerSector;
	u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
	u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
	u64 flash_add;
	u32 eccSectorSize;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u8 *page_main_spare = buf_write_page_main_spare;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* An ECC sector spans all devices connected in parallel. */
	eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);

	status = Boundary_Check_Block_Page(block, page, page_count);

	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	if (status == PASS) {
		intr_status = intr_status_addresses[flash_bank];

		/* Include the spare area in the transfer. */
		iowrite32(1, FlashReg + TRANSFER_SPARE_REG);

		while ((status != FAIL) && (page_count > 0)) {
			flash_add = (u64)(block %
				(DeviceInfo.wTotalBlocks / totalUsedBanks)) *
				DeviceInfo.wBlockDataSize +
				(u64)page * DeviceInfo.wPageDataSize;

			/* Clear stale status bits (W1C). */
			iowrite32(ioread32(FlashReg + intr_status),
				FlashReg + intr_status);

			/* MODE_01: direct data port access for this page. */
			iowrite32((u32)(MODE_01 | (flash_bank << 24) |
				(flash_add >>
				DeviceInfo.nBitsInPageDataSize)),
				FlashMem);

			if (enable_ecc) {
				/* Interleave: sector j's data followed by
				 * its ECC bytes, for each ECC sector. */
				for (j = 0;
				     j <
				     DeviceInfo.wPageDataSize / eccSectorSize;
				     j++) {
					for (i = 0; i < eccSectorSize; i++)
						page_main_spare[(eccSectorSize +
								 eccBytes) * j +
								i] =
						    write_data[eccSectorSize *
							       j + i];

					for (i = 0; i < eccBytes; i++)
						page_main_spare[(eccSectorSize +
								 eccBytes) * j +
								eccSectorSize +
								i] =
						    write_data[PageDataSize +
							       spareFlagBytes +
							       eccBytes * j +
							       i];
				}

				/* j now equals the sector count: the
				 * spare-flag bytes land right after the
				 * last data+ECC group. */
				for (i = 0; i < spareFlagBytes; i++)
					page_main_spare[(eccSectorSize +
							 eccBytes) * j + i] =
					    write_data[PageDataSize + i];

				/* Shift the spare area up by spareSkipBytes
				 * (walk backwards to avoid overwrite) and
				 * fill the gap with 0xff. */
				for (i = PageSize - 1; i >= PageDataSize +
							spareSkipBytes; i--)
					page_main_spare[i] = page_main_spare[i -
								spareSkipBytes];

				for (i = PageDataSize; i < PageDataSize +
							spareSkipBytes; i++)
					page_main_spare[i] = 0xff;

				/* PIO the interleaved page, one u32 at a
				 * time, through the data port. */
				for (i = 0; i < PageSize / 4; i++)
					iowrite32(
					    *((u32 *)page_main_spare + i),
					    FlashMem + 0x10);
			} else {
				/* No ECC: caller buffer already matches the
				 * physical layout. */
				for (i = 0; i < PageSize / 4; i++)
					iowrite32(*((u32 *)write_data + i),
						  FlashMem + 0x10);
			}

			/* Busy-wait for program complete or fail. */
			while (!(ioread32(FlashReg + intr_status) &
				 (INTR_STATUS0__PROGRAM_COMP |
				  INTR_STATUS0__PROGRAM_FAIL)))
				;

			if (ioread32(FlashReg + intr_status) &
			    INTR_STATUS0__PROGRAM_FAIL)
				status = FAIL;

			iowrite32(ioread32(FlashReg + intr_status),
				  FlashReg + intr_status);

			page_num++;
			page_count--;
			write_data += PageSize;
		}

		iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	}

	return status;
}
  1580. u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
  1581. u16 page_count)
  1582. {
  1583. u32 status = PASS;
  1584. u32 i, j;
  1585. u64 flash_add = 0;
  1586. u32 PageSize = DeviceInfo.wPageSize;
  1587. u32 PageDataSize = DeviceInfo.wPageDataSize;
  1588. u32 PageSpareSize = DeviceInfo.wPageSpareSize;
  1589. u32 eccBytes = DeviceInfo.wECCBytesPerSector;
  1590. u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
  1591. u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
  1592. u32 eccSectorSize;
  1593. u32 flash_bank;
  1594. u32 intr_status = 0;
  1595. u8 *read_data_l = read_data;
  1596. u32 intr_status_addresses[4] = {INTR_STATUS0,
  1597. INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
  1598. u8 *page_main_spare = buf_read_page_main_spare;
  1599. nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
  1600. __FILE__, __LINE__, __func__);
  1601. eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
  1602. status = Boundary_Check_Block_Page(block, page, page_count);
  1603. flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
  1604. if (status == PASS) {
  1605. intr_status = intr_status_addresses[flash_bank];
  1606. iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
  1607. iowrite32(ioread32(FlashReg + intr_status),
  1608. FlashReg + intr_status);
  1609. while ((status != FAIL) && (page_count > 0)) {
  1610. flash_add = (u64)(block %
  1611. (DeviceInfo.wTotalBlocks / totalUsedBanks))
  1612. * DeviceInfo.wBlockDataSize +
  1613. (u64)page * DeviceInfo.wPageDataSize;
  1614. index_addr((u32)(MODE_10 | (flash_bank << 24) |
  1615. (flash_add >> DeviceInfo.nBitsInPageDataSize)),
  1616. 0x43);
  1617. index_addr((u32)(MODE_10 | (flash_bank << 24) |
  1618. (flash_add >> DeviceInfo.nBitsInPageDataSize)),
  1619. 0x2000 | page_count);
  1620. while (!(ioread32(FlashReg + intr_status) &
  1621. INTR_STATUS0__LOAD_COMP))
  1622. ;
  1623. iowrite32((u32)(MODE_01 | (flash_bank << 24) |
  1624. (flash_add >>
  1625. DeviceInfo.nBitsInPageDataSize)),
  1626. FlashMem);
  1627. for (i = 0; i < PageSize / 4; i++)
  1628. *(((u32 *)page_main_spare) + i) =
  1629. ioread32(FlashMem + 0x10);
  1630. if (enable_ecc) {
  1631. for (i = PageDataSize; i < PageSize -
  1632. spareSkipBytes; i++)
  1633. page_main_spare[i] = page_main_spare[i +
  1634. spareSkipBytes];
  1635. for (j = 0;
  1636. j < DeviceInfo.wPageDataSize / eccSectorSize;
  1637. j++) {
  1638. for (i = 0; i < eccSectorSize; i++)
  1639. read_data_l[eccSectorSize * j +
  1640. i] =
  1641. page_main_spare[
  1642. (eccSectorSize +
  1643. eccBytes) * j + i];
  1644. for (i = 0; i < eccBytes; i++)
  1645. read_data_l[PageDataSize +
  1646. spareFlagBytes +
  1647. eccBytes * j + i] =
  1648. page_main_spare[
  1649. (eccSectorSize +
  1650. eccBytes) * j +
  1651. eccSectorSize + i];
  1652. }
  1653. for (i = 0; i < spareFlagBytes; i++)
  1654. read_data_l[PageDataSize + i] =
  1655. page_main_spare[(eccSectorSize +
  1656. eccBytes) * j + i];
  1657. } else {
  1658. for (i = 0; i < (PageDataSize + PageSpareSize);
  1659. i++)
  1660. read_data_l[i] = page_main_spare[i];
  1661. }
  1662. if (enable_ecc) {
  1663. while (!(ioread32(FlashReg + intr_status) &
  1664. (INTR_STATUS0__ECC_TRANSACTION_DONE |
  1665. INTR_STATUS0__ECC_ERR)))
  1666. ;
  1667. if (ioread32(FlashReg + intr_status) &
  1668. INTR_STATUS0__ECC_ERR) {
  1669. iowrite32(INTR_STATUS0__ECC_ERR,
  1670. FlashReg + intr_status);
  1671. status = do_ecc_new(flash_bank,
  1672. read_data, block, page);
  1673. }
  1674. if (ioread32(FlashReg + intr_status) &
  1675. INTR_STATUS0__ECC_TRANSACTION_DONE &
  1676. INTR_STATUS0__ECC_ERR) {
  1677. iowrite32(INTR_STATUS0__ECC_ERR |
  1678. INTR_STATUS0__ECC_TRANSACTION_DONE,
  1679. FlashReg + intr_status);
  1680. } else if (ioread32(FlashReg + intr_status) &
  1681. INTR_STATUS0__ECC_TRANSACTION_DONE) {
  1682. iowrite32(
  1683. INTR_STATUS0__ECC_TRANSACTION_DONE,
  1684. FlashReg + intr_status);
  1685. } else if (ioread32(FlashReg + intr_status) &
  1686. INTR_STATUS0__ECC_ERR) {
  1687. iowrite32(INTR_STATUS0__ECC_ERR,
  1688. FlashReg + intr_status);
  1689. }
  1690. }
  1691. page++;
  1692. page_count--;
  1693. read_data_l += PageSize;
  1694. }
  1695. }
  1696. iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
  1697. index_addr((u32)(MODE_10 | (flash_bank << 24) |
  1698. (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
  1699. return status;
  1700. }
/*
 * NAND_Pipeline_Write_Ahead - DMA write of 2..MAX_PAGES_PER_RW pages
 * @write_data: source buffer (page_count * wPageDataSize bytes)
 * @block:      absolute block number
 * @page:       first page index within @block
 * @page_count: number of pages; must be >= 2 (FAIL otherwise)
 *
 * Programs consecutive main-area pages through descriptor DMA and waits
 * for ddma_isr()/handle_nand_int_write() to signal completion.
 *
 * Returns PASS, FAIL, or ERR on interrupt-completion timeout.
 */
u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
	u16 page, u16 page_count)
{
	u16 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	int ret;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);

	/* Pipelined mode needs at least two pages. */
	if (page_count < 2)
		status = FAIL;

	if (status != PASS)
		return status;

	/* In-bank byte address and owning bank (banks split the block
	 * space equally). */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;
	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	intr_status = intr_status_addresses[flash_bank];

	/* Clear stale status (W1C), then enable DMA and wait for the
	 * flag to latch. */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	/* Fill the mrst_nand_info structure */
	info.state = INT_PIPELINE_WRITE_AHEAD;
	info.write_data = write_data;
	info.flash_bank = flash_bank;
	info.block = block;
	info.page = page;
	info.ret = PASS;

	/* 0x42: put the controller in the mode expected by the DMA
	 * transfer (same command is used by the read paths). */
	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

	ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);

	iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */

	ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
	if (!ret) {
		printk(KERN_ERR "Wait for completion timeout "
			"in %s, Line %d\n", __FILE__, __LINE__);
		status = ERR;
	} else {
		status = info.ret;
	}

	/* Ack residual status bits and tear down DMA. */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);
	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	return status;
}
/*
 * NAND_Multiplane_Write - DMA write using the controller's multiplane mode
 * @write_data: source buffer (page_count * wPageDataSize bytes)
 * @block:      absolute block number
 * @page:       first page index within @block
 * @page_count: number of pages to program
 *
 * Unlike the other write paths this polls the bank's interrupt-status
 * register directly instead of using the completion/ISR machinery.  A
 * PROGRAM_FAIL seen before DMA_CMD_COMP is remembered in status2 so the
 * final result is FAIL even though the DMA itself completed.
 *
 * Returns PASS or FAIL.
 */
/* Un-tested function */
u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
	u16 page_count)
{
	u16 status = PASS;
	u32 NumPages = page_count;
	u64 flash_add;
	u32 flash_bank;
	u32 intr_status = 0;
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u16 status2 = PASS;	/* latches any PROGRAM_FAIL seen mid-DMA */
	u32 t;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	status = Boundary_Check_Block_Page(block, page, page_count);
	if (status != PASS)
		return status;

	/* In-bank byte address and owning bank. */
	flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
		* DeviceInfo.wBlockDataSize +
		(u64)page * DeviceInfo.wPageDataSize;
	flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);

	intr_status = intr_status_addresses[flash_bank];

	/* Clear stale status (W1C), main area only, multiplane on. */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
	iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);

	iowrite32(1, FlashReg + DMA_ENABLE);
	while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + TRANSFER_SPARE_REG);

	index_addr((u32)(MODE_10 | (flash_bank << 24) |
		(flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);

	ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);

	/* Poll until DMA_CMD_COMP; remember any PROGRAM_FAIL on the way. */
	while (1) {
		while (!ioread32(FlashReg + intr_status))
			;

		if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__DMA_CMD_COMP) {
			iowrite32(INTR_STATUS0__DMA_CMD_COMP,
				FlashReg + intr_status);
			status = PASS;
			if (status2 == FAIL)
				status = FAIL;
			break;
		} else if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__PROGRAM_FAIL) {
			status2 = FAIL;
			status = FAIL;
			t = ioread32(FlashReg + intr_status) &
				INTR_STATUS0__PROGRAM_FAIL;
			iowrite32(t, FlashReg + intr_status);
		} else {
			/* Ack every status bit except the two we are
			 * still waiting on (W1C). */
			iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);
		}
	}

	/* Ack residual status, tear down DMA, leave multiplane mode. */
	iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);

	iowrite32(0, FlashReg + DMA_ENABLE);

	while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
		;

	iowrite32(0, FlashReg + MULTIPLANE_OPERATION);

	return status;
}
  1818. #if CMD_DMA
/*
 * cdma_isr - interrupt handler for command-DMA (CMD_DMA) builds
 * @irq:    interrupt number (unused)
 * @dev_id: the driver's struct mrst_nand_info
 *
 * Returns IRQ_NONE if the interrupt is not ours (shared line),
 * otherwise masks controller interrupts, lets the FTL collect the
 * event status, and signals the waiter on dev->complete.
 */
static irqreturn_t cdma_isr(int irq, void *dev_id)
{
	struct mrst_nand_info *dev = dev_id;
	int first_failed_cmd;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (!is_cdma_interrupt())
		return IRQ_NONE;

	/* Disable controller interrupts */
	iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
	GLOB_FTL_Event_Status(&first_failed_cmd);
	complete(&dev->complete);

	return IRQ_HANDLED;
}
  1833. #else
/*
 * handle_nand_int_read - finish an in-flight DMA read for dev->flash_bank
 * @dev: operation context filled in by the read path
 *
 * Polls the bank's interrupt-status register.  With ECC enabled a read
 * is done only after BOTH events have arrived: DMA_CMD_COMP and either
 * ECC_TRANSACTION_DONE or a (corrected) ECC_ERR; ecc_done_OR_dma_comp
 * tracks having seen the first of the pair.  ECC errors are handed to
 * do_ecc_new() and its verdict stored in dev->ret.  Without ECC only
 * DMA_CMD_COMP is awaited.
 */
static void handle_nand_int_read(struct mrst_nand_info *dev)
{
	u32 intr_status_addresses[4] = {INTR_STATUS0,
		INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
	u32 intr_status;
	u32 ecc_done_OR_dma_comp = 0;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	dev->ret = PASS;
	intr_status = intr_status_addresses[dev->flash_bank];

	while (1) {
		if (enable_ecc) {
			if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__ECC_ERR) {
				/* Ack (W1C) and try software correction. */
				iowrite32(INTR_STATUS0__ECC_ERR,
					FlashReg + intr_status);
				dev->ret = do_ecc_new(dev->flash_bank,
						dev->read_data,
						dev->block, dev->page);
			} else if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__DMA_CMD_COMP) {
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				/* Second of the two events -> done. */
				if (1 == ecc_done_OR_dma_comp)
					break;
				ecc_done_OR_dma_comp = 1;
			} else if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__ECC_TRANSACTION_DONE) {
				iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
					FlashReg + intr_status);
				if (1 == ecc_done_OR_dma_comp)
					break;
				ecc_done_OR_dma_comp = 1;
			}
		} else {
			if (ioread32(FlashReg + intr_status) &
				INTR_STATUS0__DMA_CMD_COMP) {
				iowrite32(INTR_STATUS0__DMA_CMD_COMP,
					FlashReg + intr_status);
				break;
			} else {
				printk(KERN_ERR "Illegal INTS "
					"(offset addr 0x%x) value: 0x%x\n",
					intr_status,
					ioread32(FlashReg + intr_status));
			}
		}

		/* Ack every status bit except the three we are waiting
		 * on (register is write-1-to-clear). */
		iowrite32((~INTR_STATUS0__ECC_ERR) &
			(~INTR_STATUS0__ECC_TRANSACTION_DONE) &
			(~INTR_STATUS0__DMA_CMD_COMP),
			FlashReg + intr_status);
	}
}
/*
 * handle_nand_int_write - finish an in-flight DMA write for dev->flash_bank
 * @dev: operation context filled in by the write path
 *
 * Polls the bank's interrupt-status register until DMA_CMD_COMP.  Any
 * PROGRAM_FAIL seen before completion is latched in a local flag so
 * dev->ret ends up FAIL even though the DMA itself finished.
 */
static void handle_nand_int_write(struct mrst_nand_info *dev)
{
	u32 intr_status;
	u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
		INTR_STATUS2, INTR_STATUS3};
	int status = PASS;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	dev->ret = PASS;
	intr_status = intr[dev->flash_bank];

	while (1) {
		while (!ioread32(FlashReg + intr_status))
			;

		if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__DMA_CMD_COMP) {
			iowrite32(INTR_STATUS0__DMA_CMD_COMP,
				FlashReg + intr_status);
			if (FAIL == status)
				dev->ret = FAIL;
			break;
		} else if (ioread32(FlashReg + intr_status) &
			INTR_STATUS0__PROGRAM_FAIL) {
			status = FAIL;
			iowrite32(INTR_STATUS0__PROGRAM_FAIL,
				FlashReg + intr_status);
		} else {
			/* Ack every status bit except the two we are
			 * waiting on (W1C). */
			iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
				(~INTR_STATUS0__DMA_CMD_COMP),
				FlashReg + intr_status);
		}
	}
}
/*
 * ddma_isr - interrupt handler for descriptor-DMA (non-CMD_DMA) builds
 * @irq:    interrupt number (unused)
 * @dev_id: the driver's struct mrst_nand_info
 *
 * Verifies the interrupt belongs to the bank of the in-flight operation
 * (the line is shared), then dispatches on dev->state to the read/write
 * completion handler, and finally wakes the waiter on dev->complete.
 *
 * Returns IRQ_HANDLED, or IRQ_NONE for foreign interrupts / an
 * unexpected dev->state.
 */
static irqreturn_t ddma_isr(int irq, void *dev_id)
{
	struct mrst_nand_info *dev = dev_id;
	u32 int_mask, ints0, ints1, ints2, ints3, ints_offset;
	u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
		INTR_STATUS2, INTR_STATUS3};

	/* Events we consider "ours" on the active bank. */
	int_mask = INTR_STATUS0__DMA_CMD_COMP |
		INTR_STATUS0__ECC_TRANSACTION_DONE |
		INTR_STATUS0__ECC_ERR |
		INTR_STATUS0__PROGRAM_FAIL |
		INTR_STATUS0__ERASE_FAIL;

	ints0 = ioread32(FlashReg + INTR_STATUS0);
	ints1 = ioread32(FlashReg + INTR_STATUS1);
	ints2 = ioread32(FlashReg + INTR_STATUS2);
	ints3 = ioread32(FlashReg + INTR_STATUS3);

	ints_offset = intr[dev->flash_bank];

	nand_dbg_print(NAND_DBG_DEBUG,
		"INTR0: 0x%x, INTR1: 0x%x, INTR2: 0x%x, INTR3: 0x%x, "
		"DMA_INTR: 0x%x, "
		"dev->state: 0x%x, dev->flash_bank: %d\n",
		ints0, ints1, ints2, ints3,
		ioread32(FlashReg + DMA_INTR),
		dev->state, dev->flash_bank);

	if (!(ioread32(FlashReg + ints_offset) & int_mask)) {
		/* Not for the active bank: ack everything (W1C) and
		 * let other handlers on the shared line have a go. */
		iowrite32(ints0, FlashReg + INTR_STATUS0);
		iowrite32(ints1, FlashReg + INTR_STATUS1);
		iowrite32(ints2, FlashReg + INTR_STATUS2);
		iowrite32(ints3, FlashReg + INTR_STATUS3);
		nand_dbg_print(NAND_DBG_WARN,
			"ddma_isr: Invalid interrupt for NAND controller. "
			"Ignore it\n");
		return IRQ_NONE;
	}

	switch (dev->state) {
	case INT_READ_PAGE_MAIN:
	case INT_PIPELINE_READ_AHEAD:
		/* Disable controller interrupts */
		iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
		handle_nand_int_read(dev);
		break;
	case INT_WRITE_PAGE_MAIN:
	case INT_PIPELINE_WRITE_AHEAD:
		iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
		handle_nand_int_write(dev);
		break;
	default:
		printk(KERN_ERR "ddma_isr - Illegal state: 0x%x\n",
			dev->state);
		return IRQ_NONE;
	}

	dev->state = INT_IDLE_STATE;
	complete(&dev->complete);
	return IRQ_HANDLED;
}
  1973. #endif
/* PCI IDs this driver binds to: Intel (0x8086) device 0x0809 —
 * presumably the Moorestown NAND controller; any subsystem IDs. */
static const struct pci_device_id nand_pci_ids[] = {
	{
	 .vendor = 0x8086,
	 .device = 0x0809,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 },
	{ /* end: all zeroes */ }
};
  1983. static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
  1984. {
  1985. int ret = -ENODEV;
  1986. unsigned long csr_base;
  1987. unsigned long csr_len;
  1988. struct mrst_nand_info *pndev = &info;
  1989. nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
  1990. __FILE__, __LINE__, __func__);
  1991. ret = pci_enable_device(dev);
  1992. if (ret) {
  1993. printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
  1994. return ret;
  1995. }
  1996. pci_set_master(dev);
  1997. pndev->dev = dev;
  1998. csr_base = pci_resource_start(dev, 0);
  1999. if (!csr_base) {
  2000. printk(KERN_ERR "Spectra: pci_resource_start failed!\n");
  2001. return -ENODEV;
  2002. }
  2003. csr_len = pci_resource_len(dev, 0);
  2004. if (!csr_len) {
  2005. printk(KERN_ERR "Spectra: pci_resource_len failed!\n");
  2006. return -ENODEV;
  2007. }
  2008. ret = pci_request_regions(dev, SPECTRA_NAND_NAME);
  2009. if (ret) {
  2010. printk(KERN_ERR "Spectra: Unable to request "
  2011. "memory region\n");
  2012. goto failed_req_csr;
  2013. }
  2014. pndev->ioaddr = ioremap_nocache(csr_base, csr_len);
  2015. if (!pndev->ioaddr) {
  2016. printk(KERN_ERR "Spectra: Unable to remap memory region\n");
  2017. ret = -ENOMEM;
  2018. goto failed_remap_csr;
  2019. }
  2020. nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08lx -> 0x%p (0x%lx)\n",
  2021. csr_base, pndev->ioaddr, csr_len);
  2022. init_completion(&pndev->complete);
  2023. nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
  2024. #if CMD_DMA
  2025. if (request_irq(dev->irq, cdma_isr, IRQF_SHARED,
  2026. SPECTRA_NAND_NAME, &info)) {
  2027. printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
  2028. ret = -ENODEV;
  2029. iounmap(pndev->ioaddr);
  2030. goto failed_remap_csr;
  2031. }
  2032. #else
  2033. if (request_irq(dev->irq, ddma_isr, IRQF_SHARED,
  2034. SPECTRA_NAND_NAME, &info)) {
  2035. printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
  2036. ret = -ENODEV;
  2037. iounmap(pndev->ioaddr);
  2038. goto failed_remap_csr;
  2039. }
  2040. #endif
  2041. pci_set_drvdata(dev, pndev);
  2042. return 0;
  2043. failed_remap_csr:
  2044. pci_release_regions(dev);
  2045. failed_req_csr:
  2046. return ret;
  2047. }
  2048. static void nand_pci_remove(struct pci_dev *dev)
  2049. {
  2050. struct mrst_nand_info *pndev = pci_get_drvdata(dev);
  2051. nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
  2052. __FILE__, __LINE__, __func__);
  2053. #if CMD_DMA
  2054. free_irq(dev->irq, pndev);
  2055. #endif
  2056. iounmap(pndev->ioaddr);
  2057. pci_release_regions(dev);
  2058. pci_disable_device(dev);
  2059. }
  2060. MODULE_DEVICE_TABLE(pci, nand_pci_ids);
/* PCI driver glue, registered from NAND_Flash_Init(). */
static struct pci_driver nand_pci_driver = {
	.name = SPECTRA_NAND_NAME,
	.id_table = nand_pci_ids,
	.probe = nand_pci_probe,
	.remove = nand_pci_remove,
};
  2067. int NAND_Flash_Init(void)
  2068. {
  2069. int retval;
  2070. u32 int_mask;
  2071. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  2072. __FILE__, __LINE__, __func__);
  2073. FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
  2074. GLOB_HWCTL_REG_SIZE);
  2075. if (!FlashReg) {
  2076. printk(KERN_ERR "Spectra: ioremap_nocache failed!");
  2077. return -ENOMEM;
  2078. }
  2079. nand_dbg_print(NAND_DBG_WARN,
  2080. "Spectra: Remapped reg base address: "
  2081. "0x%p, len: %d\n",
  2082. FlashReg, GLOB_HWCTL_REG_SIZE);
  2083. FlashMem = ioremap_nocache(GLOB_HWCTL_MEM_BASE,
  2084. GLOB_HWCTL_MEM_SIZE);
  2085. if (!FlashMem) {
  2086. printk(KERN_ERR "Spectra: ioremap_nocache failed!");
  2087. iounmap(FlashReg);
  2088. return -ENOMEM;
  2089. }
  2090. nand_dbg_print(NAND_DBG_WARN,
  2091. "Spectra: Remapped flash base address: "
  2092. "0x%p, len: %d\n",
  2093. (void *)FlashMem, GLOB_HWCTL_MEM_SIZE);
  2094. nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
  2095. "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
  2096. "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
  2097. "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
  2098. ioread32(FlashReg + ACC_CLKS),
  2099. ioread32(FlashReg + RE_2_WE),
  2100. ioread32(FlashReg + WE_2_RE),
  2101. ioread32(FlashReg + ADDR_2_DATA),
  2102. ioread32(FlashReg + RDWR_EN_LO_CNT),
  2103. ioread32(FlashReg + RDWR_EN_HI_CNT),
  2104. ioread32(FlashReg + CS_SETUP_CNT));
  2105. NAND_Flash_Reset();
  2106. iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
  2107. #if CMD_DMA
  2108. info.pcmds_num = 0;
  2109. info.flash_bank = 0;
  2110. info.cdma_num = 0;
  2111. int_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
  2112. DMA_INTR__DESC_COMP_CHANNEL1 |
  2113. DMA_INTR__DESC_COMP_CHANNEL2 |
  2114. DMA_INTR__DESC_COMP_CHANNEL3 |
  2115. DMA_INTR__MEMCOPY_DESC_COMP);
  2116. iowrite32(int_mask, FlashReg + DMA_INTR_EN);
  2117. iowrite32(0xFFFF, FlashReg + DMA_INTR);
  2118. int_mask = (INTR_STATUS0__ECC_ERR |
  2119. INTR_STATUS0__PROGRAM_FAIL |
  2120. INTR_STATUS0__ERASE_FAIL);
  2121. #else
  2122. int_mask = INTR_STATUS0__DMA_CMD_COMP |
  2123. INTR_STATUS0__ECC_TRANSACTION_DONE |
  2124. INTR_STATUS0__ECC_ERR |
  2125. INTR_STATUS0__PROGRAM_FAIL |
  2126. INTR_STATUS0__ERASE_FAIL;
  2127. #endif
  2128. iowrite32(int_mask, FlashReg + INTR_EN0);
  2129. iowrite32(int_mask, FlashReg + INTR_EN1);
  2130. iowrite32(int_mask, FlashReg + INTR_EN2);
  2131. iowrite32(int_mask, FlashReg + INTR_EN3);
  2132. /* Clear all status bits */
  2133. iowrite32(0xFFFF, FlashReg + INTR_STATUS0);
  2134. iowrite32(0xFFFF, FlashReg + INTR_STATUS1);
  2135. iowrite32(0xFFFF, FlashReg + INTR_STATUS2);
  2136. iowrite32(0xFFFF, FlashReg + INTR_STATUS3);
  2137. iowrite32(0x0F, FlashReg + RB_PIN_ENABLED);
  2138. iowrite32(CHIP_EN_DONT_CARE__FLAG, FlashReg + CHIP_ENABLE_DONT_CARE);
  2139. /* Should set value for these registers when init */
  2140. iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
  2141. iowrite32(1, FlashReg + ECC_ENABLE);
  2142. enable_ecc = 1;
  2143. retval = pci_register_driver(&nand_pci_driver);
  2144. if (retval)
  2145. return -ENOMEM;
  2146. return PASS;
  2147. }
/* Free memory */
/*
 * nand_release_spectra - module teardown counterpart of NAND_Flash_Init
 *
 * Unregisters the PCI driver (which triggers nand_pci_remove for bound
 * devices) and then unmaps the controller windows.
 *
 * Always returns 0.
 */
int nand_release_spectra(void)
{
	pci_unregister_driver(&nand_pci_driver);
	iounmap(FlashMem);
	iounmap(FlashReg);

	return 0;
}