flash.c 133 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731
  1. /*
  2. * NAND Flash Controller Device Driver
  3. * Copyright (c) 2009, Intel Corporation and its suppliers.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. * You should have received a copy of the GNU General Public License along with
  15. * this program; if not, write to the Free Software Foundation, Inc.,
  16. * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
  17. *
  18. */
  19. #include <linux/fs.h>
  20. #include <linux/slab.h>
  21. #include "flash.h"
  22. #include "ffsdefs.h"
  23. #include "lld.h"
  24. #include "lld_nand.h"
  25. #if CMD_DMA
  26. #include "lld_cdma.h"
  27. #endif
  28. #define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
  29. #define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
  30. DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))
  31. #define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
  32. BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))
  33. #define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))
  34. #define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
  35. BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))
  36. #define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
  37. #if DEBUG_BNDRY
  38. void debug_boundary_lineno_error(int chnl, int limit, int no,
  39. int lineno, char *filename)
  40. {
  41. if (chnl >= limit)
  42. printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
  43. "at %s:%d. Other info:%d. Aborting...\n",
  44. chnl, limit, filename, lineno, no);
  45. }
  46. /* static int globalmemsize; */
  47. #endif
  48. static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
  49. static int FTL_Cache_Read(u64 dwPageAddr);
  50. static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
  51. u16 cache_blk);
  52. static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
  53. u8 cache_blk, u16 flag);
  54. static int FTL_Cache_Write(void);
  55. static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr);
  56. static void FTL_Calculate_LRU(void);
  57. static u32 FTL_Get_Block_Index(u32 wBlockNum);
  58. static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
  59. u8 BT_Tag, u16 *Page);
  60. static int FTL_Read_Block_Table(void);
  61. static int FTL_Write_Block_Table(int wForce);
  62. static int FTL_Write_Block_Table_Data(void);
  63. static int FTL_Check_Block_Table(int wOldTable);
  64. static int FTL_Static_Wear_Leveling(void);
  65. static u32 FTL_Replace_Block_Table(void);
  66. static int FTL_Write_IN_Progress_Block_Table_Page(void);
  67. static u32 FTL_Get_Page_Num(u64 length);
  68. static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);
  69. static u32 FTL_Replace_OneBlock(u32 wBlockNum,
  70. u32 wReplaceNum);
  71. static u32 FTL_Replace_LWBlock(u32 wBlockNum,
  72. int *pGarbageCollect);
  73. static u32 FTL_Replace_MWBlock(void);
  74. static int FTL_Replace_Block(u64 blk_addr);
  75. static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
  76. static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr, u64 blk_addr);
  77. struct device_info_tag DeviceInfo;
  78. struct flash_cache_tag Cache;
  79. static struct spectra_l2_cache_info cache_l2;
  80. static u8 *cache_l2_page_buf;
  81. static u8 *cache_l2_blk_buf;
  82. u8 *g_pBlockTable;
  83. u8 *g_pWearCounter;
  84. u16 *g_pReadCounter;
  85. u32 *g_pBTBlocks;
  86. static u16 g_wBlockTableOffset;
  87. static u32 g_wBlockTableIndex;
  88. static u8 g_cBlockTableStatus;
  89. static u8 *g_pTempBuf;
  90. static u8 *flag_check_blk_table;
  91. static u8 *tmp_buf_search_bt_in_block;
  92. static u8 *spare_buf_search_bt_in_block;
  93. static u8 *spare_buf_bt_search_bt_in_block;
  94. static u8 *tmp_buf1_read_blk_table;
  95. static u8 *tmp_buf2_read_blk_table;
  96. static u8 *flags_static_wear_leveling;
  97. static u8 *tmp_buf_write_blk_table_data;
  98. static u8 *tmp_buf_read_disturbance;
  99. u8 *buf_read_page_main_spare;
  100. u8 *buf_write_page_main_spare;
  101. u8 *buf_read_page_spare;
  102. u8 *buf_get_bad_block;
  103. #if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
  104. struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
  105. struct flash_cache_tag cache_start_copy;
  106. #endif
  107. int g_wNumFreeBlocks;
  108. u8 g_SBDCmdIndex;
  109. static u8 *g_pIPF;
  110. static u8 bt_flag = FIRST_BT_ID;
  111. static u8 bt_block_changed;
  112. static u16 cache_block_to_write;
  113. static u8 last_erased = FIRST_BT_ID;
  114. static u8 GC_Called;
  115. static u8 BT_GC_Called;
  116. #if CMD_DMA
  117. #define COPY_BACK_BUF_NUM 10
  118. static u8 ftl_cmd_cnt; /* Init value is 0 */
  119. u8 *g_pBTDelta;
  120. u8 *g_pBTDelta_Free;
  121. u8 *g_pBTStartingCopy;
  122. u8 *g_pWearCounterCopy;
  123. u16 *g_pReadCounterCopy;
  124. u8 *g_pBlockTableCopies;
  125. u8 *g_pNextBlockTable;
  126. static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
  127. static int cp_back_buf_idx;
  128. static u8 *g_temp_buf;
  129. #pragma pack(push, 1)
  130. #pragma pack(1)
/*
 * One journal record of a block-table change made while a CMD_DMA
 * command chain is outstanding. Packed (see the surrounding
 * #pragma pack(1)) so records can be laid end-to-end in the
 * g_pBTDelta buffer. Field names mirror the file-scope globals
 * they snapshot.
 */
struct BTableChangesDelta {
	u8 ftl_cmd_cnt;		/* command counter this delta belongs to */
	u8 ValidFields;		/* bitmask: which of the fields below are set */
	u16 g_wBlockTableOffset;	/* saved copy of global of same name */
	u32 g_wBlockTableIndex;	/* saved copy of global of same name */
	u32 BT_Index;		/* block-table slot that changed */
	u32 BT_Entry_Value;	/* new value for that slot */
	u32 WC_Index;		/* wear-counter slot that changed */
	u8 WC_Entry_Value;	/* new wear-counter value */
	u32 RC_Index;		/* read-counter slot that changed */
	u16 RC_Entry_Value;	/* new read-counter value */
};
  143. #pragma pack(pop)
  144. struct BTableChangesDelta *p_BTableChangesDelta;
  145. #endif
  146. #define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
  147. #define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)
  148. #define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
  149. sizeof(u32))
  150. #define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
  151. sizeof(u8))
  152. #define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
  153. sizeof(u16))
  154. #if SUPPORT_LARGE_BLOCKNUM
  155. #define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
  156. sizeof(u8) * 3)
  157. #else
  158. #define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
  159. sizeof(u16))
  160. #endif
  161. #define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
  162. FTL_Get_WearCounter_Table_Mem_Size_Bytes
  163. #define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
  164. FTL_Get_ReadCounter_Table_Mem_Size_Bytes
  165. static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
  166. {
  167. u32 byte_num;
  168. if (DeviceInfo.MLCDevice) {
  169. byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
  170. DeviceInfo.wDataBlockNum * sizeof(u8) +
  171. DeviceInfo.wDataBlockNum * sizeof(u16);
  172. } else {
  173. byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
  174. DeviceInfo.wDataBlockNum * sizeof(u8);
  175. }
  176. byte_num += 4 * sizeof(u8);
  177. return byte_num;
  178. }
  179. static u16 FTL_Get_Block_Table_Flash_Size_Pages(void)
  180. {
  181. return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
  182. }
/*
 * Serialize one chunk (up to sizeToTx bytes) of the in-RAM block
 * table into flashBuf, resuming after sizeTxed already-transmitted
 * bytes. Layout on flash: LBA->PBA entries (3 bytes each under
 * SUPPORT_LARGE_BLOCKNUM, high byte first; else 2 bytes), then the
 * wear counters, then (MLC only) the 16-bit read counters.
 * Returns the number of bytes written into flashBuf.
 */
static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
	u32 sizeTxed)
{
	u32 wBytesCopied, blk_tbl_size, wBytes;
	u32 *pbt = (u32 *)g_pBlockTable;

	/* Region 1: pack the LBA->PBA table, byte by byte. */
	blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
	for (wBytes = 0;
	(wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
	wBytes++) {
#if SUPPORT_LARGE_BLOCKNUM
		/* 3 bytes per u32 entry; shift 16/8/0 for byte 0/1/2 */
		flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
		>> (((wBytes + sizeTxed) % 3) ?
		((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
#else
		/* 2 bytes per entry; even byte is the high byte */
		flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
		>> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
#endif
	}
	/* Rebase sizeTxed so it becomes the offset into the next region. */
	sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
	blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
	wBytesCopied = wBytes;
	/* Region 2: wear counters are raw bytes — bulk copy what fits. */
	wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
	(sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
	memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);
	sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
	if (DeviceInfo.MLCDevice) {
		/* Region 3 (MLC only): read counters, 2 bytes each,
		 * even byte = high byte. */
		blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
		wBytesCopied += wBytes;
		for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
		((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
			flashBuf[wBytes + wBytesCopied] =
			(g_pReadCounter[(wBytes + sizeTxed) / 2] >>
			(((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
	}
	return wBytesCopied + wBytes;
}
  219. static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
  220. u32 sizeToTx, u32 sizeTxed)
  221. {
  222. u32 wBytesCopied, blk_tbl_size, wBytes;
  223. u32 *pbt = (u32 *)g_pBlockTable;
  224. blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
  225. for (wBytes = 0; (wBytes < sizeToTx) &&
  226. ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
  227. #if SUPPORT_LARGE_BLOCKNUM
  228. if (!((wBytes + sizeTxed) % 3))
  229. pbt[(wBytes + sizeTxed) / 3] = 0;
  230. pbt[(wBytes + sizeTxed) / 3] |=
  231. (flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
  232. ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
  233. #else
  234. if (!((wBytes + sizeTxed) % 2))
  235. pbt[(wBytes + sizeTxed) / 2] = 0;
  236. pbt[(wBytes + sizeTxed) / 2] |=
  237. (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
  238. 0 : 8));
  239. #endif
  240. }
  241. sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
  242. blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
  243. wBytesCopied = wBytes;
  244. wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
  245. (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
  246. memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
  247. sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
  248. if (DeviceInfo.MLCDevice) {
  249. wBytesCopied += wBytes;
  250. blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
  251. for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
  252. ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
  253. if (((wBytes + sizeTxed) % 2))
  254. g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
  255. g_pReadCounter[(wBytes + sizeTxed) / 2] |=
  256. (flashBuf[wBytes] <<
  257. (((wBytes + sizeTxed) % 2) ? 0 : 8));
  258. }
  259. }
  260. return wBytesCopied+wBytes;
  261. }
  262. static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
  263. {
  264. int i;
  265. for (i = 0; i < BTSIG_BYTES; i++)
  266. buf[BTSIG_OFFSET + i] =
  267. ((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
  268. (1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
  269. return PASS;
  270. }
/*
 * Recover candidate block-table tags from the signature bytes in buf.
 * Two signature bytes i < j "agree" when their difference is a
 * non-zero multiple of BTSIG_DELTA; each agreeing pair yields the
 * tag implied by byte i (un-stepped and wrapped back into the
 * [FIRST_BT_ID, LAST_BT_ID] range). Distinct tags are collected into
 * a static array, *tagarray is pointed at it, and the count returned.
 *
 * NOTE: the tag[] buffer is static, so the result is overwritten by
 * the next call — not reentrant.
 */
static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
{
	static u8 tag[BTSIG_BYTES >> 1];
	int i, j, k, tagi, tagtemp, status;

	*tagarray = (u8 *)tag;
	tagi = 0;

	for (i = 0; i < (BTSIG_BYTES - 1); i++) {
		/* stop once the tag array is full */
		for (j = i + 1; (j < BTSIG_BYTES) &&
			(tagi < (BTSIG_BYTES >> 1)); j++) {
			tagtemp = buf[BTSIG_OFFSET + j] -
				buf[BTSIG_OFFSET + i];
			if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
				/* undo byte i's step and wrap into range */
				tagtemp = (buf[BTSIG_OFFSET + i] +
					(1 + LAST_BT_ID - FIRST_BT_ID) -
					(i * BTSIG_DELTA)) %
					(1 + LAST_BT_ID - FIRST_BT_ID);
				/* keep only tags not already collected */
				status = FAIL;
				for (k = 0; k < tagi; k++) {
					if (tagtemp == tag[k])
						status = PASS;
				}
				if (status == FAIL) {
					tag[tagi++] = tagtemp;
					/* NOTE(review): when the match was
					 * with the immediate neighbour
					 * (j == i+1), i is advanced and j
					 * reset so scanning resumes past the
					 * pair. The second ternary tests
					 * j == (i+1) AFTER i may have been
					 * bumped — looks intentional but
					 * subtle; confirm before touching. */
					i = (j == (i + 1)) ? i + 1 : i;
					j = (j == (i + 1)) ? i + 1 : i;
				}
			}
		}
	}
	return tagi;
}
  302. static int FTL_Execute_SPL_Recovery(void)
  303. {
  304. u32 j, block, blks;
  305. u32 *pbt = (u32 *)g_pBlockTable;
  306. int ret;
  307. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  308. __FILE__, __LINE__, __func__);
  309. blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
  310. for (j = 0; j <= blks; j++) {
  311. block = (pbt[j]);
  312. if (((block & BAD_BLOCK) != BAD_BLOCK) &&
  313. ((block & SPARE_BLOCK) == SPARE_BLOCK)) {
  314. ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
  315. if (FAIL == ret) {
  316. nand_dbg_print(NAND_DBG_WARN,
  317. "NAND Program fail in %s, Line %d, "
  318. "Function: %s, new Bad Block %d "
  319. "generated!\n",
  320. __FILE__, __LINE__, __func__,
  321. (int)(block & ~BAD_BLOCK));
  322. MARK_BLOCK_AS_BAD(pbt[j]);
  323. }
  324. }
  325. }
  326. return PASS;
  327. }
  328. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  329. * Function: GLOB_FTL_IdentifyDevice
  330. * Inputs: pointer to identify data structure
  331. * Outputs: PASS / FAIL
  332. * Description: the identify data structure is filled in with
  333. * information for the block driver.
  334. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  335. int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
  336. {
  337. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  338. __FILE__, __LINE__, __func__);
  339. dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
  340. dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
  341. dev_data->PageDataSize = DeviceInfo.wPageDataSize;
  342. dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
  343. dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
  344. return PASS;
  345. }
  346. /* ..... */
/*
 * allocate_memory - allocate every buffer the FTL layer uses.
 *
 * Returns PASS on success.  On any allocation failure, control jumps
 * into the goto chain at the bottom, which frees - in exact reverse
 * order of allocation - everything obtained so far, then returns
 * -ENOMEM.  The labels must therefore stay in strict reverse order of
 * the allocations above them.
 */
static int allocate_memory(void)
{
	u32 block_table_size, page_size, block_size, mem_size;
	u32 total_bytes = 0;	/* running total, reported in a debug print only */
	int i;
#if CMD_DMA
	int j;
#endif

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	page_size = DeviceInfo.wPageSize;
	block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;

	/* One u32 table entry + one u8 wear counter + one u16 read counter
	 * per data block, padded up to a whole multiple of the page data
	 * size (the table is flushed to flash in page units). */
	block_table_size = DeviceInfo.wDataBlockNum *
		(sizeof(u32) + sizeof(u8) + sizeof(u16));
	block_table_size += (DeviceInfo.wPageDataSize -
		(block_table_size % DeviceInfo.wPageDataSize)) %
		DeviceInfo.wPageDataSize;

	/* Malloc memory for block tables */
	g_pBlockTable = kmalloc(block_table_size, GFP_ATOMIC);
	if (!g_pBlockTable)
		goto block_table_fail;
	memset(g_pBlockTable, 0, block_table_size);
	total_bytes += block_table_size;

	/* Wear and read counters live inside the block-table allocation,
	 * directly after the u32 entries - no separate kmalloc, so they
	 * must never be freed individually. */
	g_pWearCounter = (u8 *)(g_pBlockTable +
		DeviceInfo.wDataBlockNum * sizeof(u32));

	if (DeviceInfo.MLCDevice)
		g_pReadCounter = (u16 *)(g_pBlockTable +
			DeviceInfo.wDataBlockNum *
			(sizeof(u32) + sizeof(u8)));

	/* Malloc memory and init for cache items */
	for (i = 0; i < CACHE_ITEM_NUM; i++) {
		Cache.array[i].address = NAND_CACHE_INIT_ADDR;
		Cache.array[i].use_cnt = 0;
		Cache.array[i].changed = CLEAR;
		Cache.array[i].buf = kmalloc(Cache.cache_item_size,
			GFP_ATOMIC);
		if (!Cache.array[i].buf)
			goto cache_item_fail;
		memset(Cache.array[i].buf, 0, Cache.cache_item_size);
		total_bytes += Cache.cache_item_size;
	}

	/* Malloc memory for IPF */
	g_pIPF = kmalloc(page_size, GFP_ATOMIC);
	if (!g_pIPF)
		goto ipf_fail;
	memset(g_pIPF, 0, page_size);
	total_bytes += page_size;

	/* Malloc memory for data merging during Level2 Cache flush */
	cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
	if (!cache_l2_page_buf)
		goto cache_l2_page_buf_fail;
	memset(cache_l2_page_buf, 0xff, page_size);
	total_bytes += page_size;

	cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
	if (!cache_l2_blk_buf)
		goto cache_l2_blk_buf_fail;
	memset(cache_l2_blk_buf, 0xff, block_size);
	total_bytes += block_size;

	/* Malloc memory for temp buffer */
	g_pTempBuf = kmalloc(Cache.cache_item_size, GFP_ATOMIC);
	if (!g_pTempBuf)
		goto Temp_buf_fail;
	memset(g_pTempBuf, 0, Cache.cache_item_size);
	total_bytes += Cache.cache_item_size;

	/* Malloc memory for block table blocks */
	mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
	g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBTBlocks)
		goto bt_blocks_fail;
	memset(g_pBTBlocks, 0xff, mem_size);
	total_bytes += mem_size;

	/* Malloc memory for function FTL_Check_Block_Table */
	flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
	if (!flag_check_blk_table)
		goto flag_check_blk_table_fail;
	total_bytes += DeviceInfo.wDataBlockNum;

	/* Malloc memory for function FTL_Search_Block_Table_IN_Block */
	tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf_search_bt_in_block)
		goto tmp_buf_search_bt_in_block_fail;
	memset(tmp_buf_search_bt_in_block, 0xff, page_size);
	total_bytes += page_size;

	/* Spare area size = full page size minus main data size */
	mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
	spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
	if (!spare_buf_search_bt_in_block)
		goto spare_buf_search_bt_in_block_fail;
	memset(spare_buf_search_bt_in_block, 0xff, mem_size);
	total_bytes += mem_size;

	spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
	if (!spare_buf_bt_search_bt_in_block)
		goto spare_buf_bt_search_bt_in_block_fail;
	memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
	total_bytes += mem_size;

	/* Malloc memory for function FTL_Read_Block_Table */
	tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf1_read_blk_table)
		goto tmp_buf1_read_blk_table_fail;
	memset(tmp_buf1_read_blk_table, 0xff, page_size);
	total_bytes += page_size;

	tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
	if (!tmp_buf2_read_blk_table)
		goto tmp_buf2_read_blk_table_fail;
	memset(tmp_buf2_read_blk_table, 0xff, page_size);
	total_bytes += page_size;

	/* Malloc memory for function FTL_Static_Wear_Leveling */
	flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
					GFP_ATOMIC);
	if (!flags_static_wear_leveling)
		goto flags_static_wear_leveling_fail;
	total_bytes += DeviceInfo.wDataBlockNum;

	/* Malloc memory for function FTL_Write_Block_Table_Data.
	 * When the on-flash table spans more than 3 pages, the first and
	 * last pages are handled separately, hence the "- 2 pages". */
	if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
		mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
				2 * DeviceInfo.wPageSize;
	else
		mem_size = DeviceInfo.wPageSize;
	tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
	if (!tmp_buf_write_blk_table_data)
		goto tmp_buf_write_blk_table_data_fail;
	memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
	total_bytes += mem_size;

	/* Malloc memory for function FTL_Read_Disturbance */
	tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
	if (!tmp_buf_read_disturbance)
		goto tmp_buf_read_disturbance_fail;
	memset(tmp_buf_read_disturbance, 0xff, block_size);
	total_bytes += block_size;

	/* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
	buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
	if (!buf_read_page_main_spare)
		goto buf_read_page_main_spare_fail;
	total_bytes += DeviceInfo.wPageSize;

	/* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
	buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
	if (!buf_write_page_main_spare)
		goto buf_write_page_main_spare_fail;
	total_bytes += DeviceInfo.wPageSize;

	/* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
	buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
	if (!buf_read_page_spare)
		goto buf_read_page_spare_fail;
	memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
	total_bytes += DeviceInfo.wPageSpareSize;

	/* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
	buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
	if (!buf_get_bad_block)
		goto buf_get_bad_block_fail;
	memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
	total_bytes += DeviceInfo.wPageSpareSize;

#if CMD_DMA
	g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
	if (!g_temp_buf)
		goto temp_buf_fail;
	memset(g_temp_buf, 0xff, block_size);
	total_bytes += block_size;

	/* Malloc memory for copy of block table used in CDMA mode */
	g_pBTStartingCopy = kmalloc(block_table_size, GFP_ATOMIC);
	if (!g_pBTStartingCopy)
		goto bt_starting_copy;
	memset(g_pBTStartingCopy, 0, block_table_size);
	total_bytes += block_table_size;

	/* Same embedded layout as the live table above */
	g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
		DeviceInfo.wDataBlockNum * sizeof(u32));

	if (DeviceInfo.MLCDevice)
		g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
			DeviceInfo.wDataBlockNum *
			(sizeof(u32) + sizeof(u8)));

	/* Malloc memory for block table copies */
	mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
			5 * DeviceInfo.wDataBlockNum * sizeof(u8);
	if (DeviceInfo.MLCDevice)
		mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
	g_pBlockTableCopies = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBlockTableCopies)
		goto blk_table_copies_fail;
	memset(g_pBlockTableCopies, 0, mem_size);
	total_bytes += mem_size;
	g_pNextBlockTable = g_pBlockTableCopies;

	/* Malloc memory for Block Table Delta */
	mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
	g_pBTDelta = kmalloc(mem_size, GFP_ATOMIC);
	if (!g_pBTDelta)
		goto bt_delta_fail;
	memset(g_pBTDelta, 0, mem_size);
	total_bytes += mem_size;
	g_pBTDelta_Free = g_pBTDelta;

	/* Malloc memory for Copy Back Buffers */
	for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
		cp_back_buf_copies[j] = kmalloc(block_size, GFP_ATOMIC);
		if (!cp_back_buf_copies[j])
			goto cp_back_buf_copies_fail;
		memset(cp_back_buf_copies[j], 0, block_size);
		total_bytes += block_size;
	}
	cp_back_buf_idx = 0;

	/* Malloc memory for pending commands list */
	mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
	info.pcmds = kzalloc(mem_size, GFP_KERNEL);
	if (!info.pcmds)
		goto pending_cmds_buf_fail;
	total_bytes += mem_size;

	/* Malloc memory for CDMA descripter table */
	mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
	info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!info.cdma_desc_buf)
		goto cdma_desc_buf_fail;
	total_bytes += mem_size;

	/* Malloc memory for Memcpy descripter table */
	mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
	info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
	if (!info.memcp_desc_buf)
		goto memcp_desc_buf_fail;
	total_bytes += mem_size;
#endif

	nand_dbg_print(NAND_DBG_WARN,
		"Total memory allocated in FTL layer: %d\n", total_bytes);

	return PASS;

	/* Error unwind: each label frees the last allocation that
	 * succeeded, then falls through to the next label down. */
#if CMD_DMA
memcp_desc_buf_fail:
	kfree(info.cdma_desc_buf);
cdma_desc_buf_fail:
	kfree(info.pcmds);
pending_cmds_buf_fail:
cp_back_buf_copies_fail:
	/* j is either the index that failed or COPY_BACK_BUF_NUM when
	 * reached by fall-through; free indices [0, j) */
	j--;
	for (; j >= 0; j--)
		kfree(cp_back_buf_copies[j]);
	kfree(g_pBTDelta);
bt_delta_fail:
	kfree(g_pBlockTableCopies);
blk_table_copies_fail:
	kfree(g_pBTStartingCopy);
bt_starting_copy:
	kfree(g_temp_buf);
temp_buf_fail:
	kfree(buf_get_bad_block);
#endif

buf_get_bad_block_fail:
	kfree(buf_read_page_spare);
buf_read_page_spare_fail:
	kfree(buf_write_page_main_spare);
buf_write_page_main_spare_fail:
	kfree(buf_read_page_main_spare);
buf_read_page_main_spare_fail:
	kfree(tmp_buf_read_disturbance);
tmp_buf_read_disturbance_fail:
	kfree(tmp_buf_write_blk_table_data);
tmp_buf_write_blk_table_data_fail:
	kfree(flags_static_wear_leveling);
flags_static_wear_leveling_fail:
	kfree(tmp_buf2_read_blk_table);
tmp_buf2_read_blk_table_fail:
	kfree(tmp_buf1_read_blk_table);
tmp_buf1_read_blk_table_fail:
	kfree(spare_buf_bt_search_bt_in_block);
spare_buf_bt_search_bt_in_block_fail:
	kfree(spare_buf_search_bt_in_block);
spare_buf_search_bt_in_block_fail:
	kfree(tmp_buf_search_bt_in_block);
tmp_buf_search_bt_in_block_fail:
	kfree(flag_check_blk_table);
flag_check_blk_table_fail:
	kfree(g_pBTBlocks);
bt_blocks_fail:
	kfree(g_pTempBuf);
Temp_buf_fail:
	kfree(cache_l2_blk_buf);
cache_l2_blk_buf_fail:
	kfree(cache_l2_page_buf);
cache_l2_page_buf_fail:
	kfree(g_pIPF);
ipf_fail:
cache_item_fail:
	/* i is either the cache index that failed or CACHE_ITEM_NUM when
	 * reached by fall-through; free indices [0, i) */
	i--;
	for (; i >= 0; i--)
		kfree(Cache.array[i].buf);
	kfree(g_pBlockTable);
block_table_fail:
	printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
		__FILE__, __LINE__);

	return -ENOMEM;
}
  629. /* .... */
  630. static int free_memory(void)
  631. {
  632. int i;
  633. #if CMD_DMA
  634. kfree(info.memcp_desc_buf);
  635. kfree(info.cdma_desc_buf);
  636. kfree(info.pcmds);
  637. for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
  638. kfree(cp_back_buf_copies[i]);
  639. kfree(g_pBTDelta);
  640. kfree(g_pBlockTableCopies);
  641. kfree(g_pBTStartingCopy);
  642. kfree(g_temp_buf);
  643. kfree(buf_get_bad_block);
  644. #endif
  645. kfree(buf_read_page_spare);
  646. kfree(buf_write_page_main_spare);
  647. kfree(buf_read_page_main_spare);
  648. kfree(tmp_buf_read_disturbance);
  649. kfree(tmp_buf_write_blk_table_data);
  650. kfree(flags_static_wear_leveling);
  651. kfree(tmp_buf2_read_blk_table);
  652. kfree(tmp_buf1_read_blk_table);
  653. kfree(spare_buf_bt_search_bt_in_block);
  654. kfree(spare_buf_search_bt_in_block);
  655. kfree(tmp_buf_search_bt_in_block);
  656. kfree(flag_check_blk_table);
  657. kfree(g_pBTBlocks);
  658. kfree(g_pTempBuf);
  659. kfree(g_pIPF);
  660. for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
  661. kfree(Cache.array[i].buf);
  662. kfree(g_pBlockTable);
  663. return 0;
  664. }
  665. static void dump_cache_l2_table(void)
  666. {
  667. struct list_head *p;
  668. struct spectra_l2_cache_list *pnd;
  669. int n, i;
  670. n = 0;
  671. list_for_each(p, &cache_l2.table.list) {
  672. pnd = list_entry(p, struct spectra_l2_cache_list, list);
  673. nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
  674. /*
  675. for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
  676. if (pnd->pages_array[i] != MAX_U32_VALUE)
  677. nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
  678. }
  679. */
  680. n++;
  681. }
  682. }
  683. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  684. * Function: GLOB_FTL_Init
  685. * Inputs: none
  686. * Outputs: PASS=0 / FAIL=1
  687. * Description: allocates the memory for cache array,
  688. * important data structures
  689. * clears the cache array
  690. * reads the block table from flash into array
  691. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  692. int GLOB_FTL_Init(void)
  693. {
  694. int i;
  695. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  696. __FILE__, __LINE__, __func__);
  697. Cache.pages_per_item = 1;
  698. Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;
  699. if (allocate_memory() != PASS)
  700. return FAIL;
  701. #if CMD_DMA
  702. #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
  703. memcpy((void *)&cache_start_copy, (void *)&Cache,
  704. sizeof(struct flash_cache_tag));
  705. memset((void *)&int_cache, -1,
  706. sizeof(struct flash_cache_delta_list_tag) *
  707. (MAX_CHANS + MAX_DESCS));
  708. #endif
  709. ftl_cmd_cnt = 0;
  710. #endif
  711. if (FTL_Read_Block_Table() != PASS)
  712. return FAIL;
  713. /* Init the Level2 Cache data structure */
  714. for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
  715. cache_l2.blk_array[i] = MAX_U32_VALUE;
  716. cache_l2.cur_blk_idx = 0;
  717. cache_l2.cur_page_num = 0;
  718. INIT_LIST_HEAD(&cache_l2.table.list);
  719. cache_l2.table.logical_blk_num = MAX_U32_VALUE;
  720. dump_cache_l2_table();
  721. return 0;
  722. }
  723. #if CMD_DMA
  724. #if 0
/*
 * save_blk_table_changes - apply the delta records logged for command
 * @idx to the CDMA copy of the block table (g_pBTStartingCopy).
 *
 * Walks p_BTableChangesDelta (a global cursor, advanced as a side
 * effect) over every record whose ftl_cmd_cnt is <= this command's Tag.
 * ValidFields is a bitmask naming which record fields are meaningful.
 */
static void save_blk_table_changes(u16 idx)
{
	u8 ftl_cmd;
	u32 *pbt = (u32 *)g_pBTStartingCopy;

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u16 id;
	u8 cache_blks;

	/* Slots below MAX_CHANS carry no shadow-cache entry; restore the
	 * logged cache state for this command's item, if it has one. */
	id = idx - MAX_CHANS;
	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		cache_start_copy.array[cache_blks].address =
			int_cache[id].cache.address;
		cache_start_copy.array[cache_blks].changed =
			int_cache[id].cache.changed;
	}
#endif
	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	while (ftl_cmd <= PendingCMD[idx].Tag) {
		if (p_BTableChangesDelta->ValidFields == 0x01) {
			/* only the block-table offset changed */
			g_wBlockTableOffset =
				p_BTableChangesDelta->g_wBlockTableOffset;
		} else if (p_BTableChangesDelta->ValidFields == 0x0C) {
			/* one block-table entry changed */
			pbt[p_BTableChangesDelta->BT_Index] =
				p_BTableChangesDelta->BT_Entry_Value;
			debug_boundary_error(((
				p_BTableChangesDelta->BT_Index)),
				DeviceInfo.wDataBlockNum, 0);
		} else if (p_BTableChangesDelta->ValidFields == 0x03) {
			/* both table offset and table index changed */
			g_wBlockTableOffset =
				p_BTableChangesDelta->g_wBlockTableOffset;
			g_wBlockTableIndex =
				p_BTableChangesDelta->g_wBlockTableIndex;
		} else if (p_BTableChangesDelta->ValidFields == 0x30) {
			/* wear-counter update */
			g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] =
				p_BTableChangesDelta->WC_Entry_Value;
		} else if ((DeviceInfo.MLCDevice) &&
			(p_BTableChangesDelta->ValidFields == 0xC0)) {
			/* read-counter update (MLC devices only) */
			g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] =
				p_BTableChangesDelta->RC_Entry_Value;
			nand_dbg_print(NAND_DBG_DEBUG,
				"In event status setting read counter "
				"GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
				ftl_cmd,
				p_BTableChangesDelta->RC_Entry_Value,
				(unsigned int)p_BTableChangesDelta->RC_Index);
		} else {
			nand_dbg_print(NAND_DBG_DEBUG,
				"This should never occur \n");
		}
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}
}
/*
 * discard_cmds - drop the effects of command @n after an earlier
 * command in the chain has failed.
 *
 * Marks the physical block a write targeted as discarded in the
 * block-table copy, and skips (without applying) every delta record
 * logged under this command's tag, advancing the global
 * p_BTableChangesDelta cursor past them.
 */
static void discard_cmds(u16 n)
{
	u32 *pbt = (u32 *)g_pBTStartingCopy;
	u8 ftl_cmd;
	unsigned long k;
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u8 cache_blks;
	u16 id;
#endif

	if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) ||
		(PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) {
		/* Find the table entry mapping to the written physical
		 * block (status bits masked off) and flag it */
		for (k = 0; k < DeviceInfo.wDataBlockNum; k++) {
			if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK)))
				MARK_BLK_AS_DISCARD(pbt[k]);
		}
	}

	/* Skip all delta records belonging to this command tag */
	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	while (ftl_cmd <= PendingCMD[n].Tag) {
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	id = n - MAX_CHANS;

	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		if (PendingCMD[n].CMD == MEMCOPY_CMD) {
			/* Invalidate the shadow cache item only if the
			 * memcopy destination lies inside its buffer */
			if ((cache_start_copy.array[cache_blks].buf <=
				PendingCMD[n].DataDestAddr) &&
				((cache_start_copy.array[cache_blks].buf +
				Cache.cache_item_size) >
				PendingCMD[n].DataDestAddr)) {
				cache_start_copy.array[cache_blks].address =
					NAND_CACHE_INIT_ADDR;
				cache_start_copy.array[cache_blks].use_cnt =
					0;
				cache_start_copy.array[cache_blks].changed =
					CLEAR;
			}
		} else {
			/* Roll the cache item back to its logged state */
			cache_start_copy.array[cache_blks].address =
				int_cache[id].cache.address;
			cache_start_copy.array[cache_blks].changed =
				int_cache[id].cache.changed;
		}
	}
#endif
}
  825. static void process_cmd_pass(int *first_failed_cmd, u16 idx)
  826. {
  827. if (0 == *first_failed_cmd)
  828. save_blk_table_changes(idx);
  829. else
  830. discard_cmds(idx);
  831. }
/*
 * process_cmd_fail_abort - handle a command that failed or was aborted.
 *
 * Records the first failing command's SBD index, skips this command's
 * delta records (advancing the global p_BTableChangesDelta cursor),
 * restores the shadow cache entry as appropriate for the command type,
 * and - for erase/program failures - marks the affected block bad in
 * the block-table copy.
 */
static void process_cmd_fail_abort(int *first_failed_cmd,
				u16 idx, int event)
{
	u32 *pbt = (u32 *)g_pBTStartingCopy;
	u8 ftl_cmd;
	unsigned long i;
	int erase_fail, program_fail;
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	u8 cache_blks;
	u16 id;
#endif

	/* Remember only the first failure in the chain */
	if (0 == *first_failed_cmd)
		*first_failed_cmd = PendingCMD[idx].SBDCmdIndex;

	nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occured "
		"while executing %u Command %u accesing Block %u\n",
		(unsigned int)p_BTableChangesDelta->ftl_cmd_cnt,
		PendingCMD[idx].CMD,
		(unsigned int)PendingCMD[idx].Block);

	/* Skip (do not apply) this command's delta records */
	ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	while (ftl_cmd <= PendingCMD[idx].Tag) {
		p_BTableChangesDelta += 1;
		ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
	}

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	id = idx - MAX_CHANS;

	if (int_cache[id].item != -1) {
		cache_blks = int_cache[id].item;
		if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) {
			/* Failed flush: keep the item, mark it dirty again */
			cache_start_copy.array[cache_blks].address =
				int_cache[id].cache.address;
			cache_start_copy.array[cache_blks].changed = SET;
		} else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) {
			/* Failed fill: invalidate the item */
			cache_start_copy.array[cache_blks].address =
				NAND_CACHE_INIT_ADDR;
			cache_start_copy.array[cache_blks].use_cnt = 0;
			cache_start_copy.array[cache_blks].changed =
				CLEAR;
		} else if (PendingCMD[idx].CMD == ERASE_CMD) {
			/* ? */
		} else if (PendingCMD[idx].CMD == MEMCOPY_CMD) {
			/* ? */
		}
	}
#endif

	erase_fail = (event == EVENT_ERASE_FAILURE) &&
		(PendingCMD[idx].CMD == ERASE_CMD);

	program_fail = (event == EVENT_PROGRAM_FAILURE) &&
		((PendingCMD[idx].CMD == WRITE_MAIN_CMD) ||
		(PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD));

	if (erase_fail || program_fail) {
		/* Locate the table entry for the failed physical block
		 * (status bits masked off) and mark it bad */
		for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
			if (PendingCMD[idx].Block ==
				(pbt[i] & (~BAD_BLOCK)))
				MARK_BLOCK_AS_BAD(pbt[i]);
		}
	}
}
  889. static void process_cmd(int *first_failed_cmd, u16 idx, int event)
  890. {
  891. u8 ftl_cmd;
  892. int cmd_match = 0;
  893. if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag)
  894. cmd_match = 1;
  895. if (PendingCMD[idx].Status == CMD_PASS) {
  896. process_cmd_pass(first_failed_cmd, idx);
  897. } else if ((PendingCMD[idx].Status == CMD_FAIL) ||
  898. (PendingCMD[idx].Status == CMD_ABORT)) {
  899. process_cmd_fail_abort(first_failed_cmd, idx, event);
  900. } else if ((PendingCMD[idx].Status == CMD_NOT_DONE) &&
  901. PendingCMD[idx].Tag) {
  902. nand_dbg_print(NAND_DBG_DEBUG,
  903. " Command no. %hu is not executed\n",
  904. (unsigned int)PendingCMD[idx].Tag);
  905. ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
  906. while (ftl_cmd <= PendingCMD[idx].Tag) {
  907. p_BTableChangesDelta += 1;
  908. ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
  909. }
  910. }
  911. }
  912. #endif
  913. static void process_cmd(int *first_failed_cmd, u16 idx, int event)
  914. {
  915. printk(KERN_ERR "temporary workaround function. "
  916. "Should not be called! \n");
  917. }
  918. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  919. * Function: GLOB_FTL_Event_Status
  920. * Inputs: none
  921. * Outputs: Event Code
  922. * Description: It is called by SBD after hardware interrupt signalling
  923. * completion of commands chain
  924. * It does following things
  925. * get event status from LLD
  926. * analyze command chain status
  927. * determine last command executed
  928. * analyze results
  929. * rebuild the block table in case of uncorrectable error
  930. * return event code
  931. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * GLOB_FTL_Event_Status - analyse the completed CDMA command chain.
 *
 * Fetches the LLD event code; on program/erase failure it replays the
 * per-command delta log against the starting copy of the block table
 * (via process_cmd) and installs that copy as the live table.  In all
 * cases it then re-seeds the CDMA shadow state for the next chain.
 * Returns the event code, or ERR for an unrecognised one.
 */
int GLOB_FTL_Event_Status(int *first_failed_cmd)
{
	int event_code = PASS;
	u16 i_P;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	*first_failed_cmd = 0;

	event_code = GLOB_LLD_Event_Status();

	switch (event_code) {
	case EVENT_PASS:
		nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
		break;
	case EVENT_UNCORRECTABLE_DATA_ERROR:
		nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
		break;
	case EVENT_PROGRAM_FAILURE:
	case EVENT_ERASE_FAILURE:
		nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
			"Event code: 0x%x\n", event_code);
		/* Walk every command of the chain (slots start after the
		 * per-channel entries, hence the MAX_CHANS offset) and
		 * commit/discard its logged block-table deltas. */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta;
		for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
				i_P++)
			process_cmd(first_failed_cmd, i_P, event_code);
		/* The repaired copy becomes the live block table */
		memcpy(g_pBlockTable, g_pBTStartingCopy,
			DeviceInfo.wDataBlockNum * sizeof(u32));
		memcpy(g_pWearCounter, g_pWearCounterCopy,
			DeviceInfo.wDataBlockNum * sizeof(u8));
		if (DeviceInfo.MLCDevice)
			memcpy(g_pReadCounter, g_pReadCounterCopy,
				DeviceInfo.wDataBlockNum * sizeof(u16));
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
		/* Restore the cache to its pre-chain snapshot */
		memcpy((void *)&Cache, (void *)&cache_start_copy,
			sizeof(struct flash_cache_tag));
		memset((void *)&int_cache, -1,
			sizeof(struct flash_cache_delta_list_tag) *
			(MAX_DESCS + MAX_CHANS));
#endif
		break;
	default:
		nand_dbg_print(NAND_DBG_WARN,
			"Handling unexpected event code - 0x%x\n",
			event_code);
		event_code = ERR;
		break;
	}

	/* Re-sync the CDMA shadow copies with the (possibly restored)
	 * live tables and reset per-chain bookkeeping for the next run. */
	memcpy(g_pBTStartingCopy, g_pBlockTable,
		DeviceInfo.wDataBlockNum * sizeof(u32));
	memcpy(g_pWearCounterCopy, g_pWearCounter,
		DeviceInfo.wDataBlockNum * sizeof(u8));
	if (DeviceInfo.MLCDevice)
		memcpy(g_pReadCounterCopy, g_pReadCounter,
			DeviceInfo.wDataBlockNum * sizeof(u16));

	g_pBTDelta_Free = g_pBTDelta;
	ftl_cmd_cnt = 0;
	g_pNextBlockTable = g_pBlockTableCopies;
	cp_back_buf_idx = 0;

#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	memcpy((void *)&cache_start_copy, (void *)&Cache,
		sizeof(struct flash_cache_tag));
	memset((void *)&int_cache, -1,
		sizeof(struct flash_cache_delta_list_tag) *
		(MAX_DESCS + MAX_CHANS));
#endif

	return event_code;
}
  998. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  999. * Function: glob_ftl_execute_cmds
  1000. * Inputs: none
  1001. * Outputs: none
  1002. * Description: pass thru to LLD
  1003. ***************************************************************/
  1004. u16 glob_ftl_execute_cmds(void)
  1005. {
  1006. nand_dbg_print(NAND_DBG_TRACE,
  1007. "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
  1008. (unsigned int)ftl_cmd_cnt);
  1009. g_SBDCmdIndex = 0;
  1010. return glob_lld_execute_cmds();
  1011. }
  1012. #endif
  1013. #if !CMD_DMA
  1014. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  1015. * Function: GLOB_FTL_Read Immediate
  1016. * Inputs: pointer to data
  1017. * address of data
  1018. * Outputs: PASS / FAIL
  1019. * Description: Reads one page of data into RAM directly from flash without
  1020. * using or disturbing cache.It is assumed this function is called
  1021. * with CMD-DMA disabled.
  1022. *****************************************************************/
/*
 * GLOB_FTL_Read_Immediate - read one page straight from flash into RAM,
 * bypassing (and not disturbing) the L1 cache.  Assumed to be called
 * with CMD-DMA disabled.  Returns the LLD read result, or FAIL when the
 * block does not pass the IS_SPARE_BLOCK check.
 */
int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
{
	int wResult = FAIL;
	u32 Block;
	u16 Page;
	u32 phy_blk;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	Block = BLK_FROM_ADDR(addr);
	Page = PAGE_FROM_ADDR(addr, Block);

	/* NOTE(review): gating a *read* on IS_SPARE_BLOCK looks odd -
	 * confirm the macro's semantics against its definition. */
	if (!IS_SPARE_BLOCK(Block))
		return FAIL;

	/* NOTE(review): pbt[Block] is used without masking status bits
	 * (cf. "& ~BAD_BLOCK" elsewhere in this file) - presumably spare
	 * entries carry no extra flag bits here; verify. */
	phy_blk = pbt[Block];
	wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);

	if (DeviceInfo.MLCDevice) {
		/* Bump the per-block read counter; at the threshold run
		 * read-disturbance handling and note that the in-progress
		 * block table must be written out. */
		g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
		if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
			>= MAX_READ_COUNTER)
			FTL_Read_Disturbance(phy_blk);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}

	return wResult;
}
  1050. #endif
  1051. #ifdef SUPPORT_BIG_ENDIAN
  1052. /*********************************************************************
  1053. * Function: FTL_Invert_Block_Table
  1054. * Inputs: none
  1055. * Outputs: none
  1056. * Description: Re-format the block table in ram based on BIG_ENDIAN and
  1057. * LARGE_BLOCKNUM if necessary
  1058. **********************************************************************/
  1059. static void FTL_Invert_Block_Table(void)
  1060. {
  1061. u32 i;
  1062. u32 *pbt = (u32 *)g_pBlockTable;
  1063. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  1064. __FILE__, __LINE__, __func__);
  1065. #ifdef SUPPORT_LARGE_BLOCKNUM
  1066. for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
  1067. pbt[i] = INVERTUINT32(pbt[i]);
  1068. g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
  1069. }
  1070. #else
  1071. for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
  1072. pbt[i] = INVERTUINT16(pbt[i]);
  1073. g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
  1074. }
  1075. #endif
  1076. }
  1077. #endif
  1078. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  1079. * Function: GLOB_FTL_Flash_Init
  1080. * Inputs: none
  1081. * Outputs: PASS=0 / FAIL=0x01 (based on read ID)
  1082. * Description: The flash controller is initialized
  1083. * The flash device is reset
  1084. * Perform a flash READ ID command to confirm that a
  1085. * valid device is attached and active.
  1086. * The DeviceInfo structure gets filled in
  1087. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  1088. int GLOB_FTL_Flash_Init(void)
  1089. {
  1090. int status = FAIL;
  1091. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  1092. __FILE__, __LINE__, __func__);
  1093. g_SBDCmdIndex = 0;
  1094. GLOB_LLD_Flash_Init();
  1095. status = GLOB_LLD_Read_Device_ID();
  1096. return status;
  1097. }
  1098. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  1099. * Inputs: none
  1100. * Outputs: PASS=0 / FAIL=0x01 (based on read ID)
  1101. * Description: The flash controller is released
  1102. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  1103. int GLOB_FTL_Flash_Release(void)
  1104. {
  1105. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  1106. __FILE__, __LINE__, __func__);
  1107. return GLOB_LLD_Flash_Release();
  1108. }
  1109. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  1110. * Function: GLOB_FTL_Cache_Release
  1111. * Inputs: none
  1112. * Outputs: none
  1113. * Description: release all allocated memory in GLOB_FTL_Init
  1114. * (allocated in GLOB_FTL_Init)
  1115. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  1116. void GLOB_FTL_Cache_Release(void)
  1117. {
  1118. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  1119. __FILE__, __LINE__, __func__);
  1120. free_memory();
  1121. }
  1122. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  1123. * Function: FTL_Cache_If_Hit
  1124. * Inputs: Page Address
  1125. * Outputs: Block number/UNHIT BLOCK
  1126. * Description: Determines if the addressed page is in cache
  1127. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  1128. static u16 FTL_Cache_If_Hit(u64 page_addr)
  1129. {
  1130. u16 item;
  1131. u64 addr;
  1132. int i;
  1133. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  1134. __FILE__, __LINE__, __func__);
  1135. item = UNHIT_CACHE_ITEM;
  1136. for (i = 0; i < CACHE_ITEM_NUM; i++) {
  1137. addr = Cache.array[i].address;
  1138. if ((page_addr >= addr) &&
  1139. (page_addr < (addr + Cache.cache_item_size))) {
  1140. item = i;
  1141. break;
  1142. }
  1143. }
  1144. return item;
  1145. }
  1146. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  1147. * Function: FTL_Calculate_LRU
  1148. * Inputs: None
  1149. * Outputs: None
  1150. * Description: Calculate the least recently block in a cache and record its
  1151. * index in LRU field.
  1152. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  1153. static void FTL_Calculate_LRU(void)
  1154. {
  1155. u16 i, bCurrentLRU, bTempCount;
  1156. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  1157. __FILE__, __LINE__, __func__);
  1158. bCurrentLRU = 0;
  1159. bTempCount = MAX_WORD_VALUE;
  1160. for (i = 0; i < CACHE_ITEM_NUM; i++) {
  1161. if (Cache.array[i].use_cnt < bTempCount) {
  1162. bCurrentLRU = i;
  1163. bTempCount = Cache.array[i].use_cnt;
  1164. }
  1165. }
  1166. Cache.LRU = bCurrentLRU;
  1167. }
  1168. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  1169. * Function: FTL_Cache_Read_Page
  1170. * Inputs: pointer to read buffer, logical address and cache item number
  1171. * Outputs: None
  1172. * Description: Read the page from the cached block addressed by blocknumber
  1173. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  1174. static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
  1175. {
  1176. u8 *start_addr;
  1177. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  1178. __FILE__, __LINE__, __func__);
  1179. start_addr = Cache.array[cache_item].buf;
  1180. start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
  1181. DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);
  1182. #if CMD_DMA
  1183. GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
  1184. DeviceInfo.wPageDataSize, 0);
  1185. ftl_cmd_cnt++;
  1186. #else
  1187. memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
  1188. #endif
  1189. if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
  1190. Cache.array[cache_item].use_cnt++;
  1191. }
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     FTL_Cache_Read_All
* Inputs:       pointer to read buffer,block address
* Outputs:      PASS=0 / FAIL =1
* Description:  It reads pages in cache
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
{
	int wResult = PASS;
	u32 Block;
	u32 lba;
	u16 Page;
	u16 PageCount;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 i;

	/* Decompose the physical address into block and starting page;
	 * one cache item spans Cache.pages_per_item pages. */
	Block = BLK_FROM_ADDR(phy_addr);
	Page = PAGE_FROM_ADDR(phy_addr, Block);
	PageCount = Cache.pages_per_item;

	nand_dbg_print(NAND_DBG_DEBUG,
			"%s, Line %d, Function: %s, Block: 0x%x\n",
			__FILE__, __LINE__, __func__, Block);

	/* Reverse-lookup the logical block (lba) for this physical block.
	 * 0xffffffff acts as the "not found" sentinel. */
	lba = 0xffffffff;
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		if ((pbt[i] & (~BAD_BLOCK)) == Block) {
			lba = i;
			/* A spare/bad/discarded block has no valid data;
			 * return a buffer of all 0xFF instead of reading. */
			if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
				IS_DISCARDED_BLOCK(i)) {
				/* Add by yunpeng -2008.12.3 */
#if CMD_DMA
				GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
				PageCount * DeviceInfo.wPageDataSize, 0);
				ftl_cmd_cnt++;
#else
				memset(pData, 0xFF,
					PageCount * DeviceInfo.wPageDataSize);
#endif
				return wResult;
			} else {
				/* NOTE(review): keeps scanning after a match;
				 * a break here would likely suffice, but the
				 * original author was unsure ("break ??"). */
				continue; /* break ?? */
			}
		}
	}

	if (0xffffffff == lba)
		printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");

#if CMD_DMA
	wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
			PageCount, LLD_CMD_FLAG_MODE_CDMA);
	if (DeviceInfo.MLCDevice) {
		/* MLC devices track reads per block for read-disturb
		 * management; record the counter change in the block
		 * table delta so it can be replayed after CDMA. */
		g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
		nand_dbg_print(NAND_DBG_DEBUG,
			"Read Counter modified in ftl_cmd_cnt %u"
			" Block %u Counter%u\n",
			ftl_cmd_cnt, (unsigned int)Block,
			g_pReadCounter[Block -
			DeviceInfo.wSpectraStartBlock]);

		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->RC_Index =
			Block - DeviceInfo.wSpectraStartBlock;
		p_BTableChangesDelta->RC_Entry_Value =
			g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
		/* 0xC0: only the read-counter fields of the delta are valid */
		p_BTableChangesDelta->ValidFields = 0xC0;

		ftl_cmd_cnt++;

		/* Too many reads on this block: relocate data to avoid
		 * read-disturb errors. */
		if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
			MAX_READ_COUNTER)
			FTL_Read_Disturbance(Block);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	} else {
		ftl_cmd_cnt++;
	}
#else
	wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
	if (wResult == FAIL)
		return wResult;

	if (DeviceInfo.MLCDevice) {
		g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
		if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
						MAX_READ_COUNTER)
			FTL_Read_Disturbance(Block);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}
#endif
	return wResult;
}
  1284. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  1285. * Function: FTL_Cache_Write_All
  1286. * Inputs: pointer to cache in sys memory
  1287. * address of free block in flash
  1288. * Outputs: PASS=0 / FAIL=1
  1289. * Description: writes all the pages of the block in cache to flash
  1290. *
  1291. * NOTE:need to make sure this works ok when cache is limited
  1292. * to a partial block. This is where copy-back would be
  1293. * activated. This would require knowing which pages in the
  1294. * cached block are clean/dirty.Right now we only know if
  1295. * the whole block is clean/dirty.
  1296. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  1297. static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
  1298. {
  1299. u16 wResult = PASS;
  1300. u32 Block;
  1301. u16 Page;
  1302. u16 PageCount;
  1303. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  1304. __FILE__, __LINE__, __func__);
  1305. nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
  1306. "on %d\n", cache_block_to_write,
  1307. (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));
  1308. Block = BLK_FROM_ADDR(blk_addr);
  1309. Page = PAGE_FROM_ADDR(blk_addr, Block);
  1310. PageCount = Cache.pages_per_item;
  1311. #if CMD_DMA
  1312. if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
  1313. Block, Page, PageCount)) {
  1314. nand_dbg_print(NAND_DBG_WARN,
  1315. "NAND Program fail in %s, Line %d, "
  1316. "Function: %s, new Bad Block %d generated! "
  1317. "Need Bad Block replacing.\n",
  1318. __FILE__, __LINE__, __func__, Block);
  1319. wResult = FAIL;
  1320. }
  1321. ftl_cmd_cnt++;
  1322. #else
  1323. if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
  1324. nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
  1325. " Line %d, Function %s, new Bad Block %d generated!"
  1326. "Need Bad Block replacing.\n",
  1327. __FILE__, __LINE__, __func__, Block);
  1328. wResult = FAIL;
  1329. }
  1330. #endif
  1331. return wResult;
  1332. }
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     FTL_Cache_Update_Block
* Inputs:       pointer to buffer,page address,block address
* Outputs:      PASS=0 / FAIL=1
* Description:  It updates the cache
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Cache_Update_Block(u8 *pData,
			u64 old_page_addr, u64 blk_addr)
{
	int i, j;
	u8 *buf = pData;
	int wResult = PASS;
	int wFoundInCache;
	u64 page_addr;
	u64 addr;
	u64 old_blk_addr;
	u16 page_offset;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Start address of the old block, and the cache-item offset
	 * (in pages) of the page being updated within that block. */
	old_blk_addr = (u64)(old_page_addr >>
		DeviceInfo.nBitsInBlockDataSize) * DeviceInfo.wBlockDataSize;
	page_offset = (u16)(GLOB_u64_Remainder(old_page_addr, 2) >>
		DeviceInfo.nBitsInPageDataSize);

	/* Rebuild the destination block one cache item at a time:
	 * - the updated item comes from pData,
	 * - other items come from the L1 cache when present there,
	 * - otherwise they are read back from the old block on NAND. */
	for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
		page_addr = old_blk_addr + i * DeviceInfo.wPageDataSize;
		if (i != page_offset) {
			wFoundInCache = FAIL;
			for (j = 0; j < CACHE_ITEM_NUM; j++) {
				addr = Cache.array[j].address;
				/* Translate the cached logical address to its
				 * physical address for the comparison below. */
				addr = FTL_Get_Physical_Block_Addr(addr) +
					GLOB_u64_Remainder(addr, 2);
				if ((addr >= page_addr) && addr <
					(page_addr + Cache.cache_item_size)) {
					wFoundInCache = PASS;
					buf = Cache.array[j].buf;
					/* The cached copy is about to land in a
					 * new physical block; mark it dirty. */
					Cache.array[j].changed = SET;
#if CMD_DMA
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
					int_cache[ftl_cmd_cnt].item = j;
					int_cache[ftl_cmd_cnt].cache.address =
						Cache.array[j].address;
					int_cache[ftl_cmd_cnt].cache.changed =
						Cache.array[j].changed;
#endif
#endif
					break;
				}
			}
			if (FAIL == wFoundInCache) {
				/* Not in L1 cache: fetch the item from NAND
				 * into the shared temp buffer. */
				if (ERR == FTL_Cache_Read_All(g_pTempBuf,
					page_addr)) {
					wResult = FAIL;
					break;
				}
				buf = g_pTempBuf;
			}
		} else {
			/* This is the item being updated: take caller data. */
			buf = pData;
		}

		/* Write the chosen source at the same relative offset
		 * inside the new block. */
		if (FAIL == FTL_Cache_Write_All(buf,
			blk_addr + (page_addr - old_blk_addr))) {
			wResult = FAIL;
			break;
		}
	}

	return wResult;
}
  1400. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  1401. * Function: FTL_Copy_Block
  1402. * Inputs: source block address
  1403. * Destination block address
  1404. * Outputs: PASS=0 / FAIL=1
  1405. * Description: used only for static wear leveling to move the block
  1406. * containing static data to new blocks(more worn)
  1407. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  1408. int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
  1409. {
  1410. int i, r1, r2, wResult = PASS;
  1411. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  1412. __FILE__, __LINE__, __func__);
  1413. for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
  1414. r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
  1415. i * DeviceInfo.wPageDataSize);
  1416. r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
  1417. i * DeviceInfo.wPageDataSize);
  1418. if ((ERR == r1) || (FAIL == r2)) {
  1419. wResult = FAIL;
  1420. break;
  1421. }
  1422. }
  1423. return wResult;
  1424. }
  1425. /* Search the block table to find out the least wear block and then return it */
  1426. static u32 find_least_worn_blk_for_l2_cache(void)
  1427. {
  1428. int i;
  1429. u32 *pbt = (u32 *)g_pBlockTable;
  1430. u8 least_wear_cnt = MAX_BYTE_VALUE;
  1431. u32 least_wear_blk_idx = MAX_U32_VALUE;
  1432. u32 phy_idx;
  1433. for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
  1434. if (IS_SPARE_BLOCK(i)) {
  1435. phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
  1436. if (phy_idx > DeviceInfo.wSpectraEndBlock)
  1437. printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
  1438. "Too big phy block num (%d)\n", phy_idx);
  1439. if (g_pWearCounter[phy_idx -DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
  1440. least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
  1441. least_wear_blk_idx = i;
  1442. }
  1443. }
  1444. }
  1445. nand_dbg_print(NAND_DBG_WARN,
  1446. "find_least_worn_blk_for_l2_cache: "
  1447. "find block %d with least worn counter (%d)\n",
  1448. least_wear_blk_idx, least_wear_cnt);
  1449. return least_wear_blk_idx;
  1450. }
  1451. /* Get blocks for Level2 Cache */
  1452. static int get_l2_cache_blks(void)
  1453. {
  1454. int n;
  1455. u32 blk;
  1456. u32 *pbt = (u32 *)g_pBlockTable;
  1457. for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
  1458. blk = find_least_worn_blk_for_l2_cache();
  1459. if (blk > DeviceInfo.wDataBlockNum) {
  1460. nand_dbg_print(NAND_DBG_WARN,
  1461. "find_least_worn_blk_for_l2_cache: "
  1462. "No enough free NAND blocks (n: %d) for L2 Cache!\n", n);
  1463. return FAIL;
  1464. }
  1465. /* Tag the free block as discard in block table */
  1466. pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
  1467. /* Add the free block to the L2 Cache block array */
  1468. cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
  1469. }
  1470. return PASS;
  1471. }
  1472. static int erase_l2_cache_blocks(void)
  1473. {
  1474. int i, ret = PASS;
  1475. u32 pblk, lblk;
  1476. u64 addr;
  1477. u32 *pbt = (u32 *)g_pBlockTable;
  1478. nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
  1479. __FILE__, __LINE__, __func__);
  1480. for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
  1481. pblk = cache_l2.blk_array[i];
  1482. /* If the L2 cache block is invalid, then just skip it */
  1483. if (MAX_U32_VALUE == pblk)
  1484. continue;
  1485. BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
  1486. addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
  1487. if (PASS == GLOB_FTL_Block_Erase(addr)) {
  1488. /* Get logical block number of the erased block */
  1489. lblk = FTL_Get_Block_Index(pblk);
  1490. BUG_ON(BAD_BLOCK == lblk);
  1491. /* Tag it as free in the block table */
  1492. pbt[lblk] &= (u32)(~DISCARD_BLOCK);
  1493. pbt[lblk] |= (u32)(SPARE_BLOCK);
  1494. } else {
  1495. MARK_BLOCK_AS_BAD(pbt[lblk]);
  1496. ret = ERR;
  1497. }
  1498. }
  1499. return ret;
  1500. }
/*
 * Merge the valid data page in the L2 cache blocks into NAND.
 */
static int flush_l2_cache(void)
{
	struct list_head *p;
	struct spectra_l2_cache_list *pnd, *tmp_pnd;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 phy_blk, l2_blk;
	u64 addr;
	u16 l2_page;
	int i, ret = PASS;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (list_empty(&cache_l2.table.list)) /* No data to flush */
		return ret;

	//dump_cache_l2_table();

	/* Make sure block-table recovery info is on flash before we
	 * start moving data around. */
	if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
		g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
		FTL_Write_IN_Progress_Block_Table_Page();
	}

	/* For each logical block touched by the L2 cache: rebuild the full
	 * block image in cache_l2_blk_buf, overlay the cached pages, then
	 * write the image to a fresh physical block. */
	list_for_each(p, &cache_l2.table.list) {
		pnd = list_entry(p, struct spectra_l2_cache_list, list);
		if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
			IS_BAD_BLOCK(pnd->logical_blk_num) ||
			IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
			/* Block has no valid data on NAND: start from an
			 * all-0xFF (erased) image. */
			nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
			memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);
		} else {
			/* Read the current block contents as the base image */
			nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
			phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
			ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
				phy_blk, 0, DeviceInfo.wPagesPerBlock);
			if (ret == FAIL) {
				printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
			}
		}

		/* Overlay every page that has a newer copy in the L2 cache.
		 * pages_array entries encode (L2 block slot << 16) | page. */
		for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
			if (pnd->pages_array[i] != MAX_U32_VALUE) {
				l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
				l2_page = pnd->pages_array[i] & 0xffff;
				ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
				if (ret == FAIL) {
					printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
				}
				memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
			}
		}

		/* Find a free block and tag the original block as discarded */
		addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
		ret = FTL_Replace_Block(addr);
		if (ret == FAIL) {
			printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
		}

		/* Write back the updated data into NAND */
		phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
		if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
			nand_dbg_print(NAND_DBG_WARN,
				"Program NAND block %d fail in %s, Line %d\n",
				phy_blk, __FILE__, __LINE__);
			/* This may not be really a bad block. So just tag it as discarded. */
			/* Then it has a chance to be erased when garbage collection. */
			/* If it is really bad, then the erase will fail and it will be marked */
			/* as bad then. Otherwise it will be marked as free and can be used again */
			MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
			/* Find another free block and write it again */
			FTL_Replace_Block(addr);
			phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
			if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
				printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
					"Some data will be lost!\n", phy_blk);
				MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
			}
		} else {
			/* tag the new free block as used block */
			pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
		}
	}

	/* Destroy the L2 Cache table and free the memory of all nodes */
	list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
		list_del(&pnd->list);
		kfree(pnd);
	}

	/* Erase discard L2 cache blocks */
	if (erase_l2_cache_blocks() != PASS)
		nand_dbg_print(NAND_DBG_WARN,
			" Erase L2 cache blocks error in %s, Line %d\n",
			__FILE__, __LINE__);

	/* Init the Level2 Cache data structure */
	for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
		cache_l2.blk_array[i] = MAX_U32_VALUE;
	cache_l2.cur_blk_idx = 0;
	cache_l2.cur_page_num = 0;
	INIT_LIST_HEAD(&cache_l2.table.list);
	cache_l2.table.logical_blk_num = MAX_U32_VALUE;

	return ret;
}
  1598. /*
  1599. * Write back a changed victim cache item to the Level2 Cache
  1600. * and update the L2 Cache table to map the change.
  1601. * If the L2 Cache is full, then start to do the L2 Cache flush.
  1602. */
  1603. static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
  1604. {
  1605. u32 logical_blk_num;
  1606. u16 logical_page_num;
  1607. struct list_head *p;
  1608. struct spectra_l2_cache_list *pnd, *pnd_new;
  1609. u32 node_size;
  1610. int i, found;
  1611. nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
  1612. __FILE__, __LINE__, __func__);
  1613. /*
  1614. * If Level2 Cache table is empty, then it means either:
  1615. * 1. This is the first time that the function called after FTL_init
  1616. * or
  1617. * 2. The Level2 Cache has just been flushed
  1618. *
  1619. * So, 'steal' some free blocks from NAND for L2 Cache using
  1620. * by just mask them as discard in the block table
  1621. */
  1622. if (list_empty(&cache_l2.table.list)) {
  1623. BUG_ON(cache_l2.cur_blk_idx != 0);
  1624. BUG_ON(cache_l2.cur_page_num!= 0);
  1625. BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
  1626. if (FAIL == get_l2_cache_blks()) {
  1627. GLOB_FTL_Garbage_Collection();
  1628. if (FAIL == get_l2_cache_blks()) {
  1629. printk(KERN_ALERT "Fail to get L2 cache blks!\n");
  1630. return FAIL;
  1631. }
  1632. }
  1633. }
  1634. logical_blk_num = BLK_FROM_ADDR(logical_addr);
  1635. logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
  1636. BUG_ON(logical_blk_num == MAX_U32_VALUE);
  1637. /* Write the cache item data into the current position of L2 Cache */
  1638. #if CMD_DMA
  1639. /*
  1640. * TODO
  1641. */
  1642. #else
  1643. if (FAIL == GLOB_LLD_Write_Page_Main(buf,
  1644. cache_l2.blk_array[cache_l2.cur_blk_idx],
  1645. cache_l2.cur_page_num, 1)) {
  1646. nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
  1647. "%s, Line %d, new Bad Block %d generated!\n",
  1648. __FILE__, __LINE__,
  1649. cache_l2.blk_array[cache_l2.cur_blk_idx]);
  1650. /* TODO: tag the current block as bad and try again */
  1651. return FAIL;
  1652. }
  1653. #endif
  1654. /*
  1655. * Update the L2 Cache table.
  1656. *
  1657. * First seaching in the table to see whether the logical block
  1658. * has been mapped. If not, then kmalloc a new node for the
  1659. * logical block, fill data, and then insert it to the list.
  1660. * Otherwise, just update the mapped node directly.
  1661. */
  1662. found = 0;
  1663. list_for_each(p, &cache_l2.table.list) {
  1664. pnd = list_entry(p, struct spectra_l2_cache_list, list);
  1665. if (pnd->logical_blk_num == logical_blk_num) {
  1666. pnd->pages_array[logical_page_num] =
  1667. (cache_l2.cur_blk_idx << 16) |
  1668. cache_l2.cur_page_num;
  1669. found = 1;
  1670. break;
  1671. }
  1672. }
  1673. if (!found) { /* Create new node for the logical block here */
  1674. /* The logical pages to physical pages map array is
  1675. * located at the end of struct spectra_l2_cache_list.
  1676. */
  1677. node_size = sizeof(struct spectra_l2_cache_list) +
  1678. sizeof(u32) * DeviceInfo.wPagesPerBlock;
  1679. pnd_new = kmalloc(node_size, GFP_ATOMIC);
  1680. if (!pnd_new) {
  1681. printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
  1682. __FILE__, __LINE__);
  1683. /*
  1684. * TODO: Need to flush all the L2 cache into NAND ASAP
  1685. * since no memory available here
  1686. */
  1687. }
  1688. pnd_new->logical_blk_num = logical_blk_num;
  1689. for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
  1690. pnd_new->pages_array[i] = MAX_U32_VALUE;
  1691. pnd_new->pages_array[logical_page_num] =
  1692. (cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
  1693. list_add(&pnd_new->list, &cache_l2.table.list);
  1694. }
  1695. /* Increasing the current position pointer of the L2 Cache */
  1696. cache_l2.cur_page_num++;
  1697. if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
  1698. cache_l2.cur_blk_idx++;
  1699. if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
  1700. /* The L2 Cache is full. Need to flush it now */
  1701. nand_dbg_print(NAND_DBG_WARN,
  1702. "L2 Cache is full, will start to flush it\n");
  1703. flush_l2_cache();
  1704. } else {
  1705. cache_l2.cur_page_num = 0;
  1706. }
  1707. }
  1708. return PASS;
  1709. }
  1710. /*
  1711. * Seach in the Level2 Cache table to find the cache item.
  1712. * If find, read the data from the NAND page of L2 Cache,
  1713. * Otherwise, return FAIL.
  1714. */
  1715. static int search_l2_cache(u8 *buf, u64 logical_addr)
  1716. {
  1717. u32 logical_blk_num;
  1718. u16 logical_page_num;
  1719. struct list_head *p;
  1720. struct spectra_l2_cache_list *pnd;
  1721. u32 tmp = MAX_U32_VALUE;
  1722. u32 phy_blk;
  1723. u16 phy_page;
  1724. int ret = FAIL;
  1725. logical_blk_num = BLK_FROM_ADDR(logical_addr);
  1726. logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
  1727. list_for_each(p, &cache_l2.table.list) {
  1728. pnd = list_entry(p, struct spectra_l2_cache_list, list);
  1729. if (pnd->logical_blk_num == logical_blk_num) {
  1730. tmp = pnd->pages_array[logical_page_num];
  1731. break;
  1732. }
  1733. }
  1734. if (tmp != MAX_U32_VALUE) { /* Found valid map */
  1735. phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
  1736. phy_page = tmp & 0xFFFF;
  1737. #if CMD_DMA
  1738. /* TODO */
  1739. #else
  1740. ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
  1741. #endif
  1742. }
  1743. return ret;
  1744. }
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
* Function:     FTL_Cache_Write_Back
* Inputs:       pointer to data cached in sys memory
*               address of free block in flash
* Outputs:      PASS=0 / FAIL=1
* Description:  writes all the pages of Cache Block to flash
*
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Cache_Write_Back(u8 *pData, u64 blk_addr)
{
	int i, j, iErase;
	u64 old_page_addr, addr, phy_addr;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 lba;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Physical address the logical block currently maps to */
	old_page_addr = FTL_Get_Physical_Block_Addr(blk_addr) +
		GLOB_u64_Remainder(blk_addr, 2);

	/* If no replacement block could be assigned, we must reuse the
	 * old block, which requires erasing it first (iErase == PASS). */
	iErase = (FAIL == FTL_Replace_Block(blk_addr)) ? PASS : FAIL;

	/* The target block is now in use, clear its spare flag */
	pbt[BLK_FROM_ADDR(blk_addr)] &= (~SPARE_BLOCK);

#if CMD_DMA
	/* Record the block-table change so it can be replayed if the
	 * CDMA command chain fails. 0x0C: BT index/value fields valid. */
	p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
	g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
	p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
	p_BTableChangesDelta->BT_Index = (u32)(blk_addr >>
		DeviceInfo.nBitsInBlockDataSize);
	p_BTableChangesDelta->BT_Entry_Value =
		pbt[(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize)];
	p_BTableChangesDelta->ValidFields = 0x0C;
#endif

	if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
		g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
		FTL_Write_IN_Progress_Block_Table_Page();
	}

	/* Retry the erase + update sequence; after the first failed
	 * attempt iErase is forced to PASS so the block is re-erased. */
	for (i = 0; i < RETRY_TIMES; i++) {
		if (PASS == iErase) {
			phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
			if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
				/* Erase failed: retire the block and give up */
				lba = BLK_FROM_ADDR(blk_addr);
				MARK_BLOCK_AS_BAD(pbt[lba]);
				i = RETRY_TIMES;
				break;
			}
		}

		/* Remember which cache item covers this block so the
		 * write path can account for it. */
		for (j = 0; j < CACHE_ITEM_NUM; j++) {
			addr = Cache.array[j].address;
			if ((addr <= blk_addr) &&
				((addr + Cache.cache_item_size) > blk_addr))
				cache_block_to_write = j;
		}

		phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);
		if (PASS == FTL_Cache_Update_Block(pData,
					old_page_addr, phy_addr)) {
			cache_block_to_write = UNHIT_CACHE_ITEM;
			break;
		} else {
			iErase = PASS;
		}
	}

	if (i >= RETRY_TIMES) {
		/* All attempts exhausted: escalate to error handling */
		if (ERR == FTL_Flash_Error_Handle(pData,
					old_page_addr, blk_addr))
			return ERR;
		else
			return FAIL;
	}

	return PASS;
}
  1813. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  1814. * Function: FTL_Cache_Write_Page
  1815. * Inputs: Pointer to buffer, page address, cache block number
  1816. * Outputs: PASS=0 / FAIL=1
  1817. * Description: It writes the data in Cache Block
  1818. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  1819. static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
  1820. u8 cache_blk, u16 flag)
  1821. {
  1822. u8 *pDest;
  1823. u64 addr;
  1824. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  1825. __FILE__, __LINE__, __func__);
  1826. addr = Cache.array[cache_blk].address;
  1827. pDest = Cache.array[cache_blk].buf;
  1828. pDest += (unsigned long)(page_addr - addr);
  1829. Cache.array[cache_blk].changed = SET;
  1830. #if CMD_DMA
  1831. #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
  1832. int_cache[ftl_cmd_cnt].item = cache_blk;
  1833. int_cache[ftl_cmd_cnt].cache.address =
  1834. Cache.array[cache_blk].address;
  1835. int_cache[ftl_cmd_cnt].cache.changed =
  1836. Cache.array[cache_blk].changed;
  1837. #endif
  1838. GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
  1839. ftl_cmd_cnt++;
  1840. #else
  1841. memcpy(pDest, pData, DeviceInfo.wPageDataSize);
  1842. #endif
  1843. if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
  1844. Cache.array[cache_blk].use_cnt++;
  1845. }
  1846. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  1847. * Function: FTL_Cache_Write
  1848. * Inputs: none
  1849. * Outputs: PASS=0 / FAIL=1
  1850. * Description: It writes least frequently used Cache block to flash if it
  1851. * has been changed
  1852. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  1853. static int FTL_Cache_Write(void)
  1854. {
  1855. int i, bResult = PASS;
  1856. u16 bNO, least_count = 0xFFFF;
  1857. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  1858. __FILE__, __LINE__, __func__);
  1859. FTL_Calculate_LRU();
  1860. bNO = Cache.LRU;
  1861. nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
  1862. "Least used cache block is %d\n", bNO);
  1863. if (Cache.array[bNO].changed != SET)
  1864. return bResult;
  1865. nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
  1866. " Block %d containing logical block %d is dirty\n",
  1867. bNO,
  1868. (u32)(Cache.array[bNO].address >>
  1869. DeviceInfo.nBitsInBlockDataSize));
  1870. #if CMD_DMA
  1871. #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
  1872. int_cache[ftl_cmd_cnt].item = bNO;
  1873. int_cache[ftl_cmd_cnt].cache.address =
  1874. Cache.array[bNO].address;
  1875. int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
  1876. #endif
  1877. #endif
  1878. bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
  1879. Cache.array[bNO].address);
  1880. if (bResult != ERR)
  1881. Cache.array[bNO].changed = CLEAR;
  1882. least_count = Cache.array[bNO].use_cnt;
  1883. for (i = 0; i < CACHE_ITEM_NUM; i++) {
  1884. if (i == bNO)
  1885. continue;
  1886. if (Cache.array[i].use_cnt > 0)
  1887. Cache.array[i].use_cnt -= least_count;
  1888. }
  1889. return bResult;
  1890. }
  1891. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  1892. * Function: FTL_Cache_Read
  1893. * Inputs: Page address
  1894. * Outputs: PASS=0 / FAIL=1
  1895. * Description: It reads the block from device in Cache Block
  1896. * Set the LRU count to 1
  1897. * Mark the Cache Block as clean
  1898. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  1899. static int FTL_Cache_Read(u64 logical_addr)
  1900. {
  1901. u64 item_addr, phy_addr;
  1902. u16 num;
  1903. int ret;
  1904. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  1905. __FILE__, __LINE__, __func__);
  1906. num = Cache.LRU; /* The LRU cache item will be overwritten */
  1907. item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
  1908. Cache.cache_item_size;
  1909. Cache.array[num].address = item_addr;
  1910. Cache.array[num].use_cnt = 1;
  1911. Cache.array[num].changed = CLEAR;
  1912. #if CMD_DMA
  1913. #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
  1914. int_cache[ftl_cmd_cnt].item = num;
  1915. int_cache[ftl_cmd_cnt].cache.address =
  1916. Cache.array[num].address;
  1917. int_cache[ftl_cmd_cnt].cache.changed =
  1918. Cache.array[num].changed;
  1919. #endif
  1920. #endif
  1921. /*
  1922. * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
  1923. * Otherwise, read it from NAND
  1924. */
  1925. ret = search_l2_cache(Cache.array[num].buf, logical_addr);
  1926. if (PASS == ret) /* Hit in L2 Cache */
  1927. return ret;
  1928. /* Compute the physical start address of NAND device according to */
  1929. /* the logical start address of the cache item (LRU cache item) */
  1930. phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
  1931. GLOB_u64_Remainder(item_addr, 2);
  1932. return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
  1933. }
  1934. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  1935. * Function: FTL_Check_Block_Table
  1936. * Inputs: ?
  1937. * Outputs: PASS=0 / FAIL=1
  1938. * Description: It checks the correctness of each block table entry
  1939. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  1940. static int FTL_Check_Block_Table(int wOldTable)
  1941. {
  1942. u32 i;
  1943. int wResult = PASS;
  1944. u32 blk_idx;
  1945. u32 *pbt = (u32 *)g_pBlockTable;
  1946. u8 *pFlag = flag_check_blk_table;
  1947. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  1948. __FILE__, __LINE__, __func__);
  1949. if (NULL != pFlag) {
  1950. memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
  1951. for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
  1952. blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));
  1953. /*
  1954. * 20081006/KBV - Changed to pFlag[i] reference
  1955. * to avoid buffer overflow
  1956. */
  1957. /*
  1958. * 2008-10-20 Yunpeng Note: This change avoid
  1959. * buffer overflow, but changed function of
  1960. * the code, so it should be re-write later
  1961. */
  1962. if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
  1963. PASS == pFlag[i]) {
  1964. wResult = FAIL;
  1965. break;
  1966. } else {
  1967. pFlag[i] = PASS;
  1968. }
  1969. }
  1970. }
  1971. return wResult;
  1972. }
  1973. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  1974. * Function: FTL_Write_Block_Table
 * Inputs: flag (wForce)
  1976. * Outputs: 0=Block Table was updated. No write done. 1=Block write needs to
  1977. * happen. -1 Error
  1978. * Description: It writes the block table
 * Block table is always mapped to LBA 0, which in turn is mapped
  1980. * to any physical block
  1981. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Flush the in-RAM block table to flash, if it is dirty.
 *
 * @wForce: when PASS, force the write position to the last bt_pages of
 *          the current block so a fresh block is picked on the next pass.
 *
 * Returns 0 when the table was already clean (no write done), 1 after a
 * successful write, ERR when no replacement block could be found.
 *
 * The block table logically lives at LBA 0 and may migrate between
 * physical blocks; when the current block has no room for another copy
 * (or a write failed), FTL_Replace_Block_Table() supplies a new block
 * and the failed one is marked bad. Under CMD_DMA every change to the
 * offset/index/table entry is journaled into a BTableChangesDelta
 * record so it can be replayed or rolled back.
 */
static int FTL_Write_Block_Table(int wForce)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	int wSuccess = PASS;
	u32 wTempBlockTableIndex;
	u16 bt_pages, new_bt_offset;
	u8 blockchangeoccured = 0;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	/* Nothing to do unless the table has pending (in-progress) changes */
	if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
		return 0;

	if (PASS == wForce) {
		g_wBlockTableOffset =
			(u16)(DeviceInfo.wPagesPerBlock - bt_pages);
#if CMD_DMA
		/* Journal the forced offset change */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->g_wBlockTableOffset =
			g_wBlockTableOffset;
		p_BTableChangesDelta->ValidFields = 0x01;
#endif
	}

	nand_dbg_print(NAND_DBG_DEBUG,
		"Inside FTL_Write_Block_Table: block %d Page:%d\n",
		g_wBlockTableIndex, g_wBlockTableOffset);

	do {
		/* One table copy plus its IPF marker page */
		new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
		/* Move to a fresh block when this one is full or a write failed */
		if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
			(new_bt_offset > DeviceInfo.wPagesPerBlock) ||
			(FAIL == wSuccess)) {
			wTempBlockTableIndex = FTL_Replace_Block_Table();
			if (BAD_BLOCK == wTempBlockTableIndex)
				return ERR;
			if (!blockchangeoccured) {
				bt_block_changed = 1;
				blockchangeoccured = 1;
			}

			g_wBlockTableIndex = wTempBlockTableIndex;
			g_wBlockTableOffset = 0;
			pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
#if CMD_DMA
			/* Journal the new offset and block index ... */
			p_BTableChangesDelta =
				(struct BTableChangesDelta *)g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				    ftl_cmd_cnt;
			p_BTableChangesDelta->g_wBlockTableOffset =
				    g_wBlockTableOffset;
			p_BTableChangesDelta->g_wBlockTableIndex =
				    g_wBlockTableIndex;
			p_BTableChangesDelta->ValidFields = 0x03;

			/* ... and the updated table entry for LBA 0 */
			p_BTableChangesDelta =
				(struct BTableChangesDelta *)g_pBTDelta_Free;
			g_pBTDelta_Free +=
				    sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				    ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index =
				    BLOCK_TABLE_INDEX;
			p_BTableChangesDelta->BT_Entry_Value =
				    pbt[BLOCK_TABLE_INDEX];
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
		}

		wSuccess = FTL_Write_Block_Table_Data();
		if (FAIL == wSuccess)
			MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
	} while (FAIL == wSuccess);

	g_cBlockTableStatus = CURRENT_BLOCK_TABLE;

	return 1;
}
  2056. /******************************************************************
 * Function: FTL_Format_Flash
  2058. * Inputs: none
  2059. * Outputs: PASS
  2060. * Description: The block table stores bad block info, including MDF+
  2061. * blocks gone bad over the ages. Therefore, if we have a
  2062. * block table in place, then use it to scan for bad blocks
  2063. * If not, then scan for MDF.
  2064. * Now, a block table will only be found if spectra was already
  2065. * being used. For a fresh flash, we'll go thru scanning for
  2066. * MDF. If spectra was being used, then there is a chance that
  2067. * the MDF has been corrupted. Spectra avoids writing to the
  2068. * first 2 bytes of the spare area to all pages in a block. This
  2069. * covers all known flash devices. However, since flash
  2070. * manufacturers have no standard of where the MDF is stored,
  2071. * this cannot guarantee that the MDF is protected for future
  2072. * devices too. The initial scanning for the block table assures
  2073. * this. It is ok even if the block table is outdated, as all
  2074. * we're looking for are bad block markers.
  2075. * Use this when mounting a file system or starting a
  2076. * new flash.
  2077. *
  2078. *********************************************************************/
/*
 * Format the Spectra partition of the NAND.
 *
 * @valid_block_table: non-zero when the caller believes the in-RAM
 *                     block table holds usable bad-block information.
 *
 * Phases:
 *  1. Sanity-check (and permutation-sort) the existing block table so
 *     entry i maps to physical block i; if that fails, rebuild bad-block
 *     info by scanning the device markers instead.
 *  2. Erase every non-bad block, marking new bad blocks as erase fails.
 *  3. Pick the first good block to host the block table, write it out,
 *     and reset the L1 cache entries.
 *
 * Returns PASS, or FAIL when every block is bad (or, under CMD_DMA,
 * when commands are still pending).
 */
static int FTL_Format_Flash(u8 valid_block_table)
{
	u32 i, j;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 tempNode;
	int ret;

#if CMD_DMA
	u32 *pbtStartingCopy = (u32 *)g_pBTStartingCopy;
	/* Refuse to format while CDMA commands are outstanding */
	if (ftl_cmd_cnt)
		return FAIL;
#endif

	if (FAIL == FTL_Check_Block_Table(FAIL))
		valid_block_table = 0;

	if (valid_block_table) {
		u8 switched = 1;
		u32 block, k;

		/*
		 * Permutation sort: swap entries until pbt[i] refers to
		 * physical block (wSpectraStartBlock + i). k bounds the
		 * number of passes; if it is exhausted while swaps are
		 * still happening the table is inconsistent.
		 */
		k = DeviceInfo.wSpectraStartBlock;
		while (switched && (k < DeviceInfo.wSpectraEndBlock)) {
			switched = 0;
			k++;
			for (j = DeviceInfo.wSpectraStartBlock, i = 0;
			j <= DeviceInfo.wSpectraEndBlock;
			j++, i++) {
				block = (pbt[i] & ~BAD_BLOCK) -
					DeviceInfo.wSpectraStartBlock;
				if (block != i) {
					switched = 1;
					tempNode = pbt[i];
					pbt[i] = pbt[block];
					pbt[block] = tempNode;
				}
			}
		}
		if ((k == DeviceInfo.wSpectraEndBlock) && switched)
			valid_block_table = 0;
	}

	if (!valid_block_table) {
		/* Start from scratch: clear tables, rescan factory markers */
		memset(g_pBlockTable, 0,
			DeviceInfo.wDataBlockNum * sizeof(u32));
		memset(g_pWearCounter, 0,
			DeviceInfo.wDataBlockNum * sizeof(u8));
		if (DeviceInfo.MLCDevice)
			memset(g_pReadCounter, 0,
				DeviceInfo.wDataBlockNum * sizeof(u16));
#if CMD_DMA
		memset(g_pBTStartingCopy, 0,
			DeviceInfo.wDataBlockNum * sizeof(u32));
		memset(g_pWearCounterCopy, 0,
			DeviceInfo.wDataBlockNum * sizeof(u8));
		if (DeviceInfo.MLCDevice)
			memset(g_pReadCounterCopy, 0,
				DeviceInfo.wDataBlockNum * sizeof(u16));
#endif
		for (j = DeviceInfo.wSpectraStartBlock, i = 0;
			j <= DeviceInfo.wSpectraEndBlock;
			j++, i++) {
			if (GLOB_LLD_Get_Bad_Block((u32)j))
				pbt[i] = (u32)(BAD_BLOCK | j);
		}
	}

	nand_dbg_print(NAND_DBG_WARN, "Erasing all blocks in the NAND\n");

	/* Erase every good block; demote erase failures to bad blocks */
	for (j = DeviceInfo.wSpectraStartBlock, i = 0;
		j <= DeviceInfo.wSpectraEndBlock;
		j++, i++) {
		if ((pbt[i] & BAD_BLOCK) != BAD_BLOCK) {
			ret = GLOB_LLD_Erase_Block(j);
			if (FAIL == ret) {
				pbt[i] = (u32)(j);
				MARK_BLOCK_AS_BAD(pbt[i]);
				nand_dbg_print(NAND_DBG_WARN,
			       "NAND Program fail in %s, Line %d, "
			       "Function: %s, new Bad Block %d generated!\n",
			       __FILE__, __LINE__, __func__, (int)j);
			} else {
				pbt[i] = (u32)(SPARE_BLOCK | j);
			}
		}
#if CMD_DMA
		pbtStartingCopy[i] = pbt[i];
#endif
	}

	g_wBlockTableOffset = 0;

	/* Find the first good block to host the block table */
	for (i = 0; (i <= (DeviceInfo.wSpectraEndBlock -
			DeviceInfo.wSpectraStartBlock))
			&& ((pbt[i] & BAD_BLOCK) == BAD_BLOCK); i++)
		;
	if (i > (DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock)) {
		printk(KERN_ERR "All blocks bad!\n");
		return FAIL;
	} else {
		g_wBlockTableIndex = pbt[i] & ~BAD_BLOCK;
		/* Move that block into the reserved LBA-0 slot */
		if (i != BLOCK_TABLE_INDEX) {
			tempNode = pbt[i];
			pbt[i] = pbt[BLOCK_TABLE_INDEX];
			pbt[BLOCK_TABLE_INDEX] = tempNode;
		}
	}

	/* The block-table block is in use, not spare */
	pbt[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);

#if CMD_DMA
	pbtStartingCopy[BLOCK_TABLE_INDEX] &= (~SPARE_BLOCK);
#endif

	g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
	memset(g_pBTBlocks, 0xFF,
			(1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32));
	g_pBTBlocks[FIRST_BT_ID-FIRST_BT_ID] = g_wBlockTableIndex;
	FTL_Write_Block_Table(FAIL);

	/* Invalidate all L1 cache entries */
	for (i = 0; i < CACHE_ITEM_NUM; i++) {
		Cache.array[i].address = NAND_CACHE_INIT_ADDR;
		Cache.array[i].use_cnt = 0;
		Cache.array[i].changed  = CLEAR;
	}

#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
	memcpy((void *)&cache_start_copy, (void *)&Cache,
		sizeof(struct flash_cache_tag));
#endif

	return PASS;
}
/*
 * Unconditionally erase every block in the unprotected Spectra
 * partition, ignoring the block table and bad-block markers.
 *
 * Erase failures are logged and skipped. This routine deliberately does
 * NOT return: it spins forever afterwards, because the on-flash state
 * is now inconsistent and the user is told to reboot.
 */
static int force_format_nand(void)
{
	u32 i;

	/* Force erase the whole unprotected physical partition of NAND */
	printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
	printk(KERN_ALERT "From phyical block %d to %d\n",
		DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
	for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
		if (GLOB_LLD_Erase_Block(i))
			printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
	}
	printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
	/* Intentional: never return; operator must power-cycle/reboot */
	while(1);

	return PASS;
}
  2211. int GLOB_FTL_Flash_Format(void)
  2212. {
  2213. //return FTL_Format_Flash(1);
  2214. return force_format_nand();
  2215. }
  2216. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  2217. * Function: FTL_Search_Block_Table_IN_Block
  2218. * Inputs: Block Number
  2219. * Pointer to page
  2220. * Outputs: PASS / FAIL
 * Page containing the block table
  2222. * Description: It searches the block table in the block
  2223. * passed as an argument.
  2224. *
  2225. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Search one candidate block for the most recent block-table copy.
 *
 * @BT_Block: physical block to search.
 * @BT_Tag:   table generation tag expected in the spare-area tag array.
 * @Page:     out parameter; first page of the table copy when found.
 *
 * Layout assumed: copies of the table (bt_pages pages each) are written
 * back-to-back, each followed by one IPF (in-progress-flag) marker page.
 * The routine walks the block in (bt_pages + 1) strides locating the
 * last IPF, then verifies the tag in the spare area of both the first
 * and last page of the candidate copy — after the IPF first, then the
 * copy before it. g_cBlockTableStatus is set to CURRENT_BLOCK_TABLE or
 * IN_PROGRESS_BLOCK_TABLE depending on which copy was validated.
 *
 * Returns PASS with *Page set on success, FAIL otherwise.
 */
static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
						u8 BT_Tag, u16 *Page)
{
	u16 i, j, k;
	u16 Result = PASS;
	u16 Last_IPF = 0;
	u8  BT_Found = 0;
	u8 *tagarray;
	u8 *tempbuf = tmp_buf_search_bt_in_block;
	u8 *pSpareBuf = spare_buf_search_bt_in_block;
	u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
	u8 bt_flag_last_page = 0xFF;
	u8 search_in_previous_pages = 0;
	u16 bt_pages;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	nand_dbg_print(NAND_DBG_DEBUG,
		"Searching block table in %u block\n",
		(unsigned int)BT_Block);

	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	/* Stride over IPF marker pages: one every (bt_pages + 1) pages */
	for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
				i += (bt_pages + 1)) {
		nand_dbg_print(NAND_DBG_DEBUG,
			"Searching last IPF: %d\n", i);
		Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
							BT_Block, i, 1);

		if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
			if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
				/* More room after this IPF: keep scanning */
				continue;
			} else {
				/* Block full: the valid copy precedes this IPF */
				search_in_previous_pages = 1;
				Last_IPF = i;
			}
		}

		if (!search_in_previous_pages) {
			if (i != bt_pages) {
				/* Current page is not an IPF: last IPF was one stride back */
				i -= (bt_pages + 1);
				Last_IPF = i;
			}
		}

		/* No IPF seen at all: handled by the Last_IPF == 0 path below */
		if (0 == Last_IPF)
			break;

		if (!search_in_previous_pages) {
			/* Validate the copy written AFTER the last IPF */
			i = i + 1;
			nand_dbg_print(NAND_DBG_DEBUG,
				"Reading the spare area of Block %u Page %u",
				(unsigned int)BT_Block, i);
			Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
							BT_Block, i, 1);
			nand_dbg_print(NAND_DBG_DEBUG,
				"Reading the spare area of Block %u Page %u",
				(unsigned int)BT_Block, i + bt_pages - 1);
			Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
				BT_Block, i + bt_pages - 1, 1);

			/* Match BT_Tag in the first page's spare tag array */
			k = 0;
			j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
			if (j) {
				for (; k < j; k++) {
					if (tagarray[k] == BT_Tag)
						break;
				}
			}

			if (k < j)
				bt_flag = tagarray[k];
			else
				Result = FAIL;

			if (Result == PASS) {
				/* And in the last page's spare tag array */
				k = 0;
				j = FTL_Extract_Block_Table_Tag(
					pSpareBufBTLastPage, &tagarray);
				if (j) {
					for (; k < j; k++) {
						if (tagarray[k] == BT_Tag)
							break;
					}
				}

				if (k < j)
					bt_flag_last_page = tagarray[k];
				else
					Result = FAIL;

				if (Result == PASS) {
					/* Tags agree on both ends: copy is complete */
					if (bt_flag == bt_flag_last_page) {
						nand_dbg_print(NAND_DBG_DEBUG,
							"Block table is found"
							" in page after IPF "
							"at block %d "
							"page %d\n",
							(int)BT_Block, i);
						BT_Found = 1;
						*Page  = i;
						g_cBlockTableStatus =
							CURRENT_BLOCK_TABLE;
						break;
					} else {
						Result = FAIL;
					}
				}
			}
		}

		/* Fall back to the copy BEFORE the last IPF */
		if (search_in_previous_pages)
			i = i - bt_pages;
		else
			i = i - (bt_pages + 1);

		Result = PASS;

		nand_dbg_print(NAND_DBG_DEBUG,
			"Reading the spare area of Block %d Page %d",
			(int)BT_Block, i);

		Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
		nand_dbg_print(NAND_DBG_DEBUG,
			"Reading the spare area of Block %u Page %u",
			(unsigned int)BT_Block, i + bt_pages - 1);

		Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
					BT_Block, i + bt_pages - 1, 1);

		k = 0;
		j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
		if (j) {
			for (; k < j; k++) {
				if (tagarray[k] == BT_Tag)
					break;
			}
		}

		if (k < j)
			bt_flag = tagarray[k];
		else
			Result = FAIL;

		if (Result == PASS) {
			k = 0;
			j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
						&tagarray);
			if (j) {
				for (; k < j; k++) {
					if (tagarray[k] == BT_Tag)
						break;
				}
			}

			if (k < j) {
				bt_flag_last_page = tagarray[k];
			} else {
				Result = FAIL;
				break;
			}

			if (Result == PASS) {
				if (bt_flag == bt_flag_last_page) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"Block table is found "
						"in page prior to IPF "
						"at block %u page %d\n",
						(unsigned int)BT_Block, i);
					BT_Found = 1;
					*Page  = i;
					/* Copy before an IPF => a newer write was interrupted */
					g_cBlockTableStatus =
						IN_PROGRESS_BLOCK_TABLE;
					break;
				} else {
					Result = FAIL;
					break;
				}
			}
		}
	}

	if (Result == FAIL) {
		/* Tag checks failed: fall back one more stride if possible */
		if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
			BT_Found = 1;
			*Page = i - (bt_pages + 1);
		}
		if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
			goto func_return;
	}

	if (Last_IPF == 0) {
		/* No IPF in the whole block: try the copy at page 0 */
		i = 0;
		Result = PASS;
		nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
			"Block %u Page %u", (unsigned int)BT_Block, i);

		Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
		nand_dbg_print(NAND_DBG_DEBUG,
			"Reading the spare area of Block %u Page %u",
			(unsigned int)BT_Block, i + bt_pages - 1);
		Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
					BT_Block, i + bt_pages - 1, 1);

		k = 0;
		j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
		if (j) {
			for (; k < j; k++) {
				if (tagarray[k] == BT_Tag)
					break;
			}
		}

		if (k < j)
			bt_flag = tagarray[k];
		else
			Result = FAIL;

		if (Result == PASS) {
			k = 0;
			j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
						&tagarray);
			if (j) {
				for (; k < j; k++) {
					if (tagarray[k] == BT_Tag)
						break;
				}
			}

			if (k < j)
				bt_flag_last_page = tagarray[k];
			else
				Result = FAIL;

			if (Result == PASS) {
				if (bt_flag == bt_flag_last_page) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"Block table is found "
						"in page after IPF at "
						"block %u page %u\n",
						(unsigned int)BT_Block,
						(unsigned int)i);
					BT_Found = 1;
					*Page = i;
					g_cBlockTableStatus =
						CURRENT_BLOCK_TABLE;
					goto func_return;
				} else {
					Result = FAIL;
				}
			}
		}

		if (Result == FAIL)
			goto func_return;
	}
func_return:
	return Result;
}
/* Expose the base address of the in-RAM block table (for debug/ioctl use). */
u8 *get_blk_table_start_addr(void)
{
	return g_pBlockTable;
}
/* Size in bytes of the block table: one u32 entry per data block. */
unsigned long get_blk_table_len(void)
{
	return DeviceInfo.wDataBlockNum * sizeof(u32);
}
/* Expose the base address of the wear counter table (for debug/ioctl use). */
u8 *get_wear_leveling_table_start_addr(void)
{
	return g_pWearCounter;
}
/* Size in bytes of the wear counter table: one u8 counter per data block. */
unsigned long get_wear_leveling_table_len(void)
{
	return DeviceInfo.wDataBlockNum * sizeof(u8);
}
  2471. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  2472. * Function: FTL_Read_Block_Table
  2473. * Inputs: none
  2474. * Outputs: PASS / FAIL
  2475. * Description: read the flash spare area and find a block containing the
  2476. * most recent block table(having largest block_table_counter).
  2477. * Find the last written Block table in this block.
  2478. * Check the correctness of Block Table
  2479. * If CDMA is enabled, this function is called in
  2480. * polling mode.
  2481. * We don't need to store changes in Block table in this
  2482. * function as it is called only at initialization
  2483. *
  2484. * Note: Currently this function is called at initialization
  2485. * before any read/erase/write command issued to flash so,
  2486. * there is no need to wait for CDMA list to complete as of now
  2487. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Locate and load the most recent block table from flash at init time.
 *
 * Pass 1: scan every block's page-0 spare area for block-table tags and
 * record each tagged block in g_pBTBlocks[] by its generation ID.
 * Pass 2: walk the generations from newest to oldest (handling ID
 * wrap-around via last_erased) and, for each candidate, have
 * FTL_Search_Block_Table_IN_Block() find a validated copy, then read
 * that copy page by page into the in-RAM table and verify it with
 * FTL_Check_Block_Table(). On a bad copy, step back to the previous
 * copy in the same block and retry.
 *
 * Runs in polling mode; no BT delta journaling is needed because no
 * other flash commands have been issued yet.
 *
 * Returns PASS when a valid table was loaded, FAIL otherwise (with
 * optional auto-format fallback).
 */
static int FTL_Read_Block_Table(void)
{
	u16 i = 0;
	int k, j;
	u8 *tempBuf, *tagarray;
	int wResult = FAIL;
	int status = FAIL;
	u8 block_table_found = 0;
	int search_result;
	u32 Block;
	u16 Page = 0;
	u16 PageCount;
	u16 bt_pages;
	int wBytesCopied = 0, tempvar;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	tempBuf = tmp_buf1_read_blk_table;
	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	/* Pass 1: find every block whose spare area carries a BT tag */
	for (j = DeviceInfo.wSpectraStartBlock;
		j <= (int)DeviceInfo.wSpectraEndBlock;
			j++) {
		status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
		k = 0;
		i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
		if (i) {
			/* Confirm against the tag byte stored in the main area */
			status  = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
								j, 0, 1);
			for (; k < i; k++) {
				if (tagarray[k] == tempBuf[3])
					break;
			}
		}

		if (k < i)
			k = tagarray[k];
		else
			continue;

		nand_dbg_print(NAND_DBG_DEBUG,
				"Block table is contained in Block %d %d\n",
				(unsigned int)j, (unsigned int)k);

		if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
			g_pBTBlocks[k-FIRST_BT_ID] = j;
			block_table_found = 1;
		} else {
			printk(KERN_ERR "FTL_Read_Block_Table -"
				"This should never happens. "
				"Two block table have same counter %u!\n", k);
		}
	}

	if (block_table_found) {
		/*
		 * Determine the newest generation j and the oldest live one
		 * (last_erased), accounting for generation-ID wrap-around
		 * when both the first and last IDs are present.
		 */
		if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
		g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
			j = LAST_BT_ID;
			while ((j > FIRST_BT_ID) &&
			(g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
				j--;
			if (j == FIRST_BT_ID) {
				j = LAST_BT_ID;
				last_erased = LAST_BT_ID;
			} else {
				last_erased = (u8)j + 1;
				while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
					g_pBTBlocks[j - FIRST_BT_ID]))
					j--;
			}
		} else {
			j = FIRST_BT_ID;
			while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
				j++;
			last_erased = (u8)j;
			while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
				g_pBTBlocks[j - FIRST_BT_ID]))
				j++;
			if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
				j--;
		}

		if (last_erased > j)
			j += (1 + LAST_BT_ID - FIRST_BT_ID);

		/* Pass 2: newest generation first, fall back on failure */
		for (; (j >= last_erased) && (FAIL == wResult); j--) {
			i = (j - FIRST_BT_ID) %
				(1 + LAST_BT_ID - FIRST_BT_ID);
			search_result =
			FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
						i + FIRST_BT_ID, &Page);
			if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
				block_table_found = 0;

			while ((search_result == PASS) && (FAIL == wResult)) {
				nand_dbg_print(NAND_DBG_DEBUG,
					"FTL_Read_Block_Table:"
					"Block: %u Page: %u "
					"contains block table\n",
					(unsigned int)g_pBTBlocks[i],
					(unsigned int)Page);

				tempBuf = tmp_buf2_read_blk_table;

				/* Copy the table copy page by page into RAM;
				 * the first page carries a 4-byte header */
				for (k = 0; k < bt_pages; k++) {
					Block = g_pBTBlocks[i];
					PageCount = 1;

					status  =
					GLOB_LLD_Read_Page_Main_Polling(
					tempBuf, Block, Page, PageCount);

					tempvar = k ? 0 : 4;

					wBytesCopied +=
					FTL_Copy_Block_Table_From_Flash(
					tempBuf + tempvar,
					DeviceInfo.wPageDataSize - tempvar,
					wBytesCopied);

					Page++;
				}

				wResult = FTL_Check_Block_Table(FAIL);
				if (FAIL == wResult) {
					block_table_found = 0;
					/* Step back to the previous copy in this block */
					if (Page > bt_pages)
						Page -= ((bt_pages<<1) + 1);
					else
						search_result = FAIL;
				}
			}
		}
	}

	if (PASS == wResult) {
		if (!block_table_found)
			FTL_Execute_SPL_Recovery();

		if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
			g_wBlockTableOffset = (u16)Page + 1;
		else
			g_wBlockTableOffset = (u16)Page - bt_pages;

		/* NOTE(review): i holds the generation slot from the last
		 * loop iteration — relies on the loops above having run */
		g_wBlockTableIndex = (u32)g_pBTBlocks[i];

#if CMD_DMA
		if (DeviceInfo.MLCDevice)
			memcpy(g_pBTStartingCopy, g_pBlockTable,
				DeviceInfo.wDataBlockNum * sizeof(u32)
				+ DeviceInfo.wDataBlockNum * sizeof(u8)
				+ DeviceInfo.wDataBlockNum * sizeof(u16));
		else
			memcpy(g_pBTStartingCopy, g_pBlockTable,
				DeviceInfo.wDataBlockNum * sizeof(u32)
				+ DeviceInfo.wDataBlockNum * sizeof(u8));
#endif
	}

	if (FAIL == wResult)
		printk(KERN_ERR "Yunpeng - "
		"Can not find valid spectra block table!\n");

#if AUTO_FORMAT_FLASH
	if (FAIL == wResult) {
		nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
		wResult = FTL_Format_Flash(0);
	}
#endif

	return wResult;
}
  2637. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  2638. * Function: FTL_Flash_Error_Handle
  2639. * Inputs: Pointer to data
  2640. * Page address
  2641. * Block address
  2642. * Outputs: PASS=0 / FAIL=1
 * Description: It handles any error that occurred during Spectra operation
  2644. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Recover from a flash program/erase error on the block at @blk_addr.
 *
 * @pData:         data that failed to be written.
 * @old_page_addr: logical page address of the failed write.
 * @blk_addr:      logical address of the failing block.
 *
 * Strategy: garbage-collect, then swap the failing block-table entry
 * with a spare block (marking the old one bad), and retry writing the
 * cached block to the new physical block up to RETRY_TIMES, erasing
 * between attempts. Repeats with fresh spares until a write succeeds
 * or no spares remain. Finally flushes the modified block table.
 *
 * Returns PASS on successful recovery, FAIL/ERR otherwise. Under
 * CMD_DMA, both swapped table entries are journaled as deltas.
 */
static int FTL_Flash_Error_Handle(u8 *pData, u64 old_page_addr,
				u64 blk_addr)
{
	u32 i;
	int j;
	u32 tmp_node, blk_node = BLK_FROM_ADDR(blk_addr);
	u64 phy_addr;
	int wErase = FAIL;
	int wResult = FAIL;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (ERR == GLOB_FTL_Garbage_Collection())
		return ERR;

	do {
		/* Find a spare block, scanning from the top down */
		for (i = DeviceInfo.wSpectraEndBlock -
			DeviceInfo.wSpectraStartBlock;
					i > 0; i--) {
			if (IS_SPARE_BLOCK(i)) {
				/* Swap: bad physical block moves to slot i
				 * (marked BAD), spare takes over blk_node */
				tmp_node = (u32)(BAD_BLOCK |
					pbt[blk_node]);
				pbt[blk_node] = (u32)(pbt[i] &
					(~SPARE_BLOCK));
				pbt[i] = tmp_node;
#if CMD_DMA
				p_BTableChangesDelta =
				    (struct BTableChangesDelta *)
				    g_pBTDelta_Free;
				g_pBTDelta_Free +=
				    sizeof(struct BTableChangesDelta);

				p_BTableChangesDelta->ftl_cmd_cnt =
				    ftl_cmd_cnt;
				p_BTableChangesDelta->BT_Index =
				    blk_node;
				p_BTableChangesDelta->BT_Entry_Value =
				    pbt[blk_node];
				p_BTableChangesDelta->ValidFields = 0x0C;

				p_BTableChangesDelta =
				    (struct BTableChangesDelta *)
				    g_pBTDelta_Free;
				g_pBTDelta_Free +=
				    sizeof(struct BTableChangesDelta);

				p_BTableChangesDelta->ftl_cmd_cnt =
				    ftl_cmd_cnt;
				p_BTableChangesDelta->BT_Index = i;
				p_BTableChangesDelta->BT_Entry_Value = pbt[i];
				p_BTableChangesDelta->ValidFields = 0x0C;
#endif
				wResult = PASS;
				break;
			}
		}

		if (FAIL == wResult) {
			/* No spare found: try to free one via GC */
			if (FAIL == GLOB_FTL_Garbage_Collection())
				break;
			else
				continue;
		}

		if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}

		phy_addr = FTL_Get_Physical_Block_Addr(blk_addr);

		/* Retry the write; erase first on every attempt after a failure */
		for (j = 0; j < RETRY_TIMES; j++) {
			if (PASS == wErase) {
				if (FAIL == GLOB_FTL_Block_Erase(phy_addr)) {
					MARK_BLOCK_AS_BAD(pbt[blk_node]);
					break;
				}
			}
			if (PASS == FTL_Cache_Update_Block(pData,
							old_page_addr,
							phy_addr)) {
				wResult = PASS;
				break;
			} else {
				wResult = FAIL;
				wErase = PASS;
			}
		}
	} while (FAIL == wResult);

	FTL_Write_Block_Table(FAIL);

	return wResult;
}
  2729. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  2730. * Function: FTL_Get_Page_Num
  2731. * Inputs: Size in bytes
  2732. * Outputs: Size in pages
  2733. * Description: It calculates the pages required for the length passed
  2734. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  2735. static u32 FTL_Get_Page_Num(u64 length)
  2736. {
  2737. return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
  2738. (GLOB_u64_Remainder(length , 1) > 0 ? 1 : 0));
  2739. }
  2740. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  2741. * Function: FTL_Get_Physical_Block_Addr
  2742. * Inputs: Block Address (byte format)
  2743. * Outputs: Physical address of the block.
  2744. * Description: It translates LBA to PBA by returning address stored
  2745. * at the LBA location in the block table
  2746. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  2747. static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
  2748. {
  2749. u32 *pbt;
  2750. u64 physical_addr;
  2751. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  2752. __FILE__, __LINE__, __func__);
  2753. pbt = (u32 *)g_pBlockTable;
  2754. physical_addr = (u64) DeviceInfo.wBlockDataSize *
  2755. (pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));
  2756. return physical_addr;
  2757. }
  2758. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  2759. * Function: FTL_Get_Block_Index
  2760. * Inputs: Physical Block no.
  2761. * Outputs: Logical block no. /BAD_BLOCK
  2762. * Description: It returns the logical block no. for the PBA passed
  2763. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  2764. static u32 FTL_Get_Block_Index(u32 wBlockNum)
  2765. {
  2766. u32 *pbt = (u32 *)g_pBlockTable;
  2767. u32 i;
  2768. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  2769. __FILE__, __LINE__, __func__);
  2770. for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
  2771. if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
  2772. return i;
  2773. return BAD_BLOCK;
  2774. }
  2775. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  2776. * Function: GLOB_FTL_Wear_Leveling
  2777. * Inputs: none
  2778. * Outputs: PASS=0
  2779. * Description: This is static wear leveling (done by explicit call)
  2780. * do complete static wear leveling
  2781. * do complete garbage collection
  2782. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Explicit wear-leveling entry point: run a full static wear-leveling
 * pass, then a full garbage collection to clean up the discarded
 * blocks it produced. Always returns PASS.
 */
int GLOB_FTL_Wear_Leveling(void)
{
	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	FTL_Static_Wear_Leveling();
	GLOB_FTL_Garbage_Collection();

	return PASS;
}
/*
 * Scan the block table for the least-worn data block and the most-worn
 * spare block (by wear counter), skipping bad blocks and blocks already
 * processed this round (chg[] == PASS).
 *
 * @chg:       per-slot "already handled" flags for this leveling round.
 * @least_idx/@least_cnt: out; least-worn data block and its counter.
 * @most_idx/@most_cnt:   out; most-worn spare block and its counter.
 *
 * NOTE(review): idx is a physical block number while chg[] is elsewhere
 * indexed by table slot — the chg[*most_idx]/chg[*least_idx] test mixes
 * the two spaces; confirm against callers. The trailing `continue` is
 * also a no-op since it sits at the end of the loop body.
 */
static void find_least_most_worn(u8 *chg,
		u32 *least_idx, u8 *least_cnt,
		u32 *most_idx, u8 *most_cnt)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 idx;
	u8 cnt;
	int i;

	for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
		if (IS_BAD_BLOCK(i) || PASS == chg[i])
			continue;

		idx = (u32) ((~BAD_BLOCK) & pbt[i]);
		cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];

		if (IS_SPARE_BLOCK(i)) {
			if (cnt > *most_cnt) {
				*most_cnt = cnt;
				*most_idx = idx;
			}
		}

		if (IS_DATA_BLOCK(i)) {
			if (cnt < *least_cnt) {
				*least_cnt = cnt;
				*least_idx = idx;
			}
		}

		if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
			debug_boundary_error(*most_idx,
				DeviceInfo.wDataBlockNum, 0);
			debug_boundary_error(*least_idx,
				DeviceInfo.wDataBlockNum, 0);
			continue;
		}
	}
}
/*
 * Move the least-worn data block's contents into a replacement spare
 * block as one step of static wear leveling.
 *
 * @chg:         per-slot "already handled" flags; updated for both blocks.
 * @least_idx:   least-worn block chosen by find_least_most_worn().
 * @rep_blk_num: running count of blocks replaced this round; once it
 *               exceeds WEAR_LEVELING_BLOCK_NUM the round is stopped.
 * @result:      out; set to FAIL when the copy ultimately failed.
 *
 * Copies the block with up to RETRY_TIMES attempts (erasing the target
 * between attempts), then swaps the two block-table entries, marking
 * the old one DISCARD. On exhaustion the replacement block is marked
 * bad. Under CMD_DMA each table change is journaled as a delta.
 *
 * Returns PASS to continue the leveling loop, FAIL to stop it (copy
 * failure, block budget exhausted, or fewer than 3 spares remaining).
 */
static int move_blks_for_wear_leveling(u8 *chg,
		u32 *least_idx, u32 *rep_blk_num, int *result)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 rep_blk;
	int j, ret_cp_blk, ret_erase;
	int ret = PASS;

	chg[*least_idx] = PASS;
	debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);

	rep_blk = FTL_Replace_MWBlock();
	if (rep_blk != BAD_BLOCK) {
		nand_dbg_print(NAND_DBG_DEBUG,
			"More than two spare blocks exist so do it\n");
		nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
				rep_blk);

		chg[rep_blk] = PASS;

		if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}

		/* Copy the worn block's data into the replacement block */
		for (j = 0; j < RETRY_TIMES; j++) {
			ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
				DeviceInfo.wBlockDataSize,
				(u64)rep_blk * DeviceInfo.wBlockDataSize);
			if (FAIL == ret_cp_blk) {
				ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
					* DeviceInfo.wBlockDataSize);
				if (FAIL == ret_erase)
					MARK_BLOCK_AS_BAD(pbt[rep_blk]);
			} else {
				nand_dbg_print(NAND_DBG_DEBUG,
					"FTL_Copy_Block == OK\n");
				break;
			}
		}

		if (j < RETRY_TIMES) {
			/* Copy succeeded: swap the two table entries,
			 * discarding the old block */
			u32 tmp;
			u32 old_idx = FTL_Get_Block_Index(*least_idx);
			u32 rep_idx = FTL_Get_Block_Index(rep_blk);
			tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
			pbt[old_idx] = (u32)((~SPARE_BLOCK) &
							pbt[rep_idx]);
			pbt[rep_idx] = tmp;
#if CMD_DMA
			p_BTableChangesDelta = (struct BTableChangesDelta *)
						g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = old_idx;
			p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
			p_BTableChangesDelta->ValidFields = 0x0C;

			p_BTableChangesDelta = (struct BTableChangesDelta *)
						g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = rep_idx;
			p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
		} else {
			/* All retries failed: retire the replacement block */
			pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
#if CMD_DMA
			p_BTableChangesDelta = (struct BTableChangesDelta *)
						g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index =
					FTL_Get_Block_Index(rep_blk);
			p_BTableChangesDelta->BT_Entry_Value =
					pbt[FTL_Get_Block_Index(rep_blk)];
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
			*result = FAIL;
			ret = FAIL;
		}

		if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
			ret = FAIL;
	} else {
		printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
		ret = FAIL;
	}

	return ret;
}
  2911. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  2912. * Function: FTL_Static_Wear_Leveling
  2913. * Inputs: none
  2914. * Outputs: PASS=0 / FAIL=1
  2915. * Description: This is static wear leveling (done by explicit call)
  2916. * search for most&least used
  2917. * if difference < GATE:
* update the block table with exchange
  2919. * mark block table in flash as IN_PROGRESS
  2920. * copy flash block
  2921. * the caller should handle GC clean up after calling this function
  2922. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  2923. int FTL_Static_Wear_Leveling(void)
  2924. {
  2925. u8 most_worn_cnt;
  2926. u8 least_worn_cnt;
  2927. u32 most_worn_idx;
  2928. u32 least_worn_idx;
  2929. int result = PASS;
  2930. int go_on = PASS;
  2931. u32 replaced_blks = 0;
  2932. u8 *chang_flag = flags_static_wear_leveling;
  2933. nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
  2934. __FILE__, __LINE__, __func__);
  2935. if (!chang_flag)
  2936. return FAIL;
  2937. memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
  2938. while (go_on == PASS) {
  2939. nand_dbg_print(NAND_DBG_DEBUG,
  2940. "starting static wear leveling\n");
  2941. most_worn_cnt = 0;
  2942. least_worn_cnt = 0xFF;
  2943. least_worn_idx = BLOCK_TABLE_INDEX;
  2944. most_worn_idx = BLOCK_TABLE_INDEX;
  2945. find_least_most_worn(chang_flag, &least_worn_idx,
  2946. &least_worn_cnt, &most_worn_idx, &most_worn_cnt);
  2947. nand_dbg_print(NAND_DBG_DEBUG,
  2948. "Used and least worn is block %u, whos count is %u\n",
  2949. (unsigned int)least_worn_idx,
  2950. (unsigned int)least_worn_cnt);
  2951. nand_dbg_print(NAND_DBG_DEBUG,
  2952. "Free and most worn is block %u, whos count is %u\n",
  2953. (unsigned int)most_worn_idx,
  2954. (unsigned int)most_worn_cnt);
  2955. if ((most_worn_cnt > least_worn_cnt) &&
  2956. (most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
  2957. go_on = move_blks_for_wear_leveling(chang_flag,
  2958. &least_worn_idx, &replaced_blks, &result);
  2959. else
  2960. go_on = FAIL;
  2961. }
  2962. return result;
  2963. }
  2964. #if CMD_DMA
/*
 * CDMA build: erase blocks flagged DISCARD_BLOCK in the block table and
 * return them to the spare pool. Stops early when the CDMA command budget
 * (ftl_cmd_cnt + 28 headroom against the 256-descriptor chain) is reached.
 * Returns PASS if at least one block was reclaimed, FAIL otherwise.
 */
static int do_garbage_collection(u32 discard_cnt)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 pba;
	u8 bt_block_erased = 0;
	int i, cnt, ret = FAIL;
	u64 addr;

	i = 0;
	while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
			((ftl_cmd_cnt + 28) < 256)) {
		/* Only blocks marked discarded but not bad are candidates */
		if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
				(pbt[i] & DISCARD_BLOCK)) {
			if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
				FTL_Write_IN_Progress_Block_Table_Page();
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);
			pba = BLK_FROM_ADDR(addr);

			/*
			 * Blocks currently holding a block-table copy are
			 * left to BT garbage collection; skip them here.
			 */
			for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
				if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"GC will erase BT block %u\n",
						(unsigned int)pba);
					discard_cnt--;
					i++;
					bt_block_erased = 1;
					break;
				}
			}
			if (bt_block_erased) {
				bt_block_erased = 0;
				continue;
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);

			if (PASS == GLOB_FTL_Block_Erase(addr)) {
				/* Reclaimed: discarded -> spare */
				pbt[i] &= (u32)(~DISCARD_BLOCK);
				pbt[i] |= (u32)(SPARE_BLOCK);
				/* Record the table change for CDMA replay */
				p_BTableChangesDelta =
					(struct BTableChangesDelta *)
					g_pBTDelta_Free;
				g_pBTDelta_Free +=
					sizeof(struct BTableChangesDelta);
				/*
				 * -1: the delta belongs to the erase command
				 * queued just above, which already bumped
				 * the command counter.
				 */
				p_BTableChangesDelta->ftl_cmd_cnt =
					ftl_cmd_cnt - 1;
				p_BTableChangesDelta->BT_Index = i;
				p_BTableChangesDelta->BT_Entry_Value = pbt[i];
				p_BTableChangesDelta->ValidFields = 0x0C;
				discard_cnt--;
				ret = PASS;
			} else {
				MARK_BLOCK_AS_BAD(pbt[i]);
			}
		}

		i++;
	}

	return ret;
}
  3024. #else
/*
 * Non-CDMA build: erase blocks flagged DISCARD_BLOCK in the block table
 * and return them to the spare pool. Blocks that currently hold a block
 * table copy or an L2-cache block are skipped. Returns PASS if at least
 * one block was reclaimed, FAIL otherwise.
 */
static int do_garbage_collection(u32 discard_cnt)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 pba;
	u8 bt_block_erased = 0;
	int i, cnt, ret = FAIL;
	u64 addr;

	i = 0;
	while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
		/* Only blocks marked discarded but not bad are candidates */
		if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
				(pbt[i] & DISCARD_BLOCK)) {
			if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
				FTL_Write_IN_Progress_Block_Table_Page();
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);
			pba = BLK_FROM_ADDR(addr);

			/*
			 * Blocks currently holding a block-table copy are
			 * left to BT garbage collection; skip them here.
			 */
			for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
				if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"GC will erase BT block %d\n",
						pba);
					discard_cnt--;
					i++;
					bt_block_erased = 1;
					break;
				}
			}
			if (bt_block_erased) {
				bt_block_erased = 0;
				continue;
			}

			/* If the discard block is L2 cache block, then just skip it */
			for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
				if (cache_l2.blk_array[cnt] == pba) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"GC will erase L2 cache blk %d\n",
						pba);
					break;
				}
			}
			/* cnt < limit means the break above fired: match found */
			if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */
				discard_cnt--;
				i++;
				continue;
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);

			if (PASS == GLOB_FTL_Block_Erase(addr)) {
				/* Reclaimed: discarded -> spare */
				pbt[i] &= (u32)(~DISCARD_BLOCK);
				pbt[i] |= (u32)(SPARE_BLOCK);
				discard_cnt--;
				ret = PASS;
			} else {
				MARK_BLOCK_AS_BAD(pbt[i]);
			}
		}

		i++;
	}

	return ret;
}
  3087. #endif
  3088. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3089. * Function: GLOB_FTL_Garbage_Collection
  3090. * Inputs: none
* Outputs: PASS / FAIL (returns the number of un-erased blocks)
  3092. * Description: search the block table for all discarded blocks to erase
  3093. * for each discarded block:
  3094. * set the flash block to IN_PROGRESS
  3095. * erase the block
  3096. * update the block table
  3097. * write the block table to flash
  3098. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_Garbage_Collection(void)
{
	u32 i;
	u32 wDiscard = 0;
	int wResult = FAIL;
	/*
	 * NOTE(review): pbt looks unused, but the IS_DISCARDED_BLOCK()
	 * macro presumably expands to a pbt[] access — confirm against
	 * the macro definition before removing it.
	 */
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Non-reentrant: GC can be triggered again from functions it calls */
	if (GC_Called) {
		printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
			"has been re-entered! Exit.\n");
		return PASS;
	}

	GC_Called = 1;

	/* First reclaim discarded block-table blocks */
	GLOB_FTL_BT_Garbage_Collection();

	/* Count discarded data blocks */
	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		if (IS_DISCARDED_BLOCK(i))
			wDiscard++;
	}

	/* Nothing to do (wDiscard is unsigned, so this is really == 0) */
	if (wDiscard <= 0) {
		GC_Called = 0;
		return wResult;
	}

	nand_dbg_print(NAND_DBG_DEBUG,
		"Found %d discarded blocks\n", wDiscard);

	/* Persist the table before and after reclaiming the blocks */
	FTL_Write_Block_Table(FAIL);

	wResult = do_garbage_collection(wDiscard);

	FTL_Write_Block_Table(FAIL);

	GC_Called = 0;

	return wResult;
}
  3130. #if CMD_DMA
  3131. static int do_bt_garbage_collection(void)
  3132. {
  3133. u32 pba, lba;
  3134. u32 *pbt = (u32 *)g_pBlockTable;
  3135. u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
  3136. u64 addr;
  3137. int i, ret = FAIL;
  3138. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  3139. __FILE__, __LINE__, __func__);
  3140. if (BT_GC_Called)
  3141. return PASS;
  3142. BT_GC_Called = 1;
  3143. for (i = last_erased; (i <= LAST_BT_ID) &&
  3144. (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
  3145. FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
  3146. ((ftl_cmd_cnt + 28)) < 256; i++) {
  3147. pba = pBTBlocksNode[i - FIRST_BT_ID];
  3148. lba = FTL_Get_Block_Index(pba);
  3149. nand_dbg_print(NAND_DBG_DEBUG,
  3150. "do_bt_garbage_collection: pba %d, lba %d\n",
  3151. pba, lba);
  3152. nand_dbg_print(NAND_DBG_DEBUG,
  3153. "Block Table Entry: %d", pbt[lba]);
  3154. if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
  3155. (pbt[lba] & DISCARD_BLOCK)) {
  3156. nand_dbg_print(NAND_DBG_DEBUG,
  3157. "do_bt_garbage_collection_cdma: "
  3158. "Erasing Block tables present in block %d\n",
  3159. pba);
  3160. addr = FTL_Get_Physical_Block_Addr((u64)lba *
  3161. DeviceInfo.wBlockDataSize);
  3162. if (PASS == GLOB_FTL_Block_Erase(addr)) {
  3163. pbt[lba] &= (u32)(~DISCARD_BLOCK);
  3164. pbt[lba] |= (u32)(SPARE_BLOCK);
  3165. p_BTableChangesDelta =
  3166. (struct BTableChangesDelta *)
  3167. g_pBTDelta_Free;
  3168. g_pBTDelta_Free +=
  3169. sizeof(struct BTableChangesDelta);
  3170. p_BTableChangesDelta->ftl_cmd_cnt =
  3171. ftl_cmd_cnt - 1;
  3172. p_BTableChangesDelta->BT_Index = lba;
  3173. p_BTableChangesDelta->BT_Entry_Value =
  3174. pbt[lba];
  3175. p_BTableChangesDelta->ValidFields = 0x0C;
  3176. ret = PASS;
  3177. pBTBlocksNode[last_erased - FIRST_BT_ID] =
  3178. BTBLOCK_INVAL;
  3179. nand_dbg_print(NAND_DBG_DEBUG,
  3180. "resetting bt entry at index %d "
  3181. "value %d\n", i,
  3182. pBTBlocksNode[i - FIRST_BT_ID]);
  3183. if (last_erased == LAST_BT_ID)
  3184. last_erased = FIRST_BT_ID;
  3185. else
  3186. last_erased++;
  3187. } else {
  3188. MARK_BLOCK_AS_BAD(pbt[lba]);
  3189. }
  3190. }
  3191. }
  3192. BT_GC_Called = 0;
  3193. return ret;
  3194. }
  3195. #else
  3196. static int do_bt_garbage_collection(void)
  3197. {
  3198. u32 pba, lba;
  3199. u32 *pbt = (u32 *)g_pBlockTable;
  3200. u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
  3201. u64 addr;
  3202. int i, ret = FAIL;
  3203. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  3204. __FILE__, __LINE__, __func__);
  3205. if (BT_GC_Called)
  3206. return PASS;
  3207. BT_GC_Called = 1;
  3208. for (i = last_erased; (i <= LAST_BT_ID) &&
  3209. (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
  3210. FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
  3211. pba = pBTBlocksNode[i - FIRST_BT_ID];
  3212. lba = FTL_Get_Block_Index(pba);
  3213. nand_dbg_print(NAND_DBG_DEBUG,
  3214. "do_bt_garbage_collection_cdma: pba %d, lba %d\n",
  3215. pba, lba);
  3216. nand_dbg_print(NAND_DBG_DEBUG,
  3217. "Block Table Entry: %d", pbt[lba]);
  3218. if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
  3219. (pbt[lba] & DISCARD_BLOCK)) {
  3220. nand_dbg_print(NAND_DBG_DEBUG,
  3221. "do_bt_garbage_collection: "
  3222. "Erasing Block tables present in block %d\n",
  3223. pba);
  3224. addr = FTL_Get_Physical_Block_Addr((u64)lba *
  3225. DeviceInfo.wBlockDataSize);
  3226. if (PASS == GLOB_FTL_Block_Erase(addr)) {
  3227. pbt[lba] &= (u32)(~DISCARD_BLOCK);
  3228. pbt[lba] |= (u32)(SPARE_BLOCK);
  3229. ret = PASS;
  3230. pBTBlocksNode[last_erased - FIRST_BT_ID] =
  3231. BTBLOCK_INVAL;
  3232. nand_dbg_print(NAND_DBG_DEBUG,
  3233. "resetting bt entry at index %d "
  3234. "value %d\n", i,
  3235. pBTBlocksNode[i - FIRST_BT_ID]);
  3236. if (last_erased == LAST_BT_ID)
  3237. last_erased = FIRST_BT_ID;
  3238. else
  3239. last_erased++;
  3240. } else {
  3241. MARK_BLOCK_AS_BAD(pbt[lba]);
  3242. }
  3243. }
  3244. }
  3245. BT_GC_Called = 0;
  3246. return ret;
  3247. }
  3248. #endif
  3249. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3250. * Function: GLOB_FTL_BT_Garbage_Collection
  3251. * Inputs: none
* Outputs: PASS / FAIL (returns the number of un-erased blocks)
  3253. * Description: Erases discarded blocks containing Block table
  3254. *
  3255. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  3256. int GLOB_FTL_BT_Garbage_Collection(void)
  3257. {
  3258. return do_bt_garbage_collection();
  3259. }
  3260. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3261. * Function: FTL_Replace_OneBlock
  3262. * Inputs: Block number 1
  3263. * Block number 2
  3264. * Outputs: Replaced Block Number
  3265. * Description: Interchange block table entries at wBlockNum and wReplaceNum
  3266. *
  3267. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Swap the block-table entries at indices blk and rep_blk.
 * The old mapping of blk is marked DISCARD_BLOCK (unless blk is bad,
 * in which case its entry moves over unchanged), and rep_blk's entry
 * loses its SPARE_BLOCK flag before being installed at blk.
 * Returns the new physical entry now mapped at blk, or BAD_BLOCK if
 * rep_blk == BAD_BLOCK (no replacement available).
 */
static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
{
	u32 tmp_blk;
	u32 replace_node = BAD_BLOCK;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (rep_blk != BAD_BLOCK) {
		/* Bad entries keep their flags; good ones are discarded */
		if (IS_BAD_BLOCK(blk))
			tmp_blk = pbt[blk];
		else
			tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);

		/* Promote the spare entry to an in-use entry */
		replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
		pbt[blk] = replace_node;
		pbt[rep_blk] = tmp_blk;

#if CMD_DMA
		/* Log both modified entries for CDMA-time replay */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = blk;
		p_BTableChangesDelta->BT_Entry_Value = pbt[blk];

		p_BTableChangesDelta->ValidFields = 0x0C;

		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = rep_blk;
		p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
		p_BTableChangesDelta->ValidFields = 0x0C;
#endif
	}
	return replace_node;
}
  3302. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3303. * Function: FTL_Write_Block_Table_Data
  3304. * Inputs: Block table size in pages
  3305. * Outputs: PASS=0 / FAIL=1
  3306. * Description: Write block table data in flash
  3307. * If first page and last page
  3308. * Write data+BT flag
  3309. * else
  3310. * Write data
  3311. * BT flag is a counter. Its value is incremented for block table
  3312. * write in a new Block
  3313. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Write the in-memory block table to flash at the current
 * (g_wBlockTableIndex, g_wBlockTableOffset) location. The first and
 * last pages carry the bt_flag counter and a signature in the spare
 * area; middle pages carry raw table data. Always returns PASS — a
 * failed program is only logged (the func_return label short-circuits
 * the remaining writes).
 */
static int FTL_Write_Block_Table_Data(void)
{
	u64 dwBlockTableAddr, pTempAddr;
	u32 Block;
	u16 Page, PageCount;
	u8 *tempBuf = tmp_buf_write_blk_table_data;
	int wBytesCopied;
	u16 bt_pages;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
			       __FILE__, __LINE__, __func__);

	/* Byte address of the table inside the current BT block */
	dwBlockTableAddr =
		(u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
		(u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
	pTempAddr = dwBlockTableAddr;

	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
			       "page= %d BlockTableIndex= %d "
			       "BlockTableOffset=%d\n", bt_pages,
			       g_wBlockTableIndex, g_wBlockTableOffset);

	Block = BLK_FROM_ADDR(pTempAddr);
	Page = PAGE_FROM_ADDR(pTempAddr, Block);
	PageCount = 1;

	/* Moving to a fresh BT block: bump the counter and record the
	 * new block in the g_pBTBlocks ring */
	if (bt_block_changed) {
		if (bt_flag == LAST_BT_ID) {
			bt_flag = FIRST_BT_ID;
			g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
		} else if (bt_flag < LAST_BT_ID) {
			bt_flag++;
			g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
		}

		/* Near the end of the counter range with old copies still
		 * around: reclaim them now */
		if ((bt_flag > (LAST_BT_ID-4)) &&
			g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
			BTBLOCK_INVAL) {
			bt_block_changed = 0;
			GLOB_FTL_BT_Garbage_Collection();
		}

		bt_block_changed = 0;
		nand_dbg_print(NAND_DBG_DEBUG,
			"Block Table Counter is %u Block %u\n",
			bt_flag, (unsigned int)Block);
	}

	/* First page: 3 zero bytes + bt_flag, then table data; pad the
	 * remainder with 0xff and sign the spare area */
	memset(tempBuf, 0, 3);
	tempBuf[3] = bt_flag;
	wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
			DeviceInfo.wPageDataSize - 4, 0);
	memset(&tempBuf[wBytesCopied + 4], 0xff,
		DeviceInfo.wPageSize - (wBytesCopied + 4));
	FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
		bt_flag);

#if CMD_DMA
	memcpy(g_pNextBlockTable, tempBuf,
		DeviceInfo.wPageSize * sizeof(u8));
	nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
		"Block %u Page %u\n", (unsigned int)Block, Page);
	if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
		Block, Page, 1,
		LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
		nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
			"%s, Line %d, Function: %s, "
			"new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, Block);
		goto func_return;
	}

	ftl_cmd_cnt++;
	/* CDMA keeps the buffer alive until the chain executes, so
	 * advance to a fresh staging area for the next write */
	g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
#else
	if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, Function: %s, "
			"new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, Block);
		goto func_return;
	}
#endif

	if (bt_pages > 1) {
		PageCount = bt_pages - 1;

		/* Middle pages (all but first and last): data only,
		 * written through the main area without spare */
		if (PageCount > 1) {
			wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
				DeviceInfo.wPageDataSize * (PageCount - 1),
				wBytesCopied);

#if CMD_DMA
			memcpy(g_pNextBlockTable, tempBuf,
				(PageCount - 1) * DeviceInfo.wPageDataSize);
			if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
				g_pNextBlockTable, Block, Page + 1,
				PageCount - 1)) {
				nand_dbg_print(NAND_DBG_WARN,
					"NAND Program fail in %s, Line %d, "
					"Function: %s, "
					"new Bad Block %d generated!\n",
					__FILE__, __LINE__, __func__,
					(int)Block);
				goto func_return;
			}

			ftl_cmd_cnt++;
			g_pNextBlockTable += (PageCount - 1) *
				DeviceInfo.wPageDataSize * sizeof(u8);
#else
			if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
				Block, Page + 1, PageCount - 1)) {
				nand_dbg_print(NAND_DBG_WARN,
					"NAND Program fail in %s, Line %d, "
					"Function: %s, "
					"new Bad Block %d generated!\n",
					__FILE__, __LINE__, __func__,
					(int)Block);
				goto func_return;
			}
#endif
		}

		/* Last page: remaining data, 0xff padding and the
		 * spare-area signature again */
		wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
				DeviceInfo.wPageDataSize, wBytesCopied);
		memset(&tempBuf[wBytesCopied], 0xff,
			DeviceInfo.wPageSize-wBytesCopied);
		FTL_Insert_Block_Table_Signature(
			&tempBuf[DeviceInfo.wPageDataSize], bt_flag);

#if CMD_DMA
		memcpy(g_pNextBlockTable, tempBuf,
			DeviceInfo.wPageSize * sizeof(u8));
		nand_dbg_print(NAND_DBG_DEBUG,
			"Writing the last Page of Block Table "
			"Block %u Page %u\n",
			(unsigned int)Block, Page + bt_pages - 1);
		if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
			g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
			LLD_CMD_FLAG_MODE_CDMA |
			LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
			nand_dbg_print(NAND_DBG_WARN,
				"NAND Program fail in %s, Line %d, "
				"Function: %s, new Bad Block %d generated!\n",
				__FILE__, __LINE__, __func__, Block);
			goto func_return;
		}
		ftl_cmd_cnt++;
#else
		if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
			Block, Page+bt_pages - 1, 1)) {
			nand_dbg_print(NAND_DBG_WARN,
				"NAND Program fail in %s, Line %d, "
				"Function: %s, "
				"new Bad Block %d generated!\n",
				__FILE__, __LINE__, __func__, Block);
			goto func_return;
		}
#endif
	}

	nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");

func_return:
	return PASS;
}
  3464. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3465. * Function: FTL_Replace_Block_Table
  3466. * Inputs: None
  3467. * Outputs: PASS=0 / FAIL=1
  3468. * Description: Get a new block to write block table
  3469. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  3470. static u32 FTL_Replace_Block_Table(void)
  3471. {
  3472. u32 blk;
  3473. int gc;
  3474. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  3475. __FILE__, __LINE__, __func__);
  3476. blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
  3477. if ((BAD_BLOCK == blk) && (PASS == gc)) {
  3478. GLOB_FTL_Garbage_Collection();
  3479. blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
  3480. }
  3481. if (BAD_BLOCK == blk)
  3482. printk(KERN_ERR "%s, %s: There is no spare block. "
  3483. "It should never happen\n",
  3484. __FILE__, __func__);
  3485. nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
  3486. return blk;
  3487. }
  3488. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3489. * Function: FTL_Replace_LWBlock
  3490. * Inputs: Block number
  3491. * Pointer to Garbage Collect flag
  3492. * Outputs:
  3493. * Description: Determine the least weared block by traversing
  3494. * block table
  3495. * Set Garbage collection to be called if number of spare
  3496. * block is less than Free Block Gate count
  3497. * Change Block table entry to map least worn block for current
  3498. * operation
  3499. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Map the least-worn spare block at table index wBlockNum.
 * If wBlockNum is itself still a spare, it is simply claimed (its
 * SPARE_BLOCK flag is cleared) and its entry returned. Otherwise the
 * whole table is scanned for the least-worn spare, *pGarbageCollect is
 * set to PASS when discarded/spare counts cross NUM_FREE_BLOCKS_GATE,
 * and FTL_Replace_OneBlock() performs the swap (returning BAD_BLOCK if
 * no spare was found).
 */
static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
{
	u32 i;
	u32 *pbt = (u32 *)g_pBlockTable;
	u8 wLeastWornCounter = 0xFF;
	u32 wLeastWornIndex = BAD_BLOCK;
	u32 wSpareBlockNum = 0;
	u32 wDiscardBlockNum = 0;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Fast path: the target slot is already a spare — claim it */
	if (IS_SPARE_BLOCK(wBlockNum)) {
		*pGarbageCollect = FAIL;
		pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
#if CMD_DMA
		/* Record the table change for CDMA replay */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt =
			ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
		p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
		p_BTableChangesDelta->ValidFields = 0x0C;
#endif
		return pbt[wBlockNum];
	}

	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		if (IS_DISCARDED_BLOCK(i))
			wDiscardBlockNum++;

		if (IS_SPARE_BLOCK(i)) {
			/*
			 * NOTE(review): masks ~BAD_BLOCK here, while the
			 * sibling FTL_Replace_MWBlock() masks ~SPARE_BLOCK
			 * — confirm whether this asymmetry is intended.
			 */
			u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
			if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
				printk(KERN_ERR "FTL_Replace_LWBlock: "
					"This should never occur!\n");
			if (g_pWearCounter[wPhysicalIndex -
				DeviceInfo.wSpectraStartBlock] <
				wLeastWornCounter) {
				wLeastWornCounter =
					g_pWearCounter[wPhysicalIndex -
					DeviceInfo.wSpectraStartBlock];
				wLeastWornIndex = i;
			}
			wSpareBlockNum++;
		}
	}

	nand_dbg_print(NAND_DBG_WARN,
		"FTL_Replace_LWBlock: Least Worn Counter %d\n",
		(int)wLeastWornCounter);

	/* Ask caller to garbage-collect when free blocks run low or
	 * discarded blocks pile up */
	if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
		(wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
		*pGarbageCollect = PASS;
	else
		*pGarbageCollect = FAIL;

	nand_dbg_print(NAND_DBG_DEBUG,
		"FTL_Replace_LWBlock: Discarded Blocks %u Spare"
		" Blocks %u\n",
		(unsigned int)wDiscardBlockNum,
		(unsigned int)wSpareBlockNum);

	return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
}
  3559. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3560. * Function: FTL_Replace_MWBlock
  3561. * Inputs: None
  3562. * Outputs: most worn spare block no./BAD_BLOCK
  3563. * Description: It finds most worn spare block.
  3564. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  3565. static u32 FTL_Replace_MWBlock(void)
  3566. {
  3567. u32 i;
  3568. u32 *pbt = (u32 *)g_pBlockTable;
  3569. u8 wMostWornCounter = 0;
  3570. u32 wMostWornIndex = BAD_BLOCK;
  3571. u32 wSpareBlockNum = 0;
  3572. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  3573. __FILE__, __LINE__, __func__);
  3574. for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
  3575. if (IS_SPARE_BLOCK(i)) {
  3576. u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
  3577. if (g_pWearCounter[wPhysicalIndex -
  3578. DeviceInfo.wSpectraStartBlock] >
  3579. wMostWornCounter) {
  3580. wMostWornCounter =
  3581. g_pWearCounter[wPhysicalIndex -
  3582. DeviceInfo.wSpectraStartBlock];
  3583. wMostWornIndex = wPhysicalIndex;
  3584. }
  3585. wSpareBlockNum++;
  3586. }
  3587. }
  3588. if (wSpareBlockNum <= 2)
  3589. return BAD_BLOCK;
  3590. return wMostWornIndex;
  3591. }
  3592. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3593. * Function: FTL_Replace_Block
  3594. * Inputs: Block Address
  3595. * Outputs: PASS=0 / FAIL=1
  3596. * Description: If block specified by blk_addr parameter is not free,
  3597. * replace it with the least worn block.
  3598. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Ensure the block containing blk_addr is usable for writing.
 * If it is still a spare, claim it (clear SPARE_BLOCK); otherwise map
 * in the least-worn spare via FTL_Replace_LWBlock and garbage-collect
 * if that scan says free blocks are running low.
 * Returns PASS, or the result of garbage collection.
 */
static int FTL_Replace_Block(u64 blk_addr)
{
	u32 current_blk = BLK_FROM_ADDR(blk_addr);
	u32 *pbt = (u32 *)g_pBlockTable;
	int wResult = PASS;
	int GarbageCollect = FAIL;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Fast path: the slot is a spare — just claim it */
	if (IS_SPARE_BLOCK(current_blk)) {
		pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
#if CMD_DMA
		/* Record the table change for CDMA replay */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt =
			ftl_cmd_cnt;
		p_BTableChangesDelta->BT_Index = current_blk;
		p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
		p_BTableChangesDelta->ValidFields = 0x0C ;
#endif
		return wResult;
	}

	FTL_Replace_LWBlock(current_blk, &GarbageCollect);

	if (PASS == GarbageCollect)
		wResult = GLOB_FTL_Garbage_Collection();

	return wResult;
}
  3626. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3627. * Function: GLOB_FTL_Is_BadBlock
  3628. * Inputs: block number to test
  3629. * Outputs: PASS (block is BAD) / FAIL (block is not bad)
  3630. * Description: test if this block number is flagged as bad
  3631. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  3632. int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
  3633. {
  3634. u32 *pbt = (u32 *)g_pBlockTable;
  3635. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  3636. __FILE__, __LINE__, __func__);
  3637. if (wBlockNum >= DeviceInfo.wSpectraStartBlock
  3638. && BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
  3639. return PASS;
  3640. else
  3641. return FAIL;
  3642. }
  3643. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3644. * Function: GLOB_FTL_Flush_Cache
  3645. * Inputs: none
  3646. * Outputs: PASS=0 / FAIL=1
  3647. * Description: flush all the cache blocks to flash
  3648. * if a cache block is not dirty, don't do anything with it
  3649. * else, write the block and update the block table
  3650. * Note: This function should be called at shutdown/power down.
  3651. * to write important data into device
  3652. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Write every dirty L1 cache item back to the L2 cache, flush the L2
 * cache, then persist the block table. Intended for shutdown/power-down.
 * Returns the result of FTL_Write_Block_Table().
 */
int GLOB_FTL_Flush_Cache(void)
{
	int i, ret;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	for (i = 0; i < CACHE_ITEM_NUM; i++) {
		if (SET == Cache.array[i].changed) {
#if CMD_DMA
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
			/* Snapshot the item state keyed by command count so
			 * the cache can be restored if the CDMA chain fails */
			int_cache[ftl_cmd_cnt].item = i;
			int_cache[ftl_cmd_cnt].cache.address =
				Cache.array[i].address;
			int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
#endif
#endif
			ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
			if (PASS == ret) {
				Cache.array[i].changed = CLEAR;
			} else {
				printk(KERN_ALERT "Failed when write back to L2 cache!\n");
				/* TODO - How to handle this? */
			}
		}
	}

	flush_l2_cache();

	return FTL_Write_Block_Table(FAIL);
}
  3680. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3681. * Function: GLOB_FTL_Page_Read
  3682. * Inputs: pointer to data
  3683. * logical address of data (u64 is LBA * Bytes/Page)
  3684. * Outputs: PASS=0 / FAIL=1
  3685. * Description: reads a page of data into RAM from the cache
  3686. * if the data is not already in cache, read from flash to cache
  3687. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  3688. int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
  3689. {
  3690. u16 cache_item;
  3691. int res = PASS;
  3692. nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
  3693. "page_addr: %llu\n", logical_addr);
  3694. cache_item = FTL_Cache_If_Hit(logical_addr);
  3695. if (UNHIT_CACHE_ITEM == cache_item) {
  3696. nand_dbg_print(NAND_DBG_DEBUG,
  3697. "GLOB_FTL_Page_Read: Cache not hit\n");
  3698. res = FTL_Cache_Write();
  3699. if (ERR == FTL_Cache_Read(logical_addr))
  3700. res = ERR;
  3701. cache_item = Cache.LRU;
  3702. }
  3703. FTL_Cache_Read_Page(data, logical_addr, cache_item);
  3704. return res;
  3705. }
  3706. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3707. * Function: GLOB_FTL_Page_Write
  3708. * Inputs: pointer to data
  3709. * address of data (ADDRESSTYPE is LBA * Bytes/Page)
  3710. * Outputs: PASS=0 / FAIL=1
  3711. * Description: writes a page of data from RAM to the cache
  3712. * if the data is not already in cache, write back the
  3713. * least recently used block and read the addressed block
  3714. * from flash to cache
  3715. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/*
 * Write one page at dwPageAddr from pData through the cache. On a miss
 * the LRU item is written back first; if the target block is flagged
 * bad it is replaced before the cache is refilled.
 * Returns PASS, FAIL (replacement failed) or ERR (cache fill failed).
 */
int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
{
	u16 cache_blk;
	u32 *pbt = (u32 *)g_pBlockTable;
	int wResult = PASS;

	nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
		"dwPageAddr: %llu\n", dwPageAddr);

	cache_blk = FTL_Cache_If_Hit(dwPageAddr);

	if (UNHIT_CACHE_ITEM == cache_blk) {
		/* Miss: free the LRU slot before filling it */
		wResult = FTL_Cache_Write();
		if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
			/* Target block is bad: swap in a replacement */
			wResult = FTL_Replace_Block(dwPageAddr);
			pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
			if (wResult == FAIL)
				return FAIL;
		}
		if (ERR == FTL_Cache_Read(dwPageAddr))
			wResult = ERR;
		cache_blk = Cache.LRU;
		FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
	} else {
#if CMD_DMA
		/* Keep ordering ahead of queued CDMA commands */
		FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
				LLD_CMD_FLAG_ORDER_BEFORE_REST);
#else
		FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
#endif
	}

	return wResult;
}
  3746. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3747. * Function: GLOB_FTL_Block_Erase
  3748. * Inputs: address of block to erase (now in byte format, should change to
  3749. * block format)
  3750. * Outputs: PASS=0 / FAIL=1
  3751. * Description: erases the specified block
  3752. * increments the erase count
  3753. * If erase count reaches its upper limit,call function to
  3754. * do the adjustment as per the relative erase count values
  3755. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_Block_Erase(u64 blk_addr)
{
	int status;
	u32 BlkIdx;
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);
	/* Convert the byte address into a physical block index. */
	BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);
	/* Blocks below wSpectraStartBlock are outside the managed region. */
	if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
		printk(KERN_ERR "GLOB_FTL_Block_Erase: "
			"This should never occur\n");
		return FAIL;
	}
#if CMD_DMA
	/* Queue the erase on the CDMA command chain. Note the failure is
	 * only logged here; execution happens later in the chain. */
	status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
	if (status == FAIL)
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, BlkIdx);
#else
	status = GLOB_LLD_Erase_Block(BlkIdx);
	if (status == FAIL) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__, BlkIdx);
		return status;
	}
#endif
	if (DeviceInfo.MLCDevice) {
		/* Erase removes read-disturb exposure: reset the block's
		 * read counter and make sure the in-progress block table
		 * page is flagged on flash. */
		g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}
	/* Account one erase cycle against this block. */
	g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;
#if CMD_DMA
	/* Journal the wear-counter change (ValidFields 0x30 = WC pair) so
	 * it can be replayed if the CDMA chain is interrupted. */
	p_BTableChangesDelta =
		(struct BTableChangesDelta *)g_pBTDelta_Free;
	g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
	p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
	p_BTableChangesDelta->WC_Index =
		BlkIdx - DeviceInfo.wSpectraStartBlock;
	p_BTableChangesDelta->WC_Entry_Value =
		g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
	p_BTableChangesDelta->ValidFields = 0x30;
	if (DeviceInfo.MLCDevice) {
		/* Journal the read-counter reset as well
		 * (ValidFields 0xC0 = RC pair). */
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt =
			ftl_cmd_cnt;
		p_BTableChangesDelta->RC_Index =
			BlkIdx - DeviceInfo.wSpectraStartBlock;
		p_BTableChangesDelta->RC_Entry_Value =
			g_pReadCounter[BlkIdx -
				DeviceInfo.wSpectraStartBlock];
		p_BTableChangesDelta->ValidFields = 0xC0;
	}
	ftl_cmd_cnt++;
#endif
	/* 0xFE is the saturation point; normalize all relative wear
	 * counters before any of them can overflow. */
	if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
		FTL_Adjust_Relative_Erase_Count(BlkIdx);
	return status;
}
  3822. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3823. * Function: FTL_Adjust_Relative_Erase_Count
  3824. * Inputs: index to block that was just incremented and is at the max
  3825. * Outputs: PASS=0 / FAIL=1
  3826. * Description: If any erase counts at MAX, adjusts erase count of every
  3827. * block by subtracting least worn
  3828. * counter from counter value of every entry in wear table
  3829. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
  3830. static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
  3831. {
  3832. u8 wLeastWornCounter = MAX_BYTE_VALUE;
  3833. u8 wWearCounter;
  3834. u32 i, wWearIndex;
  3835. u32 *pbt = (u32 *)g_pBlockTable;
  3836. int wResult = PASS;
  3837. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  3838. __FILE__, __LINE__, __func__);
  3839. for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
  3840. if (IS_BAD_BLOCK(i))
  3841. continue;
  3842. wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
  3843. if ((wWearIndex - DeviceInfo.wSpectraStartBlock) < 0)
  3844. printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count:"
  3845. "This should never occur\n");
  3846. wWearCounter = g_pWearCounter[wWearIndex -
  3847. DeviceInfo.wSpectraStartBlock];
  3848. if (wWearCounter < wLeastWornCounter)
  3849. wLeastWornCounter = wWearCounter;
  3850. }
  3851. if (wLeastWornCounter == 0) {
  3852. nand_dbg_print(NAND_DBG_WARN,
  3853. "Adjusting Wear Levelling Counters: Special Case\n");
  3854. g_pWearCounter[Index_of_MAX -
  3855. DeviceInfo.wSpectraStartBlock]--;
  3856. #if CMD_DMA
  3857. p_BTableChangesDelta =
  3858. (struct BTableChangesDelta *)g_pBTDelta_Free;
  3859. g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
  3860. p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
  3861. p_BTableChangesDelta->WC_Index =
  3862. Index_of_MAX - DeviceInfo.wSpectraStartBlock;
  3863. p_BTableChangesDelta->WC_Entry_Value =
  3864. g_pWearCounter[Index_of_MAX -
  3865. DeviceInfo.wSpectraStartBlock];
  3866. p_BTableChangesDelta->ValidFields = 0x30;
  3867. #endif
  3868. FTL_Static_Wear_Leveling();
  3869. } else {
  3870. for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
  3871. if (!IS_BAD_BLOCK(i)) {
  3872. wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
  3873. g_pWearCounter[wWearIndex -
  3874. DeviceInfo.wSpectraStartBlock] =
  3875. (u8)(g_pWearCounter
  3876. [wWearIndex -
  3877. DeviceInfo.wSpectraStartBlock] -
  3878. wLeastWornCounter);
  3879. #if CMD_DMA
  3880. p_BTableChangesDelta =
  3881. (struct BTableChangesDelta *)g_pBTDelta_Free;
  3882. g_pBTDelta_Free +=
  3883. sizeof(struct BTableChangesDelta);
  3884. p_BTableChangesDelta->ftl_cmd_cnt =
  3885. ftl_cmd_cnt;
  3886. p_BTableChangesDelta->WC_Index = wWearIndex -
  3887. DeviceInfo.wSpectraStartBlock;
  3888. p_BTableChangesDelta->WC_Entry_Value =
  3889. g_pWearCounter[wWearIndex -
  3890. DeviceInfo.wSpectraStartBlock];
  3891. p_BTableChangesDelta->ValidFields = 0x30;
  3892. #endif
  3893. }
  3894. }
  3895. return wResult;
  3896. }
  3897. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3898. * Function: FTL_Write_IN_Progress_Block_Table_Page
  3899. * Inputs: None
  3900. * Outputs: None
  3901. * Description: It writes in-progress flag page to the page next to
  3902. * block table
  3903. ***********************************************************************/
static int FTL_Write_IN_Progress_Block_Table_Page(void)
{
	int wResult = PASS;
	u16 bt_pages;
	u16 dwIPFPageAddr;
#if CMD_DMA
#else
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 wTempBlockTableIndex;
#endif
	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);
	/* The in-progress flag (IPF) page is written to the page that
	 * immediately follows the block-table pages. */
	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
	dwIPFPageAddr = g_wBlockTableOffset + bt_pages;
	nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
		"Block %d Page %d\n",
		g_wBlockTableIndex, dwIPFPageAddr);
#if CMD_DMA
	/* CDMA path: queue the write ahead of the rest of the chain.
	 * A FAIL here is only logged; no recovery is attempted. */
	wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
		g_wBlockTableIndex, dwIPFPageAddr, 1,
		LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
	if (wResult == FAIL) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__,
			g_wBlockTableIndex);
	}
	g_wBlockTableOffset = dwIPFPageAddr + 1;
	/* Journal the new block-table offset (ValidFields 0x01) so the
	 * delta can be replayed after an interrupted CDMA chain. */
	p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
	g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
	p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
	p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
	p_BTableChangesDelta->ValidFields = 0x01;
	ftl_cmd_cnt++;
#else
	wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
		g_wBlockTableIndex, dwIPFPageAddr, 1);
	if (wResult == FAIL) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated!\n",
			__FILE__, __LINE__, __func__,
			(int)g_wBlockTableIndex);
		/* Program failure: retire the current block-table block
		 * and switch to a replacement block.  ERR if no
		 * replacement is available. */
		MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
		wTempBlockTableIndex = FTL_Replace_Block_Table();
		bt_block_changed = 1;
		if (BAD_BLOCK == wTempBlockTableIndex)
			return ERR;
		g_wBlockTableIndex = wTempBlockTableIndex;
		g_wBlockTableOffset = 0;
		/* Block table tag is '00'. Means it's used one */
		pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
		return FAIL;
	}
	g_wBlockTableOffset = dwIPFPageAddr + 1;
#endif
	return wResult;
}
  3963. /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
  3964. * Function: FTL_Read_Disturbance
  3965. * Inputs: block address
  3966. * Outputs: PASS=0 / FAIL=1
  3967. * Description: used to handle read disturbance. Data in block that
  3968. * reaches its read limit is moved to new block
  3969. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int FTL_Read_Disturbance(u32 blk_addr)
{
	int wResult = FAIL;
	u32 *pbt = (u32 *) g_pBlockTable;
	u32 dwOldBlockAddr = blk_addr;
	u32 wBlockNum;
	u32 i;
	/* NOTE(review): read counters indexed below appear to be 16-bit
	 * (initial "least" value 0xFFFF) -- confirm against the
	 * g_pReadCounter declaration. */
	u32 wLeastReadCounter = 0xFFFF;
	u32 wLeastReadIndex = BAD_BLOCK;
	u32 wSpareBlockNum = 0;
	u32 wTempNode;
	u32 wReplacedNode;
	u8 *g_pTempBuf;
	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);
#if CMD_DMA
	/* Copy-back buffers are drawn from a fixed pool; overflow means
	 * too many pending copy-backs in the current CDMA chain. */
	g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
	cp_back_buf_idx++;
	if (cp_back_buf_idx > COPY_BACK_BUF_NUM) {
		printk(KERN_ERR "cp_back_buf_copies overflow! Exit."
		"Maybe too many pending commands in your CDMA chain.\n");
		return FAIL;
	}
#else
	g_pTempBuf = tmp_buf_read_disturbance;
#endif
	wBlockNum = FTL_Get_Block_Index(blk_addr);
	do {
		/* This is a bug.Here 'i' should be logical block number
		 * and start from 1 (0 is reserved for block table).
		 * Have fixed it. - Yunpeng 2008. 12. 19
		 */
		/* Scan the block table for the spare block whose physical
		 * block has the lowest read count; also count spares.
		 * NOTE(review): wSpareBlockNum and wLeastReadCounter are
		 * not reset when the do-while loops again after garbage
		 * collection -- verify this accumulation is intended. */
		for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
			if (IS_SPARE_BLOCK(i)) {
				u32 wPhysicalIndex =
					(u32)((~SPARE_BLOCK) & pbt[i]);
				if (g_pReadCounter[wPhysicalIndex -
					DeviceInfo.wSpectraStartBlock] <
					wLeastReadCounter) {
					wLeastReadCounter =
						g_pReadCounter[wPhysicalIndex -
						DeviceInfo.wSpectraStartBlock];
					wLeastReadIndex = i;
				}
				wSpareBlockNum++;
			}
		}
		if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
			/* Too few spares: reclaim some and retry, or give
			 * up if garbage collection itself failed. */
			wResult = GLOB_FTL_Garbage_Collection();
			if (PASS == wResult)
				continue;
			else
				break;
		} else {
			/* Swap the disturbed block with the least-read
			 * spare: the old physical block is marked DISCARD
			 * and parked in the spare slot. */
			wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
			wReplacedNode = (u32)((~SPARE_BLOCK) &
				pbt[wLeastReadIndex]);
#if CMD_DMA
			pbt[wBlockNum] = wReplacedNode;
			pbt[wLeastReadIndex] = wTempNode;
			/* Journal both block-table entries (0x0C = BT pair). */
			p_BTableChangesDelta =
				(struct BTableChangesDelta *)g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = wBlockNum;
			p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
			p_BTableChangesDelta->ValidFields = 0x0C;
			p_BTableChangesDelta =
				(struct BTableChangesDelta *)g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
			p_BTableChangesDelta->ftl_cmd_cnt =
				ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = wLeastReadIndex;
			p_BTableChangesDelta->BT_Entry_Value =
				pbt[wLeastReadIndex];
			p_BTableChangesDelta->ValidFields = 0x0C;
			/* Copy the whole block: read old, write new. */
			wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
				dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
				LLD_CMD_FLAG_MODE_CDMA);
			if (wResult == FAIL)
				return wResult;
			ftl_cmd_cnt++;
			if (wResult != FAIL) {
				if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
					g_pTempBuf, pbt[wBlockNum], 0,
					DeviceInfo.wPagesPerBlock)) {
					nand_dbg_print(NAND_DBG_WARN,
						"NAND Program fail in "
						"%s, Line %d, Function: %s, "
						"new Bad Block %d "
						"generated!\n",
						__FILE__, __LINE__, __func__,
						(int)pbt[wBlockNum]);
					wResult = FAIL;
					MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
				}
				ftl_cmd_cnt++;
			}
#else
			wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
				dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
			if (wResult == FAIL)
				return wResult;
			if (wResult != FAIL) {
				/* This is a bug. At this time, pbt[wBlockNum]
				is still the physical address of
				discard block, and should not be write.
				Have fixed it as below.
				-- Yunpeng 2008.12.19
				*/
				wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
					wReplacedNode, 0,
					DeviceInfo.wPagesPerBlock);
				if (wResult == FAIL) {
					nand_dbg_print(NAND_DBG_WARN,
						"NAND Program fail in "
						"%s, Line %d, Function: %s, "
						"new Bad Block %d "
						"generated!\n",
						__FILE__, __LINE__, __func__,
						(int)wReplacedNode);
					MARK_BLOCK_AS_BAD(wReplacedNode);
				} else {
					/* Copy succeeded: commit the swap
					 * in the block table. */
					pbt[wBlockNum] = wReplacedNode;
					pbt[wLeastReadIndex] = wTempNode;
				}
			}
			if ((wResult == PASS) && (g_cBlockTableStatus !=
				IN_PROGRESS_BLOCK_TABLE)) {
				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
				FTL_Write_IN_Progress_Block_Table_Page();
			}
#endif
		}
	} while (wResult != PASS)
	;
#if CMD_DMA
	/* ... */
#endif
	return wResult;
}